blob: f7c06b06972ca3cd60c74da7a7f9431173908f8a [file] [log] [blame]
tierno59d22d22018-09-25 18:10:19 +02001# -*- coding: utf-8 -*-
2
tierno2e215512018-11-28 09:37:52 +00003##
4# Copyright 2018 Telefonica S.A.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17##
18
tierno59d22d22018-09-25 18:10:19 +020019import asyncio
20import yaml
21import logging
22import logging.handlers
tierno59d22d22018-09-25 18:10:19 +020023import traceback
David Garciad4816682019-12-09 14:57:43 +010024import json
gcalvino35be9152018-12-20 09:33:12 +010025from jinja2 import Environment, Template, meta, TemplateError, TemplateNotFound, TemplateSyntaxError
tierno59d22d22018-09-25 18:10:19 +020026
tierno77677d92019-08-22 13:46:35 +000027from osm_lcm import ROclient
tierno69f0d382020-05-07 13:08:09 +000028from osm_lcm.ng_ro import NgRoClient, NgRoException
tierno744303e2020-01-13 16:46:31 +000029from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
calvinosanch9f9c6f22019-11-04 13:37:39 +010030from n2vc.k8s_helm_conn import K8sHelmConnector
Adam Israelbaacc302019-12-01 12:41:39 -050031from n2vc.k8s_juju_conn import K8sJujuConnector
tierno59d22d22018-09-25 18:10:19 +020032
tierno27246d82018-09-27 15:59:09 +020033from osm_common.dbbase import DbException
tierno59d22d22018-09-25 18:10:19 +020034from osm_common.fsbase import FsException
quilesj7e13aeb2019-10-08 13:34:55 +020035
36from n2vc.n2vc_juju_conn import N2VCJujuConnector
tiernof59ad6c2020-04-08 12:50:52 +000037from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
tierno59d22d22018-09-25 18:10:19 +020038
tierno27246d82018-09-27 15:59:09 +020039from copy import copy, deepcopy
tierno59d22d22018-09-25 18:10:19 +020040from http import HTTPStatus
41from time import time
tierno27246d82018-09-27 15:59:09 +020042from uuid import uuid4
tiernob9018152020-04-16 14:18:24 +000043from functools import partial
tierno59d22d22018-09-25 18:10:19 +020044
tierno69f0d382020-05-07 13:08:09 +000045__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
tierno59d22d22018-09-25 18:10:19 +020046
47
48class NsLcm(LcmBase):
tierno63de62e2018-10-31 16:38:52 +010049 timeout_vca_on_error = 5 * 60 # Time for charm from first time at blocked,error status to mark as failed
tierno744303e2020-01-13 16:46:31 +000050 timeout_ns_deploy = 2 * 3600 # default global timeout for deployment a ns
tiernoe876f672020-02-13 14:34:48 +000051 timeout_ns_terminate = 1800 # default global timeout for un deployment a ns
garciadeblasf9b04952019-04-09 18:53:58 +020052 timeout_charm_delete = 10 * 60
David Garciaf6919842020-05-21 16:41:07 +020053 timeout_primitive = 30 * 60 # timeout for primitive execution
54 timeout_progress_primitive = 10 * 60 # timeout for some progress in a primitive execution
tierno59d22d22018-09-25 18:10:19 +020055
kuuseac3a8882019-10-03 10:48:06 +020056 SUBOPERATION_STATUS_NOT_FOUND = -1
57 SUBOPERATION_STATUS_NEW = -2
58 SUBOPERATION_STATUS_SKIP = -3
tiernoa2143262020-03-27 16:20:40 +000059 task_name_deploy_vca = "Deploying VCA"
kuuseac3a8882019-10-03 10:48:06 +020060
    def __init__(self, db, msg, fs, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param db: database connection, shared with every connector created here
        :param msg: message bus client
        :param fs: filesystem storage client (package files, cloud-init, etc.)
        :param lcm_tasks: LCM task registry used to track asyncio tasks
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
            plus 'timeout', 'ro_config' and 'VCA' sections read below
        :param loop: asyncio event loop shared by all the connectors
        :return: None
        """
        super().__init__(
            db=db,
            msg=msg,
            fs=fs,
            logger=logging.getLogger('lcm.ns')
        )

        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        # "ng" flag selects the next-generation RO client (see RO client creation below)
        self.ng_ro = config["ro_config"].get("ng")
        # work on a shallow copy of the VCA config section
        self.vca_config = config["VCA"].copy()

        # create N2VC connector; _on_update_n2vc_db keeps the nsrs record in sync
        # with juju status changes
        self.n2vc = N2VCJujuConnector(
            db=self.db,
            fs=self.fs,
            log=self.logger,
            loop=self.loop,
            url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
            username=self.vca_config.get('user', None),
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db
        )

        # K8S connector for helm charts
        self.k8sclusterhelm = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # K8S connector for juju bundles
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # map the kdu deployment type found at descriptors to the proper K8S connector
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm,
            "chart": self.k8sclusterhelm,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }
        # create RO client
        if self.ng_ro:
            self.RO = NgRoClient(self.loop, **self.ro_config)
        else:
            self.RO = ROclient.ROClient(self.loop, **self.ro_config)
tierno59d22d22018-09-25 18:10:19 +0200122
quilesj3655ae02019-12-12 16:08:35 +0000123 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
quilesj7e13aeb2019-10-08 13:34:55 +0200124
quilesj3655ae02019-12-12 16:08:35 +0000125 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
126
127 try:
128 # TODO filter RO descriptor fields...
129
130 # write to database
131 db_dict = dict()
132 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
133 db_dict['deploymentStatus'] = ro_descriptor
134 self.update_db_2("nsrs", nsrs_id, db_dict)
135
136 except Exception as e:
137 self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
138
139 async def _on_update_n2vc_db(self, table, filter, path, updated_data):
140
quilesj69a722c2020-01-09 08:30:17 +0000141 # remove last dot from path (if exists)
142 if path.endswith('.'):
143 path = path[:-1]
144
quilesj3655ae02019-12-12 16:08:35 +0000145 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
146 # .format(table, filter, path, updated_data))
147
148 try:
149
150 nsr_id = filter.get('_id')
151
152 # read ns record from database
153 nsr = self.db.get_one(table='nsrs', q_filter=filter)
154 current_ns_status = nsr.get('nsState')
155
156 # get vca status for NS
quilesj69a722c2020-01-09 08:30:17 +0000157 status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)
quilesj3655ae02019-12-12 16:08:35 +0000158
159 # vcaStatus
160 db_dict = dict()
161 db_dict['vcaStatus'] = status_dict
162
163 # update configurationStatus for this VCA
164 try:
165 vca_index = int(path[path.rfind(".")+1:])
166
167 vca_list = deep_get(target_dict=nsr, key_list=('_admin', 'deployed', 'VCA'))
168 vca_status = vca_list[vca_index].get('status')
169
170 configuration_status_list = nsr.get('configurationStatus')
171 config_status = configuration_status_list[vca_index].get('status')
172
173 if config_status == 'BROKEN' and vca_status != 'failed':
174 db_dict['configurationStatus'][vca_index] = 'READY'
175 elif config_status != 'BROKEN' and vca_status == 'failed':
176 db_dict['configurationStatus'][vca_index] = 'BROKEN'
177 except Exception as e:
178 # not update configurationStatus
179 self.logger.debug('Error updating vca_index (ignore): {}'.format(e))
180
181 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
182 # if nsState = 'DEGRADED' check if all is OK
183 is_degraded = False
184 if current_ns_status in ('READY', 'DEGRADED'):
185 error_description = ''
186 # check machines
187 if status_dict.get('machines'):
188 for machine_id in status_dict.get('machines'):
189 machine = status_dict.get('machines').get(machine_id)
190 # check machine agent-status
191 if machine.get('agent-status'):
192 s = machine.get('agent-status').get('status')
193 if s != 'started':
194 is_degraded = True
195 error_description += 'machine {} agent-status={} ; '.format(machine_id, s)
196 # check machine instance status
197 if machine.get('instance-status'):
198 s = machine.get('instance-status').get('status')
199 if s != 'running':
200 is_degraded = True
201 error_description += 'machine {} instance-status={} ; '.format(machine_id, s)
202 # check applications
203 if status_dict.get('applications'):
204 for app_id in status_dict.get('applications'):
205 app = status_dict.get('applications').get(app_id)
206 # check application status
207 if app.get('status'):
208 s = app.get('status').get('status')
209 if s != 'active':
210 is_degraded = True
211 error_description += 'application {} status={} ; '.format(app_id, s)
212
213 if error_description:
214 db_dict['errorDescription'] = error_description
215 if current_ns_status == 'READY' and is_degraded:
216 db_dict['nsState'] = 'DEGRADED'
217 if current_ns_status == 'DEGRADED' and not is_degraded:
218 db_dict['nsState'] = 'READY'
219
220 # write to database
221 self.update_db_2("nsrs", nsr_id, db_dict)
222
tierno51183952020-04-03 15:48:18 +0000223 except (asyncio.CancelledError, asyncio.TimeoutError):
224 raise
quilesj3655ae02019-12-12 16:08:35 +0000225 except Exception as e:
226 self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +0200227
gcalvino35be9152018-12-20 09:33:12 +0100228 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
tierno59d22d22018-09-25 18:10:19 +0200229 """
230 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
231 :param vnfd: input vnfd
232 :param new_id: overrides vnf id if provided
tierno8a518872018-12-21 13:42:14 +0000233 :param additionalParams: Instantiation params for VNFs provided
gcalvino35be9152018-12-20 09:33:12 +0100234 :param nsrId: Id of the NSR
tierno59d22d22018-09-25 18:10:19 +0200235 :return: copy of vnfd
236 """
tierno59d22d22018-09-25 18:10:19 +0200237 try:
238 vnfd_RO = deepcopy(vnfd)
tierno8a518872018-12-21 13:42:14 +0000239 # remove unused by RO configuration, monitoring, scaling and internal keys
tierno59d22d22018-09-25 18:10:19 +0200240 vnfd_RO.pop("_id", None)
241 vnfd_RO.pop("_admin", None)
tierno8a518872018-12-21 13:42:14 +0000242 vnfd_RO.pop("vnf-configuration", None)
243 vnfd_RO.pop("monitoring-param", None)
244 vnfd_RO.pop("scaling-group-descriptor", None)
calvinosanch9f9c6f22019-11-04 13:37:39 +0100245 vnfd_RO.pop("kdu", None)
246 vnfd_RO.pop("k8s-cluster", None)
tierno59d22d22018-09-25 18:10:19 +0200247 if new_id:
248 vnfd_RO["id"] = new_id
tierno8a518872018-12-21 13:42:14 +0000249
250 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
251 for vdu in get_iterable(vnfd_RO, "vdu"):
252 cloud_init_file = None
253 if vdu.get("cloud-init-file"):
tierno59d22d22018-09-25 18:10:19 +0200254 base_folder = vnfd["_admin"]["storage"]
gcalvino35be9152018-12-20 09:33:12 +0100255 cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
256 vdu["cloud-init-file"])
257 with self.fs.file_open(cloud_init_file, "r") as ci_file:
258 cloud_init_content = ci_file.read()
tierno59d22d22018-09-25 18:10:19 +0200259 vdu.pop("cloud-init-file", None)
tierno8a518872018-12-21 13:42:14 +0000260 elif vdu.get("cloud-init"):
gcalvino35be9152018-12-20 09:33:12 +0100261 cloud_init_content = vdu["cloud-init"]
tierno8a518872018-12-21 13:42:14 +0000262 else:
263 continue
264
265 env = Environment()
266 ast = env.parse(cloud_init_content)
267 mandatory_vars = meta.find_undeclared_variables(ast)
268 if mandatory_vars:
269 for var in mandatory_vars:
270 if not additionalParams or var not in additionalParams.keys():
271 raise LcmException("Variable '{}' defined at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
272 "file, must be provided in the instantiation parameters inside the "
273 "'additionalParamsForVnf' block".format(var, vnfd["id"], vdu["id"]))
274 template = Template(cloud_init_content)
tierno2b611dd2019-01-11 10:30:57 +0000275 cloud_init_content = template.render(additionalParams or {})
gcalvino35be9152018-12-20 09:33:12 +0100276 vdu["cloud-init"] = cloud_init_content
tierno8a518872018-12-21 13:42:14 +0000277
tierno59d22d22018-09-25 18:10:19 +0200278 return vnfd_RO
279 except FsException as e:
tierno8a518872018-12-21 13:42:14 +0000280 raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".
tiernoda964822019-01-14 15:53:47 +0000281 format(vnfd["id"], vdu["id"], cloud_init_file, e))
tierno8a518872018-12-21 13:42:14 +0000282 except (TemplateError, TemplateNotFound, TemplateSyntaxError) as e:
283 raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".
284 format(vnfd["id"], vdu["id"], e))
tierno59d22d22018-09-25 18:10:19 +0200285
    def _ns_params_2_RO(self, ns_params, nsd, vnfd_dict, db_vnfrs, n2vc_key_list):
        """
        Creates a RO ns descriptor from OSM ns_instantiate params
        :param ns_params: OSM instantiate params
        :param nsd: nsd database content, used to resolve member-vnf-index references
        :param vnfd_dict: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index. {member-vnf-index: {vnfr_object}, ...}
        :param n2vc_key_list: ssh public keys to inject at the vdus that need management access
        :return: The RO ns descriptor
        """
        # caches of already translated vim/wim accounts (OSM id -> RO id)
        vim_2_RO = {}
        wim_2_RO = {}
        # TODO feature 1417: Check that no instantiation is set over PDU
        # check if PDU forces a concrete vim-network-id and add it
        # check if PDU contains a SDN-assist info (dpid, switch, port) and pass it to RO

        def vim_account_2_RO(vim_account):
            # translate an OSM vim account id to the RO datacenter id, caching results
            if vim_account in vim_2_RO:
                return vim_2_RO[vim_account]

            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
            if db_vim["_admin"]["operationalState"] != "ENABLED":
                raise LcmException("VIM={} is not available. operationalState={}".format(
                    vim_account, db_vim["_admin"]["operationalState"]))
            RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
            vim_2_RO[vim_account] = RO_vim_id
            return RO_vim_id

        def wim_account_2_RO(wim_account):
            # translate an OSM wim account id to the RO account id, caching results;
            # non-string values are passed through unchanged
            if isinstance(wim_account, str):
                if wim_account in wim_2_RO:
                    return wim_2_RO[wim_account]

                db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
                if db_wim["_admin"]["operationalState"] != "ENABLED":
                    raise LcmException("WIM={} is not available. operationalState={}".format(
                        wim_account, db_wim["_admin"]["operationalState"]))
                RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
                wim_2_RO[wim_account] = RO_wim_id
                return RO_wim_id
            else:
                return wim_account

        def ip_profile_2_RO(ip_profile):
            # adapt an OSM IM ip-profile to the key names/values that RO expects
            RO_ip_profile = deepcopy((ip_profile))
            if "dns-server" in RO_ip_profile:
                if isinstance(RO_ip_profile["dns-server"], list):
                    RO_ip_profile["dns-address"] = []
                    for ds in RO_ip_profile.pop("dns-server"):
                        RO_ip_profile["dns-address"].append(ds['address'])
                else:
                    RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
            if RO_ip_profile.get("ip-version") == "ipv4":
                RO_ip_profile["ip-version"] = "IPv4"
            if RO_ip_profile.get("ip-version") == "ipv6":
                RO_ip_profile["ip-version"] = "IPv6"
            if "dhcp-params" in RO_ip_profile:
                RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
            return RO_ip_profile

        if not ns_params:
            return None
        RO_ns_params = {
            # "name": ns_params["nsName"],
            # "description": ns_params.get("nsDescription"),
            "datacenter": vim_account_2_RO(ns_params["vimAccountId"]),
            "wim_account": wim_account_2_RO(ns_params.get("wimAccountId")),
            # "scenario": ns_params["nsdId"],
        }
        # set vim_account of each vnf if different from general vim_account.
        # Get this information from <vnfr> database content, key vim-account-id
        # Vim account can be set by placement_engine and it may be different from
        # the instantiate parameters (vnfs.member-vnf-index.datacenter).
        for vnf_index, vnfr in db_vnfrs.items():
            if vnfr.get("vim-account-id") and vnfr["vim-account-id"] != ns_params["vimAccountId"]:
                populate_dict(RO_ns_params, ("vnfs", vnf_index, "datacenter"), vim_account_2_RO(vnfr["vim-account-id"]))

        # determine which vdus need the n2vc ssh keys injected: those reachable through
        # the management interface when ssh access is required by the configuration
        n2vc_key_list = n2vc_key_list or []
        for vnfd_ref, vnfd in vnfd_dict.items():
            vdu_needed_access = []
            mgmt_cp = None
            if vnfd.get("vnf-configuration"):
                ssh_required = deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required"))
                if ssh_required and vnfd.get("mgmt-interface"):
                    if vnfd["mgmt-interface"].get("vdu-id"):
                        vdu_needed_access.append(vnfd["mgmt-interface"]["vdu-id"])
                    elif vnfd["mgmt-interface"].get("cp"):
                        mgmt_cp = vnfd["mgmt-interface"]["cp"]

            for vdu in vnfd.get("vdu", ()):
                if vdu.get("vdu-configuration"):
                    ssh_required = deep_get(vdu, ("vdu-configuration", "config-access", "ssh-access", "required"))
                    if ssh_required:
                        vdu_needed_access.append(vdu["id"])
                elif mgmt_cp:
                    # NOTE(review): vdu.get("interface") may be None for a vdu without
                    # interfaces, which would raise TypeError here — confirm whether the
                    # IM guarantees at least one interface per vdu
                    for vdu_interface in vdu.get("interface"):
                        if vdu_interface.get("external-connection-point-ref") and \
                                vdu_interface["external-connection-point-ref"] == mgmt_cp:
                            vdu_needed_access.append(vdu["id"])
                            mgmt_cp = None
                            break

            if vdu_needed_access:
                for vnf_member in nsd.get("constituent-vnfd"):
                    if vnf_member["vnfd-id-ref"] != vnfd_ref:
                        continue
                    for vdu in vdu_needed_access:
                        populate_dict(RO_ns_params,
                                      ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu, "mgmt_keys"),
                                      n2vc_key_list)

        if ns_params.get("vduImage"):
            RO_ns_params["vduImage"] = ns_params["vduImage"]

        if ns_params.get("ssh_keys"):
            RO_ns_params["cloud-config"] = {"key-pairs": ns_params["ssh_keys"]}
        # per-vnf instantiation parameters: volumes, interfaces and internal vlds
        for vnf_params in get_iterable(ns_params, "vnf"):
            for constituent_vnfd in nsd["constituent-vnfd"]:
                if constituent_vnfd["member-vnf-index"] == vnf_params["member-vnf-index"]:
                    vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                    break
            else:
                raise LcmException("Invalid instantiate parameter vnf:member-vnf-index={} is not present at nsd:"
                                   "constituent-vnfd".format(vnf_params["member-vnf-index"]))

            for vdu_params in get_iterable(vnf_params, "vdu"):
                # TODO feature 1417: check that this VDU exist and it is not a PDU
                if vdu_params.get("volume"):
                    for volume_params in vdu_params["volume"]:
                        if volume_params.get("vim-volume-id"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "devices", volume_params["name"], "vim_id"),
                                          volume_params["vim-volume-id"])
                if vdu_params.get("interface"):
                    for interface_params in vdu_params["interface"]:
                        if interface_params.get("ip-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "ip_address"),
                                          interface_params["ip-address"])
                        if interface_params.get("mac-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "mac_address"),
                                          interface_params["mac-address"])
                        if interface_params.get("floating-ip-required"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "floating-ip"),
                                          interface_params["floating-ip-required"])

            for internal_vld_params in get_iterable(vnf_params, "internal-vld"):
                if internal_vld_params.get("vim-network-name"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-name"),
                                  internal_vld_params["vim-network-name"])
                if internal_vld_params.get("vim-network-id"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-id"),
                                  internal_vld_params["vim-network-id"])
                if internal_vld_params.get("ip-profile"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "ip-profile"),
                                  ip_profile_2_RO(internal_vld_params["ip-profile"]))
                if internal_vld_params.get("provider-network"):

                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "provider-network"),
                                  internal_vld_params["provider-network"].copy())

                for icp_params in get_iterable(internal_vld_params, "internal-connection-point"):
                    # look for interface
                    iface_found = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for vdu_interface in vdu_descriptor["interface"]:
                            if vdu_interface.get("internal-connection-point-ref") == icp_params["id-ref"]:
                                if icp_params.get("ip-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "ip_address"),
                                                  icp_params["ip-address"])

                                if icp_params.get("mac-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "mac_address"),
                                                  icp_params["mac-address"])
                                iface_found = True
                                break
                        if iface_found:
                            break
                    else:
                        raise LcmException("Invalid instantiate parameter vnf:member-vnf-index[{}]:"
                                           "internal-vld:id-ref={} is not present at vnfd:internal-"
                                           "connection-point".format(vnf_params["member-vnf-index"],
                                                                     icp_params["id-ref"]))

        # ns-level vld instantiation parameters
        for vld_params in get_iterable(ns_params, "vld"):
            if "ip-profile" in vld_params:
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "ip-profile"),
                              ip_profile_2_RO(vld_params["ip-profile"]))

            if vld_params.get("provider-network"):

                populate_dict(RO_ns_params, ("networks", vld_params["name"], "provider-network"),
                              vld_params["provider-network"].copy())

            # NOTE(review): the trailing comma after populate_dict(...) below turns the
            # statement into a one-element tuple; harmless but probably unintended
            if "wimAccountId" in vld_params and vld_params["wimAccountId"] is not None:
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "wim_account"),
                              wim_account_2_RO(vld_params["wimAccountId"])),
            if vld_params.get("vim-network-name"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-name"], dict):
                    for vim_account, vim_net in vld_params["vim-network-name"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-name"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)

            if vld_params.get("vim-network-id"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-id"], dict):
                    for vim_account, vim_net in vld_params["vim-network-id"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-id"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
            if vld_params.get("ns-net"):
                if isinstance(vld_params["ns-net"], dict):
                    for vld_id, instance_scenario_id in vld_params["ns-net"].items():
                        RO_vld_ns_net = {"instance_scenario_id": instance_scenario_id, "osm_id": vld_id}
                        populate_dict(RO_ns_params, ("networks", vld_params["name"], "use-network"), RO_vld_ns_net)
            if "vnfd-connection-point-ref" in vld_params:
                for cp_params in vld_params["vnfd-connection-point-ref"]:
                    # look for interface
                    for constituent_vnfd in nsd["constituent-vnfd"]:
                        if constituent_vnfd["member-vnf-index"] == cp_params["member-vnf-index-ref"]:
                            vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={} "
                            "is not present at nsd:constituent-vnfd".format(cp_params["member-vnf-index-ref"]))
                    match_cp = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for interface_descriptor in vdu_descriptor["interface"]:
                            if interface_descriptor.get("external-connection-point-ref") == \
                                    cp_params["vnfd-connection-point-ref"]:
                                match_cp = True
                                break
                        if match_cp:
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={}:"
                            "vnfd-connection-point-ref={} is not present at vnfd={}".format(
                                cp_params["member-vnf-index-ref"],
                                cp_params["vnfd-connection-point-ref"],
                                vnf_descriptor["id"]))
                    if cp_params.get("ip-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "ip_address"),
                                      cp_params["ip-address"])
                    if cp_params.get("mac-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "mac_address"),
                                      cp_params["mac-address"])
        return RO_ns_params
562
tierno27246d82018-09-27 15:59:09 +0200563 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None):
564 # make a copy to do not change
565 vdu_create = copy(vdu_create)
566 vdu_delete = copy(vdu_delete)
567
568 vdurs = db_vnfr.get("vdur")
569 if vdurs is None:
570 vdurs = []
571 vdu_index = len(vdurs)
572 while vdu_index:
573 vdu_index -= 1
574 vdur = vdurs[vdu_index]
575 if vdur.get("pdu-type"):
576 continue
577 vdu_id_ref = vdur["vdu-id-ref"]
578 if vdu_create and vdu_create.get(vdu_id_ref):
579 for index in range(0, vdu_create[vdu_id_ref]):
580 vdur = deepcopy(vdur)
581 vdur["_id"] = str(uuid4())
582 vdur["count-index"] += 1
583 vdurs.insert(vdu_index+1+index, vdur)
584 del vdu_create[vdu_id_ref]
585 if vdu_delete and vdu_delete.get(vdu_id_ref):
586 del vdurs[vdu_index]
587 vdu_delete[vdu_id_ref] -= 1
588 if not vdu_delete[vdu_id_ref]:
589 del vdu_delete[vdu_id_ref]
590 # check all operations are done
591 if vdu_create or vdu_delete:
592 raise LcmException("Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
593 vdu_create))
594 if vdu_delete:
595 raise LcmException("Error scaling IN VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
596 vdu_delete))
597
598 vnfr_update = {"vdur": vdurs}
599 db_vnfr["vdur"] = vdurs
600 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
601
tiernof578e552018-11-08 19:07:20 +0100602 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
603 """
604 Updates database nsr with the RO info for the created vld
605 :param ns_update_nsr: dictionary to be filled with the updated info
606 :param db_nsr: content of db_nsr. This is also modified
607 :param nsr_desc_RO: nsr descriptor from RO
608 :return: Nothing, LcmException is raised on errors
609 """
610
611 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
612 for net_RO in get_iterable(nsr_desc_RO, "nets"):
613 if vld["id"] != net_RO.get("ns_net_osm_id"):
614 continue
615 vld["vim-id"] = net_RO.get("vim_net_id")
616 vld["name"] = net_RO.get("vim_name")
617 vld["status"] = net_RO.get("status")
618 vld["status-detailed"] = net_RO.get("error_msg")
619 ns_update_nsr["vld.{}".format(vld_index)] = vld
620 break
621 else:
622 raise LcmException("ns_update_nsr: Not found vld={} at RO info".format(vld["id"]))
623
tiernoe876f672020-02-13 14:34:48 +0000624 def set_vnfr_at_error(self, db_vnfrs, error_text):
625 try:
626 for db_vnfr in db_vnfrs.values():
627 vnfr_update = {"status": "ERROR"}
628 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
629 if "status" not in vdur:
630 vdur["status"] = "ERROR"
631 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
632 if error_text:
633 vdur["status-detailed"] = str(error_text)
634 vnfr_update["vdur.{}.status-detailed".format(vdu_index)] = "ERROR"
635 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
636 except DbException as e:
637 self.logger.error("Cannot update vnf. {}".format(e))
638
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content; entries are modified in place
        :param nsr_desc_RO: nsr descriptor from RO (expects keys "vnfs" and optionally "nets")
        :return: Nothing. LcmException is raised if any vnf/vdu/interface/vld cannot be matched against
            the RO info; LcmExceptionNoMgmtIP if a vnf with vdus has no management IP address yet
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf entry matching this member-vnf-index (for/else raises if none matches)
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';'; keep the first one
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed at the VIM, nothing to update from RO
                        continue
                    # find the RO vm matching this vdu id AND count-index (scaling replicas share the id)
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            # same vdu id but another replica: advance the replica counter and keep looking
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            # keep only the first address when RO reports several separated by ';'
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy ip/mac per interface, matched by the RO "internal_name"
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get("ip_address")
                                    ifacer["mac-address"] = interface_RO.get("mac_address")
                                    break
                            else:
                                raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                                   "from VIM info"
                                                   .format(vnf_index, vdur["vdu-id-ref"], ifacer["name"]))
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                                           "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))

                # update vnf internal vlds with the network info reported by RO
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]))

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index))
tierno59d22d22018-09-25 18:10:19 +0200711
tierno5ee02052019-12-05 19:55:02 +0000712 def _get_ns_config_info(self, nsr_id):
tiernoc3f2a822019-11-05 13:45:04 +0000713 """
714 Generates a mapping between vnf,vdu elements and the N2VC id
tierno5ee02052019-12-05 19:55:02 +0000715 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
tiernoc3f2a822019-11-05 13:45:04 +0000716 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
717 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
718 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
719 """
tierno5ee02052019-12-05 19:55:02 +0000720 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
721 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernoc3f2a822019-11-05 13:45:04 +0000722 mapping = {}
723 ns_config_info = {"osm-config-mapping": mapping}
724 for vca in vca_deployed_list:
725 if not vca["member-vnf-index"]:
726 continue
727 if not vca["vdu_id"]:
728 mapping[vca["member-vnf-index"]] = vca["application"]
729 else:
730 mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\
731 vca["application"]
732 return ns_config_info
733
734 @staticmethod
735 def _get_initial_config_primitive_list(desc_primitive_list, vca_deployed):
736 """
737 Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal
738 primitives as verify-ssh-credentials, or config when needed
739 :param desc_primitive_list: information of the descriptor
740 :param vca_deployed: information of the deployed, needed for known if it is related to an NS, VNF, VDU and if
741 this element contains a ssh public key
742 :return: The modified list. Can ba an empty list, but always a list
743 """
744 if desc_primitive_list:
745 primitive_list = desc_primitive_list.copy()
746 else:
747 primitive_list = []
748 # look for primitive config, and get the position. None if not present
749 config_position = None
750 for index, primitive in enumerate(primitive_list):
751 if primitive["name"] == "config":
752 config_position = index
753 break
754
755 # for NS, add always a config primitive if not present (bug 874)
756 if not vca_deployed["member-vnf-index"] and config_position is None:
757 primitive_list.insert(0, {"name": "config", "parameter": []})
758 config_position = 0
759 # for VNF/VDU add verify-ssh-credentials after config
760 if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"):
761 primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []})
762 return primitive_list
763
tierno69f0d382020-05-07 13:08:09 +0000764 async def _instantiate_ng_ro(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
765 n2vc_key_list, stage, start_deploy, timeout_ns_deploy):
766 nslcmop_id = db_nslcmop["_id"]
767 target = {
768 "name": db_nsr["name"],
769 "ns": {"vld": []},
770 "vnf": [],
771 "image": deepcopy(db_nsr["image"]),
772 "flavor": deepcopy(db_nsr["flavor"]),
773 "action_id": nslcmop_id,
774 }
775 for image in target["image"]:
776 image["vim_info"] = []
777 for flavor in target["flavor"]:
778 flavor["vim_info"] = []
779
780 ns_params = db_nslcmop.get("operationParams")
781 ssh_keys = []
782 if ns_params.get("ssh_keys"):
783 ssh_keys += ns_params.get("ssh_keys")
784 if n2vc_key_list:
785 ssh_keys += n2vc_key_list
786
787 cp2target = {}
788 for vld_index, vld in enumerate(nsd.get("vld")):
789 target_vld = {"id": vld["id"],
790 "name": vld["name"],
791 "mgmt-network": vld.get("mgmt-network", False),
792 "type": vld.get("type"),
793 "vim_info": [{"vim-network-name": vld.get("vim-network-name"),
794 "vim_account_id": ns_params["vimAccountId"]}],
795 }
796 for cp in vld["vnfd-connection-point-ref"]:
797 cp2target["member_vnf:{}.{}".format(cp["member-vnf-index-ref"], cp["vnfd-connection-point-ref"])] = \
798 "nsrs:{}:vld.{}".format(nsr_id, vld_index)
799 target["ns"]["vld"].append(target_vld)
800 for vnfr in db_vnfrs.values():
801 vnfd = db_vnfds_ref[vnfr["vnfd-ref"]]
802 target_vnf = deepcopy(vnfr)
803 for vld in target_vnf.get("vld", ()):
804 # check if connected to a ns.vld
805 vnf_cp = next((cp for cp in vnfd.get("connection-point", ()) if
806 cp.get("internal-vld-ref") == vld["id"]), None)
807 if vnf_cp:
808 ns_cp = "member_vnf:{}.{}".format(vnfr["member-vnf-index-ref"], vnf_cp["id"])
809 if cp2target.get(ns_cp):
810 vld["target"] = cp2target[ns_cp]
811 vld["vim_info"] = [{"vim-network-name": vld.get("vim-network-name"),
812 "vim_account_id": vnfr["vim-account-id"]}]
813
814 for vdur in target_vnf.get("vdur", ()):
815 vdur["vim_info"] = [{"vim_account_id": vnfr["vim-account-id"]}]
816 vdud_index, vdud = next(k for k in enumerate(vnfd["vdu"]) if k[1]["id"] == vdur["vdu-id-ref"])
817 # vdur["additionalParams"] = vnfr.get("additionalParamsForVnf") # TODO additional params for VDU
818
819 if ssh_keys:
820 if deep_get(vdud, ("vdu-configuration", "config-access", "ssh-access", "required")):
821 vdur["ssh-keys"] = ssh_keys
822 vdur["ssh-access-required"] = True
823 elif deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required")) and \
824 any(iface.get("mgmt-vnf") for iface in vdur["interfaces"]):
825 vdur["ssh-keys"] = ssh_keys
826 vdur["ssh-access-required"] = True
827
828 # cloud-init
829 if vdud.get("cloud-init-file"):
830 vdur["cloud-init"] = "{}:file:{}".format(vnfd["_id"], vdud.get("cloud-init-file"))
831 elif vdud.get("cloud-init"):
832 vdur["cloud-init"] = "{}:vdu:{}".format(vnfd["_id"], vdud_index)
833
834 # flavor
835 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
836 if not next((vi for vi in ns_flavor["vim_info"] if
837 vi and vi.get("vim_account_id") == vnfr["vim-account-id"]), None):
838 ns_flavor["vim_info"].append({"vim_account_id": vnfr["vim-account-id"]})
839 # image
840 ns_image = target["image"][int(vdur["ns-image-id"])]
841 if not next((vi for vi in ns_image["vim_info"] if
842 vi and vi.get("vim_account_id") == vnfr["vim-account-id"]), None):
843 ns_image["vim_info"].append({"vim_account_id": vnfr["vim-account-id"]})
844
845 vdur["vim_info"] = [{"vim_account_id": vnfr["vim-account-id"]}]
846 target["vnf"].append(target_vnf)
847
848 desc = await self.RO.deploy(nsr_id, target)
849 action_id = desc["action_id"]
850 await self._wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage)
851
852 # Updating NSR
853 db_nsr_update = {
854 "_admin.deployed.RO.operational-status": "running",
855 "detailed-status": " ".join(stage)
856 }
857 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
858 self.update_db_2("nsrs", nsr_id, db_nsr_update)
859 self._write_op_status(nslcmop_id, stage)
860 self.logger.debug(logging_text + "ns deployed at RO. RO_id={}".format(action_id))
861 return
862
863 async def _wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_time, timeout, stage):
864 detailed_status_old = None
865 db_nsr_update = {}
866 while time() <= start_time + timeout:
867 desc_status = await self.RO.status(nsr_id, action_id)
868 if desc_status["status"] == "FAILED":
869 raise NgRoException(desc_status["details"])
870 elif desc_status["status"] == "BUILD":
871 stage[2] = "VIM: ({})".format(desc_status["details"])
872 elif desc_status["status"] == "DONE":
873 stage[2] = "Deployed at VIM"
874 break
875 else:
876 assert False, "ROclient.check_ns_status returns unknown {}".format(desc_status["status"])
877 if stage[2] != detailed_status_old:
878 detailed_status_old = stage[2]
879 db_nsr_update["detailed-status"] = " ".join(stage)
880 self.update_db_2("nsrs", nsr_id, db_nsr_update)
881 self._write_op_status(nslcmop_id, stage)
882 await asyncio.sleep(5, loop=self.loop)
883 else: # timeout_ns_deploy
884 raise NgRoException("Timeout waiting ns to deploy")
885
886 async def _terminate_ng_ro(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
887 db_nsr_update = {}
888 failed_detail = []
889 action_id = None
890 start_deploy = time()
891 try:
892 target = {
893 "ns": {"vld": []},
894 "vnf": [],
895 "image": [],
896 "flavor": [],
897 }
898 desc = await self.RO.deploy(nsr_id, target)
899 action_id = desc["action_id"]
900 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
901 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
902 self.logger.debug(logging_text + "ns terminate action at RO. action_id={}".format(action_id))
903
904 # wait until done
905 delete_timeout = 20 * 60 # 20 minutes
906 await self._wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage)
907
908 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
909 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
910 # delete all nsr
911 await self.RO.delete(nsr_id)
912 except Exception as e:
913 if isinstance(e, NgRoException) and e.http_code == 404: # not found
914 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
915 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
916 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
917 self.logger.debug(logging_text + "RO_action_id={} already deleted".format(action_id))
918 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
919 failed_detail.append("delete conflict: {}".format(e))
920 self.logger.debug(logging_text + "RO_action_id={} delete conflict: {}".format(action_id, e))
921 else:
922 failed_detail.append("delete error: {}".format(e))
923 self.logger.error(logging_text + "RO_action_id={} delete error: {}".format(action_id, e))
924
925 if failed_detail:
926 stage[2] = "Error deleting from VIM"
927 else:
928 stage[2] = "Deleted from VIM"
929 db_nsr_update["detailed-status"] = " ".join(stage)
930 self.update_db_2("nsrs", nsr_id, db_nsr_update)
931 self._write_op_status(nslcmop_id, stage)
932
933 if failed_detail:
934 raise LcmException("; ".join(failed_detail))
935 return
936
tiernoe876f672020-02-13 14:34:48 +0000937 async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
938 n2vc_key_list, stage):
tiernoe95ed362020-04-23 08:24:57 +0000939 """
940 Instantiate at RO
941 :param logging_text: preffix text to use at logging
942 :param nsr_id: nsr identity
943 :param nsd: database content of ns descriptor
944 :param db_nsr: database content of ns record
945 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
946 :param db_vnfrs:
947 :param db_vnfds_ref: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
948 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
949 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
950 :return: None or exception
951 """
tiernoe876f672020-02-13 14:34:48 +0000952 try:
953 db_nsr_update = {}
954 RO_descriptor_number = 0 # number of descriptors created at RO
955 vnf_index_2_RO_id = {} # map between vnfd/nsd id to the id used at RO
956 nslcmop_id = db_nslcmop["_id"]
957 start_deploy = time()
958 ns_params = db_nslcmop.get("operationParams")
959 if ns_params and ns_params.get("timeout_ns_deploy"):
960 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
961 else:
962 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +0200963
tiernoe876f672020-02-13 14:34:48 +0000964 # Check for and optionally request placement optimization. Database will be updated if placement activated
965 stage[2] = "Waiting for Placement."
tierno8790a3d2020-04-23 22:49:52 +0000966 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
967 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
968 for vnfr in db_vnfrs.values():
969 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
970 break
971 else:
972 ns_params["vimAccountId"] == vnfr["vim-account-id"]
quilesj7e13aeb2019-10-08 13:34:55 +0200973
tierno69f0d382020-05-07 13:08:09 +0000974 if self.ng_ro:
975 return await self._instantiate_ng_ro(logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs,
976 db_vnfds_ref, n2vc_key_list, stage, start_deploy,
977 timeout_ns_deploy)
tiernoe876f672020-02-13 14:34:48 +0000978 # deploy RO
tiernoe876f672020-02-13 14:34:48 +0000979 # get vnfds, instantiate at RO
980 for c_vnf in nsd.get("constituent-vnfd", ()):
981 member_vnf_index = c_vnf["member-vnf-index"]
982 vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']]
983 vnfd_ref = vnfd["id"]
quilesj7e13aeb2019-10-08 13:34:55 +0200984
tiernoe876f672020-02-13 14:34:48 +0000985 stage[2] = "Creating vnfd='{}' member_vnf_index='{}' at RO".format(vnfd_ref, member_vnf_index)
986 db_nsr_update["detailed-status"] = " ".join(stage)
987 self.update_db_2("nsrs", nsr_id, db_nsr_update)
988 self._write_op_status(nslcmop_id, stage)
calvinosanch9f9c6f22019-11-04 13:37:39 +0100989
tiernoe876f672020-02-13 14:34:48 +0000990 # self.logger.debug(logging_text + stage[2])
991 vnfd_id_RO = "{}.{}.{}".format(nsr_id, RO_descriptor_number, member_vnf_index[:23])
992 vnf_index_2_RO_id[member_vnf_index] = vnfd_id_RO
993 RO_descriptor_number += 1
994
995 # look position at deployed.RO.vnfd if not present it will be appended at the end
996 for index, vnf_deployed in enumerate(db_nsr["_admin"]["deployed"]["RO"]["vnfd"]):
997 if vnf_deployed["member-vnf-index"] == member_vnf_index:
998 break
999 else:
1000 index = len(db_nsr["_admin"]["deployed"]["RO"]["vnfd"])
1001 db_nsr["_admin"]["deployed"]["RO"]["vnfd"].append(None)
1002
1003 # look if present
1004 RO_update = {"member-vnf-index": member_vnf_index}
1005 vnfd_list = await self.RO.get_list("vnfd", filter_by={"osm_id": vnfd_id_RO})
1006 if vnfd_list:
1007 RO_update["id"] = vnfd_list[0]["uuid"]
1008 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' exists at RO. Using RO_id={}".
1009 format(vnfd_ref, member_vnf_index, vnfd_list[0]["uuid"]))
1010 else:
1011 vnfd_RO = self.vnfd2RO(vnfd, vnfd_id_RO, db_vnfrs[c_vnf["member-vnf-index"]].
1012 get("additionalParamsForVnf"), nsr_id)
1013 desc = await self.RO.create("vnfd", descriptor=vnfd_RO)
1014 RO_update["id"] = desc["uuid"]
1015 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' created at RO. RO_id={}".format(
1016 vnfd_ref, member_vnf_index, desc["uuid"]))
1017 db_nsr_update["_admin.deployed.RO.vnfd.{}".format(index)] = RO_update
1018 db_nsr["_admin"]["deployed"]["RO"]["vnfd"][index] = RO_update
1019
1020 # create nsd at RO
1021 nsd_ref = nsd["id"]
1022
1023 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1024 db_nsr_update["detailed-status"] = " ".join(stage)
1025 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1026 self._write_op_status(nslcmop_id, stage)
1027
1028 # self.logger.debug(logging_text + stage[2])
1029 RO_osm_nsd_id = "{}.{}.{}".format(nsr_id, RO_descriptor_number, nsd_ref[:23])
tiernod8323042019-08-09 11:32:23 +00001030 RO_descriptor_number += 1
tiernoe876f672020-02-13 14:34:48 +00001031 nsd_list = await self.RO.get_list("nsd", filter_by={"osm_id": RO_osm_nsd_id})
1032 if nsd_list:
1033 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = nsd_list[0]["uuid"]
1034 self.logger.debug(logging_text + "nsd={} exists at RO. Using RO_id={}".format(
1035 nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001036 else:
tiernoe876f672020-02-13 14:34:48 +00001037 nsd_RO = deepcopy(nsd)
1038 nsd_RO["id"] = RO_osm_nsd_id
1039 nsd_RO.pop("_id", None)
1040 nsd_RO.pop("_admin", None)
1041 for c_vnf in nsd_RO.get("constituent-vnfd", ()):
1042 member_vnf_index = c_vnf["member-vnf-index"]
1043 c_vnf["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
1044 for c_vld in nsd_RO.get("vld", ()):
1045 for cp in c_vld.get("vnfd-connection-point-ref", ()):
1046 member_vnf_index = cp["member-vnf-index-ref"]
1047 cp["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
tiernod8323042019-08-09 11:32:23 +00001048
tiernoe876f672020-02-13 14:34:48 +00001049 desc = await self.RO.create("nsd", descriptor=nsd_RO)
1050 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1051 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = desc["uuid"]
1052 self.logger.debug(logging_text + "nsd={} created at RO. RO_id={}".format(nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001053 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1054
tiernoe876f672020-02-13 14:34:48 +00001055 # Crate ns at RO
1056 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1057 db_nsr_update["detailed-status"] = " ".join(stage)
1058 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1059 self._write_op_status(nslcmop_id, stage)
tiernod8323042019-08-09 11:32:23 +00001060
tiernoe876f672020-02-13 14:34:48 +00001061 # if present use it unless in error status
1062 RO_nsr_id = deep_get(db_nsr, ("_admin", "deployed", "RO", "nsr_id"))
1063 if RO_nsr_id:
1064 try:
1065 stage[2] = "Looking for existing ns at RO"
1066 db_nsr_update["detailed-status"] = " ".join(stage)
1067 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1068 self._write_op_status(nslcmop_id, stage)
1069 # self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1070 desc = await self.RO.show("ns", RO_nsr_id)
tiernod8323042019-08-09 11:32:23 +00001071
tiernoe876f672020-02-13 14:34:48 +00001072 except ROclient.ROClientException as e:
1073 if e.http_code != HTTPStatus.NOT_FOUND:
1074 raise
1075 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1076 if RO_nsr_id:
1077 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1078 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1079 if ns_status == "ERROR":
1080 stage[2] = "Deleting ns at RO. RO_ns_id={}".format(RO_nsr_id)
1081 self.logger.debug(logging_text + stage[2])
1082 await self.RO.delete("ns", RO_nsr_id)
1083 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1084 if not RO_nsr_id:
1085 stage[2] = "Checking dependencies"
1086 db_nsr_update["detailed-status"] = " ".join(stage)
1087 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1088 self._write_op_status(nslcmop_id, stage)
1089 # self.logger.debug(logging_text + stage[2])
tiernod8323042019-08-09 11:32:23 +00001090
tiernoe876f672020-02-13 14:34:48 +00001091 # check if VIM is creating and wait look if previous tasks in process
1092 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account", ns_params["vimAccountId"])
1093 if task_dependency:
1094 stage[2] = "Waiting for related tasks '{}' to be completed".format(task_name)
1095 self.logger.debug(logging_text + stage[2])
1096 await asyncio.wait(task_dependency, timeout=3600)
1097 if ns_params.get("vnf"):
1098 for vnf in ns_params["vnf"]:
1099 if "vimAccountId" in vnf:
1100 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account",
1101 vnf["vimAccountId"])
1102 if task_dependency:
1103 stage[2] = "Waiting for related tasks '{}' to be completed.".format(task_name)
1104 self.logger.debug(logging_text + stage[2])
1105 await asyncio.wait(task_dependency, timeout=3600)
1106
1107 stage[2] = "Checking instantiation parameters."
tiernoe95ed362020-04-23 08:24:57 +00001108 RO_ns_params = self._ns_params_2_RO(ns_params, nsd, db_vnfds_ref, db_vnfrs, n2vc_key_list)
tiernoe876f672020-02-13 14:34:48 +00001109 stage[2] = "Deploying ns at VIM."
1110 db_nsr_update["detailed-status"] = " ".join(stage)
1111 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1112 self._write_op_status(nslcmop_id, stage)
1113
1114 desc = await self.RO.create("ns", descriptor=RO_ns_params, name=db_nsr["name"], scenario=RO_nsd_uuid)
1115 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = desc["uuid"]
1116 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1117 db_nsr_update["_admin.deployed.RO.nsr_status"] = "BUILD"
1118 self.logger.debug(logging_text + "ns created at RO. RO_id={}".format(desc["uuid"]))
1119
1120 # wait until NS is ready
1121 stage[2] = "Waiting VIM to deploy ns."
1122 db_nsr_update["detailed-status"] = " ".join(stage)
1123 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1124 self._write_op_status(nslcmop_id, stage)
1125 detailed_status_old = None
1126 self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1127
1128 old_desc = None
1129 while time() <= start_deploy + timeout_ns_deploy:
tiernod8323042019-08-09 11:32:23 +00001130 desc = await self.RO.show("ns", RO_nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001131
tiernoe876f672020-02-13 14:34:48 +00001132 # deploymentStatus
1133 if desc != old_desc:
1134 # desc has changed => update db
1135 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
1136 old_desc = desc
tiernod8323042019-08-09 11:32:23 +00001137
tiernoe876f672020-02-13 14:34:48 +00001138 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1139 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1140 if ns_status == "ERROR":
1141 raise ROclient.ROClientException(ns_status_info)
1142 elif ns_status == "BUILD":
1143 stage[2] = "VIM: ({})".format(ns_status_info)
1144 elif ns_status == "ACTIVE":
1145 stage[2] = "Waiting for management IP address reported by the VIM. Updating VNFRs."
1146 try:
1147 self.ns_update_vnfr(db_vnfrs, desc)
1148 break
1149 except LcmExceptionNoMgmtIP:
1150 pass
1151 else:
1152 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
1153 if stage[2] != detailed_status_old:
1154 detailed_status_old = stage[2]
1155 db_nsr_update["detailed-status"] = " ".join(stage)
1156 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1157 self._write_op_status(nslcmop_id, stage)
1158 await asyncio.sleep(5, loop=self.loop)
1159 else: # timeout_ns_deploy
1160 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tiernod8323042019-08-09 11:32:23 +00001161
tiernoe876f672020-02-13 14:34:48 +00001162 # Updating NSR
1163 self.ns_update_nsr(db_nsr_update, db_nsr, desc)
tiernod8323042019-08-09 11:32:23 +00001164
tiernoe876f672020-02-13 14:34:48 +00001165 db_nsr_update["_admin.deployed.RO.operational-status"] = "running"
1166 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1167 stage[2] = "Deployed at VIM"
1168 db_nsr_update["detailed-status"] = " ".join(stage)
1169 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1170 self._write_op_status(nslcmop_id, stage)
1171 # await self._on_update_n2vc_db("nsrs", {"_id": nsr_id}, "_admin.deployed", db_nsr_update)
1172 # self.logger.debug(logging_text + "Deployed at VIM")
tierno69f0d382020-05-07 13:08:09 +00001173 except (ROclient.ROClientException, LcmException, DbException, NgRoException) as e:
tierno067e04a2020-03-31 12:53:13 +00001174 stage[2] = "ERROR deploying at VIM"
tiernoe876f672020-02-13 14:34:48 +00001175 self.set_vnfr_at_error(db_vnfrs, str(e))
1176 raise
quilesj7e13aeb2019-10-08 13:34:55 +02001177
tiernoa5088192019-11-26 16:12:53 +00001178 async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None):
1179 """
1180 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1181 :param logging_text: prefix use for logging
1182 :param nsr_id:
1183 :param vnfr_id:
1184 :param vdu_id:
1185 :param vdu_index:
1186 :param pub_key: public ssh key to inject, None to skip
1187 :param user: user to apply the public ssh key
1188 :return: IP address
1189 """
quilesj7e13aeb2019-10-08 13:34:55 +02001190
tiernoa5088192019-11-26 16:12:53 +00001191 # self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
tiernod8323042019-08-09 11:32:23 +00001192 ro_nsr_id = None
1193 ip_address = None
1194 nb_tries = 0
1195 target_vdu_id = None
quilesj3149f262019-12-03 10:58:10 +00001196 ro_retries = 0
quilesj7e13aeb2019-10-08 13:34:55 +02001197
tiernod8323042019-08-09 11:32:23 +00001198 while True:
quilesj7e13aeb2019-10-08 13:34:55 +02001199
quilesj3149f262019-12-03 10:58:10 +00001200 ro_retries += 1
1201 if ro_retries >= 360: # 1 hour
1202 raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id))
1203
tiernod8323042019-08-09 11:32:23 +00001204 await asyncio.sleep(10, loop=self.loop)
quilesj7e13aeb2019-10-08 13:34:55 +02001205
1206 # get ip address
tiernod8323042019-08-09 11:32:23 +00001207 if not target_vdu_id:
1208 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
quilesj3149f262019-12-03 10:58:10 +00001209
1210 if not vdu_id: # for the VNF case
tiernoe876f672020-02-13 14:34:48 +00001211 if db_vnfr.get("status") == "ERROR":
1212 raise LcmException("Cannot inject ssh-key because target VNF is in error state")
tiernod8323042019-08-09 11:32:23 +00001213 ip_address = db_vnfr.get("ip-address")
1214 if not ip_address:
1215 continue
quilesj3149f262019-12-03 10:58:10 +00001216 vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None)
1217 else: # VDU case
1218 vdur = next((x for x in get_iterable(db_vnfr, "vdur")
1219 if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)
1220
tierno0e8c3f02020-03-12 17:18:21 +00001221 if not vdur and len(db_vnfr.get("vdur", ())) == 1: # If only one, this should be the target vdu
1222 vdur = db_vnfr["vdur"][0]
quilesj3149f262019-12-03 10:58:10 +00001223 if not vdur:
tierno0e8c3f02020-03-12 17:18:21 +00001224 raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id,
1225 vdu_index))
quilesj7e13aeb2019-10-08 13:34:55 +02001226
tierno0e8c3f02020-03-12 17:18:21 +00001227 if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE":
quilesj3149f262019-12-03 10:58:10 +00001228 ip_address = vdur.get("ip-address")
1229 if not ip_address:
1230 continue
1231 target_vdu_id = vdur["vdu-id-ref"]
1232 elif vdur.get("status") == "ERROR":
1233 raise LcmException("Cannot inject ssh-key because target VM is in error state")
1234
tiernod8323042019-08-09 11:32:23 +00001235 if not target_vdu_id:
1236 continue
tiernod8323042019-08-09 11:32:23 +00001237
quilesj7e13aeb2019-10-08 13:34:55 +02001238 # inject public key into machine
1239 if pub_key and user:
tiernoe876f672020-02-13 14:34:48 +00001240 # wait until NS is deployed at RO
1241 if not ro_nsr_id:
1242 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1243 ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id"))
1244 if not ro_nsr_id:
1245 continue
1246
tiernoa5088192019-11-26 16:12:53 +00001247 # self.logger.debug(logging_text + "Inserting RO key")
tierno0e8c3f02020-03-12 17:18:21 +00001248 if vdur.get("pdu-type"):
1249 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1250 return ip_address
quilesj7e13aeb2019-10-08 13:34:55 +02001251 try:
1252 ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index
tierno69f0d382020-05-07 13:08:09 +00001253 if self.ng_ro:
1254 target = {"action": "inject_ssh_key", "key": pub_key, "user": user,
1255 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdu_id}]}],
1256 }
1257 await self.RO.deploy(nsr_id, target)
1258 else:
1259 result_dict = await self.RO.create_action(
1260 item="ns",
1261 item_id_name=ro_nsr_id,
1262 descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user}
1263 )
1264 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1265 if not result_dict or not isinstance(result_dict, dict):
1266 raise LcmException("Unknown response from RO when injecting key")
1267 for result in result_dict.values():
1268 if result.get("vim_result") == 200:
1269 break
1270 else:
1271 raise ROclient.ROClientException("error injecting key: {}".format(
1272 result.get("description")))
1273 break
1274 except NgRoException as e:
1275 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001276 except ROclient.ROClientException as e:
tiernoa5088192019-11-26 16:12:53 +00001277 if not nb_tries:
1278 self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds".
1279 format(e, 20*10))
quilesj7e13aeb2019-10-08 13:34:55 +02001280 nb_tries += 1
tiernoa5088192019-11-26 16:12:53 +00001281 if nb_tries >= 20:
quilesj7e13aeb2019-10-08 13:34:55 +02001282 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001283 else:
quilesj7e13aeb2019-10-08 13:34:55 +02001284 break
1285
1286 return ip_address
1287
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
        :param nsr_id: nsr identity, used to re-read the nsr from the database on every iteration
        :param vca_deployed_list: _admin.deployed.VCA list; only the entry at vca_index is inspected
        :param vca_index: position of this VCA within the list
        :return: None when all dependencies are READY. LcmException raised when a dependency is BROKEN
            or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): 'timeout' is decremented once per 10-second sleep, so 300 allows up to
        # ~3000 seconds (50 min) of waiting, not 300 seconds — confirm whether that is intended
        timeout = 300
        while timeout >= 0:
            # re-read the nsr each iteration to observe progress made by other configuration tasks
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # NS-level VCA (no member-vnf-index) depends on every other VCA;
                # VNF-level VCA depends only on VCAs of the same member-vnf-index
                if not my_vca.get("member-vnf-index") or \
                        (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == 'READY':
                        continue
                    elif internal_status == 'BROKEN':
                        raise LcmException("Configuration aborted because dependent charm/s has failed")
                    else:
                        # still in progress: leave the for loop and sleep before re-checking
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1321
    async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
                               config_descriptor, deploy_params, base_folder, nslcmop_id, stage):
        """
        Deploy and configure one VCA (charm execution environment) and run its Day-1 primitives.

        Flow: build the element namespace (NS/VNF/VDU/KDU), create (proxy charm) or register
        (native charm) the execution environment at N2VC — or install directly for a k8s proxy
        charm —, install the charm software, add relations, optionally inject the VCA ssh key
        into the VM, wait for dependent VCAs, then execute the sorted initial-config-primitives.

        :param logging_text: prefix for every log line (carries ns/nslcmop identifiers)
        :param vca_index: position of this VCA inside _admin.deployed.VCA / configurationStatus
        :param nsi_id: network slice instance id or None; only used to build the namespace
        :param db_nsr: "nsrs" database record (reads _id, _admin.deployed.VCA, config-units)
        :param db_vnfr: "vnfrs" record of the target vnf, or falsy for a NS-level charm
        :param vdu_id: vdu id when the charm targets a VDU, else None
        :param kdu_name: kdu name when the charm targets a KDU, else None
        :param vdu_index: vdu count index; treated as 0 when falsy
        :param config_descriptor: descriptor section with juju/config-access/initial-config-primitive
        :param deploy_params: primitive parameters; extended here with rw_mgmt_ip / ns_config_info
        :param base_folder: dict with 'folder' and 'pkg-dir' used to locate the charm artifact
        :param nslcmop_id: current operation id, used to report progress
        :param stage: 3-item progress list, mutated in place (stage[0] is updated here)
        :raises LcmException: wrapping any failure, after marking configurationStatus as BROKEN
        """
        nsr_id = db_nsr["_id"]
        # dotted db path of this VCA entry, e.g. "_admin.deployed.VCA.3."
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        # db_dict tells n2vc where to report its own progress inside the nsrs record
        db_dict = {
            'collection': 'nsrs',
            'filter': {'_id': nsr_id},
            'path': db_update_entry
        }
        step = ""  # current step; reused by the exception handler to build the error text
        try:

            # defaults for a NS-level charm; overridden below for VNF/VDU/KDU
            element_type = 'NS'
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]

            # namespace identifies the element at N2VC: <nsi>.<ns>[.<vnf>[.<vdu>-<count> | .<kdu>]]
            namespace = "{nsi}.{ns}".format(
                nsi=nsi_id if nsi_id else "",
                ns=nsr_id)

            if vnfr_id:
                element_type = 'VNF'
                element_under_configuration = vnfr_id
                namespace += ".{}".format(vnfr_id)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
                    element_type = 'VDU'
                    element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = 'KDU'
                    element_under_configuration = kdu_name

            # Get artifact path
            artifact_path = "{}/{}/charms/{}".format(
                base_folder["folder"],
                base_folder["pkg-dir"],
                config_descriptor["juju"]["charm"]
            )

            # proxy charm runs at the VCA and reaches the VM over ssh; a native charm runs inside
            # the VM itself. juju.proxy explicitly set to False marks a native charm.
            is_proxy_charm = deep_get(config_descriptor, ('juju', 'charm')) is not None
            is_k8s_proxy_charm = False

            if deep_get(config_descriptor, ('juju', 'proxy')) is False:
                is_proxy_charm = False

            # a proxy charm declared with juju.cloud == 'k8s' is deployed on kubernetes instead
            if deep_get(config_descriptor, ('juju', 'cloud')) == 'k8s' and is_proxy_charm:
                is_k8s_proxy_charm = True

            if not is_k8s_proxy_charm:
                # n2vc_redesign STEP 3.1

                # find old ee_id if exists, so a previous execution environment can be reused
                ee_id = vca_deployed.get("ee_id")

                # create or register execution environment in VCA
                if is_proxy_charm:

                    self._write_configuration_status(
                        nsr_id=nsr_id,
                        vca_index=vca_index,
                        status='CREATING',
                        element_under_configuration=element_under_configuration,
                        element_type=element_type
                    )

                    step = "create execution environment"
                    self.logger.debug(logging_text + step)
                    ee_id, credentials = await self.n2vc.create_execution_environment(namespace=namespace,
                                                                                      reuse_ee_id=ee_id,
                                                                                      db_dict=db_dict)

                else:
                    # native charm: the execution environment is the VM itself, so wait for it
                    step = "Waiting to VM being up and getting IP address"
                    self.logger.debug(logging_text + step)
                    rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
                                                                     user=None, pub_key=None)
                    credentials = {"hostname": rw_mgmt_ip}
                    # get username
                    username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
                    # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                    # merged. Meanwhile let's get username from initial-config-primitive
                    if not username and config_descriptor.get("initial-config-primitive"):
                        for config_primitive in config_descriptor["initial-config-primitive"]:
                            for param in config_primitive.get("parameter", ()):
                                if param["name"] == "ssh-username":
                                    username = param["value"]
                                    break
                    if not username:
                        # NOTE(review): "promitive" typo kept as-is, it is a runtime message
                        raise LcmException("Cannot determine the username neither with"
                                           "'initial-config-promitive' nor with "
                                           "'config-access.ssh-access.default-user'")
                    credentials["username"] = username
                    # n2vc_redesign STEP 3.2

                    self._write_configuration_status(
                        nsr_id=nsr_id,
                        vca_index=vca_index,
                        status='REGISTERING',
                        element_under_configuration=element_under_configuration,
                        element_type=element_type
                    )

                    step = "register execution environment {}".format(credentials)
                    self.logger.debug(logging_text + step)
                    ee_id = await self.n2vc.register_execution_environment(credentials=credentials, namespace=namespace,
                                                                           db_dict=db_dict)

            # n2vc_redesign STEP 3.3

            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='INSTALLING SW',
                element_under_configuration=element_under_configuration,
                element_type=element_type,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if not is_proxy_charm:
                # native charm: the 'config' initial primitive is applied at install time
                initial_config_primitive_list = config_descriptor.get('initial-config-primitive')
                if initial_config_primitive_list:
                    for primitive in initial_config_primitive_list:
                        if primitive["name"] == "config":
                            config = self._map_primitive_params(
                                primitive,
                                {},
                                deploy_params
                            )
                            break
            if is_k8s_proxy_charm:
                charm_name = deep_get(config_descriptor, ('juju', 'charm'))
                self.logger.debug("Installing K8s Proxy Charm: {}".format(charm_name))

                ee_id = await self.n2vc.install_k8s_proxy_charm(
                    charm_name=charm_name,
                    namespace=namespace,
                    artifact_path=artifact_path,
                    db_dict=db_dict
                )
            else:
                # number of units is only configurable for proxy charms, taken from the
                # "config-units" field at the matching level (NS / VNF / VDU)
                num_units = 1
                if is_proxy_charm:
                    if element_type == "NS":
                        num_units = db_nsr.get("config-units") or 1
                    elif element_type == "VNF":
                        num_units = db_vnfr.get("config-units") or 1
                    elif element_type == "VDU":
                        for v in db_vnfr["vdur"]:
                            if vdu_id == v["vdu-id-ref"]:
                                num_units = v.get("config-units") or 1
                                break

                await self.n2vc.install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=num_units
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True})

            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id, vca_index=vca_index)

            # if SSH access is required, then get execution environment SSH public
            if is_proxy_charm:  # if native charm we have waited already to VM be UP
                pub_key = None
                user = None
                if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
                    # Needed to inject a ssh key
                    user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.n2vc.get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)

                    step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
                else:
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
                                                                     user=user, pub_key=pub_key)
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6  Execute initial config primitive
            step = 'execute initial config primitive'
            initial_config_primitive_list = config_descriptor.get('initial-config-primitive')

            # sort initial config primitives by 'seq'
            if initial_config_primitive_list:
                try:
                    initial_config_primitive_list.sort(key=lambda val: int(val['seq']))
                except Exception as e:
                    # a bad or missing 'seq' only loses the ordering; primitives still run
                    self.logger.error(logging_text + step + ": " + str(e))
            else:
                self.logger.debug(logging_text + step + ": No initial-config-primitive")

            # add config if not present for NS charm
            initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list,
                                                                                    vca_deployed)

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.'
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.'
            else:
                # NS
                stage[0] = 'Stage 5/5: running Day-1 primitives for NS.'

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='EXECUTING PRIMITIVE'
            )

            self._write_op_status(
                op_id=nslcmop_id,
                stage=stage
            )

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)

                step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
                self.logger.debug(logging_text + step)
                await self.n2vc.exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get('terminate-config-primitive'):
                        self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True})
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='READY'
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # known exception types already carry a meaningful message; log the rest with traceback
            if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)):
                self.logger.error("Exception while {} : {}".format(step, e), exc_info=True)
            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='BROKEN'
            )
            raise LcmException("{} {}".format(step, e)) from e
tiernod8323042019-08-09 11:32:23 +00001613
quilesj4cda56b2019-12-05 10:02:20 +00001614 def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
tiernoa2143262020-03-27 16:20:40 +00001615 error_description: str = None, error_detail: str = None, other_update: dict = None):
tiernoe876f672020-02-13 14:34:48 +00001616 """
1617 Update db_nsr fields.
1618 :param nsr_id:
1619 :param ns_state:
1620 :param current_operation:
1621 :param current_operation_id:
1622 :param error_description:
tiernoa2143262020-03-27 16:20:40 +00001623 :param error_detail:
tiernoe876f672020-02-13 14:34:48 +00001624 :param other_update: Other required changes at database if provided, will be cleared
1625 :return:
1626 """
quilesj4cda56b2019-12-05 10:02:20 +00001627 try:
tiernoe876f672020-02-13 14:34:48 +00001628 db_dict = other_update or {}
1629 db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility
1630 db_dict["_admin.current-operation"] = current_operation_id
1631 db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None
quilesj4cda56b2019-12-05 10:02:20 +00001632 db_dict["currentOperation"] = current_operation
1633 db_dict["currentOperationID"] = current_operation_id
1634 db_dict["errorDescription"] = error_description
tiernoa2143262020-03-27 16:20:40 +00001635 db_dict["errorDetail"] = error_detail
tiernoe876f672020-02-13 14:34:48 +00001636
1637 if ns_state:
1638 db_dict["nsState"] = ns_state
quilesj4cda56b2019-12-05 10:02:20 +00001639 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001640 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001641 self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
1642
tiernoe876f672020-02-13 14:34:48 +00001643 def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0,
1644 operation_state: str = None, other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001645 try:
tiernoe876f672020-02-13 14:34:48 +00001646 db_dict = other_update or {}
quilesj3655ae02019-12-12 16:08:35 +00001647 db_dict['queuePosition'] = queuePosition
tiernoe876f672020-02-13 14:34:48 +00001648 if isinstance(stage, list):
1649 db_dict['stage'] = stage[0]
1650 db_dict['detailed-status'] = " ".join(stage)
1651 elif stage is not None:
1652 db_dict['stage'] = str(stage)
1653
1654 if error_message is not None:
quilesj3655ae02019-12-12 16:08:35 +00001655 db_dict['errorMessage'] = error_message
tiernoe876f672020-02-13 14:34:48 +00001656 if operation_state is not None:
1657 db_dict['operationState'] = operation_state
1658 db_dict["statusEnteredTime"] = time()
quilesj3655ae02019-12-12 16:08:35 +00001659 self.update_db_2("nslcmops", op_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001660 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001661 self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
1662
tierno51183952020-04-03 15:48:18 +00001663 def _write_all_config_status(self, db_nsr: dict, status: str):
quilesj3655ae02019-12-12 16:08:35 +00001664 try:
tierno51183952020-04-03 15:48:18 +00001665 nsr_id = db_nsr["_id"]
quilesj3655ae02019-12-12 16:08:35 +00001666 # configurationStatus
1667 config_status = db_nsr.get('configurationStatus')
1668 if config_status:
tierno51183952020-04-03 15:48:18 +00001669 db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in
1670 enumerate(config_status) if v}
quilesj3655ae02019-12-12 16:08:35 +00001671 # update status
tierno51183952020-04-03 15:48:18 +00001672 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00001673
tiernoe876f672020-02-13 14:34:48 +00001674 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001675 self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
1676
quilesj63f90042020-01-17 09:53:55 +00001677 def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None,
tierno51183952020-04-03 15:48:18 +00001678 element_under_configuration: str = None, element_type: str = None,
1679 other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001680
1681 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
1682 # .format(vca_index, status))
1683
1684 try:
1685 db_path = 'configurationStatus.{}.'.format(vca_index)
tierno51183952020-04-03 15:48:18 +00001686 db_dict = other_update or {}
quilesj63f90042020-01-17 09:53:55 +00001687 if status:
1688 db_dict[db_path + 'status'] = status
quilesj3655ae02019-12-12 16:08:35 +00001689 if element_under_configuration:
1690 db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration
1691 if element_type:
1692 db_dict[db_path + 'elementType'] = element_type
1693 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001694 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001695 self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
1696 .format(status, nsr_id, vca_index, e))
quilesj4cda56b2019-12-05 10:02:20 +00001697
tierno38089af2020-04-16 07:56:58 +00001698 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
1699 """
1700 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
1701 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
1702 Database is used because the result can be obtained from a different LCM worker in case of HA.
1703 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
1704 :param db_nslcmop: database content of nslcmop
1705 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
tierno8790a3d2020-04-23 22:49:52 +00001706 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
1707 computed 'vim-account-id'
tierno38089af2020-04-16 07:56:58 +00001708 """
tierno8790a3d2020-04-23 22:49:52 +00001709 modified = False
tierno38089af2020-04-16 07:56:58 +00001710 nslcmop_id = db_nslcmop['_id']
magnussonle9198bb2020-01-21 13:00:51 +01001711 placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
1712 if placement_engine == "PLA":
tierno38089af2020-04-16 07:56:58 +00001713 self.logger.debug(logging_text + "Invoke and wait for placement optimization")
1714 await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)
magnussonle9198bb2020-01-21 13:00:51 +01001715 db_poll_interval = 5
tierno38089af2020-04-16 07:56:58 +00001716 wait = db_poll_interval * 10
magnussonle9198bb2020-01-21 13:00:51 +01001717 pla_result = None
1718 while not pla_result and wait >= 0:
1719 await asyncio.sleep(db_poll_interval)
1720 wait -= db_poll_interval
tierno38089af2020-04-16 07:56:58 +00001721 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
magnussonle9198bb2020-01-21 13:00:51 +01001722 pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))
1723
1724 if not pla_result:
tierno38089af2020-04-16 07:56:58 +00001725 raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))
magnussonle9198bb2020-01-21 13:00:51 +01001726
1727 for pla_vnf in pla_result['vnf']:
1728 vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
1729 if not pla_vnf.get('vimAccountId') or not vnfr:
1730 continue
tierno8790a3d2020-04-23 22:49:52 +00001731 modified = True
magnussonle9198bb2020-01-21 13:00:51 +01001732 self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']})
tierno38089af2020-04-16 07:56:58 +00001733 # Modifies db_vnfrs
1734 vnfr["vim-account-id"] = pla_vnf['vimAccountId']
tierno8790a3d2020-04-23 22:49:52 +00001735 return modified
magnussonle9198bb2020-01-21 13:00:51 +01001736
1737 def update_nsrs_with_pla_result(self, params):
1738 try:
1739 nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
1740 self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
1741 except Exception as e:
1742 self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
1743
tierno59d22d22018-09-25 18:10:19 +02001744 async def instantiate(self, nsr_id, nslcmop_id):
quilesj7e13aeb2019-10-08 13:34:55 +02001745 """
1746
1747 :param nsr_id: ns instance to deploy
1748 :param nslcmop_id: operation to run
1749 :return:
1750 """
kuused124bfe2019-06-18 12:09:24 +02001751
1752 # Try to lock HA task here
1753 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
1754 if not task_is_locked_by_me:
quilesj3655ae02019-12-12 16:08:35 +00001755 self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
kuused124bfe2019-06-18 12:09:24 +02001756 return
1757
tierno59d22d22018-09-25 18:10:19 +02001758 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
1759 self.logger.debug(logging_text + "Enter")
quilesj7e13aeb2019-10-08 13:34:55 +02001760
sousaedua0deb2d2020-04-21 12:08:14 +01001761 # Sync from FSMongo
1762 self.fs.sync()
1763
tierno59d22d22018-09-25 18:10:19 +02001764 # get all needed from database
quilesj7e13aeb2019-10-08 13:34:55 +02001765
1766 # database nsrs record
tierno59d22d22018-09-25 18:10:19 +02001767 db_nsr = None
quilesj7e13aeb2019-10-08 13:34:55 +02001768
1769 # database nslcmops record
tierno59d22d22018-09-25 18:10:19 +02001770 db_nslcmop = None
quilesj7e13aeb2019-10-08 13:34:55 +02001771
1772 # update operation on nsrs
tiernoe876f672020-02-13 14:34:48 +00001773 db_nsr_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02001774 # update operation on nslcmops
tierno59d22d22018-09-25 18:10:19 +02001775 db_nslcmop_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02001776
tierno59d22d22018-09-25 18:10:19 +02001777 nslcmop_operation_state = None
quilesj7e13aeb2019-10-08 13:34:55 +02001778 db_vnfrs = {} # vnf's info indexed by member-index
1779 # n2vc_info = {}
tiernoe876f672020-02-13 14:34:48 +00001780 tasks_dict_info = {} # from task to info text
tierno59d22d22018-09-25 18:10:19 +02001781 exc = None
tiernoe876f672020-02-13 14:34:48 +00001782 error_list = []
1783 stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""]
1784 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02001785 try:
kuused124bfe2019-06-18 12:09:24 +02001786 # wait for any previous tasks in process
1787 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
1788
quilesj7e13aeb2019-10-08 13:34:55 +02001789 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
tiernoe876f672020-02-13 14:34:48 +00001790 stage[1] = "Reading from database,"
quilesj4cda56b2019-12-05 10:02:20 +00001791 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
tiernoe876f672020-02-13 14:34:48 +00001792 db_nsr_update["detailed-status"] = "creating"
1793 db_nsr_update["operational-status"] = "init"
quilesj4cda56b2019-12-05 10:02:20 +00001794 self._write_ns_status(
1795 nsr_id=nsr_id,
1796 ns_state="BUILDING",
1797 current_operation="INSTANTIATING",
tiernoe876f672020-02-13 14:34:48 +00001798 current_operation_id=nslcmop_id,
1799 other_update=db_nsr_update
1800 )
1801 self._write_op_status(
1802 op_id=nslcmop_id,
1803 stage=stage,
1804 queuePosition=0
quilesj4cda56b2019-12-05 10:02:20 +00001805 )
1806
quilesj7e13aeb2019-10-08 13:34:55 +02001807 # read from db: operation
tiernoe876f672020-02-13 14:34:48 +00001808 stage[1] = "Getting nslcmop={} from db".format(nslcmop_id)
tierno59d22d22018-09-25 18:10:19 +02001809 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
tierno744303e2020-01-13 16:46:31 +00001810 ns_params = db_nslcmop.get("operationParams")
1811 if ns_params and ns_params.get("timeout_ns_deploy"):
1812 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1813 else:
1814 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +02001815
1816 # read from db: ns
tiernoe876f672020-02-13 14:34:48 +00001817 stage[1] = "Getting nsr={} from db".format(nsr_id)
tierno59d22d22018-09-25 18:10:19 +02001818 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernod732fb82020-05-21 13:18:23 +00001819 stage[1] = "Getting nsd={} from db".format(db_nsr["nsd-id"])
1820 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
1821 db_nsr["nsd"] = nsd
tiernod8323042019-08-09 11:32:23 +00001822 # nsr_name = db_nsr["name"] # TODO short-name??
tierno47e86b52018-10-10 14:05:55 +02001823
quilesj7e13aeb2019-10-08 13:34:55 +02001824 # read from db: vnf's of this ns
tiernoe876f672020-02-13 14:34:48 +00001825 stage[1] = "Getting vnfrs from db"
1826 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02001827 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
tierno27246d82018-09-27 15:59:09 +02001828
quilesj7e13aeb2019-10-08 13:34:55 +02001829 # read from db: vnfd's for every vnf
1830 db_vnfds_ref = {} # every vnfd data indexed by vnf name
1831 db_vnfds = {} # every vnfd data indexed by vnf id
1832 db_vnfds_index = {} # every vnfd data indexed by vnf member-index
1833
1834 # for each vnf in ns, read vnfd
tierno27246d82018-09-27 15:59:09 +02001835 for vnfr in db_vnfrs_list:
quilesj7e13aeb2019-10-08 13:34:55 +02001836 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr # vnf's dict indexed by member-index: '1', '2', etc
1837 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
1838 vnfd_ref = vnfr["vnfd-ref"] # vnfd name for this vnf
1839 # if we haven't this vnfd, read it from db
tierno27246d82018-09-27 15:59:09 +02001840 if vnfd_id not in db_vnfds:
quilesj63f90042020-01-17 09:53:55 +00001841 # read from db
tiernoe876f672020-02-13 14:34:48 +00001842 stage[1] = "Getting vnfd={} id='{}' from db".format(vnfd_id, vnfd_ref)
1843 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02001844 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
tierno27246d82018-09-27 15:59:09 +02001845
quilesj7e13aeb2019-10-08 13:34:55 +02001846 # store vnfd
1847 db_vnfds_ref[vnfd_ref] = vnfd # vnfd's indexed by name
1848 db_vnfds[vnfd_id] = vnfd # vnfd's indexed by id
1849 db_vnfds_index[vnfr["member-vnf-index-ref"]] = db_vnfds[vnfd_id] # vnfd's indexed by member-index
1850
1851 # Get or generates the _admin.deployed.VCA list
tiernoe4f7e6c2018-11-27 14:55:30 +00001852 vca_deployed_list = None
1853 if db_nsr["_admin"].get("deployed"):
1854 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
1855 if vca_deployed_list is None:
1856 vca_deployed_list = []
quilesj3655ae02019-12-12 16:08:35 +00001857 configuration_status_list = []
tiernoe4f7e6c2018-11-27 14:55:30 +00001858 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
quilesj3655ae02019-12-12 16:08:35 +00001859 db_nsr_update["configurationStatus"] = configuration_status_list
quilesj7e13aeb2019-10-08 13:34:55 +02001860 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00001861 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00001862 elif isinstance(vca_deployed_list, dict):
1863 # maintain backward compatibility. Change a dict to list at database
1864 vca_deployed_list = list(vca_deployed_list.values())
1865 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00001866 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00001867
tierno6cf25f52019-09-12 09:33:40 +00001868 if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
tiernoa009e552019-01-30 16:45:44 +00001869 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
1870 db_nsr_update["_admin.deployed.RO.vnfd"] = []
tierno59d22d22018-09-25 18:10:19 +02001871
tiernobaa51102018-12-14 13:16:18 +00001872 # set state to INSTANTIATED. When instantiated NBI will not delete directly
1873 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1874 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00001875
1876 # n2vc_redesign STEP 2 Deploy Network Scenario
tiernoe876f672020-02-13 14:34:48 +00001877 stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.'
quilesj3655ae02019-12-12 16:08:35 +00001878 self._write_op_status(
1879 op_id=nslcmop_id,
tiernoe876f672020-02-13 14:34:48 +00001880 stage=stage
quilesj3655ae02019-12-12 16:08:35 +00001881 )
1882
tiernoe876f672020-02-13 14:34:48 +00001883 stage[1] = "Deploying KDUs,"
1884 # self.logger.debug(logging_text + "Before deploy_kdus")
calvinosanch9f9c6f22019-11-04 13:37:39 +01001885 # Call to deploy_kdus in case exists the "vdu:kdu" param
tiernoe876f672020-02-13 14:34:48 +00001886 await self.deploy_kdus(
1887 logging_text=logging_text,
1888 nsr_id=nsr_id,
1889 nslcmop_id=nslcmop_id,
1890 db_vnfrs=db_vnfrs,
1891 db_vnfds=db_vnfds,
1892 task_instantiation_info=tasks_dict_info,
calvinosanch9f9c6f22019-11-04 13:37:39 +01001893 )
tiernoe876f672020-02-13 14:34:48 +00001894
1895 stage[1] = "Getting VCA public key."
tiernod8323042019-08-09 11:32:23 +00001896 # n2vc_redesign STEP 1 Get VCA public ssh-key
1897 # feature 1429. Add n2vc public key to needed VMs
tierno3bedc9b2019-11-27 15:46:57 +00001898 n2vc_key = self.n2vc.get_public_key()
tiernoa5088192019-11-26 16:12:53 +00001899 n2vc_key_list = [n2vc_key]
1900 if self.vca_config.get("public_key"):
1901 n2vc_key_list.append(self.vca_config["public_key"])
tierno98ad6ea2019-05-30 17:16:28 +00001902
tiernoe876f672020-02-13 14:34:48 +00001903 stage[1] = "Deploying NS at VIM."
tiernod8323042019-08-09 11:32:23 +00001904 task_ro = asyncio.ensure_future(
quilesj7e13aeb2019-10-08 13:34:55 +02001905 self.instantiate_RO(
1906 logging_text=logging_text,
1907 nsr_id=nsr_id,
1908 nsd=nsd,
1909 db_nsr=db_nsr,
1910 db_nslcmop=db_nslcmop,
1911 db_vnfrs=db_vnfrs,
1912 db_vnfds_ref=db_vnfds_ref,
tiernoe876f672020-02-13 14:34:48 +00001913 n2vc_key_list=n2vc_key_list,
1914 stage=stage
tierno98ad6ea2019-05-30 17:16:28 +00001915 )
tiernod8323042019-08-09 11:32:23 +00001916 )
1917 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
tiernoa2143262020-03-27 16:20:40 +00001918 tasks_dict_info[task_ro] = "Deploying at VIM"
tierno98ad6ea2019-05-30 17:16:28 +00001919
tiernod8323042019-08-09 11:32:23 +00001920 # n2vc_redesign STEP 3 to 6 Deploy N2VC
tiernoe876f672020-02-13 14:34:48 +00001921 stage[1] = "Deploying Execution Environments."
1922 self.logger.debug(logging_text + stage[1])
tierno98ad6ea2019-05-30 17:16:28 +00001923
tiernod8323042019-08-09 11:32:23 +00001924 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
quilesj7e13aeb2019-10-08 13:34:55 +02001925 # get_iterable() returns a value from a dict or empty tuple if key does not exist
tierno98ad6ea2019-05-30 17:16:28 +00001926 for c_vnf in get_iterable(nsd, "constituent-vnfd"):
1927 vnfd_id = c_vnf["vnfd-id-ref"]
tierno98ad6ea2019-05-30 17:16:28 +00001928 vnfd = db_vnfds_ref[vnfd_id]
tiernod8323042019-08-09 11:32:23 +00001929 member_vnf_index = str(c_vnf["member-vnf-index"])
1930 db_vnfr = db_vnfrs[member_vnf_index]
1931 base_folder = vnfd["_admin"]["storage"]
1932 vdu_id = None
1933 vdu_index = 0
tierno98ad6ea2019-05-30 17:16:28 +00001934 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01001935 kdu_name = None
tierno59d22d22018-09-25 18:10:19 +02001936
tierno8a518872018-12-21 13:42:14 +00001937 # Get additional parameters
tiernod8323042019-08-09 11:32:23 +00001938 deploy_params = {}
1939 if db_vnfr.get("additionalParamsForVnf"):
tierno626e0152019-11-29 14:16:16 +00001940 deploy_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy())
tierno8a518872018-12-21 13:42:14 +00001941
tiernod8323042019-08-09 11:32:23 +00001942 descriptor_config = vnfd.get("vnf-configuration")
1943 if descriptor_config and descriptor_config.get("juju"):
quilesj7e13aeb2019-10-08 13:34:55 +02001944 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00001945 logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
quilesj7e13aeb2019-10-08 13:34:55 +02001946 db_nsr=db_nsr,
1947 db_vnfr=db_vnfr,
1948 nslcmop_id=nslcmop_id,
1949 nsr_id=nsr_id,
1950 nsi_id=nsi_id,
1951 vnfd_id=vnfd_id,
1952 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01001953 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02001954 member_vnf_index=member_vnf_index,
1955 vdu_index=vdu_index,
1956 vdu_name=vdu_name,
1957 deploy_params=deploy_params,
1958 descriptor_config=descriptor_config,
1959 base_folder=base_folder,
tiernoe876f672020-02-13 14:34:48 +00001960 task_instantiation_info=tasks_dict_info,
1961 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02001962 )
tierno59d22d22018-09-25 18:10:19 +02001963
1964 # Deploy charms for each VDU that supports one.
tiernod8323042019-08-09 11:32:23 +00001965 for vdud in get_iterable(vnfd, 'vdu'):
1966 vdu_id = vdud["id"]
1967 descriptor_config = vdud.get('vdu-configuration')
tierno626e0152019-11-29 14:16:16 +00001968 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
1969 if vdur.get("additionalParams"):
1970 deploy_params_vdu = self._format_additional_params(vdur["additionalParams"])
1971 else:
1972 deploy_params_vdu = deploy_params
tiernod8323042019-08-09 11:32:23 +00001973 if descriptor_config and descriptor_config.get("juju"):
1974 # look for vdu index in the db_vnfr["vdu"] section
1975 # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
1976 # if vdur["vdu-id-ref"] == vdu_id:
1977 # break
1978 # else:
1979 # raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
1980 # "member_vnf_index={}".format(vdu_id, member_vnf_index))
1981 # vdu_name = vdur.get("name")
1982 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01001983 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00001984 for vdu_index in range(int(vdud.get("count", 1))):
1985 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
quilesj7e13aeb2019-10-08 13:34:55 +02001986 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00001987 logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
1988 member_vnf_index, vdu_id, vdu_index),
quilesj7e13aeb2019-10-08 13:34:55 +02001989 db_nsr=db_nsr,
1990 db_vnfr=db_vnfr,
1991 nslcmop_id=nslcmop_id,
1992 nsr_id=nsr_id,
1993 nsi_id=nsi_id,
1994 vnfd_id=vnfd_id,
1995 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01001996 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02001997 member_vnf_index=member_vnf_index,
1998 vdu_index=vdu_index,
1999 vdu_name=vdu_name,
tierno626e0152019-11-29 14:16:16 +00002000 deploy_params=deploy_params_vdu,
quilesj7e13aeb2019-10-08 13:34:55 +02002001 descriptor_config=descriptor_config,
2002 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002003 task_instantiation_info=tasks_dict_info,
2004 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002005 )
calvinosanch9f9c6f22019-11-04 13:37:39 +01002006 for kdud in get_iterable(vnfd, 'kdu'):
2007 kdu_name = kdud["name"]
2008 descriptor_config = kdud.get('kdu-configuration')
2009 if descriptor_config and descriptor_config.get("juju"):
2010 vdu_id = None
2011 vdu_index = 0
2012 vdu_name = None
2013 # look for vdu index in the db_vnfr["vdu"] section
2014 # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
2015 # if vdur["vdu-id-ref"] == vdu_id:
2016 # break
2017 # else:
2018 # raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
2019 # "member_vnf_index={}".format(vdu_id, member_vnf_index))
2020 # vdu_name = vdur.get("name")
2021 # vdu_name = None
tierno59d22d22018-09-25 18:10:19 +02002022
calvinosanch9f9c6f22019-11-04 13:37:39 +01002023 self._deploy_n2vc(
2024 logging_text=logging_text,
2025 db_nsr=db_nsr,
2026 db_vnfr=db_vnfr,
2027 nslcmop_id=nslcmop_id,
2028 nsr_id=nsr_id,
2029 nsi_id=nsi_id,
2030 vnfd_id=vnfd_id,
2031 vdu_id=vdu_id,
2032 kdu_name=kdu_name,
2033 member_vnf_index=member_vnf_index,
2034 vdu_index=vdu_index,
2035 vdu_name=vdu_name,
2036 deploy_params=deploy_params,
2037 descriptor_config=descriptor_config,
2038 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002039 task_instantiation_info=tasks_dict_info,
2040 stage=stage
calvinosanch9f9c6f22019-11-04 13:37:39 +01002041 )
tierno59d22d22018-09-25 18:10:19 +02002042
tierno1b633412019-02-25 16:48:23 +00002043 # Check if this NS has a charm configuration
tiernod8323042019-08-09 11:32:23 +00002044 descriptor_config = nsd.get("ns-configuration")
2045 if descriptor_config and descriptor_config.get("juju"):
2046 vnfd_id = None
2047 db_vnfr = None
2048 member_vnf_index = None
2049 vdu_id = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002050 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002051 vdu_index = 0
2052 vdu_name = None
tierno1b633412019-02-25 16:48:23 +00002053
tiernod8323042019-08-09 11:32:23 +00002054 # Get additional parameters
2055 deploy_params = {}
2056 if db_nsr.get("additionalParamsForNs"):
tierno626e0152019-11-29 14:16:16 +00002057 deploy_params = self._format_additional_params(db_nsr["additionalParamsForNs"].copy())
tiernod8323042019-08-09 11:32:23 +00002058 base_folder = nsd["_admin"]["storage"]
quilesj7e13aeb2019-10-08 13:34:55 +02002059 self._deploy_n2vc(
2060 logging_text=logging_text,
2061 db_nsr=db_nsr,
2062 db_vnfr=db_vnfr,
2063 nslcmop_id=nslcmop_id,
2064 nsr_id=nsr_id,
2065 nsi_id=nsi_id,
2066 vnfd_id=vnfd_id,
2067 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002068 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002069 member_vnf_index=member_vnf_index,
2070 vdu_index=vdu_index,
2071 vdu_name=vdu_name,
2072 deploy_params=deploy_params,
2073 descriptor_config=descriptor_config,
2074 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002075 task_instantiation_info=tasks_dict_info,
2076 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002077 )
tierno1b633412019-02-25 16:48:23 +00002078
tiernoe876f672020-02-13 14:34:48 +00002079 # rest of staff will be done at finally
tierno1b633412019-02-25 16:48:23 +00002080
tiernoe876f672020-02-13 14:34:48 +00002081 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
2082 self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e))
tierno59d22d22018-09-25 18:10:19 +02002083 exc = e
2084 except asyncio.CancelledError:
tiernoe876f672020-02-13 14:34:48 +00002085 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
tierno59d22d22018-09-25 18:10:19 +02002086 exc = "Operation was cancelled"
2087 except Exception as e:
2088 exc = traceback.format_exc()
tiernoe876f672020-02-13 14:34:48 +00002089 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
tierno59d22d22018-09-25 18:10:19 +02002090 finally:
2091 if exc:
tiernoe876f672020-02-13 14:34:48 +00002092 error_list.append(str(exc))
tiernobaa51102018-12-14 13:16:18 +00002093 try:
tiernoe876f672020-02-13 14:34:48 +00002094 # wait for pending tasks
2095 if tasks_dict_info:
2096 stage[1] = "Waiting for instantiate pending tasks."
2097 self.logger.debug(logging_text + stage[1])
2098 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy,
2099 stage, nslcmop_id, nsr_id=nsr_id)
2100 stage[1] = stage[2] = ""
2101 except asyncio.CancelledError:
2102 error_list.append("Cancelled")
2103 # TODO cancel all tasks
2104 except Exception as exc:
2105 error_list.append(str(exc))
quilesj4cda56b2019-12-05 10:02:20 +00002106
tiernoe876f672020-02-13 14:34:48 +00002107 # update operation-status
2108 db_nsr_update["operational-status"] = "running"
2109 # let's begin with VCA 'configured' status (later we can change it)
2110 db_nsr_update["config-status"] = "configured"
2111 for task, task_name in tasks_dict_info.items():
2112 if not task.done() or task.cancelled() or task.exception():
2113 if task_name.startswith(self.task_name_deploy_vca):
2114 # A N2VC task is pending
2115 db_nsr_update["config-status"] = "failed"
quilesj4cda56b2019-12-05 10:02:20 +00002116 else:
tiernoe876f672020-02-13 14:34:48 +00002117 # RO or KDU task is pending
2118 db_nsr_update["operational-status"] = "failed"
quilesj3655ae02019-12-12 16:08:35 +00002119
tiernoe876f672020-02-13 14:34:48 +00002120 # update status at database
2121 if error_list:
tiernoa2143262020-03-27 16:20:40 +00002122 error_detail = ". ".join(error_list)
tiernoe876f672020-02-13 14:34:48 +00002123 self.logger.error(logging_text + error_detail)
tiernoa2143262020-03-27 16:20:40 +00002124 error_description_nslcmop = 'Stage: {}. Detail: {}'.format(stage[0], error_detail)
2125 error_description_nsr = 'Operation: INSTANTIATING.{}, Stage {}'.format(nslcmop_id, stage[0])
quilesj3655ae02019-12-12 16:08:35 +00002126
tiernoa2143262020-03-27 16:20:40 +00002127 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
tiernoe876f672020-02-13 14:34:48 +00002128 db_nslcmop_update["detailed-status"] = error_detail
2129 nslcmop_operation_state = "FAILED"
2130 ns_state = "BROKEN"
2131 else:
tiernoa2143262020-03-27 16:20:40 +00002132 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00002133 error_description_nsr = error_description_nslcmop = None
2134 ns_state = "READY"
2135 db_nsr_update["detailed-status"] = "Done"
2136 db_nslcmop_update["detailed-status"] = "Done"
2137 nslcmop_operation_state = "COMPLETED"
quilesj4cda56b2019-12-05 10:02:20 +00002138
tiernoe876f672020-02-13 14:34:48 +00002139 if db_nsr:
2140 self._write_ns_status(
2141 nsr_id=nsr_id,
2142 ns_state=ns_state,
2143 current_operation="IDLE",
2144 current_operation_id=None,
2145 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00002146 error_detail=error_detail,
tiernoe876f672020-02-13 14:34:48 +00002147 other_update=db_nsr_update
2148 )
tiernoa17d4f42020-04-28 09:59:23 +00002149 self._write_op_status(
2150 op_id=nslcmop_id,
2151 stage="",
2152 error_message=error_description_nslcmop,
2153 operation_state=nslcmop_operation_state,
2154 other_update=db_nslcmop_update,
2155 )
quilesj3655ae02019-12-12 16:08:35 +00002156
tierno59d22d22018-09-25 18:10:19 +02002157 if nslcmop_operation_state:
2158 try:
2159 await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00002160 "operationState": nslcmop_operation_state},
2161 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02002162 except Exception as e:
2163 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
2164
2165 self.logger.debug(logging_text + "Exit")
2166 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2167
quilesj63f90042020-01-17 09:53:55 +00002168 async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int, timeout: int = 3600) -> bool:
2169
2170 # steps:
2171 # 1. find all relations for this VCA
2172 # 2. wait for other peers related
2173 # 3. add relations
2174
2175 try:
2176
2177 # STEP 1: find all relations for this VCA
2178
2179 # read nsr record
2180 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
David Garcia171f3542020-05-21 16:41:07 +02002181 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
quilesj63f90042020-01-17 09:53:55 +00002182
2183 # this VCA data
2184 my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index]
2185
2186 # read all ns-configuration relations
2187 ns_relations = list()
David Garcia171f3542020-05-21 16:41:07 +02002188 db_ns_relations = deep_get(nsd, ('ns-configuration', 'relation'))
quilesj63f90042020-01-17 09:53:55 +00002189 if db_ns_relations:
2190 for r in db_ns_relations:
2191 # check if this VCA is in the relation
2192 if my_vca.get('member-vnf-index') in\
2193 (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2194 ns_relations.append(r)
2195
2196 # read all vnf-configuration relations
2197 vnf_relations = list()
2198 db_vnfd_list = db_nsr.get('vnfd-id')
2199 if db_vnfd_list:
2200 for vnfd in db_vnfd_list:
2201 db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
2202 db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation'))
2203 if db_vnf_relations:
2204 for r in db_vnf_relations:
2205 # check if this VCA is in the relation
2206 if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2207 vnf_relations.append(r)
2208
2209 # if no relations, terminate
2210 if not ns_relations and not vnf_relations:
2211 self.logger.debug(logging_text + ' No relations')
2212 return True
2213
2214 self.logger.debug(logging_text + ' adding relations\n {}\n {}'.format(ns_relations, vnf_relations))
2215
2216 # add all relations
2217 start = time()
2218 while True:
2219 # check timeout
2220 now = time()
2221 if now - start >= timeout:
2222 self.logger.error(logging_text + ' : timeout adding relations')
2223 return False
2224
2225 # reload nsr from database (we need to update record: _admin.deloyed.VCA)
2226 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2227
2228 # for each defined NS relation, find the VCA's related
2229 for r in ns_relations:
2230 from_vca_ee_id = None
2231 to_vca_ee_id = None
2232 from_vca_endpoint = None
2233 to_vca_endpoint = None
2234 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2235 for vca in vca_list:
2236 if vca.get('member-vnf-index') == r.get('entities')[0].get('id') \
2237 and vca.get('config_sw_installed'):
2238 from_vca_ee_id = vca.get('ee_id')
2239 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2240 if vca.get('member-vnf-index') == r.get('entities')[1].get('id') \
2241 and vca.get('config_sw_installed'):
2242 to_vca_ee_id = vca.get('ee_id')
2243 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2244 if from_vca_ee_id and to_vca_ee_id:
2245 # add relation
2246 await self.n2vc.add_relation(
2247 ee_id_1=from_vca_ee_id,
2248 ee_id_2=to_vca_ee_id,
2249 endpoint_1=from_vca_endpoint,
2250 endpoint_2=to_vca_endpoint)
2251 # remove entry from relations list
2252 ns_relations.remove(r)
2253 else:
2254 # check failed peers
2255 try:
2256 vca_status_list = db_nsr.get('configurationStatus')
2257 if vca_status_list:
2258 for i in range(len(vca_list)):
2259 vca = vca_list[i]
2260 vca_status = vca_status_list[i]
2261 if vca.get('member-vnf-index') == r.get('entities')[0].get('id'):
2262 if vca_status.get('status') == 'BROKEN':
2263 # peer broken: remove relation from list
2264 ns_relations.remove(r)
2265 if vca.get('member-vnf-index') == r.get('entities')[1].get('id'):
2266 if vca_status.get('status') == 'BROKEN':
2267 # peer broken: remove relation from list
2268 ns_relations.remove(r)
2269 except Exception:
2270 # ignore
2271 pass
2272
2273 # for each defined VNF relation, find the VCA's related
2274 for r in vnf_relations:
2275 from_vca_ee_id = None
2276 to_vca_ee_id = None
2277 from_vca_endpoint = None
2278 to_vca_endpoint = None
2279 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2280 for vca in vca_list:
2281 if vca.get('vdu_id') == r.get('entities')[0].get('id') and vca.get('config_sw_installed'):
2282 from_vca_ee_id = vca.get('ee_id')
2283 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2284 if vca.get('vdu_id') == r.get('entities')[1].get('id') and vca.get('config_sw_installed'):
2285 to_vca_ee_id = vca.get('ee_id')
2286 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2287 if from_vca_ee_id and to_vca_ee_id:
2288 # add relation
2289 await self.n2vc.add_relation(
2290 ee_id_1=from_vca_ee_id,
2291 ee_id_2=to_vca_ee_id,
2292 endpoint_1=from_vca_endpoint,
2293 endpoint_2=to_vca_endpoint)
2294 # remove entry from relations list
2295 vnf_relations.remove(r)
2296 else:
2297 # check failed peers
2298 try:
2299 vca_status_list = db_nsr.get('configurationStatus')
2300 if vca_status_list:
2301 for i in range(len(vca_list)):
2302 vca = vca_list[i]
2303 vca_status = vca_status_list[i]
2304 if vca.get('vdu_id') == r.get('entities')[0].get('id'):
2305 if vca_status.get('status') == 'BROKEN':
2306 # peer broken: remove relation from list
2307 ns_relations.remove(r)
2308 if vca.get('vdu_id') == r.get('entities')[1].get('id'):
2309 if vca_status.get('status') == 'BROKEN':
2310 # peer broken: remove relation from list
2311 ns_relations.remove(r)
2312 except Exception:
2313 # ignore
2314 pass
2315
2316 # wait for next try
2317 await asyncio.sleep(5.0)
2318
2319 if not ns_relations and not vnf_relations:
2320 self.logger.debug('Relations added')
2321 break
2322
2323 return True
2324
2325 except Exception as e:
2326 self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e))
2327 return False
2328
tiernob9018152020-04-16 14:18:24 +00002329 def _write_db_callback(self, task, item, _id, on_done=None, on_exc=None):
2330 """
2331 callback for kdu install intended to store the returned kdu_instance at database
2332 :return: None
2333 """
2334 db_update = {}
2335 try:
2336 result = task.result()
2337 if on_done:
2338 db_update[on_done] = str(result)
2339 except Exception as e:
2340 if on_exc:
2341 db_update[on_exc] = str(e)
2342 if db_update:
2343 try:
2344 self.update_db_2(item, _id, db_update)
2345 except Exception:
2346 pass
2347
    async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
        """
        Schedule an asynchronous install task for every KDU (helm-chart or juju-bundle)
        declared at the vnf records. Tasks are only launched here; the caller awaits
        their completion through task_instantiation_info.

        :param logging_text: prefix for log messages
        :param nsr_id: nsr _id at database
        :param nslcmop_id: operation _id, used to register the created asyncio tasks
        :param db_vnfrs: dict of vnfr content, indexed by member-vnf-index
        :param db_vnfds: dict of vnfd content, indexed by vnfd id
        :param task_instantiation_info: dict task -> description, filled with created tasks
        :raises LcmException: if a kdu declares no valid model type or its k8s cluster
            cannot be found/used
        """
        # Launch kdus if present in the descriptor

        # cache of connector-level cluster ids, indexed by cluster type and k8scluster _id
        k8scluster_id_2_uuic = {"helm-chart": {}, "juju-bundle": {}}

        def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (with caching) the connector-level cluster id for a k8scluster _id.
            # Raises LcmException if the cluster is missing or not initialized for cluster_type.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                raise LcmException("K8s cluster '{}' has not been initilized for '{}'".format(cluster_id, cluster_type))
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""  # last step reached; used to build the error message on failure
        try:
            # reset the deployed K8s list at database
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0  # position of this kdu at _admin.deployed.K8s
            updated_cluster_list = []  # helm clusters whose repos are already synchronized

            for vnfr_data in db_vnfrs.values():
                for kdur in get_iterable(vnfr_data, "kdur"):
                    desc_params = self._format_additional_params(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get('vnfd-id')
                    namespace = kdur.get("k8s-namespace")
                    # each kdur must declare exactly one model type
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor "
                                           "juju-bundle. Maybe an old NBI version is running".
                                           format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
                    # check if kdumodel is a file and exists
                    try:
                        storage = deep_get(db_vnfds.get(vnfd_id), ('_admin', 'storage'))
                        if storage and storage.get('pkg-dir'):  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype,
                                                             kdumodel)
                            if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id)
                    cluster_uuid = _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # synchronize helm repos only once per cluster
                    if k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list:
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8sclusterhelm.synchronize_repos(cluster_uuid=cluster_uuid))
                        if del_repo_list or added_repo_dict:
                            unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
                            updated = {'_admin.helm_charts_added.' +
                                       item: name for item, name in added_repo_dict.items()}
                            self.logger.debug(logging_text + "repos synchronized on k8s cluster '{}' to_delete: {}, "
                                                             "to_add: {}".format(k8s_cluster_id, del_repo_list,
                                                                                 added_repo_dict))
                            self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)
                            updated_cluster_list.append(cluster_uuid)

                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
                                                                              kdur["kdu-name"], k8s_cluster_id)

                    # kdu-instance is filled later by _write_db_callback when the install task ends
                    k8s_instace_info = {"kdu-instance": None,
                                        "k8scluster-uuid": cluster_uuid,
                                        "k8scluster-type": k8sclustertype,
                                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                                        "kdu-name": kdur["kdu-name"],
                                        "kdu-model": kdumodel,
                                        "namespace": namespace}
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instace_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)

                    # location where the k8s connector reports install progress
                    db_dict = {"collection": "nsrs",
                               "filter": {"_id": nsr_id},
                               "path": db_path}

                    task = asyncio.ensure_future(
                        self.k8scluster_map[k8sclustertype].install(cluster_uuid=cluster_uuid, kdu_model=kdumodel,
                                                                    atomic=True, params=desc_params,
                                                                    db_dict=db_dict, timeout=600,
                                                                    kdu_name=kdur["kdu-name"], namespace=namespace))

                    # persist kdu-instance (or the error text) at database when the task finishes
                    task.add_done_callback(partial(self._write_db_callback, item="nsrs", _id=nsr_id,
                                                   on_done=db_path + ".kdu-instance",
                                                   on_exc=db_path + ".detailed-status"))
                    self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
                    task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # always flush pending nsr updates, even when an exception is propagated
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoda6fb102019-11-23 00:36:52 +00002466
    def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
                     kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
                     base_folder, task_instantiation_info, stage):
        """
        Schedule the deployment of one execution environment (charm) as an asyncio task.

        Finds (or creates) the matching entry at db_nsr._admin.deployed.VCA, then launches
        instantiate_N2VC and registers the task at self.lcm_tasks and at
        task_instantiation_info. It does not wait for the task to finish.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database

        # fill db_nsr._admin.deployed.VCA.<index>
        vca_index = -1
        for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
            if not vca_deployed:
                continue
            # an entry matches when all four identifying keys are equal
            if vca_deployed.get("member-vnf-index") == member_vnf_index and \
                    vca_deployed.get("vdu_id") == vdu_id and \
                    vca_deployed.get("kdu_name") == kdu_name and \
                    vca_deployed.get("vdu_count_index", 0) == vdu_index:
                break
        else:
            # not found, create one.
            vca_deployed = {
                "member-vnf-index": member_vnf_index,
                "vdu_id": vdu_id,
                "kdu_name": kdu_name,
                "vdu_count_index": vdu_index,
                "operational-status": "init",  # TODO revise
                "detailed-status": "",  # TODO revise
                "step": "initial-deploy",  # TODO revise
                "vnfd_id": vnfd_id,
                "vdu_name": vdu_name,
            }
            # new entry goes after the last enumerated one
            vca_index += 1

            # create VCA and configurationStatus in db
            db_dict = {
                "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                "configurationStatus.{}".format(vca_index): dict()
            }
            self.update_db_2("nsrs", nsr_id, db_dict)

            # keep the in-memory db_nsr consistent with what was written to database
            db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

        # Launch task
        task_n2vc = asyncio.ensure_future(
            self.instantiate_N2VC(
                logging_text=logging_text,
                vca_index=vca_index,
                nsi_id=nsi_id,
                db_nsr=db_nsr,
                db_vnfr=db_vnfr,
                vdu_id=vdu_id,
                kdu_name=kdu_name,
                vdu_index=vdu_index,
                deploy_params=deploy_params,
                config_descriptor=descriptor_config,
                base_folder=base_folder,
                nslcmop_id=nslcmop_id,
                stage=stage
            )
        )
        self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
        task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
            member_vnf_index or "", vdu_id or "")
tiernobaa51102018-12-14 13:16:18 +00002529
kuuse0ca67472019-05-13 15:59:27 +02002530 # Check if this VNFD has a configured terminate action
2531 def _has_terminate_config_primitive(self, vnfd):
2532 vnf_config = vnfd.get("vnf-configuration")
2533 if vnf_config and vnf_config.get("terminate-config-primitive"):
2534 return True
2535 else:
2536 return False
2537
tiernoc9556972019-07-05 15:25:25 +00002538 @staticmethod
2539 def _get_terminate_config_primitive_seq_list(vnfd):
2540 """ Get a numerically sorted list of the sequences for this VNFD's terminate action """
kuuse0ca67472019-05-13 15:59:27 +02002541 # No need to check for existing primitive twice, already done before
2542 vnf_config = vnfd.get("vnf-configuration")
2543 seq_list = vnf_config.get("terminate-config-primitive")
2544 # Get all 'seq' tags in seq_list, order sequences numerically, ascending.
2545 seq_list_sorted = sorted(seq_list, key=lambda x: int(x['seq']))
2546 return seq_list_sorted
2547
2548 @staticmethod
2549 def _create_nslcmop(nsr_id, operation, params):
2550 """
2551 Creates a ns-lcm-opp content to be stored at database.
2552 :param nsr_id: internal id of the instance
2553 :param operation: instantiate, terminate, scale, action, ...
2554 :param params: user parameters for the operation
2555 :return: dictionary following SOL005 format
2556 """
2557 # Raise exception if invalid arguments
2558 if not (nsr_id and operation and params):
2559 raise LcmException(
2560 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
2561 now = time()
2562 _id = str(uuid4())
2563 nslcmop = {
2564 "id": _id,
2565 "_id": _id,
2566 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
2567 "operationState": "PROCESSING",
2568 "statusEnteredTime": now,
2569 "nsInstanceId": nsr_id,
2570 "lcmOperationType": operation,
2571 "startTime": now,
2572 "isAutomaticInvocation": False,
2573 "operationParams": params,
2574 "isCancelPending": False,
2575 "links": {
2576 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
2577 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
2578 }
2579 }
2580 return nslcmop
2581
calvinosanch9f9c6f22019-11-04 13:37:39 +01002582 def _format_additional_params(self, params):
tierno626e0152019-11-29 14:16:16 +00002583 params = params or {}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002584 for key, value in params.items():
2585 if str(value).startswith("!!yaml "):
2586 params[key] = yaml.safe_load(value[7:])
calvinosanch9f9c6f22019-11-04 13:37:39 +01002587 return params
2588
kuuse8b998e42019-07-30 15:22:16 +02002589 def _get_terminate_primitive_params(self, seq, vnf_index):
2590 primitive = seq.get('name')
2591 primitive_params = {}
2592 params = {
2593 "member_vnf_index": vnf_index,
2594 "primitive": primitive,
2595 "primitive_params": primitive_params,
2596 }
2597 desc_params = {}
2598 return self._map_primitive_params(seq, params, desc_params)
2599
kuuseac3a8882019-10-03 10:48:06 +02002600 # sub-operations
2601
tierno51183952020-04-03 15:48:18 +00002602 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
2603 op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index]
2604 if op.get('operationState') == 'COMPLETED':
kuuseac3a8882019-10-03 10:48:06 +02002605 # b. Skip sub-operation
2606 # _ns_execute_primitive() or RO.create_action() will NOT be executed
2607 return self.SUBOPERATION_STATUS_SKIP
2608 else:
tierno7c4e24c2020-05-13 08:41:35 +00002609 # c. retry executing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02002610 # The sub-operation exists, and operationState != 'COMPLETED'
tierno7c4e24c2020-05-13 08:41:35 +00002611 # Update operationState = 'PROCESSING' to indicate a retry.
kuuseac3a8882019-10-03 10:48:06 +02002612 operationState = 'PROCESSING'
2613 detailed_status = 'In progress'
2614 self._update_suboperation_status(
2615 db_nslcmop, op_index, operationState, detailed_status)
2616 # Return the sub-operation index
2617 # _ns_execute_primitive() or RO.create_action() will be called from scale()
2618 # with arguments extracted from the sub-operation
2619 return op_index
2620
2621 # Find a sub-operation where all keys in a matching dictionary must match
2622 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
2623 def _find_suboperation(self, db_nslcmop, match):
tierno7c4e24c2020-05-13 08:41:35 +00002624 if db_nslcmop and match:
kuuseac3a8882019-10-03 10:48:06 +02002625 op_list = db_nslcmop.get('_admin', {}).get('operations', [])
2626 for i, op in enumerate(op_list):
2627 if all(op.get(k) == match[k] for k in match):
2628 return i
2629 return self.SUBOPERATION_STATUS_NOT_FOUND
2630
2631 # Update status for a sub-operation given its index
2632 def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
2633 # Update DB for HA tasks
2634 q_filter = {'_id': db_nslcmop['_id']}
2635 update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
2636 '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
2637 self.db.set_one("nslcmops",
2638 q_filter=q_filter,
2639 update_dict=update_dict,
2640 fail_on_empty=False)
2641
2642 # Add sub-operation, return the index of the added sub-operation
2643 # Optionally, set operationState, detailed-status, and operationType
2644 # Status and type are currently set for 'scale' sub-operations:
2645 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
2646 # 'detailed-status' : status message
2647 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
2648 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
quilesj7e13aeb2019-10-08 13:34:55 +02002649 def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive,
2650 mapped_primitive_params, operationState=None, detailed_status=None, operationType=None,
kuuseac3a8882019-10-03 10:48:06 +02002651 RO_nsr_id=None, RO_scaling_info=None):
tiernoe876f672020-02-13 14:34:48 +00002652 if not db_nslcmop:
kuuseac3a8882019-10-03 10:48:06 +02002653 return self.SUBOPERATION_STATUS_NOT_FOUND
2654 # Get the "_admin.operations" list, if it exists
2655 db_nslcmop_admin = db_nslcmop.get('_admin', {})
2656 op_list = db_nslcmop_admin.get('operations')
2657 # Create or append to the "_admin.operations" list
kuuse8b998e42019-07-30 15:22:16 +02002658 new_op = {'member_vnf_index': vnf_index,
2659 'vdu_id': vdu_id,
2660 'vdu_count_index': vdu_count_index,
2661 'primitive': primitive,
2662 'primitive_params': mapped_primitive_params}
kuuseac3a8882019-10-03 10:48:06 +02002663 if operationState:
2664 new_op['operationState'] = operationState
2665 if detailed_status:
2666 new_op['detailed-status'] = detailed_status
2667 if operationType:
2668 new_op['lcmOperationType'] = operationType
2669 if RO_nsr_id:
2670 new_op['RO_nsr_id'] = RO_nsr_id
2671 if RO_scaling_info:
2672 new_op['RO_scaling_info'] = RO_scaling_info
2673 if not op_list:
2674 # No existing operations, create key 'operations' with current operation as first list element
2675 db_nslcmop_admin.update({'operations': [new_op]})
2676 op_list = db_nslcmop_admin.get('operations')
2677 else:
2678 # Existing operations, append operation to list
2679 op_list.append(new_op)
kuuse8b998e42019-07-30 15:22:16 +02002680
kuuseac3a8882019-10-03 10:48:06 +02002681 db_nslcmop_update = {'_admin.operations': op_list}
2682 self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
2683 op_index = len(op_list) - 1
2684 return op_index
2685
2686 # Helper methods for scale() sub-operations
2687
2688 # pre-scale/post-scale:
2689 # Check for 3 different cases:
2690 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
2691 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
tierno7c4e24c2020-05-13 08:41:35 +00002692 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
quilesj7e13aeb2019-10-08 13:34:55 +02002693 def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params,
2694 operationType, RO_nsr_id=None, RO_scaling_info=None):
kuuseac3a8882019-10-03 10:48:06 +02002695 # Find this sub-operation
tierno7c4e24c2020-05-13 08:41:35 +00002696 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002697 operationType = 'SCALE-RO'
2698 match = {
2699 'member_vnf_index': vnf_index,
2700 'RO_nsr_id': RO_nsr_id,
2701 'RO_scaling_info': RO_scaling_info,
2702 }
2703 else:
2704 match = {
2705 'member_vnf_index': vnf_index,
2706 'primitive': vnf_config_primitive,
2707 'primitive_params': primitive_params,
2708 'lcmOperationType': operationType
2709 }
2710 op_index = self._find_suboperation(db_nslcmop, match)
tierno51183952020-04-03 15:48:18 +00002711 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
kuuseac3a8882019-10-03 10:48:06 +02002712 # a. New sub-operation
2713 # The sub-operation does not exist, add it.
2714 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
2715 # The following parameters are set to None for all kind of scaling:
2716 vdu_id = None
2717 vdu_count_index = None
2718 vdu_name = None
tierno51183952020-04-03 15:48:18 +00002719 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002720 vnf_config_primitive = None
2721 primitive_params = None
2722 else:
2723 RO_nsr_id = None
2724 RO_scaling_info = None
2725 # Initial status for sub-operation
2726 operationState = 'PROCESSING'
2727 detailed_status = 'In progress'
2728 # Add sub-operation for pre/post-scaling (zero or more operations)
2729 self._add_suboperation(db_nslcmop,
2730 vnf_index,
2731 vdu_id,
2732 vdu_count_index,
2733 vdu_name,
2734 vnf_config_primitive,
2735 primitive_params,
2736 operationState,
2737 detailed_status,
2738 operationType,
2739 RO_nsr_id,
2740 RO_scaling_info)
2741 return self.SUBOPERATION_STATUS_NEW
2742 else:
2743 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
2744 # or op_index (operationState != 'COMPLETED')
tierno51183952020-04-03 15:48:18 +00002745 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
kuuseac3a8882019-10-03 10:48:06 +02002746
preethika.pdf7d8e02019-12-10 13:10:48 +00002747 # Function to return execution_environment id
2748
2749 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
tiernoe876f672020-02-13 14:34:48 +00002750 # TODO vdu_index_count
preethika.pdf7d8e02019-12-10 13:10:48 +00002751 for vca in vca_deployed_list:
2752 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
2753 return vca["ee_id"]
2754
tiernoe876f672020-02-13 14:34:48 +00002755 async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor, vca_index, destroy_ee=True):
2756 """
2757 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
2758 :param logging_text:
2759 :param db_nslcmop:
2760 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
2761 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
2762 :param vca_index: index in the database _admin.deployed.VCA
2763 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
2764 :return: None or exception
2765 """
2766 # execute terminate_primitives
2767 terminate_primitives = config_descriptor.get("terminate-config-primitive")
2768 vdu_id = vca_deployed.get("vdu_id")
2769 vdu_count_index = vca_deployed.get("vdu_count_index")
2770 vdu_name = vca_deployed.get("vdu_name")
2771 vnf_index = vca_deployed.get("member-vnf-index")
2772 if terminate_primitives and vca_deployed.get("needed_terminate"):
2773 # Get all 'seq' tags in seq_list, order sequences numerically, ascending.
2774 terminate_primitives = sorted(terminate_primitives, key=lambda x: int(x['seq']))
2775 for seq in terminate_primitives:
kuuse8b998e42019-07-30 15:22:16 +02002776 # For each sequence in list, get primitive and call _ns_execute_primitive()
kuuse0ca67472019-05-13 15:59:27 +02002777 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
2778 vnf_index, seq.get("name"))
2779 self.logger.debug(logging_text + step)
kuuse8b998e42019-07-30 15:22:16 +02002780 # Create the primitive for each sequence, i.e. "primitive": "touch"
kuuse0ca67472019-05-13 15:59:27 +02002781 primitive = seq.get('name')
kuuse8b998e42019-07-30 15:22:16 +02002782 mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)
2783 # The following 3 parameters are currently set to None for 'terminate':
2784 # vdu_id, vdu_count_index, vdu_name
tiernoe876f672020-02-13 14:34:48 +00002785
kuuseac3a8882019-10-03 10:48:06 +02002786 # Add sub-operation
kuuse8b998e42019-07-30 15:22:16 +02002787 self._add_suboperation(db_nslcmop,
kuuse8b998e42019-07-30 15:22:16 +02002788 vnf_index,
2789 vdu_id,
2790 vdu_count_index,
2791 vdu_name,
2792 primitive,
2793 mapped_primitive_params)
kuuseac3a8882019-10-03 10:48:06 +02002794 # Sub-operations: Call _ns_execute_primitive() instead of action()
quilesj7e13aeb2019-10-08 13:34:55 +02002795 try:
tiernoe876f672020-02-13 14:34:48 +00002796 result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
2797 mapped_primitive_params)
2798 except LcmException:
2799 # this happens when VCA is not deployed. In this case it is not needed to terminate
2800 continue
2801 result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
2802 if result not in result_ok:
2803 raise LcmException("terminate_primitive {} for vnf_member_index={} fails with "
2804 "error {}".format(seq.get("name"), vnf_index, result_detail))
2805 # set that this VCA do not need terminated
2806 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index)
2807 self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False})
2808
2809 if destroy_ee:
2810 await self.n2vc.delete_execution_environment(vca_deployed["ee_id"])
kuuse0ca67472019-05-13 15:59:27 +02002811
tierno51183952020-04-03 15:48:18 +00002812 async def _delete_all_N2VC(self, db_nsr: dict):
2813 self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
2814 namespace = "." + db_nsr["_id"]
tiernof59ad6c2020-04-08 12:50:52 +00002815 try:
2816 await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
2817 except N2VCNotFound: # already deleted. Skip
2818 pass
tierno51183952020-04-03 15:48:18 +00002819 self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
quilesj3655ae02019-12-12 16:08:35 +00002820
tiernoe876f672020-02-13 14:34:48 +00002821 async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
2822 """
2823 Terminates a deployment from RO
2824 :param logging_text:
2825 :param nsr_deployed: db_nsr._admin.deployed
2826 :param nsr_id:
2827 :param nslcmop_id:
2828 :param stage: list of string with the content to write on db_nslcmop.detailed-status.
2829 this method will update only the index 2, but it will write on database the concatenated content of the list
2830 :return:
2831 """
2832 db_nsr_update = {}
2833 failed_detail = []
2834 ro_nsr_id = ro_delete_action = None
2835 if nsr_deployed and nsr_deployed.get("RO"):
2836 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
2837 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
2838 try:
2839 if ro_nsr_id:
2840 stage[2] = "Deleting ns from VIM."
2841 db_nsr_update["detailed-status"] = " ".join(stage)
2842 self._write_op_status(nslcmop_id, stage)
2843 self.logger.debug(logging_text + stage[2])
2844 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2845 self._write_op_status(nslcmop_id, stage)
2846 desc = await self.RO.delete("ns", ro_nsr_id)
2847 ro_delete_action = desc["action_id"]
2848 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action
2849 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
2850 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2851 if ro_delete_action:
2852 # wait until NS is deleted from VIM
2853 stage[2] = "Waiting ns deleted from VIM."
2854 detailed_status_old = None
2855 self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id,
2856 ro_delete_action))
2857 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2858 self._write_op_status(nslcmop_id, stage)
kuused124bfe2019-06-18 12:09:24 +02002859
tiernoe876f672020-02-13 14:34:48 +00002860 delete_timeout = 20 * 60 # 20 minutes
2861 while delete_timeout > 0:
2862 desc = await self.RO.show(
2863 "ns",
2864 item_id_name=ro_nsr_id,
2865 extra_item="action",
2866 extra_item_id=ro_delete_action)
2867
2868 # deploymentStatus
2869 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
2870
2871 ns_status, ns_status_info = self.RO.check_action_status(desc)
2872 if ns_status == "ERROR":
2873 raise ROclient.ROClientException(ns_status_info)
2874 elif ns_status == "BUILD":
2875 stage[2] = "Deleting from VIM {}".format(ns_status_info)
2876 elif ns_status == "ACTIVE":
2877 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
2878 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2879 break
2880 else:
2881 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
2882 if stage[2] != detailed_status_old:
2883 detailed_status_old = stage[2]
2884 db_nsr_update["detailed-status"] = " ".join(stage)
2885 self._write_op_status(nslcmop_id, stage)
2886 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2887 await asyncio.sleep(5, loop=self.loop)
2888 delete_timeout -= 5
2889 else: # delete_timeout <= 0:
2890 raise ROclient.ROClientException("Timeout waiting ns deleted from VIM")
2891
2892 except Exception as e:
2893 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2894 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
2895 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
2896 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2897 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
2898 self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id))
2899 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
tiernoa2143262020-03-27 16:20:40 +00002900 failed_detail.append("delete conflict: {}".format(e))
2901 self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e))
tiernoe876f672020-02-13 14:34:48 +00002902 else:
tiernoa2143262020-03-27 16:20:40 +00002903 failed_detail.append("delete error: {}".format(e))
2904 self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e))
tiernoe876f672020-02-13 14:34:48 +00002905
2906 # Delete nsd
2907 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
2908 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
2909 try:
2910 stage[2] = "Deleting nsd from RO."
2911 db_nsr_update["detailed-status"] = " ".join(stage)
2912 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2913 self._write_op_status(nslcmop_id, stage)
2914 await self.RO.delete("nsd", ro_nsd_id)
2915 self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id))
2916 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
2917 except Exception as e:
2918 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
2919 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
2920 self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id))
2921 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
2922 failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e))
2923 self.logger.debug(logging_text + failed_detail[-1])
2924 else:
2925 failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e))
2926 self.logger.error(logging_text + failed_detail[-1])
2927
2928 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
2929 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
2930 if not vnf_deployed or not vnf_deployed["id"]:
2931 continue
2932 try:
2933 ro_vnfd_id = vnf_deployed["id"]
2934 stage[2] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
2935 vnf_deployed["member-vnf-index"], ro_vnfd_id)
2936 db_nsr_update["detailed-status"] = " ".join(stage)
2937 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2938 self._write_op_status(nslcmop_id, stage)
2939 await self.RO.delete("vnfd", ro_vnfd_id)
2940 self.logger.debug(logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id))
2941 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
2942 except Exception as e:
2943 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
2944 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
2945 self.logger.debug(logging_text + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id))
2946 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
2947 failed_detail.append("ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e))
2948 self.logger.debug(logging_text + failed_detail[-1])
2949 else:
2950 failed_detail.append("ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e))
2951 self.logger.error(logging_text + failed_detail[-1])
2952
tiernoa2143262020-03-27 16:20:40 +00002953 if failed_detail:
2954 stage[2] = "Error deleting from VIM"
2955 else:
2956 stage[2] = "Deleted from VIM"
tiernoe876f672020-02-13 14:34:48 +00002957 db_nsr_update["detailed-status"] = " ".join(stage)
2958 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2959 self._write_op_status(nslcmop_id, stage)
2960
2961 if failed_detail:
tiernoa2143262020-03-27 16:20:40 +00002962 raise LcmException("; ".join(failed_detail))
tiernoe876f672020-02-13 14:34:48 +00002963
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate a NS instance: execute terminate primitives, delete execution environments
        and KDUs, and remove the deployment from RO/VIM. Progress and the final state are
        persisted at the "nsrs" and "nslcmops" database collections.
        :param nsr_id: content of db_nsr["_id"]
        :param nslcmop_id: content of db_nslcmop["_id"] driving this operation
        :return: None
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []   # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps launched asyncio task -> human description, for _wait_for_tasks
        db_nsr_update = {}
        stage = ["Stage 1/3: Preparing task.", "Waiting for previous operations to terminate.", ""]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            # the operation may override the default terminate timeout
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update
            )
            self._write_op_status(
                op_id=nslcmop_id,
                queuePosition=0,
                stage=stage
            )
            # deepcopy: this method and its sub-tasks must not mutate the db record copy
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was ever deployed; the finally block still writes the final state
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs; cache each VNFD by id and also index it by member-vnf-index
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[vnfr["member-vnf-index-ref"]] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            if not operation_params.get("skip_terminate_primitives"):
                stage[0] = "Stage 2/3 execute terminating primitives."
                stage[1] = "Looking execution environment that needs terminate."
                self.logger.debug(logging_text + stage[1])
                for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                    # select the config descriptor owning this VCA (ns / vdu / kdu / vnf level)
                    config_descriptor = None
                    if not vca or not vca.get("ee_id") or not vca.get("needed_terminate"):
                        continue
                    if not vca.get("member-vnf-index"):
                        # ns
                        config_descriptor = db_nsr.get("ns-configuration")
                    elif vca.get("vdu_id"):
                        db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                        vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None)
                        if vdud:
                            config_descriptor = vdud.get("vdu-configuration")
                    elif vca.get("kdu_name"):
                        db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                        kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None)
                        if kdud:
                            config_descriptor = kdud.get("kdu-configuration")
                    else:
                        config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration")
                    # destroy_ee=False: the EE itself is removed later in _delete_all_N2VC
                    task = asyncio.ensure_future(self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor,
                                                                   vca_index, False))
                    tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(logging_text + 'Waiting for terminate primitive pending tasks...')
                error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
                                                        min(self.timeout_charm_delete, timeout_ns_terminate),
                                                        stage, nslcmop_id)
                if error_list:
                    return  # raise LcmException("; ".join(error_list))
                tasks_dict_info.clear()

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
                                                                        timeout=self.timeout_charm_delete))
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                # dispatch to the proper connector (helm/juju) by k8scluster-type
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance))
                else:
                    self.logger.error(logging_text + "Unknown k8s deployment type {}".
                                      format(kdu.get("k8scluster-type")))
                    continue
                tasks_dict_info[task_delete_kdu_instance] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected error: keep the full traceback as the error detail
            exc = traceback.format_exc()
            self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_terminate,
                                                             stage, nslcmop_id)
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = 'Stage: {}. Detail: {}'.format(stage[0], error_detail)
                error_description_nsr = 'Operation: TERMINATING.{}, Stage {}.'.format(nslcmop_id, stage[0])

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify through kafka so NBI can trigger autoremove of the NS record
                    await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
                                                                 "operationState": nslcmop_operation_state,
                                                                 "autoremove": autoremove},
                                            loop=self.loop)
                except Exception as e:
                    self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
3184
    async def _wait_for_tasks(self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None):
        """
        Wait for a set of asyncio tasks to finish, updating progress and errors at the database.

        :param logging_text: prefix used on every log entry
        :param created_tasks_info: dict with the asyncio tasks as keys and a text describing each task as value
        :param timeout: global timeout in seconds for the whole set of tasks
        :param stage: list of stage description strings; stage[1] is overwritten in place with the
            "<done>/<total>" progress (plus the accumulated errors, if any) and persisted with _write_op_status
        :param nslcmop_id: nslcmops database _id where the operation progress is written
        :param nsr_id: if provided, errors are also written at this nsrs database entry
            (errorDescription / errorDetail fields)
        :return: list with one "<task description>: <error>" text per failed task; empty if all succeeded
        """
        time_start = time()
        error_detail_list = []  # "<task description>: <error>" entries, also returned to the caller
        error_list = []  # short task descriptions, used to build the nsr errorDescription
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining share of the global timeout for this wait round
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout,
                                                     return_when=asyncio.FIRST_COMPLETED)
            num_done += len(done)
            if not done:  # Timeout
                # flag every still-pending task as timed out and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    if isinstance(exc, (str, DbException, N2VCException, ROclient.ROClientException, LcmException,
                                        K8sException)):
                        # known/controlled error types: log the short message only
                        self.logger.error(logging_text + new_error)
                    else:
                        # unexpected exception: log with the full traceback
                        exc_traceback = "".join(traceback.format_exception(None, exc, exc.__traceback__))
                        self.logger.error(logging_text + created_tasks_info[task] + exc_traceback)
                else:
                    self.logger.debug(logging_text + created_tasks_info[task] + ": Done")
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2("nsrs", nsr_id, {"errorDescription": "Error at: " + ", ".join(error_list),
                                                      "errorDetail": ". ".join(error_detail_list)})
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
tiernoe876f672020-02-13 14:34:48 +00003233
tiernoda964822019-01-14 15:53:47 +00003234 @staticmethod
3235 def _map_primitive_params(primitive_desc, params, instantiation_params):
3236 """
3237 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
3238 The default-value is used. If it is between < > it look for a value at instantiation_params
3239 :param primitive_desc: portion of VNFD/NSD that describes primitive
3240 :param params: Params provided by user
3241 :param instantiation_params: Instantiation params provided by user
3242 :return: a dictionary with the calculated params
3243 """
3244 calculated_params = {}
3245 for parameter in primitive_desc.get("parameter", ()):
3246 param_name = parameter["name"]
3247 if param_name in params:
3248 calculated_params[param_name] = params[param_name]
tierno98ad6ea2019-05-30 17:16:28 +00003249 elif "default-value" in parameter or "value" in parameter:
3250 if "value" in parameter:
3251 calculated_params[param_name] = parameter["value"]
3252 else:
3253 calculated_params[param_name] = parameter["default-value"]
3254 if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \
3255 and calculated_params[param_name].endswith(">"):
3256 if calculated_params[param_name][1:-1] in instantiation_params:
3257 calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]]
tiernoda964822019-01-14 15:53:47 +00003258 else:
3259 raise LcmException("Parameter {} needed to execute primitive {} not provided".
tiernod8323042019-08-09 11:32:23 +00003260 format(calculated_params[param_name], primitive_desc["name"]))
tiernoda964822019-01-14 15:53:47 +00003261 else:
3262 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3263 format(param_name, primitive_desc["name"]))
tierno59d22d22018-09-25 18:10:19 +02003264
tiernoda964822019-01-14 15:53:47 +00003265 if isinstance(calculated_params[param_name], (dict, list, tuple)):
3266 calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name], default_flow_style=True,
3267 width=256)
3268 elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "):
3269 calculated_params[param_name] = calculated_params[param_name][7:]
tiernoc3f2a822019-11-05 13:45:04 +00003270
3271 # add always ns_config_info if primitive name is config
3272 if primitive_desc["name"] == "config":
3273 if "ns_config_info" in instantiation_params:
3274 calculated_params["ns_config_info"] = instantiation_params["ns_config_info"]
tiernoda964822019-01-14 15:53:47 +00003275 return calculated_params
3276
tierno067e04a2020-03-31 12:53:13 +00003277 def _look_for_deployed_vca(self, deployed_vca, member_vnf_index, vdu_id, vdu_count_index, kdu_name=None):
tiernoe876f672020-02-13 14:34:48 +00003278 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
3279 for vca in deployed_vca:
3280 if not vca:
3281 continue
3282 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
3283 continue
tiernoe876f672020-02-13 14:34:48 +00003284 if vdu_count_index is not None and vdu_count_index != vca["vdu_count_index"]:
3285 continue
3286 if kdu_name and kdu_name != vca["kdu_name"]:
3287 continue
3288 break
3289 else:
3290 # vca_deployed not found
tierno067e04a2020-03-31 12:53:13 +00003291 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} is not "
3292 "deployed".format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
quilesj7e13aeb2019-10-08 13:34:55 +02003293
tiernoe876f672020-02-13 14:34:48 +00003294 # get ee_id
3295 ee_id = vca.get("ee_id")
3296 if not ee_id:
tierno067e04a2020-03-31 12:53:13 +00003297 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
tiernoe876f672020-02-13 14:34:48 +00003298 "execution environment"
tierno067e04a2020-03-31 12:53:13 +00003299 .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
tiernoe876f672020-02-13 14:34:48 +00003300 return ee_id
3301
3302 async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0,
tierno067e04a2020-03-31 12:53:13 +00003303 retries_interval=30, timeout=None) -> (str, str):
tiernoda964822019-01-14 15:53:47 +00003304 try:
tierno98ad6ea2019-05-30 17:16:28 +00003305 if primitive == "config":
3306 primitive_params = {"params": primitive_params}
tierno2fc7ce52019-06-11 22:50:01 +00003307
quilesj7e13aeb2019-10-08 13:34:55 +02003308 while retries >= 0:
3309 try:
tierno067e04a2020-03-31 12:53:13 +00003310 output = await asyncio.wait_for(
3311 self.n2vc.exec_primitive(
3312 ee_id=ee_id,
3313 primitive_name=primitive,
3314 params_dict=primitive_params,
3315 progress_timeout=self.timeout_progress_primitive,
3316 total_timeout=self.timeout_primitive),
3317 timeout=timeout or self.timeout_primitive)
quilesj7e13aeb2019-10-08 13:34:55 +02003318 # execution was OK
3319 break
tierno067e04a2020-03-31 12:53:13 +00003320 except asyncio.CancelledError:
3321 raise
3322 except Exception as e: # asyncio.TimeoutError
3323 if isinstance(e, asyncio.TimeoutError):
3324 e = "Timeout"
quilesj7e13aeb2019-10-08 13:34:55 +02003325 retries -= 1
3326 if retries >= 0:
tierno73d8bd02019-11-18 17:33:27 +00003327 self.logger.debug('Error executing action {} on {} -> {}'.format(primitive, ee_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +02003328 # wait and retry
3329 await asyncio.sleep(retries_interval, loop=self.loop)
tierno73d8bd02019-11-18 17:33:27 +00003330 else:
tierno067e04a2020-03-31 12:53:13 +00003331 return 'FAILED', str(e)
quilesj7e13aeb2019-10-08 13:34:55 +02003332
tiernoe876f672020-02-13 14:34:48 +00003333 return 'COMPLETED', output
quilesj7e13aeb2019-10-08 13:34:55 +02003334
tierno067e04a2020-03-31 12:53:13 +00003335 except (LcmException, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00003336 raise
quilesj7e13aeb2019-10-08 13:34:55 +02003337 except Exception as e:
tiernoe876f672020-02-13 14:34:48 +00003338 return 'FAIL', 'Error executing action {}: {}'.format(primitive, e)
tierno59d22d22018-09-25 18:10:19 +02003339
    async def action(self, nsr_id, nslcmop_id):
        """
        Run the "action" LCM operation: execute a primitive at ns, vnf, vdu or kdu level.

        The operation parameters (member_vnf_index, vdu_id, kdu_name, primitive name/params, ...)
        are read from the "nslcmops" database entry. The primitive descriptor is searched in the
        corresponding NSD/VNFD ([ns|vnf|vdu|kdu]-configuration:config-primitive) and executed
        either through the matching K8s connector (KDU) or through N2VC (charms). The result is
        written back to the "nslcmops"/"nsrs" database entries and notified on kafka topic "ns".
        :param nsr_id: nsrs database _id
        :param nslcmop_id: nslcmops database _id
        :return: tuple (nslcmop_operation_state, detailed_status); returns None early (doing
            nothing) when another HA instance owns this operation
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get("timeout_ns_action", self.timeout_primitive)

            # vnf-level (or deeper) actions need the vnfr/vnfd; ns-level actions need the nsd
            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = None
            if vdu_id:
                for vdu in get_iterable(db_vnfd, "vdu"):
                    if vdu_id == vdu["id"]:
                        for config_primitive in deep_get(vdu, ("vdu-configuration", "config-primitive"), ()):
                            if config_primitive["name"] == primitive:
                                config_primitive_desc = config_primitive
                                break
                        break
            elif kdu_name:
                for kdu in get_iterable(db_vnfd, "kdu"):
                    if kdu_name == kdu["name"]:
                        for config_primitive in deep_get(kdu, ("kdu-configuration", "config-primitive"), ()):
                            if config_primitive["name"] == primitive:
                                config_primitive_desc = config_primitive
                                break
                        break
            elif vnf_index:
                for config_primitive in deep_get(db_vnfd, ("vnf-configuration", "config-primitive"), ()):
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break
            else:
                for config_primitive in deep_get(db_nsd, ("ns-configuration", "config-primitive"), ()):
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            # kdu "upgrade"/"rollback"/"status" are built-in actions and need no descriptor entry
            if not config_primitive_desc and not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".
                                   format(primitive))

            # gather the additional params of the target level, used to resolve primitive parameters
            if vnf_index:
                if vdu_id:
                    vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
                    desc_params = self._format_additional_params(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
                    desc_params = self._format_additional_params(kdur.get("additionalParams"))
                else:
                    desc_params = self._format_additional_params(db_vnfr.get("additionalParamsForVnf"))
            else:
                desc_params = self._format_additional_params(db_nsr.get("additionalParamsForNs"))

            if kdu_name:
                # a kdu without juju configuration is actioned directly through the k8s connector
                # NOTE(review): "kdu" here is the loop variable of the descriptor search above; if no
                # kdu matched kdu_name it may be stale or even unbound - TODO confirm
                kdu_action = True if not deep_get(kdu, ("kdu-configuration", "juju")) else False

            # TODO check if ns is in a proper status
            if kdu_name and (primitive in ("upgrade", "rollback", "status") or kdu_action):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                # find the deployed K8s record; "kdu" is rebound to that record from here on
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if kdu_name == kdu["kdu-name"] and kdu["member-vnf-index"] == vnf_index:
                        break
                else:
                    raise LcmException("KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index))

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(kdu.get("k8scluster-type"))
                    raise LcmException(msg)

                # db_dict tells the k8s connector where to write intermediate status updates
                db_dict = {"collection": "nsrs",
                           "filter": {"_id": nsr_id},
                           "path": "_admin.deployed.K8s.{}".format(index)}
                self.logger.debug(logging_text + "Exec k8s {} on {}.{}".format(primitive, vnf_index, kdu_name))
                step = "Executing kdu {}".format(primitive)
                if primitive == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            # drop the ":<version>" suffix from the stored model reference
                            kdu_model = parts[0]

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=True, kdu_model=kdu_model,
                            params=desc_params, db_dict=db_dict,
                            timeout=timeout_ns_action),
                        timeout=timeout_ns_action + 10)
                    self.logger.debug(logging_text + " Upgrade of kdu {} done".format(detailed_status))
                elif primitive == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict),
                        timeout=timeout_ns_action)
                elif primitive == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance")),
                        timeout=timeout_ns_action)
                else:
                    # descriptor-defined kdu primitive, executed through the k8s connector
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id)
                    params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params)

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive,
                            params=params, db_dict=db_dict,
                            timeout=timeout_ns_action),
                        timeout=timeout_ns_action)

                if detailed_status:
                    nslcmop_operation_state = 'COMPLETED'
                else:
                    detailed_status = ''
                    nslcmop_operation_state = 'FAILED'
            else:
                # charm-based primitive: find the execution environment and run it through N2VC
                nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
                    self._look_for_deployed_vca(nsr_deployed["VCA"],
                                                member_vnf_index=vnf_index,
                                                vdu_id=vdu_id,
                                                vdu_count_index=vdu_count_index),
                    primitive=primitive,
                    primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
                    timeout=timeout_ns_action)

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
            self.logger.debug(logging_text + " task Done with result {} {}".format(nslcmop_operation_state,
                                                                                   detailed_status))
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        finally:
            if exc:
                db_nslcmop_update["detailed-status"] = detailed_status = error_description_nslcmop = \
                    "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr["nsState"],   # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
                                                               "operationState": nslcmop_operation_state},
                                            loop=self.loop)
                except Exception as e:
                    self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            # this return supersedes the bare "return" at the end of the try block; all exceptions
            # were already converted into nslcmop_operation_state/exc above
            return nslcmop_operation_state, detailed_status
tierno59d22d22018-09-25 18:10:19 +02003578
3579 async def scale(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003580
3581 # Try to lock HA task here
3582 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3583 if not task_is_locked_by_me:
3584 return
3585
tierno59d22d22018-09-25 18:10:19 +02003586 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
3587 self.logger.debug(logging_text + "Enter")
3588 # get all needed from database
3589 db_nsr = None
3590 db_nslcmop = None
3591 db_nslcmop_update = {}
3592 nslcmop_operation_state = None
tiernoe876f672020-02-13 14:34:48 +00003593 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02003594 exc = None
tierno9ab95942018-10-10 16:44:22 +02003595 # in case of error, indicates what part of scale was failed to put nsr at error status
3596 scale_process = None
tiernod6de1992018-10-11 13:05:52 +02003597 old_operational_status = ""
3598 old_config_status = ""
tiernof578e552018-11-08 19:07:20 +01003599 vnfr_scaled = False
tierno59d22d22018-09-25 18:10:19 +02003600 try:
kuused124bfe2019-06-18 12:09:24 +02003601 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00003602 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02003603 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
tierno47e86b52018-10-10 14:05:55 +02003604
quilesj4cda56b2019-12-05 10:02:20 +00003605 self._write_ns_status(
3606 nsr_id=nsr_id,
3607 ns_state=None,
3608 current_operation="SCALING",
3609 current_operation_id=nslcmop_id
3610 )
3611
ikalyvas02d9e7b2019-05-27 18:16:01 +03003612 step = "Getting nslcmop from database"
ikalyvas02d9e7b2019-05-27 18:16:01 +03003613 self.logger.debug(step + " after having waited for previous tasks to be completed")
3614 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3615 step = "Getting nsr from database"
3616 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3617
3618 old_operational_status = db_nsr["operational-status"]
3619 old_config_status = db_nsr["config-status"]
tierno59d22d22018-09-25 18:10:19 +02003620 step = "Parsing scaling parameters"
tierno9babfda2019-06-07 12:36:50 +00003621 # self.logger.debug(step)
tierno59d22d22018-09-25 18:10:19 +02003622 db_nsr_update["operational-status"] = "scaling"
3623 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoe4f7e6c2018-11-27 14:55:30 +00003624 nsr_deployed = db_nsr["_admin"].get("deployed")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003625
3626 #######
3627 nsr_deployed = db_nsr["_admin"].get("deployed")
3628 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tiernoda6fb102019-11-23 00:36:52 +00003629 # vdu_id = db_nslcmop["operationParams"].get("vdu_id")
3630 # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
3631 # vdu_name = db_nslcmop["operationParams"].get("vdu_name")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003632 #######
3633
tiernoe4f7e6c2018-11-27 14:55:30 +00003634 RO_nsr_id = nsr_deployed["RO"]["nsr_id"]
tierno59d22d22018-09-25 18:10:19 +02003635 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"]
3636 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]
3637 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
3638 # scaling_policy = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"].get("scaling-policy")
3639
tierno82974b22018-11-27 21:55:36 +00003640 # for backward compatibility
3641 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3642 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3643 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3644 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3645
tierno59d22d22018-09-25 18:10:19 +02003646 step = "Getting vnfr from database"
3647 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3648 step = "Getting vnfd from database"
3649 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
ikalyvas02d9e7b2019-05-27 18:16:01 +03003650
tierno59d22d22018-09-25 18:10:19 +02003651 step = "Getting scaling-group-descriptor"
3652 for scaling_descriptor in db_vnfd["scaling-group-descriptor"]:
3653 if scaling_descriptor["name"] == scaling_group:
3654 break
3655 else:
3656 raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
3657 "at vnfd:scaling-group-descriptor".format(scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03003658
tierno59d22d22018-09-25 18:10:19 +02003659 # cooldown_time = 0
3660 # for scaling_policy_descriptor in scaling_descriptor.get("scaling-policy", ()):
3661 # cooldown_time = scaling_policy_descriptor.get("cooldown-time", 0)
3662 # if scaling_policy and scaling_policy == scaling_policy_descriptor.get("name"):
3663 # break
3664
3665 # TODO check if ns is in a proper status
tierno15b1cf12019-08-29 13:21:40 +00003666 step = "Sending scale order to VIM"
tierno59d22d22018-09-25 18:10:19 +02003667 nb_scale_op = 0
3668 if not db_nsr["_admin"].get("scaling-group"):
3669 self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]})
3670 admin_scale_index = 0
3671 else:
3672 for admin_scale_index, admin_scale_info in enumerate(db_nsr["_admin"]["scaling-group"]):
3673 if admin_scale_info["name"] == scaling_group:
3674 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
3675 break
tierno9ab95942018-10-10 16:44:22 +02003676 else: # not found, set index one plus last element and add new entry with the name
3677 admin_scale_index += 1
3678 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group
tierno59d22d22018-09-25 18:10:19 +02003679 RO_scaling_info = []
3680 vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
3681 if scaling_type == "SCALE_OUT":
3682 # count if max-instance-count is reached
kuuse818d70c2019-08-07 14:43:44 +02003683 max_instance_count = scaling_descriptor.get("max-instance-count", 10)
3684 # self.logger.debug("MAX_INSTANCE_COUNT is {}".format(max_instance_count))
3685 if nb_scale_op >= max_instance_count:
3686 raise LcmException("reached the limit of {} (max-instance-count) "
3687 "scaling-out operations for the "
3688 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
kuuse8b998e42019-07-30 15:22:16 +02003689
ikalyvas02d9e7b2019-05-27 18:16:01 +03003690 nb_scale_op += 1
tierno59d22d22018-09-25 18:10:19 +02003691 vdu_scaling_info["scaling_direction"] = "OUT"
3692 vdu_scaling_info["vdu-create"] = {}
3693 for vdu_scale_info in scaling_descriptor["vdu"]:
3694 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
3695 "type": "create", "count": vdu_scale_info.get("count", 1)})
3696 vdu_scaling_info["vdu-create"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
ikalyvas02d9e7b2019-05-27 18:16:01 +03003697
tierno59d22d22018-09-25 18:10:19 +02003698 elif scaling_type == "SCALE_IN":
3699 # count if min-instance-count is reached
tierno27246d82018-09-27 15:59:09 +02003700 min_instance_count = 0
tierno59d22d22018-09-25 18:10:19 +02003701 if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None:
3702 min_instance_count = int(scaling_descriptor["min-instance-count"])
tierno9babfda2019-06-07 12:36:50 +00003703 if nb_scale_op <= min_instance_count:
3704 raise LcmException("reached the limit of {} (min-instance-count) scaling-in operations for the "
3705 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
                # (interior of NsLcm.scale — SCALE_IN branch continues here)
                # Build the RO request and the bookkeeping dict of VDUs to remove.
                nb_scale_op -= 1
                vdu_scaling_info["scaling_direction"] = "IN"
                vdu_scaling_info["vdu-delete"] = {}
                for vdu_scale_info in scaling_descriptor["vdu"]:
                    RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
                                            "type": "delete", "count": vdu_scale_info.get("count", 1)})
                    vdu_scaling_info["vdu-delete"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)

            # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
            vdu_create = vdu_scaling_info.get("vdu-create")
            # Work on a copy: the per-vdu counters below are decremented destructively,
            # while the original counts are still needed later by scale_vnfr().
            vdu_delete = copy(vdu_scaling_info.get("vdu-delete"))
            if vdu_scaling_info["scaling_direction"] == "IN":
                # Walk vdur entries newest-first and record name/interfaces of the
                # instances that will be deleted, so the pre-scale primitive can see them.
                for vdur in reversed(db_vnfr["vdur"]):
                    if vdu_delete.get(vdur["vdu-id-ref"]):
                        vdu_delete[vdur["vdu-id-ref"]] -= 1
                        vdu_scaling_info["vdu"].append({
                            "name": vdur["name"],
                            "vdu_id": vdur["vdu-id-ref"],
                            "interface": []
                        })
                        for interface in vdur["interfaces"]:
                            vdu_scaling_info["vdu"][-1]["interface"].append({
                                "name": interface["name"],
                                "ip_address": interface["ip-address"],
                                "mac_address": interface.get("mac-address"),
                            })
            # Re-bind vdu_delete to the ORIGINAL (un-decremented) mapping and drop it
            # from vdu_scaling_info, which is later exposed to primitives as VDU_SCALE_INFO.
            vdu_delete = vdu_scaling_info.pop("vdu-delete")

            # PRE-SCALE BEGIN
            step = "Executing pre-scale vnf-config-primitive"
            if scaling_descriptor.get("scaling-config-action"):
                for scaling_config_action in scaling_descriptor["scaling-config-action"]:
                    # Only run actions whose trigger matches the current scaling direction.
                    if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \
                       or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"):
                        vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
                        step = db_nslcmop_update["detailed-status"] = \
                            "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)

                        # look for primitive
                        # for/else: raises only if NO config-primitive matched the reference.
                        for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
                            if config_primitive["name"] == vnf_config_primitive:
                                break
                        else:
                            raise LcmException(
                                "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
                                "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
                                "primitive".format(scaling_group, config_primitive))

                        # Primitive parameters: VDU_SCALE_INFO plus any VNF additional params.
                        vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
                        if db_vnfr.get("additionalParamsForVnf"):
                            vnfr_params.update(db_vnfr["additionalParamsForVnf"])

                        # scale_process marks which stage fails, for error classification in finally.
                        scale_process = "VCA"
                        db_nsr_update["config-status"] = "configuring pre-scaling"
                        primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)

                        # Pre-scale retry check: Check if this sub-operation has been executed before
                        op_index = self._check_or_add_scale_suboperation(
                            db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE')
                        if op_index == self.SUBOPERATION_STATUS_SKIP:
                            # Skip sub-operation
                            result = 'COMPLETED'
                            result_detail = 'Done'
                            self.logger.debug(logging_text +
                                              "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
                                                  vnf_config_primitive, result, result_detail))
                        else:
                            if op_index == self.SUBOPERATION_STATUS_NEW:
                                # New sub-operation: Get index of this sub-operation
                                op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
                                self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
                                                  format(vnf_config_primitive))
                            else:
                                # retry: Get registered params for this existing sub-operation
                                op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
                                vnf_index = op.get('member_vnf_index')
                                vnf_config_primitive = op.get('primitive')
                                primitive_params = op.get('primitive_params')
                                self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
                                                  format(vnf_config_primitive))
                            # Execute the primitive, either with new (first-time) or registered (retry) args
                            result, result_detail = await self._ns_execute_primitive(
                                self._look_for_deployed_vca(nsr_deployed["VCA"],
                                                            member_vnf_index=vnf_index,
                                                            vdu_id=None,
                                                            vdu_count_index=None),
                                vnf_config_primitive, primitive_params)
                            self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
                                vnf_config_primitive, result, result_detail))
                            # Update operationState = COMPLETED | FAILED
                            self._update_suboperation_status(
                                db_nslcmop, op_index, result, result_detail)

                        if result == "FAILED":
                            raise LcmException(result_detail)
            # Pre-scale finished without raising: restore config-status and clear the stage marker.
            db_nsr_update["config-status"] = old_config_status
            scale_process = None
            # PRE-SCALE END

            # SCALE RO - BEGIN
            # Should this block be skipped if 'RO_nsr_id' == None ?
            # if (RO_nsr_id and RO_scaling_info):
            if RO_scaling_info:
                scale_process = "RO"
                # Scale RO retry check: Check if this sub-operation has been executed before
                # NOTE(review): the PRE-SCALE and POST-SCALE calls to this helper pass
                # nslcmop_id as the 2nd positional argument; here vnf_index occupies that
                # slot and every later argument is shifted — looks like a missing
                # nslcmop_id. Confirm against _check_or_add_scale_suboperation's signature.
                op_index = self._check_or_add_scale_suboperation(
                    db_nslcmop, vnf_index, None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info)
                if op_index == self.SUBOPERATION_STATUS_SKIP:
                    # Skip sub-operation
                    result = 'COMPLETED'
                    result_detail = 'Done'
                    self.logger.debug(logging_text + "Skipped sub-operation RO, result {} {}".format(
                        result, result_detail))
                else:
                    if op_index == self.SUBOPERATION_STATUS_NEW:
                        # New sub-operation: Get index of this sub-operation
                        op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
                        self.logger.debug(logging_text + "New sub-operation RO")
                    else:
                        # retry: Get registered params for this existing sub-operation
                        op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
                        RO_nsr_id = op.get('RO_nsr_id')
                        RO_scaling_info = op.get('RO_scaling_info')
                        # NOTE(review): vnf_config_primitive is only bound if a pre-scale
                        # scaling-config-action matched above; otherwise this log line
                        # raises NameError on the RO-retry path — verify.
                        self.logger.debug(logging_text + "Sub-operation RO retry for primitive {}".format(
                            vnf_config_primitive))

                    # Ask RO to apply the vdu-scaling action and record bookkeeping in the NSR.
                    RO_desc = await self.RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info})
                    db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
                    db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
                    # wait until ready
                    RO_nslcmop_id = RO_desc["instance_action_id"]
                    db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id

                    RO_task_done = False
                    step = detailed_status = "Waiting RO_task_id={} to complete the scale action.".format(RO_nslcmop_id)
                    detailed_status_old = None
                    self.logger.debug(logging_text + step)

                    # Poll RO every 5 s until the action completes and mgmt IPs are reported,
                    # or the one-hour budget is exhausted.
                    deployment_timeout = 1 * 3600  # One hour
                    while deployment_timeout > 0:
                        # Phase 1: wait for the RO action itself to reach ACTIVE.
                        if not RO_task_done:
                            desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
                                                      extra_item_id=RO_nslcmop_id)

                            # deploymentStatus
                            self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                            ns_status, ns_status_info = self.RO.check_action_status(desc)
                            if ns_status == "ERROR":
                                raise ROclient.ROClientException(ns_status_info)
                            elif ns_status == "BUILD":
                                detailed_status = step + "; {}".format(ns_status_info)
                            elif ns_status == "ACTIVE":
                                RO_task_done = True
                                step = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id)
                                self.logger.debug(logging_text + step)
                            else:
                                # NOTE(review): assert is stripped under `python -O`; an explicit
                                # raise would be safer for this "unknown status" guard.
                                assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
                        else:
                            # Phase 2: action done; wait for mgmt IPs and update VNFRs.
                            # NOTE(review): ns_status/ns_status_info here are stale values from
                            # the last phase-1 iteration (always "ACTIVE" on entry); no fresh
                            # check_ns_status call is visible in this branch — confirm intended.
                            if ns_status == "ERROR":
                                raise ROclient.ROClientException(ns_status_info)
                            elif ns_status == "BUILD":
                                detailed_status = step + "; {}".format(ns_status_info)
                            elif ns_status == "ACTIVE":
                                step = detailed_status = \
                                    "Waiting for management IP address reported by the VIM. Updating VNFRs"
                                # Apply the vdur add/remove to the VNFR exactly once.
                                if not vnfr_scaled:
                                    self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete)
                                    vnfr_scaled = True
                                try:
                                    desc = await self.RO.show("ns", RO_nsr_id)

                                    # deploymentStatus
                                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                                    # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc)
                                    # ns_update_vnfr raises LcmExceptionNoMgmtIP until the VIM
                                    # reports all management IPs; keep polling in that case.
                                    self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc)
                                    break
                                except LcmExceptionNoMgmtIP:
                                    pass
                            else:
                                assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
                        # Push progress to the DB only when the status text actually changed.
                        # NOTE(review): marking the sub-operation 'COMPLETED' on every progress
                        # update, while still polling, looks premature — confirm intended.
                        if detailed_status != detailed_status_old:
                            self._update_suboperation_status(
                                db_nslcmop, op_index, 'COMPLETED', detailed_status)
                            detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
                            self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)

                        await asyncio.sleep(5, loop=self.loop)
                        deployment_timeout -= 5
                    if deployment_timeout <= 0:
                        # NOTE(review): this call passes an extra nslcmop_id argument compared
                        # with every other _update_suboperation_status call site
                        # (db_nslcmop, op_index, status, detail) — likely a TypeError at
                        # runtime on the timeout path; confirm against the helper's signature.
                        self._update_suboperation_status(
                            db_nslcmop, nslcmop_id, op_index, 'FAILED', "Timeout when waiting for ns to get ready")
                        raise ROclient.ROClientException("Timeout waiting ns to be ready")

                    # update VDU_SCALING_INFO with the obtained ip_addresses
                    if vdu_scaling_info["scaling_direction"] == "OUT":
                        # Mirror of the SCALE_IN collection above, now for the created VDUs.
                        for vdur in reversed(db_vnfr["vdur"]):
                            if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]):
                                vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1
                                vdu_scaling_info["vdu"].append({
                                    "name": vdur["name"],
                                    "vdu_id": vdur["vdu-id-ref"],
                                    "interface": []
                                })
                                for interface in vdur["interfaces"]:
                                    vdu_scaling_info["vdu"][-1]["interface"].append({
                                        "name": interface["name"],
                                        "ip_address": interface["ip-address"],
                                        "mac_address": interface.get("mac-address"),
                                    })
                    del vdu_scaling_info["vdu-create"]

                    self._update_suboperation_status(db_nslcmop, op_index, 'COMPLETED', 'Done')
            # SCALE RO - END

            scale_process = None
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # POST-SCALE BEGIN
            # execute primitive service POST-SCALING
            step = "Executing post-scale vnf-config-primitive"
            if scaling_descriptor.get("scaling-config-action"):
                for scaling_config_action in scaling_descriptor["scaling-config-action"]:
                    # Same trigger-matching pattern as PRE-SCALE, for post-scale triggers.
                    if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
                       or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
                        vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
                        step = db_nslcmop_update["detailed-status"] = \
                            "executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)

                        vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
                        if db_vnfr.get("additionalParamsForVnf"):
                            vnfr_params.update(db_vnfr["additionalParamsForVnf"])

                        # look for primitive
                        for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
                            if config_primitive["name"] == vnf_config_primitive:
                                break
                        else:
                            raise LcmException("Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:"
                                               "scaling-config-action[vnf-config-primitive-name-ref='{}'] does not "
                                               "match any vnf-configuration:config-primitive".format(scaling_group,
                                                                                                     config_primitive))
                        scale_process = "VCA"
                        db_nsr_update["config-status"] = "configuring post-scaling"
                        primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)

                        # Post-scale retry check: Check if this sub-operation has been executed before
                        op_index = self._check_or_add_scale_suboperation(
                            db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
                        if op_index == self.SUBOPERATION_STATUS_SKIP:
                            # Skip sub-operation
                            result = 'COMPLETED'
                            result_detail = 'Done'
                            self.logger.debug(logging_text +
                                              "vnf_config_primitive={} Skipped sub-operation, result {} {}".
                                              format(vnf_config_primitive, result, result_detail))
                        else:
                            if op_index == self.SUBOPERATION_STATUS_NEW:
                                # New sub-operation: Get index of this sub-operation
                                op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
                                self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
                                                  format(vnf_config_primitive))
                            else:
                                # retry: Get registered params for this existing sub-operation
                                op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
                                vnf_index = op.get('member_vnf_index')
                                vnf_config_primitive = op.get('primitive')
                                primitive_params = op.get('primitive_params')
                                self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
                                                  format(vnf_config_primitive))
                            # Execute the primitive, either with new (first-time) or registered (retry) args
                            result, result_detail = await self._ns_execute_primitive(
                                self._look_for_deployed_vca(nsr_deployed["VCA"],
                                                            member_vnf_index=vnf_index,
                                                            vdu_id=None,
                                                            vdu_count_index=None),
                                vnf_config_primitive, primitive_params)
                            self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
                                vnf_config_primitive, result, result_detail))
                            # Update operationState = COMPLETED | FAILED
                            self._update_suboperation_status(
                                db_nslcmop, op_index, result, result_detail)

                        if result == "FAILED":
                            raise LcmException(result_detail)
            db_nsr_update["config-status"] = old_config_status
            scale_process = None
            # POST-SCALE END

            # Success: restore prior NSR statuses (a previously failed NS goes back to running).
            db_nsr_update["detailed-status"] = ""  # "scaled {} {}".format(scaling_group, scaling_type)
            db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \
                else old_operational_status
            db_nsr_update["config-status"] = old_config_status
            return
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        finally:
            # Always drop the NS back to IDLE, then persist the final operation and NS states.
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None
            )
            if exc:
                db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update["detailed-status"] = ""
                    # scale_process tells which stage was in flight when the failure occurred.
                    if scale_process:
                        if "VCA" in scale_process:
                            db_nsr_update["config-status"] = "failed"
                        if "RO" in scale_process:
                            db_nsr_update["operational-status"] = "failed"
                        db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step,
                                                                                                     exc)
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update
                )

            # Notify the "scaled" event on the kafka bus; a notification failure is
            # logged but never allowed to mask the operation's outcome.
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite("ns", "scaled", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
                                                             "operationState": nslcmop_operation_state},
                                            loop=self.loop)
                    # if cooldown_time:
                    #     await asyncio.sleep(cooldown_time, loop=self.loop)
                    # await self.msg.aiowrite("ns","scaled-cooldown-time", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id})
                except Exception as e:
                    self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")