Code Coverage

Cobertura Coverage Report > RO-VIM-openstack.osm_rovim_openstack >

vimconn_openstack.py

Trend

File Coverage summary

Name | Classes | Lines | Conditionals
vimconn_openstack.py
100%
1/1
36%
685/1911
100%
0/0

Coverage Breakdown by Class

Name | Lines | Conditionals
vimconn_openstack.py
36%
685/1911
N/A

Source

RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 #         http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 1 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 1 import copy
34 1 from http.client import HTTPException
35 1 import json
36 1 import logging
37 1 from pprint import pformat
38 1 import random
39 1 import re
40 1 import time
41 1 from typing import Dict, List, Optional, Tuple
42
43 1 from cinderclient import client as cClient
44 1 import cinderclient.exceptions as cExceptions
45 1 from glanceclient import client as glClient
46 1 import glanceclient.exc as gl1Exceptions
47 1 from keystoneauth1 import session
48 1 from keystoneauth1.identity import v2, v3
49 1 import keystoneclient.exceptions as ksExceptions
50 1 import keystoneclient.v2_0.client as ksClient_v2
51 1 import keystoneclient.v3.client as ksClient_v3
52 1 import netaddr
53 1 from neutronclient.common import exceptions as neExceptions
54 1 from neutronclient.neutron import client as neClient
55 1 from novaclient import client as nClient, exceptions as nvExceptions
56 1 from osm_ro_plugin import vimconn
57 1 from requests.exceptions import ConnectionError
58 1 import yaml
59
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Maps the Nova server status vocabulary to the openmano/OSM status names.
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Maps the Neutron network status vocabulary to the openmano/OSM status names.
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# Only the legacy flow classifier SFC classification type is supported.
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
# timeout (seconds) waiting for a nova server to be created/deleted
server_timeout = 1800
87
88
def catch_any_exception(func):
    """Decorator that converts any exception raised by *func* into the
    corresponding vimconn exception via vimconnector._format_exception
    (which always raises, so the wrapper never returns on the error path).
    """

    def format_exception(*args, **kwargs):
        try:
            # Fix: keyword arguments must be forwarded with **kwargs; the
            # previous "*kwargs" unpacked only the dict keys positionally.
            return func(*args, **kwargs)
        except Exception as e:
            vimconnector._format_exception(e)

    return format_exception
97
98
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that tolerates the custom dict subclasses returned
    by the OpenStack client APIs (see pyyaml issue 142: the stock safe
    dumper refuses to represent dict subclasses)."""

    def represent_data(self, data):
        # Normalize any dict subclass back to a plain dict before handing
        # it to the stock safe representer.
        if isinstance(data, dict) and data.__class__ != dict:
            data = {key: value for key, value in data.items()}

        return super().represent_data(data)
108
109
110 1 class vimconnector(vimconn.VimConnector):
111 1     def __init__(
112         self,
113         uuid,
114         name,
115         tenant_id,
116         tenant_name,
117         url,
118         url_admin=None,
119         user=None,
120         passwd=None,
121         log_level=None,
122         config={},
123         persistent_info={},
124     ):
125         """using common constructor parameters. In this case
126         'url' is the keystone authorization url,
127         'url_admin' is not use
128         """
129 1         api_version = config.get("APIversion")
130
131 1         if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
132 0             raise vimconn.VimConnException(
133                 "Invalid value '{}' for config:APIversion. "
134                 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
135             )
136
137 1         vim_type = config.get("vim_type")
138
139 1         if vim_type and vim_type not in ("vio", "VIO"):
140 0             raise vimconn.VimConnException(
141                 "Invalid value '{}' for config:vim_type."
142                 "Allowed values are 'vio' or 'VIO'".format(vim_type)
143             )
144
145 1         if config.get("dataplane_net_vlan_range") is not None:
146             # validate vlan ranges provided by user
147 0             self._validate_vlan_ranges(
148                 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
149             )
150
151 1         if config.get("multisegment_vlan_range") is not None:
152             # validate vlan ranges provided by user
153 0             self._validate_vlan_ranges(
154                 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
155             )
156
157 1         vimconn.VimConnector.__init__(
158             self,
159             uuid,
160             name,
161             tenant_id,
162             tenant_name,
163             url,
164             url_admin,
165             user,
166             passwd,
167             log_level,
168             config,
169         )
170
171 1         if self.config.get("insecure") and self.config.get("ca_cert"):
172 0             raise vimconn.VimConnException(
173                 "options insecure and ca_cert are mutually exclusive"
174             )
175
176 1         self.verify = True
177
178 1         if self.config.get("insecure"):
179 0             self.verify = False
180
181 1         if self.config.get("ca_cert"):
182 0             self.verify = self.config.get("ca_cert")
183
184 1         if not url:
185 0             raise TypeError("url param can not be NoneType")
186
187 1         self.persistent_info = persistent_info
188 1         self.availability_zone = persistent_info.get("availability_zone", None)
189 1         self.storage_availability_zone = None
190 1         self.vm_av_zone = None
191 1         self.session = persistent_info.get("session", {"reload_client": True})
192 1         self.my_tenant_id = self.session.get("my_tenant_id")
193 1         self.nova = self.session.get("nova")
194 1         self.neutron = self.session.get("neutron")
195 1         self.cinder = self.session.get("cinder")
196 1         self.glance = self.session.get("glance")
197         # self.glancev1 = self.session.get("glancev1")
198 1         self.keystone = self.session.get("keystone")
199 1         self.api_version3 = self.session.get("api_version3")
200 1         self.vim_type = self.config.get("vim_type")
201
202 1         if self.vim_type:
203 0             self.vim_type = self.vim_type.upper()
204
205 1         if self.config.get("use_internal_endpoint"):
206 0             self.endpoint_type = "internalURL"
207         else:
208 1             self.endpoint_type = None
209
210 1         logging.getLogger("urllib3").setLevel(logging.WARNING)
211 1         logging.getLogger("keystoneauth").setLevel(logging.WARNING)
212 1         logging.getLogger("novaclient").setLevel(logging.WARNING)
213 1         self.logger = logging.getLogger("ro.vim.openstack")
214
215         # allow security_groups to be a list or a single string
216 1         if isinstance(self.config.get("security_groups"), str):
217 0             self.config["security_groups"] = [self.config["security_groups"]]
218
219 1         self.security_groups_id = None
220
221         # ###### VIO Specific Changes #########
222 1         if self.vim_type == "VIO":
223 0             self.logger = logging.getLogger("ro.vim.vio")
224
225 1         if log_level:
226 0             self.logger.setLevel(getattr(logging, log_level))
227
228 1     def __getitem__(self, index):
229         """Get individuals parameters.
230         Throw KeyError"""
231 0         if index == "project_domain_id":
232 0             return self.config.get("project_domain_id")
233 0         elif index == "user_domain_id":
234 0             return self.config.get("user_domain_id")
235         else:
236 0             return vimconn.VimConnector.__getitem__(self, index)
237
238 1     def __setitem__(self, index, value):
239         """Set individuals parameters and it is marked as dirty so to force connection reload.
240         Throw KeyError"""
241 0         if index == "project_domain_id":
242 0             self.config["project_domain_id"] = value
243 0         elif index == "user_domain_id":
244 0             self.config["user_domain_id"] = value
245         else:
246 0             vimconn.VimConnector.__setitem__(self, index, value)
247
248 0         self.session["reload_client"] = True
249
250 1     def serialize(self, value):
251         """Serialization of python basic types.
252
253         In the case value is not serializable a message will be logged and a
254         simple representation of the data that cannot be converted back to
255         python is returned.
256         """
257 0         if isinstance(value, str):
258 0             return value
259
260 0         try:
261 0             return yaml.dump(
262                 value, Dumper=SafeDumper, default_flow_style=True, width=256
263             )
264 0         except yaml.representer.RepresenterError:
265 0             self.logger.debug(
266                 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
267                 pformat(value),
268                 exc_info=True,
269             )
270
271 0             return str(value)
272
    def _reload_connection(self):
        """Called before any operation, it check if credentials has changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure

        Rebuilds the keystone/nova/neutron/cinder/glance clients when
        self.session["reload_client"] is set (initially, or after any
        __setitem__ call), then caches them in self.session and stores the
        session in self.persistent_info.
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.session["reload_client"]:
            # Decide v2 vs v3: explicit config:APIversion wins, otherwise
            # inferred from the auth URL suffix.
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # Default the domain ids to "default" only when the config
                # supplies neither an id nor a name for that domain.
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.60"

            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            # Prefer cinder v2 when the catalog advertises the volumev2
            # service; otherwise fall back to v3.
            if sess.get_all_version_data(service_type="volumev2"):
                self.cinder = self.session["cinder"] = cClient.Client(
                    2,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.cinder = self.session["cinder"] = cClient.Client(
                    3,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                # Best effort: log and continue; some operations do not need it.
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                # Glance does not take endpoint_type, so resolve the internal
                # endpoint explicitly through the keystone catalog.
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availablity zone info inside  self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
413
414 1     def __net_os2mano(self, net_list_dict):
415         """Transform the net openstack format to mano format
416         net_list_dict can be a list of dict or a single dict"""
417 0         if type(net_list_dict) is dict:
418 0             net_list_ = (net_list_dict,)
419 0         elif type(net_list_dict) is list:
420 0             net_list_ = net_list_dict
421         else:
422 0             raise TypeError("param net_list_dict must be a list or a dictionary")
423 0         for net in net_list_:
424 0             if net.get("provider:network_type") == "vlan":
425 0                 net["type"] = "data"
426             else:
427 0                 net["type"] = "bridge"
428
429 1     def __classification_os2mano(self, class_list_dict):
430         """Transform the openstack format (Flow Classifier) to mano format
431         (Classification) class_list_dict can be a list of dict or a single dict
432         """
433 0         if isinstance(class_list_dict, dict):
434 0             class_list_ = [class_list_dict]
435 0         elif isinstance(class_list_dict, list):
436 0             class_list_ = class_list_dict
437         else:
438 0             raise TypeError("param class_list_dict must be a list or a dictionary")
439 0         for classification in class_list_:
440 0             id = classification.pop("id")
441 0             name = classification.pop("name")
442 0             description = classification.pop("description")
443 0             project_id = classification.pop("project_id")
444 0             tenant_id = classification.pop("tenant_id")
445 0             original_classification = copy.deepcopy(classification)
446 0             classification.clear()
447 0             classification["ctype"] = "legacy_flow_classifier"
448 0             classification["definition"] = original_classification
449 0             classification["id"] = id
450 0             classification["name"] = name
451 0             classification["description"] = description
452 0             classification["project_id"] = project_id
453 0             classification["tenant_id"] = tenant_id
454
455 1     def __sfi_os2mano(self, sfi_list_dict):
456         """Transform the openstack format (Port Pair) to mano format (SFI)
457         sfi_list_dict can be a list of dict or a single dict
458         """
459 0         if isinstance(sfi_list_dict, dict):
460 0             sfi_list_ = [sfi_list_dict]
461 0         elif isinstance(sfi_list_dict, list):
462 0             sfi_list_ = sfi_list_dict
463         else:
464 0             raise TypeError("param sfi_list_dict must be a list or a dictionary")
465
466 0         for sfi in sfi_list_:
467 0             sfi["ingress_ports"] = []
468 0             sfi["egress_ports"] = []
469
470 0             if sfi.get("ingress"):
471 0                 sfi["ingress_ports"].append(sfi["ingress"])
472
473 0             if sfi.get("egress"):
474 0                 sfi["egress_ports"].append(sfi["egress"])
475
476 0             del sfi["ingress"]
477 0             del sfi["egress"]
478 0             params = sfi.get("service_function_parameters")
479 0             sfc_encap = False
480
481 0             if params:
482 0                 correlation = params.get("correlation")
483
484 0                 if correlation:
485 0                     sfc_encap = True
486
487 0             sfi["sfc_encap"] = sfc_encap
488 0             del sfi["service_function_parameters"]
489
490 1     def __sf_os2mano(self, sf_list_dict):
491         """Transform the openstack format (Port Pair Group) to mano format (SF)
492         sf_list_dict can be a list of dict or a single dict
493         """
494 0         if isinstance(sf_list_dict, dict):
495 0             sf_list_ = [sf_list_dict]
496 0         elif isinstance(sf_list_dict, list):
497 0             sf_list_ = sf_list_dict
498         else:
499 0             raise TypeError("param sf_list_dict must be a list or a dictionary")
500
501 0         for sf in sf_list_:
502 0             del sf["port_pair_group_parameters"]
503 0             sf["sfis"] = sf["port_pairs"]
504 0             del sf["port_pairs"]
505
506 1     def __sfp_os2mano(self, sfp_list_dict):
507         """Transform the openstack format (Port Chain) to mano format (SFP)
508         sfp_list_dict can be a list of dict or a single dict
509         """
510 0         if isinstance(sfp_list_dict, dict):
511 0             sfp_list_ = [sfp_list_dict]
512 0         elif isinstance(sfp_list_dict, list):
513 0             sfp_list_ = sfp_list_dict
514         else:
515 0             raise TypeError("param sfp_list_dict must be a list or a dictionary")
516
517 0         for sfp in sfp_list_:
518 0             params = sfp.pop("chain_parameters")
519 0             sfc_encap = False
520
521 0             if params:
522 0                 correlation = params.get("correlation")
523
524 0                 if correlation:
525 0                     sfc_encap = True
526
527 0             sfp["sfc_encap"] = sfc_encap
528 0             sfp["spi"] = sfp.pop("chain_id")
529 0             sfp["classifications"] = sfp.pop("flow_classifiers")
530 0             sfp["service_functions"] = sfp.pop("port_pair_groups")
531
532     # placeholder for now; read TODO note below
533 1     def _validate_classification(self, type, definition):
534         # only legacy_flow_classifier Type is supported at this point
535 0         return True
536         # TODO(igordcard): this method should be an abstract method of an
537         # abstract Classification class to be implemented by the specific
538         # Types. Also, abstract vimconnector should call the validation
539         # method before the implemented VIM connectors are called.
540
541 1     @staticmethod
542 1     def _format_exception(exception):
543         """Transform a keystone, nova, neutron  exception into a vimconn exception discovering the cause"""
544 1         message_error = str(exception)
545 1         tip = ""
546
547 1         if isinstance(
548             exception,
549             (
550                 neExceptions.NetworkNotFoundClient,
551                 nvExceptions.NotFound,
552                 nvExceptions.ResourceNotFound,
553                 ksExceptions.NotFound,
554                 gl1Exceptions.HTTPNotFound,
555                 cExceptions.NotFound,
556             ),
557         ):
558 0             raise vimconn.VimConnNotFoundException(
559                 type(exception).__name__ + ": " + message_error
560             )
561 1         elif isinstance(
562             exception,
563             (
564                 HTTPException,
565                 gl1Exceptions.HTTPException,
566                 gl1Exceptions.CommunicationError,
567                 ConnectionError,
568                 ksExceptions.ConnectionError,
569                 neExceptions.ConnectionFailed,
570                 cExceptions.ConnectionError,
571             ),
572         ):
573 1             if type(exception).__name__ == "SSLError":
574 0                 tip = " (maybe option 'insecure' must be added to the VIM)"
575
576 1             raise vimconn.VimConnConnectionException(
577                 "Invalid URL or credentials{}: {}".format(tip, message_error)
578             )
579 1         elif isinstance(
580             exception,
581             (
582                 KeyError,
583                 nvExceptions.BadRequest,
584                 ksExceptions.BadRequest,
585                 gl1Exceptions.BadRequest,
586                 cExceptions.BadRequest,
587             ),
588         ):
589 0             if message_error == "OS-EXT-SRV-ATTR:host":
590 0                 tip = " (If the user does not have non-admin credentials, this attribute will be missing)"
591 0                 raise vimconn.VimConnInsufficientCredentials(
592                     type(exception).__name__ + ": " + message_error + tip
593                 )
594 0             raise vimconn.VimConnException(
595                 type(exception).__name__ + ": " + message_error
596             )
597
598 1         elif isinstance(
599             exception,
600             (
601                 nvExceptions.ClientException,
602                 ksExceptions.ClientException,
603                 neExceptions.NeutronException,
604                 cExceptions.ClientException,
605             ),
606         ):
607 1             raise vimconn.VimConnUnexpectedResponse(
608                 type(exception).__name__ + ": " + message_error
609             )
610 1         elif isinstance(exception, nvExceptions.Conflict):
611 0             raise vimconn.VimConnConflictException(
612                 type(exception).__name__ + ": " + message_error
613             )
614 1         elif isinstance(exception, vimconn.VimConnException):
615 0             raise exception
616         else:  # ()
617 1             logger = logging.getLogger("ro.vim.openstack")
618 1             logger.error("General Exception " + message_error, exc_info=True)
619
620 1             raise vimconn.VimConnException(
621                 type(exception).__name__ + ": " + message_error
622             )
623
624 1     def _get_ids_from_name(self):
625         """
626          Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
627         :return: None
628         """
629         # get tenant_id if only tenant_name is supplied
630 0         self._reload_connection()
631
632 0         if not self.my_tenant_id:
633 0             raise vimconn.VimConnConnectionException(
634                 "Error getting tenant information from name={} id={}".format(
635                     self.tenant_name, self.tenant_id
636                 )
637             )
638
639 0         if self.config.get("security_groups") and not self.security_groups_id:
640             # convert from name to id
641 0             neutron_sg_list = self.neutron.list_security_groups(
642                 tenant_id=self.my_tenant_id
643             )["security_groups"]
644
645 0             self.security_groups_id = []
646 0             for sg in self.config.get("security_groups"):
647 0                 for neutron_sg in neutron_sg_list:
648 0                     if sg in (neutron_sg["id"], neutron_sg["name"]):
649 0                         self.security_groups_id.append(neutron_sg["id"])
650 0                         break
651                 else:
652 0                     self.security_groups_id = None
653
654 0                     raise vimconn.VimConnConnectionException(
655                         "Not found security group {} for this tenant".format(sg)
656                     )
657
658 1     def _find_nova_server(self, vm_id):
659         """
660         Returns the VM instance from Openstack and completes it with flavor ID
661         Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
662         """
663 0         try:
664 0             self._reload_connection()
665 0             server = self.nova.servers.find(id=vm_id)
666             # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
667 0             server_dict = server.to_dict()
668 0             try:
669 0                 if server_dict["flavor"].get("original_name"):
670 0                     server_dict["flavor"]["id"] = self.nova.flavors.find(
671                         name=server_dict["flavor"]["original_name"]
672                     ).id
673 0             except nClient.exceptions.NotFound as e:
674 0                 self.logger.warning(str(e.message))
675 0             return server_dict
676 0         except (
677             ksExceptions.ClientException,
678             nvExceptions.ClientException,
679             nvExceptions.NotFound,
680             ConnectionError,
681         ) as e:
682 0             self._format_exception(e)
683
684 1     def check_vim_connectivity(self):
685         # just get network list to check connectivity and credentials
686 0         self.get_network_list(filter_dict={})
687
688 1     def get_tenant_list(self, filter_dict={}):
689         """Obtain tenants of VIM
690         filter_dict can contain the following keys:
691             name: filter by tenant name
692             id: filter by tenant uuid/id
693             <other VIM specific>
694         Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
695         """
696 0         self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
697 0         try:
698 0             self._reload_connection()
699
700 0             if self.api_version3:
701 0                 project_class_list = self.keystone.projects.list(
702                     name=filter_dict.get("name")
703                 )
704             else:
705 0                 project_class_list = self.keystone.tenants.findall(**filter_dict)
706
707 0             project_list = []
708
709 0             for project in project_class_list:
710 0                 if filter_dict.get("id") and filter_dict["id"] != project.id:
711 0                     continue
712
713 0                 project_list.append(project.to_dict())
714
715 0             return project_list
716 0         except (
717             ksExceptions.ConnectionError,
718             ksExceptions.ClientException,
719             ConnectionError,
720         ) as e:
721 0             self._format_exception(e)
722
723 1     def new_tenant(self, tenant_name, tenant_description):
724         """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
725 0         self.logger.debug("Adding a new tenant name: %s", tenant_name)
726 0         try:
727 0             self._reload_connection()
728
729 0             if self.api_version3:
730 0                 project = self.keystone.projects.create(
731                     tenant_name,
732                     self.config.get("project_domain_id", "default"),
733                     description=tenant_description,
734                     is_domain=False,
735                 )
736             else:
737 0                 project = self.keystone.tenants.create(tenant_name, tenant_description)
738
739 0             return project.id
740 0         except (
741             ksExceptions.ConnectionError,
742             ksExceptions.ClientException,
743             ksExceptions.BadRequest,
744             ConnectionError,
745         ) as e:
746 0             self._format_exception(e)
747
748 1     def delete_tenant(self, tenant_id):
749         """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
750 0         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
751 0         try:
752 0             self._reload_connection()
753
754 0             if self.api_version3:
755 0                 self.keystone.projects.delete(tenant_id)
756             else:
757 0                 self.keystone.tenants.delete(tenant_id)
758
759 0             return tenant_id
760
761 0         except (
762             ksExceptions.ConnectionError,
763             ksExceptions.ClientException,
764             ksExceptions.NotFound,
765             ConnectionError,
766         ) as e:
767 0             self._format_exception(e)
768
769 1     def new_network(
770         self,
771         net_name,
772         net_type,
773         ip_profile=None,
774         shared=False,
775         provider_network_profile=None,
776     ):
777         """Adds a tenant network to VIM
778         Params:
779             'net_name': name of the network
780             'net_type': one of:
781                 'bridge': overlay isolated network
782                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
783                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
784             'ip_profile': is a dict containing the IP parameters of the network
785                 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
786                 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
787                 'gateway_address': (Optional) ip_schema, that is X.X.X.X
788                 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
789                 'dhcp_enabled': True or False
790                 'dhcp_start_address': ip_schema, first IP to grant
791                 'dhcp_count': number of IPs to grant.
792             'shared': if this network can be seen/use by other tenants/organization
793             'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
794                                                              physical-network: physnet-label}
795         Returns a tuple with the network identifier and created_items, or raises an exception on error
796             created_items can be None or a dictionary where this method can include key-values that will be passed to
797             the method delete_network. Can be used to store created segments, created l2gw connections, etc.
798             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
799             as not present.
800         """
801 0         self.logger.debug(
802             "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
803         )
804         # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
805
806 0         try:
807 0             vlan = None
808
809 0             if provider_network_profile:
810 0                 vlan = provider_network_profile.get("segmentation-id")
811
812 0             new_net = None
813 0             created_items = {}
814 0             self._reload_connection()
815 0             network_dict = {"name": net_name, "admin_state_up": True}
816
817 0             if net_type in ("data", "ptp") or provider_network_profile:
818 0                 provider_physical_network = None
819
820 0                 if provider_network_profile and provider_network_profile.get(
821                     "physical-network"
822                 ):
823 0                     provider_physical_network = provider_network_profile.get(
824                         "physical-network"
825                     )
826
827                     # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
828                     # or not declared, just ignore the checking
829 0                     if (
830                         isinstance(
831                             self.config.get("dataplane_physical_net"), (tuple, list)
832                         )
833                         and provider_physical_network
834                         not in self.config["dataplane_physical_net"]
835                     ):
836 0                         raise vimconn.VimConnConflictException(
837                             "Invalid parameter 'provider-network:physical-network' "
838                             "for network creation. '{}' is not one of the declared "
839                             "list at VIM_config:dataplane_physical_net".format(
840                                 provider_physical_network
841                             )
842                         )
843
844                 # use the default dataplane_physical_net
845 0                 if not provider_physical_network:
846 0                     provider_physical_network = self.config.get(
847                         "dataplane_physical_net"
848                     )
849
850                     # if it is non-empty list, use the first value. If it is a string use the value directly
851 0                     if (
852                         isinstance(provider_physical_network, (tuple, list))
853                         and provider_physical_network
854                     ):
855 0                         provider_physical_network = provider_physical_network[0]
856
857 0                 if not provider_physical_network:
858 0                     raise vimconn.VimConnConflictException(
859                         "missing information needed for underlay networks. Provide "
860                         "'dataplane_physical_net' configuration at VIM or use the NS "
861                         "instantiation parameter 'provider-network.physical-network'"
862                         " for the VLD"
863                     )
864
865 0                 if not self.config.get("multisegment_support"):
866 0                     network_dict[
867                         "provider:physical_network"
868                     ] = provider_physical_network
869
870 0                     if (
871                         provider_network_profile
872                         and "network-type" in provider_network_profile
873                     ):
874 0                         network_dict[
875                             "provider:network_type"
876                         ] = provider_network_profile["network-type"]
877                     else:
878 0                         network_dict["provider:network_type"] = self.config.get(
879                             "dataplane_network_type", "vlan"
880                         )
881
882 0                     if vlan:
883 0                         network_dict["provider:segmentation_id"] = vlan
884                 else:
885                     # Multi-segment case
886 0                     segment_list = []
887 0                     segment1_dict = {
888                         "provider:physical_network": "",
889                         "provider:network_type": "vxlan",
890                     }
891 0                     segment_list.append(segment1_dict)
892 0                     segment2_dict = {
893                         "provider:physical_network": provider_physical_network,
894                         "provider:network_type": "vlan",
895                     }
896
897 0                     if vlan:
898 0                         segment2_dict["provider:segmentation_id"] = vlan
899 0                     elif self.config.get("multisegment_vlan_range"):
900 0                         vlanID = self._generate_multisegment_vlanID()
901 0                         segment2_dict["provider:segmentation_id"] = vlanID
902
903                     # else
904                     #     raise vimconn.VimConnConflictException(
905                     #         "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
906                     #         network")
907 0                     segment_list.append(segment2_dict)
908 0                     network_dict["segments"] = segment_list
909
910                 # VIO Specific Changes. It needs a concrete VLAN
911 0                 if self.vim_type == "VIO" and vlan is None:
912 0                     if self.config.get("dataplane_net_vlan_range") is None:
913 0                         raise vimconn.VimConnConflictException(
914                             "You must provide 'dataplane_net_vlan_range' in format "
915                             "[start_ID - end_ID] at VIM_config for creating underlay "
916                             "networks"
917                         )
918
919 0                     network_dict["provider:segmentation_id"] = self._generate_vlanID()
920
921 0             network_dict["shared"] = shared
922
923 0             if self.config.get("disable_network_port_security"):
924 0                 network_dict["port_security_enabled"] = False
925
926 0             if self.config.get("neutron_availability_zone_hints"):
927 0                 hints = self.config.get("neutron_availability_zone_hints")
928
929 0                 if isinstance(hints, str):
930 0                     hints = [hints]
931
932 0                 network_dict["availability_zone_hints"] = hints
933
934 0             new_net = self.neutron.create_network({"network": network_dict})
935             # print new_net
936             # create subnetwork, even if there is no profile
937
938 0             if not ip_profile:
939 0                 ip_profile = {}
940
941 0             if not ip_profile.get("subnet_address"):
942                 # Fake subnet is required
943 0                 subnet_rand = random.SystemRandom().randint(0, 255)
944 0                 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
945
946 0             if "ip_version" not in ip_profile:
947 0                 ip_profile["ip_version"] = "IPv4"
948
949 0             subnet = {
950                 "name": net_name + "-subnet",
951                 "network_id": new_net["network"]["id"],
952                 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
953                 "cidr": ip_profile["subnet_address"],
954             }
955
956             # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
957 0             if ip_profile.get("gateway_address"):
958 0                 subnet["gateway_ip"] = ip_profile["gateway_address"]
959             else:
960 0                 subnet["gateway_ip"] = None
961
962 0             if ip_profile.get("dns_address"):
963 0                 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
964
965 0             if "dhcp_enabled" in ip_profile:
966 0                 subnet["enable_dhcp"] = (
967                     False
968                     if ip_profile["dhcp_enabled"] == "false"
969                     or ip_profile["dhcp_enabled"] is False
970                     else True
971                 )
972
973 0             if ip_profile.get("dhcp_start_address"):
974 0                 subnet["allocation_pools"] = []
975 0                 subnet["allocation_pools"].append(dict())
976 0                 subnet["allocation_pools"][0]["start"] = ip_profile[
977                     "dhcp_start_address"
978                 ]
979
980 0             if ip_profile.get("dhcp_count"):
981                 # parts = ip_profile["dhcp_start_address"].split(".")
982                 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
983 0                 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
984 0                 ip_int += ip_profile["dhcp_count"] - 1
985 0                 ip_str = str(netaddr.IPAddress(ip_int))
986 0                 subnet["allocation_pools"][0]["end"] = ip_str
987
988 0             if (
989                 ip_profile.get("ipv6_address_mode")
990                 and ip_profile["ip_version"] != "IPv4"
991             ):
992 0                 subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
993                 # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
994                 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
995 0                 subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
996
997             # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
998 0             self.neutron.create_subnet({"subnet": subnet})
999
1000 0             if net_type == "data" and self.config.get("multisegment_support"):
1001 0                 if self.config.get("l2gw_support"):
1002 0                     l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
1003 0                     for l2gw in l2gw_list:
1004 0                         l2gw_conn = {
1005                             "l2_gateway_id": l2gw["id"],
1006                             "network_id": new_net["network"]["id"],
1007                             "segmentation_id": str(vlanID),
1008                         }
1009 0                         new_l2gw_conn = self.neutron.create_l2_gateway_connection(
1010                             {"l2_gateway_connection": l2gw_conn}
1011                         )
1012 0                         created_items[
1013                             "l2gwconn:"
1014                             + str(new_l2gw_conn["l2_gateway_connection"]["id"])
1015                         ] = True
1016
1017 0             return new_net["network"]["id"], created_items
1018 0         except Exception as e:
1019             # delete l2gw connections (if any) before deleting the network
1020 0             for k, v in created_items.items():
1021 0                 if not v:  # skip already deleted
1022 0                     continue
1023
1024 0                 try:
1025 0                     k_item, _, k_id = k.partition(":")
1026
1027 0                     if k_item == "l2gwconn":
1028 0                         self.neutron.delete_l2_gateway_connection(k_id)
1029
1030 0                 except (neExceptions.ConnectionFailed, ConnectionError) as e2:
1031 0                     self.logger.error(
1032                         "Error deleting l2 gateway connection: {}: {}".format(
1033                             type(e2).__name__, e2
1034                         )
1035                     )
1036 0                     self._format_exception(e2)
1037 0                 except Exception as e2:
1038 0                     self.logger.error(
1039                         "Error deleting l2 gateway connection: {}: {}".format(
1040                             type(e2).__name__, e2
1041                         )
1042                     )
1043
1044 0             if new_net:
1045 0                 self.neutron.delete_network(new_net["network"]["id"])
1046
1047 0             self._format_exception(e)
1048
1049 1     def get_network_list(self, filter_dict={}):
1050         """Obtain tenant networks of VIM
1051         Filter_dict can be:
1052             name: network name
1053             id: network uuid
1054             shared: boolean
1055             tenant_id: tenant
1056             admin_state_up: boolean
1057             status: 'ACTIVE'
1058         Returns the network list of dictionaries
1059         """
1060 0         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
1061 0         try:
1062 0             self._reload_connection()
1063 0             filter_dict_os = filter_dict.copy()
1064
1065 0             if self.api_version3 and "tenant_id" in filter_dict_os:
1066                 # TODO check
1067 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
1068
1069 0             net_dict = self.neutron.list_networks(**filter_dict_os)
1070 0             net_list = net_dict["networks"]
1071 0             self.__net_os2mano(net_list)
1072
1073 0             return net_list
1074 0         except (
1075             neExceptions.ConnectionFailed,
1076             ksExceptions.ClientException,
1077             neExceptions.NeutronException,
1078             ConnectionError,
1079         ) as e:
1080 0             self._format_exception(e)
1081
1082 1     def get_network(self, net_id):
1083         """Obtain details of network from VIM
1084         Returns the network information from a network id"""
1085 0         self.logger.debug(" Getting tenant network %s from VIM", net_id)
1086 0         filter_dict = {"id": net_id}
1087 0         net_list = self.get_network_list(filter_dict)
1088
1089 0         if len(net_list) == 0:
1090 0             raise vimconn.VimConnNotFoundException(
1091                 "Network '{}' not found".format(net_id)
1092             )
1093 0         elif len(net_list) > 1:
1094 0             raise vimconn.VimConnConflictException(
1095                 "Found more than one network with this criteria"
1096             )
1097
1098 0         net = net_list[0]
1099 0         subnets = []
1100 0         for subnet_id in net.get("subnets", ()):
1101 0             try:
1102 0                 subnet = self.neutron.show_subnet(subnet_id)
1103 0             except Exception as e:
1104 0                 self.logger.error(
1105                     "osconnector.get_network(): Error getting subnet %s %s"
1106                     % (net_id, str(e))
1107                 )
1108 0                 subnet = {"id": subnet_id, "fault": str(e)}
1109
1110 0             subnets.append(subnet)
1111
1112 0         net["subnets"] = subnets
1113 0         net["encapsulation"] = net.get("provider:network_type")
1114 0         net["encapsulation_type"] = net.get("provider:network_type")
1115 0         net["segmentation_id"] = net.get("provider:segmentation_id")
1116 0         net["encapsulation_id"] = net.get("provider:segmentation_id")
1117
1118 0         return net
1119
1120 1     @catch_any_exception
1121 1     def delete_network(self, net_id, created_items=None):
1122         """
1123         Removes a tenant network from VIM and its associated elements
1124         :param net_id: VIM identifier of the network, provided by method new_network
1125         :param created_items: dictionary with extra items to be deleted. provided by method new_network
1126         Returns the network identifier or raises an exception upon error or when network is not found
1127         """
1128 0         self.logger.debug("Deleting network '%s' from VIM", net_id)
1129
1130 0         if created_items is None:
1131 0             created_items = {}
1132
1133 0         try:
1134 0             self._reload_connection()
1135             # delete l2gw connections (if any) before deleting the network
1136 0             for k, v in created_items.items():
1137 0                 if not v:  # skip already deleted
1138 0                     continue
1139
1140 0                 try:
1141 0                     k_item, _, k_id = k.partition(":")
1142 0                     if k_item == "l2gwconn":
1143 0                         self.neutron.delete_l2_gateway_connection(k_id)
1144
1145 0                 except (neExceptions.ConnectionFailed, ConnectionError) as e:
1146 0                     self.logger.error(
1147                         "Error deleting l2 gateway connection: {}: {}".format(
1148                             type(e).__name__, e
1149                         )
1150                     )
1151 0                     self._format_exception(e)
1152 0                 except Exception as e:
1153 0                     self.logger.error(
1154                         "Error deleting l2 gateway connection: {}: {}".format(
1155                             type(e).__name__, e
1156                         )
1157                     )
1158
1159             # delete VM ports attached to this networks before the network
1160 0             ports = self.neutron.list_ports(network_id=net_id)
1161 0             for p in ports["ports"]:
1162 0                 try:
1163 0                     self.neutron.delete_port(p["id"])
1164
1165 0                 except (neExceptions.ConnectionFailed, ConnectionError) as e:
1166 0                     self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1167                     # If there is connection error, it raises.
1168 0                     self._format_exception(e)
1169 0                 except Exception as e:
1170 0                     self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1171
1172 0             self.neutron.delete_network(net_id)
1173
1174 0             return net_id
1175 0         except (neExceptions.NetworkNotFoundClient, neExceptions.NotFound) as e:
1176             # If network to be deleted is not found, it does not raise.
1177 0             self.logger.warning(
1178                 f"Error deleting network: {net_id} is not found, {str(e)}"
1179             )
1180
1181 1     def refresh_nets_status(self, net_list):
1182         """Get the status of the networks
1183         Params: the list of network identifiers
1184         Returns a dictionary with:
1185             net_id:         #VIM id of this network
1186                 status:     #Mandatory. Text with one of:
1187                             #  DELETED (not found at vim)
1188                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1189                             #  OTHER (Vim reported other status not understood)
1190                             #  ERROR (VIM indicates an ERROR status)
1191                             #  ACTIVE, INACTIVE, DOWN (admin down),
1192                             #  BUILD (on building process)
1193                             #
1194                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
1195                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
1196         """
1197 0         net_dict = {}
1198
1199 0         for net_id in net_list:
1200 0             net = {}
1201
1202 0             try:
1203 0                 net_vim = self.get_network(net_id)
1204
1205 0                 if net_vim["status"] in netStatus2manoFormat:
1206 0                     net["status"] = netStatus2manoFormat[net_vim["status"]]
1207                 else:
1208 0                     net["status"] = "OTHER"
1209 0                     net["error_msg"] = "VIM status reported " + net_vim["status"]
1210
1211 0                 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1212 0                     net["status"] = "DOWN"
1213
1214 0                 net["vim_info"] = self.serialize(net_vim)
1215
1216 0                 if net_vim.get("fault"):  # TODO
1217 0                     net["error_msg"] = str(net_vim["fault"])
1218 0             except vimconn.VimConnNotFoundException as e:
1219 0                 self.logger.error("Exception getting net status: %s", str(e))
1220 0                 net["status"] = "DELETED"
1221 0                 net["error_msg"] = str(e)
1222 0             except vimconn.VimConnException as e:
1223 0                 self.logger.error("Exception getting net status: %s", str(e))
1224 0                 net["status"] = "VIM_ERROR"
1225 0                 net["error_msg"] = str(e)
1226 0             net_dict[net_id] = net
1227 0         return net_dict
1228
1229 1     def get_flavor(self, flavor_id):
1230         """Obtain flavor details from the  VIM. Returns the flavor dict details"""
1231 0         self.logger.debug("Getting flavor '%s'", flavor_id)
1232 0         try:
1233 0             self._reload_connection()
1234 0             flavor = self.nova.flavors.find(id=flavor_id)
1235 0             return flavor.to_dict()
1236
1237 0         except (
1238             nvExceptions.NotFound,
1239             nvExceptions.ClientException,
1240             ksExceptions.ClientException,
1241             ConnectionError,
1242         ) as e:
1243 0             self._format_exception(e)
1244
1245 1     def get_flavor_id_from_data(self, flavor_dict):
1246         """Obtain flavor id that match the flavor description
1247         Returns the flavor_id or raises a vimconnNotFoundException
1248         flavor_dict: contains the required ram, vcpus, disk
1249         If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1250             and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1251             vimconnNotFoundException is raised
1252         """
1253 0         exact_match = False if self.config.get("use_existing_flavors") else True
1254
1255 0         try:
1256 0             self._reload_connection()
1257 0             flavor_candidate_id = None
1258 0             flavor_candidate_data = (10000, 10000, 10000)
1259 0             flavor_target = (
1260                 flavor_dict["ram"],
1261                 flavor_dict["vcpus"],
1262                 flavor_dict["disk"],
1263                 flavor_dict.get("ephemeral", 0),
1264                 flavor_dict.get("swap", 0),
1265             )
1266             # numa=None
1267 0             extended = flavor_dict.get("extended", {})
1268 0             if extended:
1269                 # TODO
1270 0                 raise vimconn.VimConnNotFoundException(
1271                     "Flavor with EPA still not implemented"
1272                 )
1273                 # if len(numas) > 1:
1274                 #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1275                 # numa=numas[0]
1276                 # numas = extended.get("numas")
1277 0             for flavor in self.nova.flavors.list():
1278 0                 epa = flavor.get_keys()
1279
1280 0                 if epa:
1281 0                     continue
1282                     # TODO
1283
1284 0                 flavor_data = (
1285                     flavor.ram,
1286                     flavor.vcpus,
1287                     flavor.disk,
1288                     flavor.ephemeral,
1289                     flavor.swap if isinstance(flavor.swap, int) else 0,
1290                 )
1291 0                 if flavor_data == flavor_target:
1292 0                     return flavor.id
1293 0                 elif (
1294                     not exact_match
1295                     and flavor_target < flavor_data < flavor_candidate_data
1296                 ):
1297 0                     flavor_candidate_id = flavor.id
1298 0                     flavor_candidate_data = flavor_data
1299
1300 0             if not exact_match and flavor_candidate_id:
1301 0                 return flavor_candidate_id
1302
1303 0             raise vimconn.VimConnNotFoundException(
1304                 "Cannot find any flavor matching '{}'".format(flavor_dict)
1305             )
1306 0         except (
1307             nvExceptions.NotFound,
1308             nvExceptions.BadRequest,
1309             nvExceptions.ClientException,
1310             ksExceptions.ClientException,
1311             ConnectionError,
1312         ) as e:
1313 0             self._format_exception(e)
1314
1315 1     @staticmethod
1316 1     def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1317         """Process resource quota and fill up extra_specs.
1318         Args:
1319             quota       (dict):         Keeping the quota of resurces
1320             prefix      (str)           Prefix
1321             extra_specs (dict)          Dict to be filled to be used during flavor creation
1322
1323         """
1324 0         if "limit" in quota:
1325 0             extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1326
1327 0         if "reserve" in quota:
1328 0             extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1329
1330 0         if "shares" in quota:
1331 0             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1332 0             extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1333
1334 1     @staticmethod
1335 1     def process_numa_memory(
1336         numa: dict, node_id: Optional[int], extra_specs: dict
1337     ) -> None:
1338         """Set the memory in extra_specs.
1339         Args:
1340             numa        (dict):         A dictionary which includes numa information
1341             node_id     (int):          ID of numa node
1342             extra_specs (dict):         To be filled.
1343
1344         """
1345 1         if not numa.get("memory"):
1346 1             return
1347 1         memory_mb = numa["memory"] * 1024
1348 1         memory = "hw:numa_mem.{}".format(node_id)
1349 1         extra_specs[memory] = int(memory_mb)
1350
1351 1     @staticmethod
1352 1     def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1353         """Set the cpu in extra_specs.
1354         Args:
1355             numa        (dict):         A dictionary which includes numa information
1356             node_id     (int):          ID of numa node
1357             extra_specs (dict):         To be filled.
1358
1359         """
1360 1         if not numa.get("vcpu"):
1361 1             return
1362 1         vcpu = numa["vcpu"]
1363 1         cpu = "hw:numa_cpus.{}".format(node_id)
1364 1         vcpu = ",".join(map(str, vcpu))
1365 1         extra_specs[cpu] = vcpu
1366
1367 1     @staticmethod
1368 1     def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1369         """Fill up extra_specs if numa has paired-threads.
1370         Args:
1371             numa        (dict):         A dictionary which includes numa information
1372             extra_specs (dict):         To be filled.
1373
1374         Returns:
1375             threads       (int)           Number of virtual cpus
1376
1377         """
1378 1         if not numa.get("paired-threads"):
1379 1             return
1380
1381         # cpu_thread_policy "require" implies that compute node must have an STM architecture
1382 1         threads = numa["paired-threads"] * 2
1383 1         extra_specs["hw:cpu_thread_policy"] = "require"
1384 1         extra_specs["hw:cpu_policy"] = "dedicated"
1385 1         return threads
1386
1387 1     @staticmethod
1388 1     def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1389         """Fill up extra_specs if numa has cores.
1390         Args:
1391             numa        (dict):         A dictionary which includes numa information
1392             extra_specs (dict):         To be filled.
1393
1394         Returns:
1395             cores       (int)           Number of virtual cpus
1396
1397         """
1398         # cpu_thread_policy "isolate" implies that the host must not have an SMT
1399         # architecture, or a non-SMT architecture will be emulated
1400 1         if not numa.get("cores"):
1401 1             return
1402 1         cores = numa["cores"]
1403 1         extra_specs["hw:cpu_thread_policy"] = "isolate"
1404 1         extra_specs["hw:cpu_policy"] = "dedicated"
1405 1         return cores
1406
1407 1     @staticmethod
1408 1     def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1409         """Fill up extra_specs if numa has threads.
1410         Args:
1411             numa        (dict):         A dictionary which includes numa information
1412             extra_specs (dict):         To be filled.
1413
1414         Returns:
1415             threads       (int)           Number of virtual cpus
1416
1417         """
1418         # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1419 1         if not numa.get("threads"):
1420 1             return
1421 1         threads = numa["threads"]
1422 1         extra_specs["hw:cpu_thread_policy"] = "prefer"
1423 1         extra_specs["hw:cpu_policy"] = "dedicated"
1424 1         return threads
1425
1426 1     def _process_numa_parameters_of_flavor(
1427         self, numas: List, extra_specs: Dict
1428     ) -> None:
1429         """Process numa parameters and fill up extra_specs.
1430
1431         Args:
1432             numas   (list):             List of dictionary which includes numa information
1433             extra_specs (dict):         To be filled.
1434
1435         """
1436 1         numa_nodes = len(numas)
1437 1         extra_specs["hw:numa_nodes"] = str(numa_nodes)
1438 1         cpu_cores, cpu_threads = 0, 0
1439
1440 1         if self.vim_type == "VIO":
1441 1             self.process_vio_numa_nodes(numa_nodes, extra_specs)
1442
1443 1         for numa in numas:
1444 1             if "id" in numa:
1445 1                 node_id = numa["id"]
1446                 # overwrite ram and vcpus
1447                 # check if key "memory" is present in numa else use ram value at flavor
1448 1                 self.process_numa_memory(numa, node_id, extra_specs)
1449 1                 self.process_numa_vcpu(numa, node_id, extra_specs)
1450
1451             # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1452 1             extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1453
1454 1             if "paired-threads" in numa:
1455 1                 threads = self.process_numa_paired_threads(numa, extra_specs)
1456 1                 cpu_threads += threads
1457
1458 1             elif "cores" in numa:
1459 1                 cores = self.process_numa_cores(numa, extra_specs)
1460 1                 cpu_cores += cores
1461
1462 1             elif "threads" in numa:
1463 1                 threads = self.process_numa_threads(numa, extra_specs)
1464 1                 cpu_threads += threads
1465
1466 1         if cpu_cores:
1467 1             extra_specs["hw:cpu_cores"] = str(cpu_cores)
1468 1         if cpu_threads:
1469 1             extra_specs["hw:cpu_threads"] = str(cpu_threads)
1470
1471 1     @staticmethod
1472 1     def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1473         """According to number of numa nodes, updates the extra_specs for VIO.
1474
1475         Args:
1476
1477             numa_nodes      (int):         List keeps the numa node numbers
1478             extra_specs     (dict):        Extra specs dict to be updated
1479
1480         """
1481         # If there are several numas, we do not define specific affinity.
1482 1         extra_specs["vmware:latency_sensitivity_level"] = "high"
1483
1484 1     def _change_flavor_name(
1485         self, name: str, name_suffix: int, flavor_data: dict
1486     ) -> str:
1487         """Change the flavor name if the name already exists.
1488
1489         Args:
1490             name    (str):          Flavor name to be checked
1491             name_suffix (int):      Suffix to be appended to name
1492             flavor_data (dict):     Flavor dict
1493
1494         Returns:
1495             name    (str):          New flavor name to be used
1496
1497         """
1498         # Get used names
1499 1         fl = self.nova.flavors.list()
1500 1         fl_names = [f.name for f in fl]
1501
1502 1         while name in fl_names:
1503 1             name_suffix += 1
1504 1             name = flavor_data["name"] + "-" + str(name_suffix)
1505
1506 1         return name
1507
1508 1     def _process_extended_config_of_flavor(
1509         self, extended: dict, extra_specs: dict
1510     ) -> None:
1511         """Process the extended dict to fill up extra_specs.
1512         Args:
1513
1514             extended                    (dict):         Keeping the extra specification of flavor
1515             extra_specs                 (dict)          Dict to be filled to be used during flavor creation
1516
1517         """
1518 1         quotas = {
1519             "cpu-quota": "cpu",
1520             "mem-quota": "memory",
1521             "vif-quota": "vif",
1522             "disk-io-quota": "disk_io",
1523         }
1524
1525 1         page_sizes = {
1526             "LARGE": "large",
1527             "SMALL": "small",
1528             "SIZE_2MB": "2MB",
1529             "SIZE_1GB": "1GB",
1530             "PREFER_LARGE": "any",
1531         }
1532
1533 1         policies = {
1534             "cpu-pinning-policy": "hw:cpu_policy",
1535             "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1536             "mem-policy": "hw:numa_mempolicy",
1537         }
1538
1539 1         numas = extended.get("numas")
1540 1         if numas:
1541 1             self._process_numa_parameters_of_flavor(numas, extra_specs)
1542
1543 1         for quota, item in quotas.items():
1544 1             if quota in extended.keys():
1545 1                 self.process_resource_quota(extended.get(quota), item, extra_specs)
1546
1547         # Set the mempage size as specified in the descriptor
1548 1         if extended.get("mempage-size"):
1549 1             if extended["mempage-size"] in page_sizes.keys():
1550 1                 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1551             else:
1552                 # Normally, validations in NBI should not allow to this condition.
1553 1                 self.logger.debug(
1554                     "Invalid mempage-size %s. Will be ignored",
1555                     extended.get("mempage-size"),
1556                 )
1557
1558 1         for policy, hw_policy in policies.items():
1559 1             if extended.get(policy):
1560 1                 extra_specs[hw_policy] = extended[policy].lower()
1561
1562 1     @staticmethod
1563 1     def _get_flavor_details(flavor_data: dict) -> Tuple:
1564         """Returns the details of flavor
1565         Args:
1566             flavor_data     (dict):     Dictionary that includes required flavor details
1567
1568         Returns:
1569             ram, vcpus, extra_specs, extended   (tuple):    Main items of required flavor
1570
1571         """
1572 1         return (
1573             flavor_data.get("ram", 64),
1574             flavor_data.get("vcpus", 1),
1575             {},
1576             flavor_data.get("extended"),
1577         )
1578
1579 1     @catch_any_exception
1580 1     def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
1581         """Adds a tenant flavor to openstack VIM.
1582         if change_name_if_used is True, it will change name in case of conflict,
1583         because it is not supported name repetition.
1584
1585         Args:
1586             flavor_data (dict):             Flavor details to be processed
1587             change_name_if_used (bool):     Change name in case of conflict
1588
1589         Returns:
1590              flavor_id  (str):     flavor identifier
1591
1592         """
1593 1         self.logger.debug("Adding flavor '%s'", str(flavor_data))
1594 1         retry = 0
1595 1         max_retries = 3
1596 1         name_suffix = 0
1597 1         name = flavor_data["name"]
1598 1         while retry < max_retries:
1599 1             retry += 1
1600 1             try:
1601 1                 self._reload_connection()
1602
1603 1                 if change_name_if_used:
1604 1                     name = self._change_flavor_name(name, name_suffix, flavor_data)
1605
1606 1                 ram, vcpus, extra_specs, extended = self._get_flavor_details(
1607                     flavor_data
1608                 )
1609 1                 if extended:
1610 1                     self._process_extended_config_of_flavor(extended, extra_specs)
1611
1612                 # Create flavor
1613
1614 1                 new_flavor = self.nova.flavors.create(
1615                     name=name,
1616                     ram=ram,
1617                     vcpus=vcpus,
1618                     disk=flavor_data.get("disk", 0),
1619                     ephemeral=flavor_data.get("ephemeral", 0),
1620                     swap=flavor_data.get("swap", 0),
1621                     is_public=flavor_data.get("is_public", True),
1622                 )
1623
1624                 # Add metadata
1625 1                 if extra_specs:
1626 1                     new_flavor.set_keys(extra_specs)
1627
1628 1                 return new_flavor.id
1629
1630 1             except nvExceptions.Conflict as e:
1631 1                 if change_name_if_used and retry < max_retries:
1632 1                     continue
1633
1634 1                 self._format_exception(e)
1635
1636 1     @catch_any_exception
1637 1     def delete_flavor(self, flavor_id):
1638         """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1639 0         try:
1640 0             self._reload_connection()
1641 0             self.nova.flavors.delete(flavor_id)
1642 0             return flavor_id
1643
1644 0         except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
1645             # If flavor is not found, it does not raise.
1646 0             self.logger.warning(
1647                 f"Error deleting flavor: {flavor_id} is not found, {str(e.message)}"
1648             )
1649
1650 1     def new_image(self, image_dict):
1651         """
1652         Adds a tenant image to VIM. imge_dict is a dictionary with:
1653             name: name
1654             disk_format: qcow2, vhd, vmdk, raw (by default), ...
1655             location: path or URI
1656             public: "yes" or "no"
1657             metadata: metadata of the image
1658         Returns the image_id
1659         """
1660 0         retry = 0
1661 0         max_retries = 3
1662
1663 0         while retry < max_retries:
1664 0             retry += 1
1665 0             try:
1666 0                 self._reload_connection()
1667
1668                 # determine format  http://docs.openstack.org/developer/glance/formats.html
1669 0                 if "disk_format" in image_dict:
1670 0                     disk_format = image_dict["disk_format"]
1671                 else:  # autodiscover based on extension
1672 0                     if image_dict["location"].endswith(".qcow2"):
1673 0                         disk_format = "qcow2"
1674 0                     elif image_dict["location"].endswith(".vhd"):
1675 0                         disk_format = "vhd"
1676 0                     elif image_dict["location"].endswith(".vmdk"):
1677 0                         disk_format = "vmdk"
1678 0                     elif image_dict["location"].endswith(".vdi"):
1679 0                         disk_format = "vdi"
1680 0                     elif image_dict["location"].endswith(".iso"):
1681 0                         disk_format = "iso"
1682 0                     elif image_dict["location"].endswith(".aki"):
1683 0                         disk_format = "aki"
1684 0                     elif image_dict["location"].endswith(".ari"):
1685 0                         disk_format = "ari"
1686 0                     elif image_dict["location"].endswith(".ami"):
1687 0                         disk_format = "ami"
1688                     else:
1689 0                         disk_format = "raw"
1690
1691 0                 self.logger.debug(
1692                     "new_image: '%s' loading from '%s'",
1693                     image_dict["name"],
1694                     image_dict["location"],
1695                 )
1696 0                 if self.vim_type == "VIO":
1697 0                     container_format = "bare"
1698 0                     if "container_format" in image_dict:
1699 0                         container_format = image_dict["container_format"]
1700
1701 0                     new_image = self.glance.images.create(
1702                         name=image_dict["name"],
1703                         container_format=container_format,
1704                         disk_format=disk_format,
1705                     )
1706                 else:
1707 0                     new_image = self.glance.images.create(name=image_dict["name"])
1708
1709 0                 if image_dict["location"].startswith("http"):
1710                     # TODO there is not a method to direct download. It must be downloaded locally with requests
1711 0                     raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1712                 else:  # local path
1713 0                     with open(image_dict["location"]) as fimage:
1714 0                         self.glance.images.upload(new_image.id, fimage)
1715                         # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1716                         #  image_dict.get("public","yes")=="yes",
1717                         #    container_format="bare", data=fimage, disk_format=disk_format)
1718
1719 0                 metadata_to_load = image_dict.get("metadata")
1720
1721                 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1722                 #  for openstack
1723 0                 if self.vim_type == "VIO":
1724 0                     metadata_to_load["upload_location"] = image_dict["location"]
1725                 else:
1726 0                     metadata_to_load["location"] = image_dict["location"]
1727
1728 0                 self.glance.images.update(new_image.id, **metadata_to_load)
1729
1730 0                 return new_image.id
1731 0             except (
1732                 HTTPException,
1733                 gl1Exceptions.HTTPException,
1734                 gl1Exceptions.CommunicationError,
1735                 ConnectionError,
1736             ) as e:
1737 0                 if retry == max_retries:
1738 0                     continue
1739
1740 0                 self._format_exception(e)
1741 0             except IOError as e:  # can not open the file
1742 0                 raise vimconn.VimConnConnectionException(
1743                     "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1744                     http_code=vimconn.HTTP_Bad_Request,
1745                 )
1746 0             except Exception as e:
1747 0                 self._format_exception(e)
1748
1749 1     @catch_any_exception
1750 1     def delete_image(self, image_id):
1751         """Deletes a tenant image from openstack VIM. Returns the old id"""
1752 0         try:
1753 0             self._reload_connection()
1754 0             self.glance.images.delete(image_id)
1755
1756 0             return image_id
1757 0         except gl1Exceptions.NotFound as e:
1758             # If image is not found, it does not raise.
1759 0             self.logger.warning(
1760                 f"Error deleting image: {image_id} is not found, {str(e)}"
1761             )
1762
1763 1     @catch_any_exception
1764 1     def get_image_id_from_path(self, path):
1765         """Get the image id from image path in the VIM database. Returns the image_id"""
1766 0         self._reload_connection()
1767 0         images = self.glance.images.list()
1768
1769 0         for image in images:
1770 0             if image.metadata.get("location") == path:
1771 0                 return image.id
1772
1773 0         raise vimconn.VimConnNotFoundException(
1774             "image with location '{}' not found".format(path)
1775         )
1776
1777 1     def get_image_list(self, filter_dict={}):
1778         """Obtain tenant images from VIM
1779         Filter_dict can be:
1780             id: image id
1781             name: image name
1782             checksum: image checksum
1783         Returns the image list of dictionaries:
1784             [{<the fields at Filter_dict plus some VIM specific>}, ...]
1785             List can be empty
1786         """
1787 0         self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1788 0         try:
1789 0             self._reload_connection()
1790             # filter_dict_os = filter_dict.copy()
1791             # First we filter by the available filter fields: name, id. The others are removed.
1792 0             image_list = self.glance.images.list()
1793 0             filtered_list = []
1794
1795 0             for image in image_list:
1796 0                 try:
1797 0                     if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1798 0                         continue
1799
1800 0                     if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1801 0                         continue
1802
1803 0                     if (
1804                         filter_dict.get("checksum")
1805                         and image["checksum"] != filter_dict["checksum"]
1806                     ):
1807 0                         continue
1808
1809 0                     filtered_list.append(image.copy())
1810 0                 except gl1Exceptions.HTTPNotFound:
1811 0                     pass
1812
1813 0             return filtered_list
1814
1815 0         except (
1816             ksExceptions.ClientException,
1817             nvExceptions.ClientException,
1818             gl1Exceptions.CommunicationError,
1819             ConnectionError,
1820         ) as e:
1821 0             self._format_exception(e)
1822
1823 1     def __wait_for_vm(self, vm_id, status):
1824         """wait until vm is in the desired status and return True.
1825         If the VM gets in ERROR status, return false.
1826         If the timeout is reached generate an exception"""
1827 0         elapsed_time = 0
1828 0         while elapsed_time < server_timeout:
1829 0             vm_status = self.nova.servers.get(vm_id).status
1830
1831 0             if vm_status == status:
1832 0                 return True
1833
1834 0             if vm_status == "ERROR":
1835 0                 return False
1836
1837 0             time.sleep(5)
1838 0             elapsed_time += 5
1839
1840         # if we exceeded the timeout rollback
1841 0         if elapsed_time >= server_timeout:
1842 0             raise vimconn.VimConnException(
1843                 "Timeout waiting for instance " + vm_id + " to get " + status,
1844                 http_code=vimconn.HTTP_Request_Timeout,
1845             )
1846
1847 1     def _get_openstack_availablity_zones(self):
1848         """
1849         Get from openstack availability zones available
1850         :return:
1851         """
1852 0         try:
1853 0             openstack_availability_zone = self.nova.availability_zones.list()
1854 0             openstack_availability_zone = [
1855                 str(zone.zoneName)
1856                 for zone in openstack_availability_zone
1857                 if zone.zoneName != "internal"
1858             ]
1859
1860 0             return openstack_availability_zone
1861 0         except Exception:
1862 0             return None
1863
1864 1     def _set_availablity_zones(self):
1865         """
1866         Set vim availablity zone
1867         :return:
1868         """
1869 0         if "availability_zone" in self.config:
1870 0             vim_availability_zones = self.config.get("availability_zone")
1871
1872 0             if isinstance(vim_availability_zones, str):
1873 0                 self.availability_zone = [vim_availability_zones]
1874 0             elif isinstance(vim_availability_zones, list):
1875 0                 self.availability_zone = vim_availability_zones
1876         else:
1877 0             self.availability_zone = self._get_openstack_availablity_zones()
1878 0         if "storage_availability_zone" in self.config:
1879 0             self.storage_availability_zone = self.config.get(
1880                 "storage_availability_zone"
1881             )
1882
1883 1     def _get_vm_availability_zone(
1884         self, availability_zone_index, availability_zone_list
1885     ):
1886         """
1887         Return thge availability zone to be used by the created VM.
1888         :return: The VIM availability zone to be used or None
1889         """
1890 0         if availability_zone_index is None:
1891 0             if not self.config.get("availability_zone"):
1892 0                 return None
1893 0             elif isinstance(self.config.get("availability_zone"), str):
1894 0                 return self.config["availability_zone"]
1895             else:
1896                 # TODO consider using a different parameter at config for default AV and AV list match
1897 0                 return self.config["availability_zone"][0]
1898
1899 0         vim_availability_zones = self.availability_zone
1900         # check if VIM offer enough availability zones describe in the VNFD
1901 0         if vim_availability_zones and len(availability_zone_list) <= len(
1902             vim_availability_zones
1903         ):
1904             # check if all the names of NFV AV match VIM AV names
1905 0             match_by_index = False
1906 0             for av in availability_zone_list:
1907 0                 if av not in vim_availability_zones:
1908 0                     match_by_index = True
1909 0                     break
1910
1911 0             if match_by_index:
1912 0                 return vim_availability_zones[availability_zone_index]
1913             else:
1914 0                 return availability_zone_list[availability_zone_index]
1915         else:
1916 0             raise vimconn.VimConnConflictException(
1917                 "No enough availability zones at VIM for this deployment"
1918             )
1919
1920 1     def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1921         """Fill up the security_groups in the port_dict.
1922
1923         Args:
1924             net (dict):             Network details
1925             port_dict   (dict):     Port details
1926
1927         """
1928 1         if (
1929             self.config.get("security_groups")
1930             and net.get("port_security") is not False
1931             and not self.config.get("no_port_security_extension")
1932         ):
1933 1             if not self.security_groups_id:
1934 1                 self._get_ids_from_name()
1935
1936 1             port_dict["security_groups"] = self.security_groups_id
1937
1938 1     def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1939         """Fill up the network binding depending on network type in the port_dict.
1940
1941         Args:
1942             net (dict):             Network details
1943             port_dict   (dict):     Port details
1944
1945         """
1946 1         if not net.get("type"):
1947 1             raise vimconn.VimConnException("Type is missing in the network details.")
1948
1949 1         if net["type"] == "virtual":
1950 1             pass
1951
1952         # For VF
1953 1         elif net["type"] == "VF" or net["type"] == "SR-IOV":
1954 1             port_dict["binding:vnic_type"] = "direct"
1955
1956             # VIO specific Changes
1957 1             if self.vim_type == "VIO":
1958                 # Need to create port with port_security_enabled = False and no-security-groups
1959 1                 port_dict["port_security_enabled"] = False
1960 1                 port_dict["provider_security_groups"] = []
1961 1                 port_dict["security_groups"] = []
1962
1963         else:
1964             # For PT PCI-PASSTHROUGH
1965 1             port_dict["binding:vnic_type"] = "direct-physical"
1966
1967 1     @staticmethod
1968 1     def _set_fixed_ip(new_port: dict, net: dict) -> None:
1969         """Set the "ip" parameter in net dictionary.
1970
1971         Args:
1972             new_port    (dict):     New created port
1973             net         (dict):     Network details
1974
1975         """
1976 1         fixed_ips = new_port["port"].get("fixed_ips")
1977
1978 1         if fixed_ips:
1979 1             net["ip"] = fixed_ips[0].get("ip_address")
1980         else:
1981 1             net["ip"] = None
1982
1983 1     @staticmethod
1984 1     def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1985         """Fill up the mac_address and fixed_ips in port_dict.
1986
1987         Args:
1988             net (dict):             Network details
1989             port_dict   (dict):     Port details
1990
1991         """
1992 1         if net.get("mac_address"):
1993 1             port_dict["mac_address"] = net["mac_address"]
1994
1995 1         ip_dual_list = []
1996 1         if ip_list := net.get("ip_address"):
1997 1             if not isinstance(ip_list, list):
1998 1                 ip_list = [ip_list]
1999 1             for ip in ip_list:
2000 1                 ip_dict = {"ip_address": ip}
2001 1                 ip_dual_list.append(ip_dict)
2002 1             port_dict["fixed_ips"] = ip_dual_list
2003             # TODO add "subnet_id": <subnet_id>
2004
2005 1     def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
2006         """Create new port using neutron.
2007
2008         Args:
2009             port_dict   (dict):         Port details
2010             created_items   (dict):     All created items
2011             net (dict):                 Network details
2012
2013         Returns:
2014             new_port    (dict):         New created port
2015
2016         """
2017 1         new_port = self.neutron.create_port({"port": port_dict})
2018 1         created_items["port:" + str(new_port["port"]["id"])] = True
2019 1         net["mac_address"] = new_port["port"]["mac_address"]
2020 1         net["vim_id"] = new_port["port"]["id"]
2021
2022 1         return new_port
2023
2024 1     def _create_port(
2025         self, net: dict, name: str, created_items: dict
2026     ) -> Tuple[dict, dict]:
2027         """Create port using net details.
2028
2029         Args:
2030             net (dict):                 Network details
2031             name    (str):              Name to be used as network name if net dict does not include name
2032             created_items   (dict):     All created items
2033
2034         Returns:
2035             new_port, port              New created port, port dictionary
2036
2037         """
2038
2039 1         port_dict = {
2040             "network_id": net["net_id"],
2041             "name": net.get("name"),
2042             "admin_state_up": True,
2043         }
2044
2045 1         if not port_dict["name"]:
2046 1             port_dict["name"] = name
2047
2048 1         self._prepare_port_dict_security_groups(net, port_dict)
2049
2050 1         self._prepare_port_dict_binding(net, port_dict)
2051
2052 1         vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
2053
2054 1         new_port = self._create_new_port(port_dict, created_items, net)
2055
2056 1         vimconnector._set_fixed_ip(new_port, net)
2057
2058 1         port = {"port-id": new_port["port"]["id"]}
2059
2060 1         if float(self.nova.api_version.get_string()) >= 2.32:
2061 1             port["tag"] = new_port["port"]["name"]
2062
2063 1         return new_port, port
2064
2065 1     def _prepare_network_for_vminstance(
2066         self,
2067         name: str,
2068         net_list: list,
2069         created_items: dict,
2070         net_list_vim: list,
2071         external_network: list,
2072         no_secured_ports: list,
2073     ) -> None:
2074         """Create port and fill up net dictionary for new VM instance creation.
2075
2076         Args:
2077             name    (str):                  Name of network
2078             net_list    (list):             List of networks
2079             created_items   (dict):         All created items belongs to a VM
2080             net_list_vim    (list):         List of ports
2081             external_network    (list):     List of external-networks
2082             no_secured_ports    (list):     Port security disabled ports
2083         """
2084
2085 1         self._reload_connection()
2086
2087 1         for net in net_list:
2088             # Skip non-connected iface
2089 1             if not net.get("net_id"):
2090 1                 continue
2091
2092 1             new_port, port = self._create_port(net, name, created_items)
2093
2094 1             net_list_vim.append(port)
2095
2096 1             if net.get("floating_ip", False):
2097 1                 net["exit_on_floating_ip_error"] = True
2098 1                 external_network.append(net)
2099
2100 1             elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2101 1                 net["exit_on_floating_ip_error"] = False
2102 1                 external_network.append(net)
2103 1                 net["floating_ip"] = self.config.get("use_floating_ip")
2104
2105             # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2106             # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2107 1             if net.get("port_security") is False and not self.config.get(
2108                 "no_port_security_extension"
2109             ):
2110 1                 no_secured_ports.append(
2111                     (
2112                         new_port["port"]["id"],
2113                         net.get("port_security_disable_strategy"),
2114                     )
2115                 )
2116
2117 1     def _prepare_persistent_root_volumes(
2118         self,
2119         name: str,
2120         storage_av_zone: list,
2121         disk: dict,
2122         base_disk_index: int,
2123         block_device_mapping: dict,
2124         existing_vim_volumes: list,
2125         created_items: dict,
2126     ) -> Optional[str]:
2127         """Prepare persistent root volumes for new VM instance.
2128
2129         Args:
2130             name    (str):                      Name of VM instance
2131             storage_av_zone  (list):            Storage of availability zones
2132             disk    (dict):                     Disk details
2133             base_disk_index (int):              Disk index
2134             block_device_mapping    (dict):     Block device details
2135             existing_vim_volumes    (list):     Existing disk details
2136             created_items   (dict):             All created items belongs to VM
2137
2138         Returns:
2139             boot_volume_id  (str):              ID of boot volume
2140
2141         """
2142         # Disk may include only vim_volume_id or only vim_id."
2143         # Use existing persistent root volume finding with volume_id or vim_id
2144 1         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2145 1         if disk.get(key_id):
2146 1             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2147 1             existing_vim_volumes.append({"id": disk[key_id]})
2148         else:
2149             # Create persistent root volume
2150 1             volume = self.cinder.volumes.create(
2151                 size=disk["size"],
2152                 name=name + "vd" + chr(base_disk_index),
2153                 imageRef=disk["image_id"],
2154                 # Make sure volume is in the same AZ as the VM to be attached to
2155                 availability_zone=storage_av_zone,
2156             )
2157 1             boot_volume_id = volume.id
2158 1             self.update_block_device_mapping(
2159                 volume=volume,
2160                 block_device_mapping=block_device_mapping,
2161                 base_disk_index=base_disk_index,
2162                 disk=disk,
2163                 created_items=created_items,
2164             )
2165
2166 1             return boot_volume_id
2167
2168 1     @staticmethod
2169 1     def update_block_device_mapping(
2170         volume: object,
2171         block_device_mapping: dict,
2172         base_disk_index: int,
2173         disk: dict,
2174         created_items: dict,
2175     ) -> None:
2176         """Add volume information to block device mapping dict.
2177         Args:
2178             volume  (object):                   Created volume object
2179             block_device_mapping    (dict):     Block device details
2180             base_disk_index (int):              Disk index
2181             disk    (dict):                     Disk details
2182             created_items   (dict):             All created items belongs to VM
2183         """
2184 1         if not volume:
2185 1             raise vimconn.VimConnException("Volume is empty.")
2186
2187 1         if not hasattr(volume, "id"):
2188 1             raise vimconn.VimConnException(
2189                 "Created volume is not valid, does not have id attribute."
2190             )
2191
2192 1         block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2193 1         if disk.get("multiattach"):  # multiattach volumes do not belong to VDUs
2194 0             return
2195 1         volume_txt = "volume:" + str(volume.id)
2196 1         if disk.get("keep"):
2197 1             volume_txt += ":keep"
2198 1         created_items[volume_txt] = True
2199
2200 1     @catch_any_exception
2201 1     def new_shared_volumes(self, shared_volume_data) -> (str, str):
2202 1         availability_zone = (
2203             self.storage_availability_zone
2204             if self.storage_availability_zone
2205             else self.vm_av_zone
2206         )
2207 1         volume = self.cinder.volumes.create(
2208             size=shared_volume_data["size"],
2209             name=shared_volume_data["name"],
2210             volume_type="multiattach",
2211             availability_zone=availability_zone,
2212         )
2213 1         return volume.name, volume.id
2214
2215 1     def _prepare_shared_volumes(
2216         self,
2217         name: str,
2218         disk: dict,
2219         base_disk_index: int,
2220         block_device_mapping: dict,
2221         existing_vim_volumes: list,
2222         created_items: dict,
2223     ):
2224 1         volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
2225 1         if volumes.get(disk["name"]):
2226 1             sv_id = volumes[disk["name"]]
2227 1             max_retries = 3
2228 1             vol_status = ""
2229             # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
2230 1             while max_retries:
2231 1                 max_retries -= 1
2232 1                 volume = self.cinder.volumes.get(sv_id)
2233 1                 vol_status = volume.status
2234 1                 if volume.status not in ("in-use", "available"):
2235 0                     time.sleep(5)
2236 0                     continue
2237 1                 self.update_block_device_mapping(
2238                     volume=volume,
2239                     block_device_mapping=block_device_mapping,
2240                     base_disk_index=base_disk_index,
2241                     disk=disk,
2242                     created_items=created_items,
2243                 )
2244 1                 return
2245 0             raise vimconn.VimConnException(
2246                 "Shared volume is not prepared, status is: {}".format(vol_status),
2247                 http_code=vimconn.HTTP_Internal_Server_Error,
2248             )
2249
2250 1     def _prepare_non_root_persistent_volumes(
2251         self,
2252         name: str,
2253         disk: dict,
2254         storage_av_zone: list,
2255         block_device_mapping: dict,
2256         base_disk_index: int,
2257         existing_vim_volumes: list,
2258         created_items: dict,
2259     ) -> None:
2260         """Prepare persistent volumes for new VM instance.
2261
2262         Args:
2263             name    (str):                      Name of VM instance
2264             disk    (dict):                     Disk details
2265             storage_av_zone  (list):            Storage of availability zones
2266             block_device_mapping    (dict):     Block device details
2267             base_disk_index (int):              Disk index
2268             existing_vim_volumes    (list):     Existing disk details
2269             created_items   (dict):             All created items belongs to VM
2270         """
2271         # Non-root persistent volumes
2272         # Disk may include only vim_volume_id or only vim_id."
2273 1         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2274 1         if disk.get(key_id):
2275             # Use existing persistent volume
2276 1             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2277 1             existing_vim_volumes.append({"id": disk[key_id]})
2278         else:
2279 1             volume_name = f"{name}vd{chr(base_disk_index)}"
2280 1             volume = self.cinder.volumes.create(
2281                 size=disk["size"],
2282                 name=volume_name,
2283                 # Make sure volume is in the same AZ as the VM to be attached to
2284                 availability_zone=storage_av_zone,
2285             )
2286 1             self.update_block_device_mapping(
2287                 volume=volume,
2288                 block_device_mapping=block_device_mapping,
2289                 base_disk_index=base_disk_index,
2290                 disk=disk,
2291                 created_items=created_items,
2292             )
2293
2294 1     def _wait_for_created_volumes_availability(
2295         self, elapsed_time: int, created_items: dict
2296     ) -> Optional[int]:
2297         """Wait till created volumes become available.
2298
2299         Args:
2300             elapsed_time    (int):          Passed time while waiting
2301             created_items   (dict):         All created items belongs to VM
2302
2303         Returns:
2304             elapsed_time    (int):          Time spent while waiting
2305
2306         """
2307 1         while elapsed_time < volume_timeout:
2308 1             for created_item in created_items:
2309 1                 v, volume_id = (
2310                     created_item.split(":")[0],
2311                     created_item.split(":")[1],
2312                 )
2313 1                 if v == "volume":
2314 1                     volume = self.cinder.volumes.get(volume_id)
2315 1                     if (
2316                         volume.volume_type == "multiattach"
2317                         and volume.status == "in-use"
2318                     ):
2319 0                         return elapsed_time
2320 1                     elif volume.status != "available":
2321 1                         break
2322             else:
2323                 # All ready: break from while
2324 1                 break
2325
2326 1             time.sleep(5)
2327 1             elapsed_time += 5
2328
2329 1         return elapsed_time
2330
2331 1     def _wait_for_existing_volumes_availability(
2332         self, elapsed_time: int, existing_vim_volumes: list
2333     ) -> Optional[int]:
2334         """Wait till existing volumes become available.
2335
2336         Args:
2337             elapsed_time    (int):          Passed time while waiting
2338             existing_vim_volumes   (list):  Existing volume details
2339
2340         Returns:
2341             elapsed_time    (int):          Time spent while waiting
2342
2343         """
2344
2345 1         while elapsed_time < volume_timeout:
2346 1             for volume in existing_vim_volumes:
2347 1                 v = self.cinder.volumes.get(volume["id"])
2348 1                 if v.volume_type == "multiattach" and v.status == "in-use":
2349 0                     return elapsed_time
2350 1                 elif v.status != "available":
2351 1                     break
2352             else:  # all ready: break from while
2353 1                 break
2354
2355 1             time.sleep(5)
2356 1             elapsed_time += 5
2357
2358 1         return elapsed_time
2359
2360 1     def _prepare_disk_for_vminstance(
2361         self,
2362         name: str,
2363         existing_vim_volumes: list,
2364         created_items: dict,
2365         storage_av_zone: list,
2366         block_device_mapping: dict,
2367         disk_list: list = None,
2368     ) -> None:
2369         """Prepare all volumes for new VM instance.
2370
2371         Args:
2372             name    (str):                      Name of Instance
2373             existing_vim_volumes    (list):     List of existing volumes
2374             created_items   (dict):             All created items belongs to VM
2375             storage_av_zone  (list):            Storage availability zone
2376             block_device_mapping (dict):        Block devices to be attached to VM
2377             disk_list   (list):                 List of disks
2378
2379         """
2380         # Create additional volumes in case these are present in disk_list
2381 1         base_disk_index = ord("b")
2382 1         boot_volume_id = None
2383 1         elapsed_time = 0
2384 1         for disk in disk_list:
2385 1             if "image_id" in disk:
2386                 # Root persistent volume
2387 1                 base_disk_index = ord("a")
2388 1                 boot_volume_id = self._prepare_persistent_root_volumes(
2389                     name=name,
2390                     storage_av_zone=storage_av_zone,
2391                     disk=disk,
2392                     base_disk_index=base_disk_index,
2393                     block_device_mapping=block_device_mapping,
2394                     existing_vim_volumes=existing_vim_volumes,
2395                     created_items=created_items,
2396                 )
2397 1             elif disk.get("multiattach"):
2398 0                 self._prepare_shared_volumes(
2399                     name=name,
2400                     disk=disk,
2401                     base_disk_index=base_disk_index,
2402                     block_device_mapping=block_device_mapping,
2403                     existing_vim_volumes=existing_vim_volumes,
2404                     created_items=created_items,
2405                 )
2406             else:
2407                 # Non-root persistent volume
2408 1                 self._prepare_non_root_persistent_volumes(
2409                     name=name,
2410                     disk=disk,
2411                     storage_av_zone=storage_av_zone,
2412                     block_device_mapping=block_device_mapping,
2413                     base_disk_index=base_disk_index,
2414                     existing_vim_volumes=existing_vim_volumes,
2415                     created_items=created_items,
2416                 )
2417 1             base_disk_index += 1
2418
2419         # Wait until created volumes are with status available
2420 1         elapsed_time = self._wait_for_created_volumes_availability(
2421             elapsed_time, created_items
2422         )
2423         # Wait until existing volumes in vim are with status available
2424 1         elapsed_time = self._wait_for_existing_volumes_availability(
2425             elapsed_time, existing_vim_volumes
2426         )
2427         # If we exceeded the timeout rollback
2428 1         if elapsed_time >= volume_timeout:
2429 1             raise vimconn.VimConnException(
2430                 "Timeout creating volumes for instance " + name,
2431                 http_code=vimconn.HTTP_Request_Timeout,
2432             )
2433 1         if boot_volume_id:
2434 1             self.cinder.volumes.set_bootable(boot_volume_id, True)
2435
2436 1     def _find_the_external_network_for_floating_ip(self):
2437         """Get the external network ip in order to create floating IP.
2438
2439         Returns:
2440             pool_id (str):      External network pool ID
2441
2442         """
2443
2444         # Find the external network
2445 1         external_nets = list()
2446
2447 1         for net in self.neutron.list_networks()["networks"]:
2448 1             if net["router:external"]:
2449 1                 external_nets.append(net)
2450
2451 1         if len(external_nets) == 0:
2452 1             raise vimconn.VimConnException(
2453                 "Cannot create floating_ip automatically since "
2454                 "no external network is present",
2455                 http_code=vimconn.HTTP_Conflict,
2456             )
2457
2458 1         if len(external_nets) > 1:
2459 1             raise vimconn.VimConnException(
2460                 "Cannot create floating_ip automatically since "
2461                 "multiple external networks are present",
2462                 http_code=vimconn.HTTP_Conflict,
2463             )
2464
2465         # Pool ID
2466 1         return external_nets[0].get("id")
2467
2468 1     def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2469         """Trigger neutron to create a new floating IP using external network ID.
2470
2471         Args:
2472             param   (dict):             Input parameters to create a floating IP
2473             created_items   (dict):     All created items belongs to new VM instance
2474
2475         Raises:
2476
2477             VimConnException
2478         """
2479 1         try:
2480 1             self.logger.debug("Creating floating IP")
2481 1             new_floating_ip = self.neutron.create_floatingip(param)
2482 1             free_floating_ip = new_floating_ip["floatingip"]["id"]
2483 1             created_items["floating_ip:" + str(free_floating_ip)] = True
2484
2485 1         except Exception as e:
2486 1             raise vimconn.VimConnException(
2487                 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2488                 http_code=vimconn.HTTP_Conflict,
2489             )
2490
2491 1     def _create_floating_ip(
2492         self, floating_network: dict, server: object, created_items: dict
2493     ) -> None:
2494         """Get the available Pool ID and create a new floating IP.
2495
2496         Args:
2497             floating_network    (dict):         Dict including external network ID
2498             server   (object):                  Server object
2499             created_items   (dict):             All created items belongs to new VM instance
2500
2501         """
2502
2503         # Pool_id is available
2504 1         if (
2505             isinstance(floating_network["floating_ip"], str)
2506             and floating_network["floating_ip"].lower() != "true"
2507         ):
2508 1             pool_id = floating_network["floating_ip"]
2509
2510         # Find the Pool_id
2511         else:
2512 1             pool_id = self._find_the_external_network_for_floating_ip()
2513
2514 1         param = {
2515             "floatingip": {
2516                 "floating_network_id": pool_id,
2517                 "tenant_id": server.tenant_id,
2518             }
2519         }
2520
2521 1         self._neutron_create_float_ip(param, created_items)
2522
2523 1     def _find_floating_ip(
2524         self,
2525         server: object,
2526         floating_ips: list,
2527         floating_network: dict,
2528     ) -> Optional[str]:
2529         """Find the available free floating IPs if there are.
2530
2531         Args:
2532             server  (object):                   Server object
2533             floating_ips    (list):             List of floating IPs
2534             floating_network    (dict):         Details of floating network such as ID
2535
2536         Returns:
2537             free_floating_ip    (str):          Free floating ip address
2538
2539         """
2540 1         for fip in floating_ips:
2541 1             if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2542 1                 continue
2543
2544 1             if isinstance(floating_network["floating_ip"], str):
2545 1                 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2546 0                     continue
2547
2548 1             return fip["id"]
2549
2550 1     def _assign_floating_ip(
2551         self, free_floating_ip: str, floating_network: dict
2552     ) -> Dict:
2553         """Assign the free floating ip address to port.
2554
2555         Args:
2556             free_floating_ip    (str):          Floating IP to be assigned
2557             floating_network    (dict):         ID of floating network
2558
2559         Returns:
2560             fip (dict)          (dict):         Floating ip details
2561
2562         """
2563         # The vim_id key contains the neutron.port_id
2564 1         self.neutron.update_floatingip(
2565             free_floating_ip,
2566             {"floatingip": {"port_id": floating_network["vim_id"]}},
2567         )
2568         # For race condition ensure not re-assigned to other VM after 5 seconds
2569 1         time.sleep(5)
2570
2571 1         return self.neutron.show_floatingip(free_floating_ip)
2572
2573 1     def _get_free_floating_ip(
2574         self, server: object, floating_network: dict
2575     ) -> Optional[str]:
2576         """Get the free floating IP address.
2577
2578         Args:
2579             server  (object):               Server Object
2580             floating_network    (dict):     Floating network details
2581
2582         Returns:
2583             free_floating_ip    (str):      Free floating ip addr
2584
2585         """
2586
2587 1         floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2588
2589         # Randomize
2590 1         random.shuffle(floating_ips)
2591
2592 1         return self._find_floating_ip(server, floating_ips, floating_network)
2593
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        Args:
            external_network    (list):         ID of External network
            server  (object):                   Server Object
            created_items   (dict):             All created items belongs to new VM instance
            vm_start_time   (float):            Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException
        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network
                    )

                    if not free_floating_ip:
                        # NOTE(review): free_floating_ip is still None here; the
                        # show_floatingip(None) call below will raise, be caught by
                        # the inner except, and the loop retries, picking up the IP
                        # just created — confirm this indirect flow is intentional.
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            # Taken by someone else meanwhile: pick another one
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        # Re-read after the settle delay: another RO instance may
                        # have stolen the IP in the meantime
                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            # VM still building: keep waiting until server_timeout
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # Per-network policy: either tolerate the failure and move on to
                # the next external network, or propagate and abort the VM
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2682
2683 1     def _update_port_security_for_vminstance(
2684         self,
2685         no_secured_ports: list,
2686         server: object,
2687     ) -> None:
2688         """Updates the port security according to no_secured_ports list.
2689
2690         Args:
2691             no_secured_ports    (list):     List of ports that security will be disabled
2692             server  (object):               Server Object
2693
2694         Raises:
2695             VimConnException
2696
2697         """
2698         # Wait until the VM is active and then disable the port-security
2699 1         if no_secured_ports:
2700 1             self.__wait_for_vm(server.id, "ACTIVE")
2701
2702 1         for port in no_secured_ports:
2703 1             port_update = {
2704                 "port": {"port_security_enabled": False, "security_groups": None}
2705             }
2706
2707 1             if port[1] == "allow-address-pairs":
2708 1                 port_update = {
2709                     "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2710                 }
2711
2712 1             try:
2713 1                 self.neutron.update_port(port[0], port_update)
2714
2715 1             except Exception:
2716 1                 raise vimconn.VimConnException(
2717                     "It was not possible to disable port security for port {}".format(
2718                         port[0]
2719                     )
2720                 )
2721
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name    (str):          name of VM
            description (str):      description
            start   (bool):         indicates if VM must start or boot in pause mode. Ignored
            image_id    (str)       image uuid
            flavor_id   (str)       flavor uuid
            affinity_group_list (list):     list of affinity groups, each one is a dictionary.Ignore if empty.
            net_list    (list):         list of interfaces, each one is a dictionary with:
                name:   name of network
                net_id:     network uuid to connect
                vpci:   virtual vcpi to assign, ignored because openstack lack #TODO
                model:  interface model, ignored #TODO
                mac_address:    used for  SR-IOV ifaces #TODO for other types
                use:    'data', 'bridge',  'mgmt'
                type:   'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id:     filled/added by this function
                floating_ip:    True/False (or it can be None)
                port_security:  True/False
            cloud_config    (dict): (optional) dictionary with:
                key-pairs:      (optional) list of strings with the public key to be inserted to the default user
                users:      (optional) list of users to be inserted, each item is a dict with:
                    name:   (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data:  (optional) string is a text script to be passed directly to cloud-init
                config-files:   (optional). List of files to be transferred. Each item is a dict with:
                    dest:   (mandatory) string with the destination absolute path
                    encoding:   (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content :    (mandatory) string with the content of the file
                    permissions:    (optional) string with file permissions, typically octal notation '0644'
                    owner:  (optional) file owner, string with the format 'owner:group'
                boot-data-drive:    boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list:  (optional) list with additional disks to the VM. Each item is a dict with:
                image_id:   (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size:   (mandatory) string with the size of the disk in GB
                vim_id:  (optional) should use this existing volume id
            availability_zone_index:    Index of availability_zone_list to use for this this VM. None if not AV required
            availability_zone_list:     list of availability zones given by user in the VNFD descriptor.  Ignore if
                availability_zone_index is None
                #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.

        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )
        server = None
        created_items = {}
        net_list_vim = []
        # list of external networks to be connected to instance, later on used to create floating_ip
        external_network = []
        # List of ports with port-security disabled
        no_secured_ports = []
        block_device_mapping = {}
        existing_vim_volumes = []
        server_group_id = None
        scheduller_hints = {}

        try:
            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            self.vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            # Volumes go to the dedicated storage AZ if one is configured,
            # otherwise to the same AZ as the VM
            storage_av_zone = (
                self.storage_availability_zone
                if self.storage_availability_zone
                else self.vm_av_zone
            )

            if disk_list:
                # Prepare disks
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    storage_av_zone=storage_av_zone,
                    block_device_mapping=block_device_mapping,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    self.vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )
            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=self.vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            # Reference time used by the floating-IP retry loop below
            vm_start_time = time.time()

            self._update_port_security_for_vminstance(no_secured_ports, server)

            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items

        except Exception as e:
            server_id = None
            if server:
                server_id = server.id

            # Rollback: strip ":keep" markers so even protected persistent
            # volumes are removed, then delete the half-created instance
            try:
                created_items = self.remove_keep_tag_from_persistent_volumes(
                    created_items
                )

                self.delete_vminstance(server_id, created_items)

            except Exception as e2:
                # Rollback itself failed; log it but report the original error
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
2911
2912 1     @staticmethod
2913 1     def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2914         """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2915
2916         Args:
2917             created_items (dict):       All created items belongs to VM
2918
2919         Returns:
2920             updated_created_items   (dict):     Dict which does not include keep flag for volumes.
2921
2922         """
2923 1         return {
2924             key.replace(":keep", ""): value for (key, value) in created_items.items()
2925         }
2926
2927 1     def get_vminstance(self, vm_id):
2928         """Returns the VM instance information from VIM"""
2929 0         return self._find_nova_server(vm_id)
2930
2931 1     @catch_any_exception
2932 1     def get_vminstance_console(self, vm_id, console_type="vnc"):
2933         """
2934         Get a console for the virtual machine
2935         Params:
2936             vm_id: uuid of the VM
2937             console_type, can be:
2938                 "novnc" (by default), "xvpvnc" for VNC types,
2939                 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2940         Returns dict with the console parameters:
2941                 protocol: ssh, ftp, http, https, ...
2942                 server:   usually ip address
2943                 port:     the http, ssh, ... port
2944                 suffix:   extra text, e.g. the http path and query string
2945         """
2946 0         self.logger.debug("Getting VM CONSOLE from VIM")
2947 0         self._reload_connection()
2948 0         server = self.nova.servers.find(id=vm_id)
2949
2950 0         if console_type is None or console_type == "novnc":
2951 0             console_dict = server.get_vnc_console("novnc")
2952 0         elif console_type == "xvpvnc":
2953 0             console_dict = server.get_vnc_console(console_type)
2954 0         elif console_type == "rdp-html5":
2955 0             console_dict = server.get_rdp_console(console_type)
2956 0         elif console_type == "spice-html5":
2957 0             console_dict = server.get_spice_console(console_type)
2958         else:
2959 0             raise vimconn.VimConnException(
2960                 "console type '{}' not allowed".format(console_type),
2961                 http_code=vimconn.HTTP_Bad_Request,
2962             )
2963
2964 0         console_dict1 = console_dict.get("console")
2965
2966 0         if console_dict1:
2967 0             console_url = console_dict1.get("url")
2968
2969 0             if console_url:
2970                 # parse console_url
2971 0                 protocol_index = console_url.find("//")
2972 0                 suffix_index = (
2973                     console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2974                 )
2975 0                 port_index = (
2976                     console_url[protocol_index + 2 : suffix_index].find(":")
2977                     + protocol_index
2978                     + 2
2979                 )
2980
2981 0                 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2982 0                     return (
2983                         -vimconn.HTTP_Internal_Server_Error,
2984                         "Unexpected response from VIM",
2985                     )
2986
2987 0                 console_dict = {
2988                     "protocol": console_url[0:protocol_index],
2989                     "server": console_url[protocol_index + 2 : port_index],
2990                     "port": console_url[port_index:suffix_index],
2991                     "suffix": console_url[suffix_index + 1 :],
2992                 }
2993 0                 protocol_index += 2
2994
2995 0                 return console_dict
2996 0         raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2997
2998 1     def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2999         """Neutron delete ports by id.
3000         Args:
3001             k_id    (str):      Port id in the VIM
3002         """
3003 1         try:
3004 1             self.neutron.delete_port(k_id)
3005
3006 1         except (neExceptions.ConnectionFailed, ConnectionError) as e:
3007 0             self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
3008             # If there is connection error, raise.
3009 0             self._format_exception(e)
3010 1         except Exception as e:
3011 1             self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
3012
3013 1     def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
3014         """Cinder delete volume by id.
3015         Args:
3016             shared_volume_vim_id    (str):                  ID of shared volume in VIM
3017         """
3018 1         elapsed_time = 0
3019 1         try:
3020 1             while elapsed_time < server_timeout:
3021 1                 vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
3022 1                 if vol_status == "available":
3023 1                     self.cinder.volumes.delete(shared_volume_vim_id)
3024 1                     return True
3025
3026 0                 time.sleep(5)
3027 0                 elapsed_time += 5
3028
3029 0             if elapsed_time >= server_timeout:
3030 0                 raise vimconn.VimConnException(
3031                     "Timeout waiting for volume "
3032                     + shared_volume_vim_id
3033                     + " to be available",
3034                     http_code=vimconn.HTTP_Request_Timeout,
3035                 )
3036
3037 0         except Exception as e:
3038 0             self.logger.error(
3039                 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3040             )
3041 0             self._format_exception(e)
3042
3043 1     def _delete_volumes_by_id_wth_cinder(
3044         self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
3045     ) -> bool:
3046         """Cinder delete volume by id.
3047         Args:
3048             k   (str):                      Full item name in created_items
3049             k_id    (str):                  ID of floating ip in VIM
3050             volumes_to_hold (list):          Volumes not to delete
3051             created_items   (dict):         All created items belongs to VM
3052         """
3053 1         try:
3054 1             if k_id in volumes_to_hold:
3055 1                 return False
3056
3057 1             if self.cinder.volumes.get(k_id).status != "available":
3058 1                 return True
3059
3060             else:
3061 1                 self.cinder.volumes.delete(k_id)
3062 1                 created_items[k] = None
3063
3064 1         except (cExceptions.ConnectionError, ConnectionError) as e:
3065 1             self.logger.error(
3066                 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3067             )
3068 1             self._format_exception(e)
3069 1         except Exception as e:
3070 1             self.logger.error(
3071                 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3072             )
3073
3074 1     def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
3075         """Neutron delete floating ip by id.
3076         Args:
3077             k   (str):                      Full item name in created_items
3078             k_id    (str):                  ID of floating ip in VIM
3079             created_items   (dict):         All created items belongs to VM
3080         """
3081 1         try:
3082 1             self.neutron.delete_floatingip(k_id)
3083 1             created_items[k] = None
3084
3085 1         except (neExceptions.ConnectionFailed, ConnectionError) as e:
3086 1             self.logger.error(
3087                 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3088             )
3089 1             self._format_exception(e)
3090 1         except Exception as e:
3091 1             self.logger.error(
3092                 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3093             )
3094
3095 1     @staticmethod
3096 1     def _get_item_name_id(k: str) -> Tuple[str, str]:
3097 1         k_item, _, k_id = k.partition(":")
3098 1         return k_item, k_id
3099
3100 1     def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
3101         """Delete VM ports attached to the networks before deleting virtual machine.
3102         Args:
3103             created_items   (dict):     All created items belongs to VM
3104         """
3105
3106 1         for k, v in created_items.items():
3107 1             if not v:  # skip already deleted
3108 1                 continue
3109
3110 1             try:
3111 1                 k_item, k_id = self._get_item_name_id(k)
3112 1                 if k_item == "port":
3113 1                     self._delete_ports_by_id_wth_neutron(k_id)
3114
3115 1             except (neExceptions.ConnectionFailed, ConnectionError) as e:
3116 0                 self.logger.error(
3117                     "Error deleting port: {}: {}".format(type(e).__name__, e)
3118                 )
3119 0                 self._format_exception(e)
3120 1             except Exception as e:
3121 1                 self.logger.error(
3122                     "Error deleting port: {}: {}".format(type(e).__name__, e)
3123                 )
3124
3125 1     def _delete_created_items(
3126         self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
3127     ) -> bool:
3128         """Delete Volumes and floating ip if they exist in created_items."""
3129 1         for k, v in created_items.items():
3130 1             if not v:  # skip already deleted
3131 1                 continue
3132
3133 1             try:
3134 1                 k_item, k_id = self._get_item_name_id(k)
3135 1                 if k_item == "volume":
3136 1                     unavailable_vol = self._delete_volumes_by_id_wth_cinder(
3137                         k, k_id, volumes_to_hold, created_items
3138                     )
3139
3140 1                     if unavailable_vol:
3141 1                         keep_waiting = True
3142
3143 1                 elif k_item == "floating_ip":
3144 1                     self._delete_floating_ip_by_id(k, k_id, created_items)
3145
3146 1             except (
3147                 cExceptions.ConnectionError,
3148                 neExceptions.ConnectionFailed,
3149                 ConnectionError,
3150                 AttributeError,
3151                 TypeError,
3152             ) as e:
3153 1                 self.logger.error("Error deleting {}: {}".format(k, e))
3154 1                 self._format_exception(e)
3155
3156 0             except Exception as e:
3157 0                 self.logger.error("Error deleting {}: {}".format(k, e))
3158
3159 1         return keep_waiting
3160
3161 1     @staticmethod
3162 1     def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3163         """Remove the volumes which has key flag from created_items
3164
3165         Args:
3166             created_items   (dict):         All created items belongs to VM
3167
3168         Returns:
3169             created_items   (dict):         Persistent volumes eliminated created_items
3170         """
3171 1         return {
3172             key: value
3173             for (key, value) in created_items.items()
3174             if len(key.split(":")) == 2
3175         }
3176
3177 1     @catch_any_exception
3178 1     def delete_vminstance(
3179         self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3180     ) -> None:
3181         """Removes a VM instance from VIM. Returns the old identifier.
3182         Args:
3183             vm_id   (str):              Identifier of VM instance
3184             created_items   (dict):     All created items belongs to VM
3185             volumes_to_hold (list):     Volumes_to_hold
3186         """
3187 1         if created_items is None:
3188 1             created_items = {}
3189 1         if volumes_to_hold is None:
3190 1             volumes_to_hold = []
3191
3192 1         try:
3193 1             created_items = self._extract_items_wth_keep_flag_from_created_items(
3194                 created_items
3195             )
3196
3197 1             self._reload_connection()
3198
3199             # Delete VM ports attached to the networks before the virtual machine
3200 1             if created_items:
3201 1                 self._delete_vm_ports_attached_to_network(created_items)
3202
3203 1             if vm_id:
3204 1                 self.nova.servers.delete(vm_id)
3205
3206             # Although having detached, volumes should have in active status before deleting.
3207             # We ensure in this loop
3208 1             keep_waiting = True
3209 1             elapsed_time = 0
3210
3211 1             while keep_waiting and elapsed_time < volume_timeout:
3212 1                 keep_waiting = False
3213
3214                 # Delete volumes and floating IP.
3215 1                 keep_waiting = self._delete_created_items(
3216                     created_items, volumes_to_hold, keep_waiting
3217                 )
3218
3219 1                 if keep_waiting:
3220 1                     time.sleep(1)
3221 1                     elapsed_time += 1
3222 1         except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
3223             # If VM does not exist, it does not raise
3224 0             self.logger.warning(f"Error deleting VM: {vm_id} is not found, {str(e)}")
3225
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this Virtual Machine
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                            #  CREATING (on building process), ERROR
                            #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
                    vim_net_id:       #network id where this interface is connected
                    vim_interface_id: #interface/port VIM id
                    ip_address:       #null, or text with IPv4, IPv6 address
                    compute_node:     #identification of compute node where PF,VF interface is allocated
                    pci:              #PCI address of the NIC that hosts the PF,VF
                    vlan:             #physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )
        # Each VM is processed independently; a failure on one VM only marks
        # that VM's entry, it does not abort the whole refresh.
        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                # Translate the VIM status into the MANO vocabulary; any status
                # not in the map is reported as OTHER with the raw text kept.
                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # Do not expose the instance user_data in vim_info.
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    # Build one interface entry per neutron port of this VM.
                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                #  the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            # NOTE(review): only the first floating IP of the
                            # port is reported; presumably one per port.
                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            # Floating IP lookup is best-effort; fixed IPs
                            # below are still reported.
                            pass

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        # Floating IP (if any) first, then fixed IPs, joined by ";".
                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    # Interface retrieval failure is non-fatal: the VM status
                    # is still returned, just without interface details.
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
3367
3368 1     @catch_any_exception
3369 1     def action_vminstance(self, vm_id, action_dict, created_items={}):
3370         """Send and action over a VM instance from VIM
3371         Returns None or the console dict if the action was successfully sent to the VIM
3372         """
3373 0         self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3374 0         self._reload_connection()
3375 0         server = self.nova.servers.find(id=vm_id)
3376 0         if "start" in action_dict:
3377 0             if action_dict["start"] == "rebuild":
3378 0                 server.rebuild()
3379             else:
3380 0                 if server.status == "PAUSED":
3381 0                     server.unpause()
3382 0                 elif server.status == "SUSPENDED":
3383 0                     server.resume()
3384 0                 elif server.status == "SHUTOFF":
3385 0                     server.start()
3386                 else:
3387 0                     self.logger.debug(
3388                         "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3389                     )
3390 0                     raise vimconn.VimConnException(
3391                         "Cannot 'start' instance while it is in active state",
3392                         http_code=vimconn.HTTP_Bad_Request,
3393                     )
3394 0         elif "pause" in action_dict:
3395 0             server.pause()
3396 0         elif "resume" in action_dict:
3397 0             server.resume()
3398 0         elif "shutoff" in action_dict or "shutdown" in action_dict:
3399 0             self.logger.debug("server status %s", server.status)
3400 0             if server.status == "ACTIVE":
3401 0                 server.stop()
3402             else:
3403 0                 self.logger.debug("ERROR: VM is not in Active state")
3404 0                 raise vimconn.VimConnException(
3405                     "VM is not in active state, stop operation is not allowed",
3406                     http_code=vimconn.HTTP_Bad_Request,
3407                 )
3408 0         elif "forceOff" in action_dict:
3409 0             server.stop()  # TODO
3410 0         elif "terminate" in action_dict:
3411 0             server.delete()
3412 0         elif "createImage" in action_dict:
3413 0             server.create_image()
3414             # "path":path_schema,
3415             # "description":description_schema,
3416             # "name":name_schema,
3417             # "metadata":metadata_schema,
3418             # "imageRef": id_schema,
3419             # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3420 0         elif "rebuild" in action_dict:
3421 0             server.rebuild(server.image["id"])
3422 0         elif "reboot" in action_dict:
3423 0             server.reboot()  # reboot_type="SOFT"
3424 0         elif "console" in action_dict:
3425 0             console_type = action_dict["console"]
3426
3427 0             if console_type is None or console_type == "novnc":
3428 0                 console_dict = server.get_vnc_console("novnc")
3429 0             elif console_type == "xvpvnc":
3430 0                 console_dict = server.get_vnc_console(console_type)
3431 0             elif console_type == "rdp-html5":
3432 0                 console_dict = server.get_rdp_console(console_type)
3433 0             elif console_type == "spice-html5":
3434 0                 console_dict = server.get_spice_console(console_type)
3435             else:
3436 0                 raise vimconn.VimConnException(
3437                     "console type '{}' not allowed".format(console_type),
3438                     http_code=vimconn.HTTP_Bad_Request,
3439                 )
3440
3441 0             try:
3442 0                 console_url = console_dict["console"]["url"]
3443                 # parse console_url
3444 0                 protocol_index = console_url.find("//")
3445 0                 suffix_index = (
3446                     console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3447                 )
3448 0                 port_index = (
3449                     console_url[protocol_index + 2 : suffix_index].find(":")
3450                     + protocol_index
3451                     + 2
3452                 )
3453
3454 0                 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3455 0                     raise vimconn.VimConnException(
3456                         "Unexpected response from VIM " + str(console_dict)
3457                     )
3458
3459 0                 console_dict2 = {
3460                     "protocol": console_url[0:protocol_index],
3461                     "server": console_url[protocol_index + 2 : port_index],
3462                     "port": int(console_url[port_index + 1 : suffix_index]),
3463                     "suffix": console_url[suffix_index + 1 :],
3464                 }
3465
3466 0                 return console_dict2
3467 0             except Exception:
3468 0                 raise vimconn.VimConnException(
3469                     "Unexpected response from VIM " + str(console_dict)
3470                 )
3471
3472 0         return None
3473
3474     # ###### VIO Specific Changes #########
3475 1     def _generate_vlanID(self):
3476         """
3477         Method to get unused vlanID
3478             Args:
3479                 None
3480             Returns:
3481                 vlanID
3482         """
3483         # Get used VLAN IDs
3484 0         usedVlanIDs = []
3485 0         networks = self.get_network_list()
3486
3487 0         for net in networks:
3488 0             if net.get("provider:segmentation_id"):
3489 0                 usedVlanIDs.append(net.get("provider:segmentation_id"))
3490
3491 0         used_vlanIDs = set(usedVlanIDs)
3492
3493         # find unused VLAN ID
3494 0         for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3495 0             try:
3496 0                 start_vlanid, end_vlanid = map(
3497                     int, vlanID_range.replace(" ", "").split("-")
3498                 )
3499
3500 0                 for vlanID in range(start_vlanid, end_vlanid + 1):
3501 0                     if vlanID not in used_vlanIDs:
3502 0                         return vlanID
3503 0             except Exception as exp:
3504 0                 raise vimconn.VimConnException(
3505                     "Exception {} occurred while generating VLAN ID.".format(exp)
3506                 )
3507         else:
3508 0             raise vimconn.VimConnConflictException(
3509                 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3510                     self.config.get("dataplane_net_vlan_range")
3511                 )
3512             )
3513
3514 1     def _generate_multisegment_vlanID(self):
3515         """
3516         Method to get unused vlanID
3517         Args:
3518             None
3519         Returns:
3520             vlanID
3521         """
3522         # Get used VLAN IDs
3523 0         usedVlanIDs = []
3524 0         networks = self.get_network_list()
3525 0         for net in networks:
3526 0             if net.get("provider:network_type") == "vlan" and net.get(
3527                 "provider:segmentation_id"
3528             ):
3529 0                 usedVlanIDs.append(net.get("provider:segmentation_id"))
3530 0             elif net.get("segments"):
3531 0                 for segment in net.get("segments"):
3532 0                     if segment.get("provider:network_type") == "vlan" and segment.get(
3533                         "provider:segmentation_id"
3534                     ):
3535 0                         usedVlanIDs.append(segment.get("provider:segmentation_id"))
3536
3537 0         used_vlanIDs = set(usedVlanIDs)
3538
3539         # find unused VLAN ID
3540 0         for vlanID_range in self.config.get("multisegment_vlan_range"):
3541 0             try:
3542 0                 start_vlanid, end_vlanid = map(
3543                     int, vlanID_range.replace(" ", "").split("-")
3544                 )
3545
3546 0                 for vlanID in range(start_vlanid, end_vlanid + 1):
3547 0                     if vlanID not in used_vlanIDs:
3548 0                         return vlanID
3549 0             except Exception as exp:
3550 0                 raise vimconn.VimConnException(
3551                     "Exception {} occurred while generating VLAN ID.".format(exp)
3552                 )
3553         else:
3554 0             raise vimconn.VimConnConflictException(
3555                 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3556                     self.config.get("multisegment_vlan_range")
3557                 )
3558             )
3559
3560 1     def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3561         """
3562         Method to validate user given vlanID ranges
3563             Args:  None
3564             Returns: None
3565         """
3566 0         for vlanID_range in input_vlan_range:
3567 0             vlan_range = vlanID_range.replace(" ", "")
3568             # validate format
3569 0             vlanID_pattern = r"(\d)*-(\d)*$"
3570 0             match_obj = re.match(vlanID_pattern, vlan_range)
3571 0             if not match_obj:
3572 0                 raise vimconn.VimConnConflictException(
3573                     "Invalid VLAN range for {}: {}.You must provide "
3574                     "'{}' in format [start_ID - end_ID].".format(
3575                         text_vlan_range, vlanID_range, text_vlan_range
3576                     )
3577                 )
3578
3579 0             start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3580 0             if start_vlanid <= 0:
3581 0                 raise vimconn.VimConnConflictException(
3582                     "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3583                     "networks valid IDs are 1 to 4094 ".format(
3584                         text_vlan_range, vlanID_range
3585                     )
3586                 )
3587
3588 0             if end_vlanid > 4094:
3589 0                 raise vimconn.VimConnConflictException(
3590                     "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3591                     "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3592                         text_vlan_range, vlanID_range
3593                     )
3594                 )
3595
3596 0             if start_vlanid > end_vlanid:
3597 0                 raise vimconn.VimConnConflictException(
3598                     "Invalid VLAN range for {}: {}. You must provide '{}'"
3599                     " in format start_ID - end_ID and start_ID < end_ID ".format(
3600                         text_vlan_range, vlanID_range, text_vlan_range
3601                     )
3602                 )
3603
3604 1     def get_hosts_info(self):
3605         """Get the information of deployed hosts
3606         Returns the hosts content"""
3607 0         if self.debug:
3608 0             print("osconnector: Getting Host info from VIM")
3609
3610 0         try:
3611 0             h_list = []
3612 0             self._reload_connection()
3613 0             hypervisors = self.nova.hypervisors.list()
3614
3615 0             for hype in hypervisors:
3616 0                 h_list.append(hype.to_dict())
3617
3618 0             return 1, {"hosts": h_list}
3619 0         except nvExceptions.NotFound as e:
3620 0             error_value = -vimconn.HTTP_Not_Found
3621 0             error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3622 0         except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3623 0             error_value = -vimconn.HTTP_Bad_Request
3624 0             error_text = (
3625                 type(e).__name__
3626                 + ": "
3627                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3628             )
3629
3630         # TODO insert exception vimconn.HTTP_Unauthorized
3631         # if reaching here is because an exception
3632 0         self.logger.debug("get_hosts_info " + error_text)
3633
3634 0         return error_value, error_text
3635
3636 1     def get_hosts(self, vim_tenant):
3637         """Get the hosts and deployed instances
3638         Returns the hosts content"""
3639 0         r, hype_dict = self.get_hosts_info()
3640
3641 0         if r < 0:
3642 0             return r, hype_dict
3643
3644 0         hypervisors = hype_dict["hosts"]
3645
3646 0         try:
3647 0             servers = self.nova.servers.list()
3648 0             for hype in hypervisors:
3649 0                 for server in servers:
3650 0                     if (
3651                         server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3652                         == hype["hypervisor_hostname"]
3653                     ):
3654 0                         if "vm" in hype:
3655 0                             hype["vm"].append(server.id)
3656                         else:
3657 0                             hype["vm"] = [server.id]
3658
3659 0             return 1, hype_dict
3660 0         except nvExceptions.NotFound as e:
3661 0             error_value = -vimconn.HTTP_Not_Found
3662 0             error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3663 0         except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3664 0             error_value = -vimconn.HTTP_Bad_Request
3665 0             error_text = (
3666                 type(e).__name__
3667                 + ": "
3668                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3669             )
3670
3671         # TODO insert exception vimconn.HTTP_Unauthorized
3672         # if reaching here is because an exception
3673 0         self.logger.debug("get_hosts " + error_text)
3674
3675 0         return error_value, error_text
3676
3677 1     def new_classification(self, name, ctype, definition):
3678 0         self.logger.debug(
3679             "Adding a new (Traffic) Classification to VIM, named %s", name
3680         )
3681
3682 0         try:
3683 0             new_class = None
3684 0             self._reload_connection()
3685
3686 0             if ctype not in supportedClassificationTypes:
3687 0                 raise vimconn.VimConnNotSupportedException(
3688                     "OpenStack VIM connector does not support provided "
3689                     "Classification Type {}, supported ones are: {}".format(
3690                         ctype, supportedClassificationTypes
3691                     )
3692                 )
3693
3694 0             if not self._validate_classification(ctype, definition):
3695 0                 raise vimconn.VimConnException(
3696                     "Incorrect Classification definition for the type specified."
3697                 )
3698
3699 0             classification_dict = definition
3700 0             classification_dict["name"] = name
3701
3702 0             self.logger.info(
3703                 "Adding a new (Traffic) Classification to VIM, named {} and {}.".format(
3704                     name, classification_dict
3705                 )
3706             )
3707 0             new_class = self.neutron.create_sfc_flow_classifier(
3708                 {"flow_classifier": classification_dict}
3709             )
3710
3711 0             return new_class["flow_classifier"]["id"]
3712 0         except (
3713             neExceptions.ConnectionFailed,
3714             ksExceptions.ClientException,
3715             neExceptions.NeutronException,
3716             ConnectionError,
3717         ) as e:
3718 0             self.logger.error("Creation of Classification failed.")
3719 0             self._format_exception(e)
3720
3721 1     def get_classification(self, class_id):
3722 0         self.logger.debug(" Getting Classification %s from VIM", class_id)
3723 0         filter_dict = {"id": class_id}
3724 0         class_list = self.get_classification_list(filter_dict)
3725
3726 0         if len(class_list) == 0:
3727 0             raise vimconn.VimConnNotFoundException(
3728                 "Classification '{}' not found".format(class_id)
3729             )
3730 0         elif len(class_list) > 1:
3731 0             raise vimconn.VimConnConflictException(
3732                 "Found more than one Classification with this criteria"
3733             )
3734
3735 0         classification = class_list[0]
3736
3737 0         return classification
3738
3739 1     def get_classification_list(self, filter_dict={}):
3740 0         self.logger.debug(
3741             "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3742         )
3743
3744 0         try:
3745 0             filter_dict_os = filter_dict.copy()
3746 0             self._reload_connection()
3747
3748 0             if self.api_version3 and "tenant_id" in filter_dict_os:
3749 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3750
3751 0             classification_dict = self.neutron.list_sfc_flow_classifiers(
3752                 **filter_dict_os
3753             )
3754 0             classification_list = classification_dict["flow_classifiers"]
3755 0             self.__classification_os2mano(classification_list)
3756
3757 0             return classification_list
3758 0         except (
3759             neExceptions.ConnectionFailed,
3760             ksExceptions.ClientException,
3761             neExceptions.NeutronException,
3762             ConnectionError,
3763         ) as e:
3764 0             self._format_exception(e)
3765
3766 1     def delete_classification(self, class_id):
3767 0         self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3768
3769 0         try:
3770 0             self._reload_connection()
3771 0             self.neutron.delete_sfc_flow_classifier(class_id)
3772
3773 0             return class_id
3774 0         except (
3775             neExceptions.ConnectionFailed,
3776             neExceptions.NeutronException,
3777             ksExceptions.ClientException,
3778             neExceptions.NeutronException,
3779             ConnectionError,
3780         ) as e:
3781 0             self._format_exception(e)
3782
3783 1     def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3784 0         self.logger.debug(
3785             "Adding a new Service Function Instance to VIM, named '%s'", name
3786         )
3787
3788 0         try:
3789 0             new_sfi = None
3790 0             self._reload_connection()
3791 0             correlation = None
3792
3793 0             if sfc_encap:
3794 0                 correlation = "nsh"
3795
3796 0             if len(ingress_ports) != 1:
3797 0                 raise vimconn.VimConnNotSupportedException(
3798                     "OpenStack VIM connector can only have 1 ingress port per SFI"
3799                 )
3800
3801 0             if len(egress_ports) != 1:
3802 0                 raise vimconn.VimConnNotSupportedException(
3803                     "OpenStack VIM connector can only have 1 egress port per SFI"
3804                 )
3805
3806 0             sfi_dict = {
3807                 "name": name,
3808                 "ingress": ingress_ports[0],
3809                 "egress": egress_ports[0],
3810                 "service_function_parameters": {"correlation": correlation},
3811             }
3812 0             self.logger.info("Adding a new SFI to VIM, {}.".format(sfi_dict))
3813 0             new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3814
3815 0             return new_sfi["port_pair"]["id"]
3816 0         except (
3817             neExceptions.ConnectionFailed,
3818             ksExceptions.ClientException,
3819             neExceptions.NeutronException,
3820             ConnectionError,
3821         ) as e:
3822 0             if new_sfi:
3823 0                 try:
3824 0                     self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3825 0                 except Exception:
3826 0                     self.logger.error(
3827                         "Creation of Service Function Instance failed, with "
3828                         "subsequent deletion failure as well."
3829                     )
3830
3831 0             self._format_exception(e)
3832
3833 1     def get_sfi(self, sfi_id):
3834 0         self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3835 0         filter_dict = {"id": sfi_id}
3836 0         sfi_list = self.get_sfi_list(filter_dict)
3837
3838 0         if len(sfi_list) == 0:
3839 0             raise vimconn.VimConnNotFoundException(
3840                 "Service Function Instance '{}' not found".format(sfi_id)
3841             )
3842 0         elif len(sfi_list) > 1:
3843 0             raise vimconn.VimConnConflictException(
3844                 "Found more than one Service Function Instance with this criteria"
3845             )
3846
3847 0         sfi = sfi_list[0]
3848
3849 0         return sfi
3850
3851 1     def get_sfi_list(self, filter_dict={}):
3852 0         self.logger.debug(
3853             "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3854         )
3855
3856 0         try:
3857 0             self._reload_connection()
3858 0             filter_dict_os = filter_dict.copy()
3859
3860 0             if self.api_version3 and "tenant_id" in filter_dict_os:
3861 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3862
3863 0             sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3864 0             sfi_list = sfi_dict["port_pairs"]
3865 0             self.__sfi_os2mano(sfi_list)
3866
3867 0             return sfi_list
3868 0         except (
3869             neExceptions.ConnectionFailed,
3870             ksExceptions.ClientException,
3871             neExceptions.NeutronException,
3872             ConnectionError,
3873         ) as e:
3874 0             self._format_exception(e)
3875
3876 1     def delete_sfi(self, sfi_id):
3877 0         self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3878
3879 0         try:
3880 0             self._reload_connection()
3881 0             self.neutron.delete_sfc_port_pair(sfi_id)
3882
3883 0             return sfi_id
3884 0         except (
3885             neExceptions.ConnectionFailed,
3886             neExceptions.NeutronException,
3887             ksExceptions.ClientException,
3888             neExceptions.NeutronException,
3889             ConnectionError,
3890         ) as e:
3891 0             self._format_exception(e)
3892
3893 1     def new_sf(self, name, sfis, sfc_encap=True):
3894 0         self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3895
3896 0         new_sf = None
3897
3898 0         try:
3899 0             self._reload_connection()
3900
3901 0             for instance in sfis:
3902 0                 sfi = self.get_sfi(instance)
3903
3904 0                 if sfi.get("sfc_encap") != sfc_encap:
3905 0                     raise vimconn.VimConnNotSupportedException(
3906                         "OpenStack VIM connector requires all SFIs of the "
3907                         "same SF to share the same SFC Encapsulation"
3908                     )
3909
3910 0             sf_dict = {"name": name, "port_pairs": sfis}
3911
3912 0             self.logger.info("Adding a new SF to VIM, {}.".format(sf_dict))
3913 0             new_sf = self.neutron.create_sfc_port_pair_group(
3914                 {"port_pair_group": sf_dict}
3915             )
3916
3917 0             return new_sf["port_pair_group"]["id"]
3918 0         except (
3919             neExceptions.ConnectionFailed,
3920             ksExceptions.ClientException,
3921             neExceptions.NeutronException,
3922             ConnectionError,
3923         ) as e:
3924 0             if new_sf:
3925 0                 try:
3926 0                     new_sf_id = new_sf.get("port_pair_group").get("id")
3927 0                     self.neutron.delete_sfc_port_pair_group(new_sf_id)
3928 0                 except Exception:
3929 0                     self.logger.error(
3930                         "Creation of Service Function failed, with "
3931                         "subsequent deletion failure as well."
3932                     )
3933
3934 0             self._format_exception(e)
3935
3936 1     def get_sf(self, sf_id):
3937 0         self.logger.debug("Getting Service Function %s from VIM", sf_id)
3938 0         filter_dict = {"id": sf_id}
3939 0         sf_list = self.get_sf_list(filter_dict)
3940
3941 0         if len(sf_list) == 0:
3942 0             raise vimconn.VimConnNotFoundException(
3943                 "Service Function '{}' not found".format(sf_id)
3944             )
3945 0         elif len(sf_list) > 1:
3946 0             raise vimconn.VimConnConflictException(
3947                 "Found more than one Service Function with this criteria"
3948             )
3949
3950 0         sf = sf_list[0]
3951
3952 0         return sf
3953
3954 1     def get_sf_list(self, filter_dict={}):
3955 0         self.logger.debug(
3956             "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3957         )
3958
3959 0         try:
3960 0             self._reload_connection()
3961 0             filter_dict_os = filter_dict.copy()
3962
3963 0             if self.api_version3 and "tenant_id" in filter_dict_os:
3964 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3965
3966 0             sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3967 0             sf_list = sf_dict["port_pair_groups"]
3968 0             self.__sf_os2mano(sf_list)
3969
3970 0             return sf_list
3971 0         except (
3972             neExceptions.ConnectionFailed,
3973             ksExceptions.ClientException,
3974             neExceptions.NeutronException,
3975             ConnectionError,
3976         ) as e:
3977 0             self._format_exception(e)
3978
3979 1     def delete_sf(self, sf_id):
3980 0         self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3981
3982 0         try:
3983 0             self._reload_connection()
3984 0             self.neutron.delete_sfc_port_pair_group(sf_id)
3985
3986 0             return sf_id
3987 0         except (
3988             neExceptions.ConnectionFailed,
3989             neExceptions.NeutronException,
3990             ksExceptions.ClientException,
3991             neExceptions.NeutronException,
3992             ConnectionError,
3993         ) as e:
3994 0             self._format_exception(e)
3995
3996 1     def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3997 0         self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3998
3999 0         new_sfp = None
4000
4001 0         try:
4002 0             self._reload_connection()
4003             # In networking-sfc the MPLS encapsulation is legacy
4004             # should be used when no full SFC Encapsulation is intended
4005 0             correlation = "mpls"
4006
4007 0             if sfc_encap:
4008 0                 correlation = "nsh"
4009
4010 0             sfp_dict = {
4011                 "name": name,
4012                 "flow_classifiers": classifications,
4013                 "port_pair_groups": sfs,
4014                 "chain_parameters": {"correlation": correlation},
4015             }
4016
4017 0             if spi:
4018 0                 sfp_dict["chain_id"] = spi
4019
4020 0             self.logger.info("Adding a new SFP to VIM, {}.".format(sfp_dict))
4021 0             new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
4022
4023 0             return new_sfp["port_chain"]["id"]
4024 0         except (
4025             neExceptions.ConnectionFailed,
4026             ksExceptions.ClientException,
4027             neExceptions.NeutronException,
4028             ConnectionError,
4029         ) as e:
4030 0             if new_sfp:
4031 0                 try:
4032 0                     new_sfp_id = new_sfp.get("port_chain").get("id")
4033 0                     self.neutron.delete_sfc_port_chain(new_sfp_id)
4034 0                 except Exception:
4035 0                     self.logger.error(
4036                         "Creation of Service Function Path failed, with "
4037                         "subsequent deletion failure as well."
4038                     )
4039
4040 0             self._format_exception(e)
4041
4042 1     def get_sfp(self, sfp_id):
4043 0         self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
4044
4045 0         filter_dict = {"id": sfp_id}
4046 0         sfp_list = self.get_sfp_list(filter_dict)
4047
4048 0         if len(sfp_list) == 0:
4049 0             raise vimconn.VimConnNotFoundException(
4050                 "Service Function Path '{}' not found".format(sfp_id)
4051             )
4052 0         elif len(sfp_list) > 1:
4053 0             raise vimconn.VimConnConflictException(
4054                 "Found more than one Service Function Path with this criteria"
4055             )
4056
4057 0         sfp = sfp_list[0]
4058
4059 0         return sfp
4060
4061 1     def get_sfp_list(self, filter_dict={}):
4062 0         self.logger.debug(
4063             "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
4064         )
4065
4066 0         try:
4067 0             self._reload_connection()
4068 0             filter_dict_os = filter_dict.copy()
4069
4070 0             if self.api_version3 and "tenant_id" in filter_dict_os:
4071 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
4072
4073 0             sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
4074 0             sfp_list = sfp_dict["port_chains"]
4075 0             self.__sfp_os2mano(sfp_list)
4076
4077 0             return sfp_list
4078 0         except (
4079             neExceptions.ConnectionFailed,
4080             ksExceptions.ClientException,
4081             neExceptions.NeutronException,
4082             ConnectionError,
4083         ) as e:
4084 0             self._format_exception(e)
4085
4086 1     def delete_sfp(self, sfp_id):
4087 0         self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
4088
4089 0         try:
4090 0             self._reload_connection()
4091 0             self.neutron.delete_sfc_port_chain(sfp_id)
4092
4093 0             return sfp_id
4094 0         except (
4095             neExceptions.ConnectionFailed,
4096             neExceptions.NeutronException,
4097             ksExceptions.ClientException,
4098             neExceptions.NeutronException,
4099             ConnectionError,
4100         ) as e:
4101 0             self._format_exception(e)
4102
4103 1     def refresh_sfps_status(self, sfp_list):
4104         """Get the status of the service function path
4105         Params: the list of sfp identifiers
4106         Returns a dictionary with:
4107             vm_id:          #VIM id of this service function path
4108                 status:     #Mandatory. Text with one of:
4109                             #  DELETED (not found at vim)
4110                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4111                             #  OTHER (Vim reported other status not understood)
4112                             #  ERROR (VIM indicates an ERROR status)
4113                             #  ACTIVE,
4114                             #  CREATING (on building process)
4115                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
4116                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)F
4117         """
4118 0         sfp_dict = {}
4119 0         self.logger.debug(
4120             "refresh_sfps status: Getting tenant SFP information from VIM"
4121         )
4122
4123 0         for sfp_id in sfp_list:
4124 0             sfp = {}
4125
4126 0             try:
4127 0                 sfp_vim = self.get_sfp(sfp_id)
4128
4129 0                 if sfp_vim["spi"]:
4130 0                     sfp["status"] = vmStatus2manoFormat["ACTIVE"]
4131                 else:
4132 0                     sfp["status"] = "OTHER"
4133 0                     sfp["error_msg"] = "VIM status reported " + sfp["status"]
4134
4135 0                 sfp["vim_info"] = self.serialize(sfp_vim)
4136
4137 0                 if sfp_vim.get("fault"):
4138 0                     sfp["error_msg"] = str(sfp_vim["fault"])
4139 0             except vimconn.VimConnNotFoundException as e:
4140 0                 self.logger.error("Exception getting sfp status: %s", str(e))
4141 0                 sfp["status"] = "DELETED"
4142 0                 sfp["error_msg"] = str(e)
4143 0             except vimconn.VimConnException as e:
4144 0                 self.logger.error("Exception getting sfp status: %s", str(e))
4145 0                 sfp["status"] = "VIM_ERROR"
4146 0                 sfp["error_msg"] = str(e)
4147
4148 0             sfp_dict[sfp_id] = sfp
4149
4150 0         return sfp_dict
4151
4152 1     def refresh_sfis_status(self, sfi_list):
4153         """Get the status of the service function instances
4154         Params: the list of sfi identifiers
4155         Returns a dictionary with:
4156             vm_id:          #VIM id of this service function instance
4157                 status:     #Mandatory. Text with one of:
4158                             #  DELETED (not found at vim)
4159                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4160                             #  OTHER (Vim reported other status not understood)
4161                             #  ERROR (VIM indicates an ERROR status)
4162                             #  ACTIVE,
4163                             #  CREATING (on building process)
4164                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
4165                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
4166         """
4167 0         sfi_dict = {}
4168 0         self.logger.debug(
4169             "refresh_sfis status: Getting tenant sfi information from VIM"
4170         )
4171
4172 0         for sfi_id in sfi_list:
4173 0             sfi = {}
4174
4175 0             try:
4176 0                 sfi_vim = self.get_sfi(sfi_id)
4177
4178 0                 if sfi_vim:
4179 0                     sfi["status"] = vmStatus2manoFormat["ACTIVE"]
4180                 else:
4181 0                     sfi["status"] = "OTHER"
4182 0                     sfi["error_msg"] = "VIM status reported " + sfi["status"]
4183
4184 0                 sfi["vim_info"] = self.serialize(sfi_vim)
4185
4186 0                 if sfi_vim.get("fault"):
4187 0                     sfi["error_msg"] = str(sfi_vim["fault"])
4188 0             except vimconn.VimConnNotFoundException as e:
4189 0                 self.logger.error("Exception getting sfi status: %s", str(e))
4190 0                 sfi["status"] = "DELETED"
4191 0                 sfi["error_msg"] = str(e)
4192 0             except vimconn.VimConnException as e:
4193 0                 self.logger.error("Exception getting sfi status: %s", str(e))
4194 0                 sfi["status"] = "VIM_ERROR"
4195 0                 sfi["error_msg"] = str(e)
4196
4197 0             sfi_dict[sfi_id] = sfi
4198
4199 0         return sfi_dict
4200
4201 1     def refresh_sfs_status(self, sf_list):
4202         """Get the status of the service functions
4203         Params: the list of sf identifiers
4204         Returns a dictionary with:
4205             vm_id:          #VIM id of this service function
4206                 status:     #Mandatory. Text with one of:
4207                             #  DELETED (not found at vim)
4208                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4209                             #  OTHER (Vim reported other status not understood)
4210                             #  ERROR (VIM indicates an ERROR status)
4211                             #  ACTIVE,
4212                             #  CREATING (on building process)
4213                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
4214                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
4215         """
4216 0         sf_dict = {}
4217 0         self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
4218
4219 0         for sf_id in sf_list:
4220 0             sf = {}
4221
4222 0             try:
4223 0                 sf_vim = self.get_sf(sf_id)
4224
4225 0                 if sf_vim:
4226 0                     sf["status"] = vmStatus2manoFormat["ACTIVE"]
4227                 else:
4228 0                     sf["status"] = "OTHER"
4229 0                     sf["error_msg"] = "VIM status reported " + sf_vim["status"]
4230
4231 0                 sf["vim_info"] = self.serialize(sf_vim)
4232
4233 0                 if sf_vim.get("fault"):
4234 0                     sf["error_msg"] = str(sf_vim["fault"])
4235 0             except vimconn.VimConnNotFoundException as e:
4236 0                 self.logger.error("Exception getting sf status: %s", str(e))
4237 0                 sf["status"] = "DELETED"
4238 0                 sf["error_msg"] = str(e)
4239 0             except vimconn.VimConnException as e:
4240 0                 self.logger.error("Exception getting sf status: %s", str(e))
4241 0                 sf["status"] = "VIM_ERROR"
4242 0                 sf["error_msg"] = str(e)
4243
4244 0             sf_dict[sf_id] = sf
4245
4246 0         return sf_dict
4247
4248 1     def refresh_classifications_status(self, classification_list):
4249         """Get the status of the classifications
4250         Params: the list of classification identifiers
4251         Returns a dictionary with:
4252             vm_id:          #VIM id of this classifier
4253                 status:     #Mandatory. Text with one of:
4254                             #  DELETED (not found at vim)
4255                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4256                             #  OTHER (Vim reported other status not understood)
4257                             #  ERROR (VIM indicates an ERROR status)
4258                             #  ACTIVE,
4259                             #  CREATING (on building process)
4260                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
4261                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
4262         """
4263 0         classification_dict = {}
4264 0         self.logger.debug(
4265             "refresh_classifications status: Getting tenant classification information from VIM"
4266         )
4267
4268 0         for classification_id in classification_list:
4269 0             classification = {}
4270
4271 0             try:
4272 0                 classification_vim = self.get_classification(classification_id)
4273
4274 0                 if classification_vim:
4275 0                     classification["status"] = vmStatus2manoFormat["ACTIVE"]
4276                 else:
4277 0                     classification["status"] = "OTHER"
4278 0                     classification["error_msg"] = (
4279                         "VIM status reported " + classification["status"]
4280                     )
4281
4282 0                 classification["vim_info"] = self.serialize(classification_vim)
4283
4284 0                 if classification_vim.get("fault"):
4285 0                     classification["error_msg"] = str(classification_vim["fault"])
4286 0             except vimconn.VimConnNotFoundException as e:
4287 0                 self.logger.error("Exception getting classification status: %s", str(e))
4288 0                 classification["status"] = "DELETED"
4289 0                 classification["error_msg"] = str(e)
4290 0             except vimconn.VimConnException as e:
4291 0                 self.logger.error("Exception getting classification status: %s", str(e))
4292 0                 classification["status"] = "VIM_ERROR"
4293 0                 classification["error_msg"] = str(e)
4294
4295 0             classification_dict[classification_id] = classification
4296
4297 0         return classification_dict
4298
4299 1     @catch_any_exception
4300 1     def new_affinity_group(self, affinity_group_data):
4301         """Adds a server group to VIM
4302             affinity_group_data contains a dictionary with information, keys:
4303                 name: name in VIM for the server group
4304                 type: affinity or anti-affinity
4305                 scope: Only nfvi-node allowed
4306         Returns the server group identifier"""
4307 0         self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
4308 0         name = affinity_group_data["name"]
4309 0         policy = affinity_group_data["type"]
4310 0         self._reload_connection()
4311 0         new_server_group = self.nova.server_groups.create(name, policy)
4312 0         return new_server_group.id
4313
4314 1     @catch_any_exception
4315 1     def get_affinity_group(self, affinity_group_id):
4316         """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4317 0         self.logger.debug("Getting flavor '%s'", affinity_group_id)
4318 0         self._reload_connection()
4319 0         server_group = self.nova.server_groups.find(id=affinity_group_id)
4320 0         return server_group.to_dict()
4321
4322 1     @catch_any_exception
4323 1     def delete_affinity_group(self, affinity_group_id):
4324         """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4325 0         self.logger.debug("Getting server group '%s'", affinity_group_id)
4326 0         self._reload_connection()
4327 0         self.nova.server_groups.delete(affinity_group_id)
4328 0         return affinity_group_id
4329
4330 1     @catch_any_exception
4331 1     def get_vdu_state(self, vm_id, host_is_required=False) -> list:
4332         """Getting the state of a VDU.
4333         Args:
4334             vm_id   (str): ID of an instance
4335             host_is_required    (Boolean): If the VIM account is non-admin, host info does not appear in server_dict
4336                                            and if this is set to True, it raises KeyError.
4337         Returns:
4338             vdu_data    (list): VDU details including state, flavor, host_info, AZ
4339         """
4340 0         self.logger.debug("Getting the status of VM")
4341 0         self.logger.debug("VIM VM ID %s", vm_id)
4342 0         self._reload_connection()
4343 0         server_dict = self._find_nova_server(vm_id)
4344 0         srv_attr = "OS-EXT-SRV-ATTR:host"
4345 0         host_info = (
4346             server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
4347         )
4348 0         vdu_data = [
4349             server_dict["status"],
4350             server_dict["flavor"]["id"],
4351             host_info,
4352             server_dict["OS-EXT-AZ:availability_zone"],
4353         ]
4354 0         self.logger.debug("vdu_data %s", vdu_data)
4355 0         return vdu_data
4356
4357 1     def check_compute_availability(self, host, server_flavor_details):
4358 0         self._reload_connection()
4359 0         hypervisor_search = self.nova.hypervisors.search(
4360             hypervisor_match=host, servers=True
4361         )
4362 0         for hypervisor in hypervisor_search:
4363 0             hypervisor_id = hypervisor.to_dict()["id"]
4364 0             hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4365 0             hypervisor_dict = hypervisor_details.to_dict()
4366 0             hypervisor_temp = json.dumps(hypervisor_dict)
4367 0             hypervisor_json = json.loads(hypervisor_temp)
4368 0             resources_available = [
4369                 hypervisor_json["free_ram_mb"],
4370                 hypervisor_json["disk_available_least"],
4371                 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
4372             ]
4373 0             compute_available = all(
4374                 x > y for x, y in zip(resources_available, server_flavor_details)
4375             )
4376 0             if compute_available:
4377 0                 return host
4378
4379 1     def check_availability_zone(
4380         self, old_az, server_flavor_details, old_host, host=None
4381     ):
4382 0         self._reload_connection()
4383 0         az_check = {"zone_check": False, "compute_availability": None}
4384 0         aggregates_list = self.nova.aggregates.list()
4385 0         for aggregate in aggregates_list:
4386 0             aggregate_details = aggregate.to_dict()
4387 0             aggregate_temp = json.dumps(aggregate_details)
4388 0             aggregate_json = json.loads(aggregate_temp)
4389 0             if aggregate_json["availability_zone"] == old_az:
4390 0                 hosts_list = aggregate_json["hosts"]
4391 0                 if host is not None:
4392 0                     if host in hosts_list:
4393 0                         az_check["zone_check"] = True
4394 0                         available_compute_id = self.check_compute_availability(
4395                             host, server_flavor_details
4396                         )
4397 0                         if available_compute_id is not None:
4398 0                             az_check["compute_availability"] = available_compute_id
4399                 else:
4400 0                     for check_host in hosts_list:
4401 0                         if check_host != old_host:
4402 0                             available_compute_id = self.check_compute_availability(
4403                                 check_host, server_flavor_details
4404                             )
4405 0                             if available_compute_id is not None:
4406 0                                 az_check["zone_check"] = True
4407 0                                 az_check["compute_availability"] = available_compute_id
4408 0                                 break
4409                     else:
4410 0                         az_check["zone_check"] = True
4411 0         return az_check
4412
4413 1     @catch_any_exception
4414 1     def migrate_instance(self, vm_id, compute_host=None):
4415         """
4416         Migrate a vdu
4417         param:
4418             vm_id: ID of an instance
4419             compute_host: Host to migrate the vdu to
4420         """
4421 0         self._reload_connection()
4422 0         vm_state = False
4423 0         instance_state = self.get_vdu_state(vm_id, host_is_required=True)
4424 0         server_flavor_id = instance_state[1]
4425 0         server_hypervisor_name = instance_state[2]
4426 0         server_availability_zone = instance_state[3]
4427 0         server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
4428 0         server_flavor_details = [
4429             server_flavor["ram"],
4430             server_flavor["disk"],
4431             server_flavor["vcpus"],
4432         ]
4433 0         if compute_host == server_hypervisor_name:
4434 0             raise vimconn.VimConnException(
4435                 "Unable to migrate instance '{}' to the same host '{}'".format(
4436                     vm_id, compute_host
4437                 ),
4438                 http_code=vimconn.HTTP_Bad_Request,
4439             )
4440 0         az_status = self.check_availability_zone(
4441             server_availability_zone,
4442             server_flavor_details,
4443             server_hypervisor_name,
4444             compute_host,
4445         )
4446 0         availability_zone_check = az_status["zone_check"]
4447 0         available_compute_id = az_status.get("compute_availability")
4448
4449 0         if availability_zone_check is False:
4450 0             raise vimconn.VimConnException(
4451                 "Unable to migrate instance '{}' to a different availability zone".format(
4452                     vm_id
4453                 ),
4454                 http_code=vimconn.HTTP_Bad_Request,
4455             )
4456 0         if available_compute_id is not None:
4457             # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
4458 0             self.nova.servers.live_migrate(
4459                 server=vm_id,
4460                 host=available_compute_id,
4461                 block_migration=True,
4462             )
4463 0             state = "MIGRATING"
4464 0             changed_compute_host = ""
4465 0             if state == "MIGRATING":
4466 0                 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
4467 0                 changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[
4468                     2
4469                 ]
4470 0             if vm_state and changed_compute_host == available_compute_id:
4471 0                 self.logger.debug(
4472                     "Instance '{}' migrated to the new compute host '{}'".format(
4473                         vm_id, changed_compute_host
4474                     )
4475                 )
4476 0                 return state, available_compute_id
4477             else:
4478 0                 raise vimconn.VimConnException(
4479                     "Migration Failed. Instance '{}' not moved to the new host {}".format(
4480                         vm_id, available_compute_id
4481                     ),
4482                     http_code=vimconn.HTTP_Bad_Request,
4483                 )
4484         else:
4485 0             raise vimconn.VimConnException(
4486                 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
4487                     available_compute_id
4488                 ),
4489                 http_code=vimconn.HTTP_Bad_Request,
4490             )
4491
4492 1     @catch_any_exception
4493 1     def resize_instance(self, vm_id, new_flavor_id):
4494         """
4495         For resizing the vm based on the given
4496         flavor details
4497         param:
4498             vm_id : ID of an instance
4499             new_flavor_id : Flavor id to be resized
4500         Return the status of a resized instance
4501         """
4502 0         self._reload_connection()
4503 0         self.logger.debug("resize the flavor of an instance")
4504 0         instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
4505 0         old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
4506 0         new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
4507 0         if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
4508 0             if old_flavor_disk > new_flavor_disk:
4509 0                 raise nvExceptions.BadRequest(
4510                     400,
4511                     message="Server disk resize failed. Resize to lower disk flavor is not allowed",
4512                 )
4513             else:
4514 0                 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
4515 0                 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
4516 0                 if vm_state:
4517 0                     instance_resized_status = self.confirm_resize(vm_id)
4518 0                     return instance_resized_status
4519                 else:
4520 0                     raise nvExceptions.BadRequest(
4521                         409,
4522                         message="Cannot 'resize' vm_state is in ERROR",
4523                     )
4524
4525         else:
4526 0             self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
4527 0             raise nvExceptions.BadRequest(
4528                 409,
4529                 message="Cannot 'resize' instance while it is in vm_state resized",
4530             )
4531
4532 1     def confirm_resize(self, vm_id):
4533         """
4534         Confirm the resize of an instance
4535         param:
4536             vm_id: ID of an instance
4537         """
4538 0         self._reload_connection()
4539 0         self.nova.servers.confirm_resize(server=vm_id)
4540 0         if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4541 0             self.__wait_for_vm(vm_id, "ACTIVE")
4542 0         instance_status = self.get_vdu_state(vm_id)[0]
4543 0         return instance_status
4544
4545 1     def get_monitoring_data(self):
4546 1         try:
4547 1             self.logger.debug("Getting servers and ports data from Openstack VIMs.")
4548 1             self._reload_connection()
4549 1             all_servers = self.nova.servers.list(detailed=True)
4550 1             try:
4551 1                 for server in all_servers:
4552 1                     if server.flavor.get("original_name"):
4553 1                         server.flavor["id"] = self.nova.flavors.find(
4554                             name=server.flavor["original_name"]
4555                         ).id
4556 0             except nClient.exceptions.NotFound as e:
4557 0                 self.logger.warning(str(e.message))
4558 1             all_ports = self.neutron.list_ports()
4559 1             return all_servers, all_ports
4560 1         except Exception as e:
4561 1             raise vimconn.VimConnException(
4562                 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
4563             )