Code Coverage

vimconn_openstack.py

File Coverage summary

Name                   Classes      Lines            Conditionals
vimconn_openstack.py   100% (1/1)   39% (618/1586)   100% (0/0)

Coverage Breakdown by Class

Name                   Lines            Conditionals
vimconn_openstack.py   39% (618/1586)   N/A

Source

RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 #         http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 1 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, the OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 1 import copy
34 1 from http.client import HTTPException
35 1 import json
36 1 import logging
37 1 from pprint import pformat
38 1 import random
39 1 import re
40 1 import time
41 1 from typing import Dict, List, Optional, Tuple
42
43 1 from cinderclient import client as cClient
44 1 from glanceclient import client as glClient
45 1 import glanceclient.exc as gl1Exceptions
46 1 from keystoneauth1 import session
47 1 from keystoneauth1.identity import v2, v3
48 1 import keystoneclient.exceptions as ksExceptions
49 1 import keystoneclient.v2_0.client as ksClient_v2
50 1 import keystoneclient.v3.client as ksClient_v3
51 1 import netaddr
52 1 from neutronclient.common import exceptions as neExceptions
53 1 from neutronclient.neutron import client as neClient
54 1 from novaclient import client as nClient, exceptions as nvExceptions
55 1 from osm_ro_plugin import vimconn
56 1 from requests.exceptions import ConnectionError
57 1 import yaml
58
59 1 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 1 __date__ = "$22-sep-2017 23:59:59$"
61
62 1 """contain the openstack virtual machine status to openmano status"""
63 1 vmStatus2manoFormat = {
64     "ACTIVE": "ACTIVE",
65     "PAUSED": "PAUSED",
66     "SUSPENDED": "SUSPENDED",
67     "SHUTOFF": "INACTIVE",
68     "BUILD": "BUILD",
69     "ERROR": "ERROR",
70     "DELETED": "DELETED",
71 }
72 1 netStatus2manoFormat = {
73     "ACTIVE": "ACTIVE",
74     "PAUSED": "PAUSED",
75     "INACTIVE": "INACTIVE",
76     "BUILD": "BUILD",
77     "ERROR": "ERROR",
78     "DELETED": "DELETED",
79 }
80
81 1 supportedClassificationTypes = ["legacy_flow_classifier"]
82
83 # global timeout (in seconds) used when creating and deleting volumes and servers
84 1 volume_timeout = 1800
85 1 server_timeout = 1800
86
87
88 1 class SafeDumper(yaml.SafeDumper):
89 1     def represent_data(self, data):
90         # OpenStack APIs use custom subclasses of dict, which the YAML safe
91         # dumper is designed not to handle (see pyyaml issue 142)
92 0         if isinstance(data, dict) and data.__class__ != dict:
93             # A simple solution is to convert those items back to dicts
94 0             data = dict(data.items())
95
96 0         return super(SafeDumper, self).represent_data(data)
97
98
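A minimal sketch of the workaround (editor's illustration, not part of the module; FakeOSDict is a hypothetical stand-in for the dict subclasses OpenStack clients return). Plain yaml.SafeDumper would raise a RepresenterError here, while this SafeDumper converts the object back to a plain dict first:

    import yaml

    class FakeOSDict(dict):
        """Stands in for the custom dict subclasses returned by OpenStack APIs."""

    data = FakeOSDict(name="net0", status="ACTIVE")
    # yaml.dump(data, Dumper=yaml.SafeDumper)  # would raise RepresenterError
    print(yaml.dump(data, Dumper=SafeDumper, default_flow_style=True))
    # -> {name: net0, status: ACTIVE}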
99 1 class vimconnector(vimconn.VimConnector):
100 1     def __init__(
101         self,
102         uuid,
103         name,
104         tenant_id,
105         tenant_name,
106         url,
107         url_admin=None,
108         user=None,
109         passwd=None,
110         log_level=None,
111         config={},
112         persistent_info={},
113     ):
114         """using common constructor parameters. In this case
115         'url' is the keystone authorization url,
116         'url_admin' is not used
117         """
118 1         api_version = config.get("APIversion")
119
120 1         if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 0             raise vimconn.VimConnException(
122                 "Invalid value '{}' for config:APIversion. "
123                 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124             )
125
126 1         vim_type = config.get("vim_type")
127
128 1         if vim_type and vim_type not in ("vio", "VIO"):
129 0             raise vimconn.VimConnException(
130                 "Invalid value '{}' for config:vim_type."
131                 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132             )
133
134 1         if config.get("dataplane_net_vlan_range") is not None:
135             # validate vlan ranges provided by user
136 0             self._validate_vlan_ranges(
137                 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138             )
139
140 1         if config.get("multisegment_vlan_range") is not None:
141             # validate vlan ranges provided by user
142 0             self._validate_vlan_ranges(
143                 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144             )
145
146 1         vimconn.VimConnector.__init__(
147             self,
148             uuid,
149             name,
150             tenant_id,
151             tenant_name,
152             url,
153             url_admin,
154             user,
155             passwd,
156             log_level,
157             config,
158         )
159
160 1         if self.config.get("insecure") and self.config.get("ca_cert"):
161 0             raise vimconn.VimConnException(
162                 "options insecure and ca_cert are mutually exclusive"
163             )
164
165 1         self.verify = True
166
167 1         if self.config.get("insecure"):
168 0             self.verify = False
169
170 1         if self.config.get("ca_cert"):
171 0             self.verify = self.config.get("ca_cert")
172
173 1         if not url:
174 0             raise TypeError("url param can not be NoneType")
175
176 1         self.persistent_info = persistent_info
177 1         self.availability_zone = persistent_info.get("availability_zone", None)
178 1         self.session = persistent_info.get("session", {"reload_client": True})
179 1         self.my_tenant_id = self.session.get("my_tenant_id")
180 1         self.nova = self.session.get("nova")
181 1         self.neutron = self.session.get("neutron")
182 1         self.cinder = self.session.get("cinder")
183 1         self.glance = self.session.get("glance")
184         # self.glancev1 = self.session.get("glancev1")
185 1         self.keystone = self.session.get("keystone")
186 1         self.api_version3 = self.session.get("api_version3")
187 1         self.vim_type = self.config.get("vim_type")
188
189 1         if self.vim_type:
190 0             self.vim_type = self.vim_type.upper()
191
192 1         if self.config.get("use_internal_endpoint"):
193 0             self.endpoint_type = "internalURL"
194         else:
195 1             self.endpoint_type = None
196
197 1         logging.getLogger("urllib3").setLevel(logging.WARNING)
198 1         logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 1         logging.getLogger("novaclient").setLevel(logging.WARNING)
200 1         self.logger = logging.getLogger("ro.vim.openstack")
201
202         # allow security_groups to be a list or a single string
203 1         if isinstance(self.config.get("security_groups"), str):
204 0             self.config["security_groups"] = [self.config["security_groups"]]
205
206 1         self.security_groups_id = None
207
208         # ###### VIO Specific Changes #########
209 1         if self.vim_type == "VIO":
210 0             self.logger = logging.getLogger("ro.vim.vio")
211
212 1         if log_level:
213 0             self.logger.setLevel(getattr(logging, log_level))
214
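A minimal construction sketch (illustrative values only; the URL and credentials are placeholders, and the config keys shown are the ones validated above):

    conn = vimconnector(
        uuid="vim-0001",
        name="mylab",
        tenant_id=None,
        tenant_name="demo",
        url="https://keystone.example.com:5000/v3",
        user="osm",
        passwd="secret",
        config={
            "APIversion": "v3.3",          # one of "v3.3", "v2.0", "2", "3"
            "security_groups": "default",  # a plain str is normalized to a list
            "insecure": True,              # mutually exclusive with "ca_cert"
        },
    )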
215 1     def __getitem__(self, index):
216         """Get individual parameters.
217         Throws KeyError"""
218 0         if index == "project_domain_id":
219 0             return self.config.get("project_domain_id")
220 0         elif index == "user_domain_id":
221 0             return self.config.get("user_domain_id")
222         else:
223 0             return vimconn.VimConnector.__getitem__(self, index)
224
225 1     def __setitem__(self, index, value):
226         """Set individual parameters and mark the session as dirty to force a connection reload.
227         Throws KeyError"""
228 0         if index == "project_domain_id":
229 0             self.config["project_domain_id"] = value
230 0         elif index == "user_domain_id":
231 0             self.config["user_domain_id"] = value
232         else:
233 0             vimconn.VimConnector.__setitem__(self, index, value)
234
235 0         self.session["reload_client"] = True
236
237 1     def serialize(self, value):
238         """Serialization of python basic types.
239
240         In the case value is not serializable a message will be logged and a
241         simple representation of the data that cannot be converted back to
242         python is returned.
243         """
244 0         if isinstance(value, str):
245 0             return value
246
247 0         try:
248 0             return yaml.dump(
249                 value, Dumper=SafeDumper, default_flow_style=True, width=256
250             )
251 0         except yaml.representer.RepresenterError:
252 0             self.logger.debug(
253                 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254                 pformat(value),
255                 exc_info=True,
256             )
257
258 0             return str(value)
259
260 1     def _reload_connection(self):
261         """Called before any operation; checks whether credentials have changed.
262         Throws keystoneclient.apiclient.exceptions.AuthorizationFailure
263         """
264         # TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-)
265 0         if self.session["reload_client"]:
266 0             if self.config.get("APIversion"):
267 0                 self.api_version3 = (
268                     self.config["APIversion"] == "v3.3"
269                     or self.config["APIversion"] == "3"
270                 )
271             else:  # get from ending auth_url that end with v3 or with v2.0
272 0                 self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
273                     "/v3/"
274                 )
275
276 0             self.session["api_version3"] = self.api_version3
277
278 0             if self.api_version3:
279 0                 if self.config.get("project_domain_id") or self.config.get(
280                     "project_domain_name"
281                 ):
282 0                     project_domain_id_default = None
283                 else:
284 0                     project_domain_id_default = "default"
285
286 0                 if self.config.get("user_domain_id") or self.config.get(
287                     "user_domain_name"
288                 ):
289 0                     user_domain_id_default = None
290                 else:
291 0                     user_domain_id_default = "default"
292 0                 auth = v3.Password(
293                     auth_url=self.url,
294                     username=self.user,
295                     password=self.passwd,
296                     project_name=self.tenant_name,
297                     project_id=self.tenant_id,
298                     project_domain_id=self.config.get(
299                         "project_domain_id", project_domain_id_default
300                     ),
301                     user_domain_id=self.config.get(
302                         "user_domain_id", user_domain_id_default
303                     ),
304                     project_domain_name=self.config.get("project_domain_name"),
305                     user_domain_name=self.config.get("user_domain_name"),
306                 )
307             else:
308 0                 auth = v2.Password(
309                     auth_url=self.url,
310                     username=self.user,
311                     password=self.passwd,
312                     tenant_name=self.tenant_name,
313                     tenant_id=self.tenant_id,
314                 )
315
316 0             sess = session.Session(auth=auth, verify=self.verify)
317             # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318             # Titanium Cloud and StarlingX
319 0             region_name = self.config.get("region_name")
320
321 0             if self.api_version3:
322 0                 self.keystone = ksClient_v3.Client(
323                     session=sess,
324                     endpoint_type=self.endpoint_type,
325                     region_name=region_name,
326                 )
327             else:
328 0                 self.keystone = ksClient_v2.Client(
329                     session=sess, endpoint_type=self.endpoint_type
330                 )
331
332 0             self.session["keystone"] = self.keystone
333             # In order to enable microversion functionality an explicit microversion must be specified in "config".
334             # This implementation approach is due to the warning message in
335             # https://developer.openstack.org/api-guide/compute/microversions.html
336             # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337             # always require a specific microversion.
338             # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
339 0             version = self.config.get("microversion")
340
341 0             if not version:
342 0                 version = "2.60"
343
344             # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345             # Titanium Cloud and StarlingX
346 0             self.nova = self.session["nova"] = nClient.Client(
347                 str(version),
348                 session=sess,
349                 endpoint_type=self.endpoint_type,
350                 region_name=region_name,
351             )
352 0             self.neutron = self.session["neutron"] = neClient.Client(
353                 "2.0",
354                 session=sess,
355                 endpoint_type=self.endpoint_type,
356                 region_name=region_name,
357             )
358
359 0             if sess.get_all_version_data(service_type="volumev2"):
360 0                 self.cinder = self.session["cinder"] = cClient.Client(
361                     2,
362                     session=sess,
363                     endpoint_type=self.endpoint_type,
364                     region_name=region_name,
365                 )
366             else:
367 0                 self.cinder = self.session["cinder"] = cClient.Client(
368                     3,
369                     session=sess,
370                     endpoint_type=self.endpoint_type,
371                     region_name=region_name,
372                 )
373
374 0             try:
375 0                 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
376 0             except Exception:
377 0                 self.logger.error("Cannot get project_id from session", exc_info=True)
378
379 0             if self.endpoint_type == "internalURL":
380 0                 glance_service_id = self.keystone.services.list(name="glance")[0].id
381 0                 glance_endpoint = self.keystone.endpoints.list(
382                     glance_service_id, interface="internal"
383                 )[0].url
384             else:
385 0                 glance_endpoint = None
386
387 0             self.glance = self.session["glance"] = glClient.Client(
388                 2, session=sess, endpoint=glance_endpoint
389             )
390             # using version 1 of glance client in new_image()
391             # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
392             #                                                            endpoint=glance_endpoint)
393 0             self.session["reload_client"] = False
394 0             self.persistent_info["session"] = self.session
395             # add availability zone info inside self.persistent_info
396 0             self._set_availablity_zones()
397 0             self.persistent_info["availability_zone"] = self.availability_zone
398             # force to get again security_groups_ids next time they are needed
399 0             self.security_groups_id = None
400
401 1     def __net_os2mano(self, net_list_dict):
402         """Transform the net openstack format to mano format
403         net_list_dict can be a list of dict or a single dict"""
404 0         if type(net_list_dict) is dict:
405 0             net_list_ = (net_list_dict,)
406 0         elif type(net_list_dict) is list:
407 0             net_list_ = net_list_dict
408         else:
409 0             raise TypeError("param net_list_dict must be a list or a dictionary")
410 0         for net in net_list_:
411 0             if net.get("provider:network_type") == "vlan":
412 0                 net["type"] = "data"
413             else:
414 0                 net["type"] = "bridge"
415
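To illustrate the mapping (made-up values; the helper is name-mangled, so this shows how it behaves rather than how it is called externally):

    nets = [
        {"name": "n1", "provider:network_type": "vlan"},   # gains "type": "data"
        {"name": "n2", "provider:network_type": "vxlan"},  # gains "type": "bridge"
    ]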
416 1     def __classification_os2mano(self, class_list_dict):
417         """Transform the openstack format (Flow Classifier) to mano format
418         (Classification) class_list_dict can be a list of dict or a single dict
419         """
420 0         if isinstance(class_list_dict, dict):
421 0             class_list_ = [class_list_dict]
422 0         elif isinstance(class_list_dict, list):
423 0             class_list_ = class_list_dict
424         else:
425 0             raise TypeError("param class_list_dict must be a list or a dictionary")
426 0         for classification in class_list_:
427 0             id = classification.pop("id")
428 0             name = classification.pop("name")
429 0             description = classification.pop("description")
430 0             project_id = classification.pop("project_id")
431 0             tenant_id = classification.pop("tenant_id")
432 0             original_classification = copy.deepcopy(classification)
433 0             classification.clear()
434 0             classification["ctype"] = "legacy_flow_classifier"
435 0             classification["definition"] = original_classification
436 0             classification["id"] = id
437 0             classification["name"] = name
438 0             classification["description"] = description
439 0             classification["project_id"] = project_id
440 0             classification["tenant_id"] = tenant_id
441
442 1     def __sfi_os2mano(self, sfi_list_dict):
443         """Transform the openstack format (Port Pair) to mano format (SFI)
444         sfi_list_dict can be a list of dict or a single dict
445         """
446 0         if isinstance(sfi_list_dict, dict):
447 0             sfi_list_ = [sfi_list_dict]
448 0         elif isinstance(sfi_list_dict, list):
449 0             sfi_list_ = sfi_list_dict
450         else:
451 0             raise TypeError("param sfi_list_dict must be a list or a dictionary")
452
453 0         for sfi in sfi_list_:
454 0             sfi["ingress_ports"] = []
455 0             sfi["egress_ports"] = []
456
457 0             if sfi.get("ingress"):
458 0                 sfi["ingress_ports"].append(sfi["ingress"])
459
460 0             if sfi.get("egress"):
461 0                 sfi["egress_ports"].append(sfi["egress"])
462
463 0             del sfi["ingress"]
464 0             del sfi["egress"]
465 0             params = sfi.get("service_function_parameters")
466 0             sfc_encap = False
467
468 0             if params:
469 0                 correlation = params.get("correlation")
470
471 0                 if correlation:
472 0                     sfc_encap = True
473
474 0             sfi["sfc_encap"] = sfc_encap
475 0             del sfi["service_function_parameters"]
476
477 1     def __sf_os2mano(self, sf_list_dict):
478         """Transform the openstack format (Port Pair Group) to mano format (SF)
479         sf_list_dict can be a list of dict or a single dict
480         """
481 0         if isinstance(sf_list_dict, dict):
482 0             sf_list_ = [sf_list_dict]
483 0         elif isinstance(sf_list_dict, list):
484 0             sf_list_ = sf_list_dict
485         else:
486 0             raise TypeError("param sf_list_dict must be a list or a dictionary")
487
488 0         for sf in sf_list_:
489 0             del sf["port_pair_group_parameters"]
490 0             sf["sfis"] = sf["port_pairs"]
491 0             del sf["port_pairs"]
492
493 1     def __sfp_os2mano(self, sfp_list_dict):
494         """Transform the openstack format (Port Chain) to mano format (SFP)
495         sfp_list_dict can be a list of dict or a single dict
496         """
497 0         if isinstance(sfp_list_dict, dict):
498 0             sfp_list_ = [sfp_list_dict]
499 0         elif isinstance(sfp_list_dict, list):
500 0             sfp_list_ = sfp_list_dict
501         else:
502 0             raise TypeError("param sfp_list_dict must be a list or a dictionary")
503
504 0         for sfp in sfp_list_:
505 0             params = sfp.pop("chain_parameters")
506 0             sfc_encap = False
507
508 0             if params:
509 0                 correlation = params.get("correlation")
510
511 0                 if correlation:
512 0                     sfc_encap = True
513
514 0             sfp["sfc_encap"] = sfc_encap
515 0             sfp["spi"] = sfp.pop("chain_id")
516 0             sfp["classifications"] = sfp.pop("flow_classifiers")
517 0             sfp["service_functions"] = sfp.pop("port_pair_groups")
518
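A sketch of a Neutron Port Chain dict before and after __sfp_os2mano (values made up):

    sfp = {
        "id": "pc-1",
        "chain_parameters": {"correlation": "nsh"},
        "chain_id": 7,
        "flow_classifiers": ["fc-1"],
        "port_pair_groups": ["ppg-1"],
    }
    # after the transformation the same dict reads:
    # {"id": "pc-1", "sfc_encap": True, "spi": 7,
    #  "classifications": ["fc-1"], "service_functions": ["ppg-1"]}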
519     # placeholder for now; read TODO note below
520 1     def _validate_classification(self, type, definition):
521         # only legacy_flow_classifier Type is supported at this point
522 0         return True
523         # TODO(igordcard): this method should be an abstract method of an
524         # abstract Classification class to be implemented by the specific
525         # Types. Also, abstract vimconnector should call the validation
526         # method before the implemented VIM connectors are called.
527
528 1     def _format_exception(self, exception):
529         """Transform a keystone, nova or neutron exception into a vimconn exception, identifying the cause"""
530 0         message_error = str(exception)
531 0         tip = ""
532
533 0         if isinstance(
534             exception,
535             (
536                 neExceptions.NetworkNotFoundClient,
537                 nvExceptions.NotFound,
538                 ksExceptions.NotFound,
539                 gl1Exceptions.HTTPNotFound,
540             ),
541         ):
542 0             raise vimconn.VimConnNotFoundException(
543                 type(exception).__name__ + ": " + message_error
544             )
545 0         elif isinstance(
546             exception,
547             (
548                 HTTPException,
549                 gl1Exceptions.HTTPException,
550                 gl1Exceptions.CommunicationError,
551                 ConnectionError,
552                 ksExceptions.ConnectionError,
553                 neExceptions.ConnectionFailed,
554             ),
555         ):
556 0             if type(exception).__name__ == "SSLError":
557 0                 tip = " (maybe option 'insecure' must be added to the VIM)"
558
559 0             raise vimconn.VimConnConnectionException(
560                 "Invalid URL or credentials{}: {}".format(tip, message_error)
561             )
562 0         elif isinstance(
563             exception,
564             (
565                 KeyError,
566                 nvExceptions.BadRequest,
567                 ksExceptions.BadRequest,
568             ),
569         ):
570 0             if message_error == "OS-EXT-SRV-ATTR:host":
571 0                 tip = " (If the user does not have non-admin credentials, this attribute will be missing)"
572 0                 raise vimconn.VimConnInsufficientCredentials(
573                     type(exception).__name__ + ": " + message_error + tip
574                 )
575 0             raise vimconn.VimConnException(
576                 type(exception).__name__ + ": " + message_error
577             )
578
579 0         elif isinstance(
580             exception,
581             (
582                 nvExceptions.ClientException,
583                 ksExceptions.ClientException,
584                 neExceptions.NeutronException,
585             ),
586         ):
587 0             raise vimconn.VimConnUnexpectedResponse(
588                 type(exception).__name__ + ": " + message_error
589             )
590 0         elif isinstance(exception, nvExceptions.Conflict):
591 0             raise vimconn.VimConnConflictException(
592                 type(exception).__name__ + ": " + message_error
593             )
594 0         elif isinstance(exception, vimconn.VimConnException):
595 0             raise exception
596         else:  # ()
597 0             self.logger.error("General Exception " + message_error, exc_info=True)
598
599 0             raise vimconn.VimConnConnectionException(
600                 type(exception).__name__ + ": " + message_error
601             )
602
603 1     def _get_ids_from_name(self):
604         """
605         Obtain ids from the names of tenant and security_groups. Store them at self.security_groups_id.
606         :return: None
607         """
608         # get tenant_id if only tenant_name is supplied
609 0         self._reload_connection()
610
611 0         if not self.my_tenant_id:
612 0             raise vimconn.VimConnConnectionException(
613                 "Error getting tenant information from name={} id={}".format(
614                     self.tenant_name, self.tenant_id
615                 )
616             )
617
618 0         if self.config.get("security_groups") and not self.security_groups_id:
619             # convert from name to id
620 0             neutron_sg_list = self.neutron.list_security_groups(
621                 tenant_id=self.my_tenant_id
622             )["security_groups"]
623
624 0             self.security_groups_id = []
625 0             for sg in self.config.get("security_groups"):
626 0                 for neutron_sg in neutron_sg_list:
627 0                     if sg in (neutron_sg["id"], neutron_sg["name"]):
628 0                         self.security_groups_id.append(neutron_sg["id"])
629 0                         break
630                 else:
631 0                     self.security_groups_id = None
632
633 0                     raise vimconn.VimConnConnectionException(
634                         "Not found security group {} for this tenant".format(sg)
635                     )
636
637 1     def _find_nova_server(self, vm_id):
638         """
639         Returns the VM instance from Openstack and completes it with flavor ID
640         Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
641         """
642 0         try:
643 0             self._reload_connection()
644 0             server = self.nova.servers.find(id=vm_id)
645             # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
646 0             server_dict = server.to_dict()
647 0             try:
648 0                 if server_dict["flavor"].get("original_name"):
649 0                     server_dict["flavor"]["id"] = self.nova.flavors.find(
650                         name=server_dict["flavor"]["original_name"]
651                     ).id
652 0             except nClient.exceptions.NotFound as e:
653 0                 self.logger.warning(str(e.message))
654 0             return server_dict
655 0         except (
656             ksExceptions.ClientException,
657             nvExceptions.ClientException,
658             nvExceptions.NotFound,
659             ConnectionError,
660         ) as e:
661 0             self._format_exception(e)
662
663 1     def check_vim_connectivity(self):
664         # just get network list to check connectivity and credentials
665 0         self.get_network_list(filter_dict={})
666
667 1     def get_tenant_list(self, filter_dict={}):
668         """Obtain tenants of VIM
669         filter_dict can contain the following keys:
670             name: filter by tenant name
671             id: filter by tenant uuid/id
672             <other VIM specific>
673         Returns the tenant list of dictionaries: [{'name': '<name>', 'id': '<id>', ...}, ...]
674         """
675 0         self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
676
677 0         try:
678 0             self._reload_connection()
679
680 0             if self.api_version3:
681 0                 project_class_list = self.keystone.projects.list(
682                     name=filter_dict.get("name")
683                 )
684             else:
685 0                 project_class_list = self.keystone.tenants.findall(**filter_dict)
686
687 0             project_list = []
688
689 0             for project in project_class_list:
690 0                 if filter_dict.get("id") and filter_dict["id"] != project.id:
691 0                     continue
692
693 0                 project_list.append(project.to_dict())
694
695 0             return project_list
696 0         except (
697             ksExceptions.ConnectionError,
698             ksExceptions.ClientException,
699             ConnectionError,
700         ) as e:
701 0             self._format_exception(e)
702
703 1     def new_tenant(self, tenant_name, tenant_description):
704         """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
705 0         self.logger.debug("Adding a new tenant name: %s", tenant_name)
706
707 0         try:
708 0             self._reload_connection()
709
710 0             if self.api_version3:
711 0                 project = self.keystone.projects.create(
712                     tenant_name,
713                     self.config.get("project_domain_id", "default"),
714                     description=tenant_description,
715                     is_domain=False,
716                 )
717             else:
718 0                 project = self.keystone.tenants.create(tenant_name, tenant_description)
719
720 0             return project.id
721 0         except (
722             ksExceptions.ConnectionError,
723             ksExceptions.ClientException,
724             ksExceptions.BadRequest,
725             ConnectionError,
726         ) as e:
727 0             self._format_exception(e)
728
729 1     def delete_tenant(self, tenant_id):
730         """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
731 0         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
732
733 0         try:
734 0             self._reload_connection()
735
736 0             if self.api_version3:
737 0                 self.keystone.projects.delete(tenant_id)
738             else:
739 0                 self.keystone.tenants.delete(tenant_id)
740
741 0             return tenant_id
742 0         except (
743             ksExceptions.ConnectionError,
744             ksExceptions.ClientException,
745             ksExceptions.NotFound,
746             ConnectionError,
747         ) as e:
748 0             self._format_exception(e)
749
750 1     def new_network(
751         self,
752         net_name,
753         net_type,
754         ip_profile=None,
755         shared=False,
756         provider_network_profile=None,
757     ):
758         """Adds a tenant network to VIM
759         Params:
760             'net_name': name of the network
761             'net_type': one of:
762                 'bridge': overlay isolated network
763                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
764                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
765             'ip_profile': is a dict containing the IP parameters of the network
766                 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
767                 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
768                 'gateway_address': (Optional) ip_schema, that is X.X.X.X
769                 'dns_address': (Optional) semicolon-separated list of ip_schema, e.g. X.X.X.X[;X.X.X.X]
770                 'dhcp_enabled': True or False
771                 'dhcp_start_address': ip_schema, first IP to grant
772                 'dhcp_count': number of IPs to grant.
773             'shared': if this network can be seen/used by other tenants/organizations
774             'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
775                                                              physical-network: physnet-label}
776         Returns a tuple with the network identifier and created_items, or raises an exception on error
777             created_items can be None or a dictionary where this method can include key-values that will be passed to
778             the method delete_network. Can be used to store created segments, created l2gw connections, etc.
779             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
780             as not present.
781         """
782 0         self.logger.debug(
783             "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
784         )
785         # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
786
787 0         try:
788 0             vlan = None
789
790 0             if provider_network_profile:
791 0                 vlan = provider_network_profile.get("segmentation-id")
792
793 0             new_net = None
794 0             created_items = {}
795 0             self._reload_connection()
796 0             network_dict = {"name": net_name, "admin_state_up": True}
797
798 0             if net_type in ("data", "ptp") or provider_network_profile:
799 0                 provider_physical_network = None
800
801 0                 if provider_network_profile and provider_network_profile.get(
802                     "physical-network"
803                 ):
804 0                     provider_physical_network = provider_network_profile.get(
805                         "physical-network"
806                     )
807
808                     # provider-network must be one of dataplane_physical_net if this is a list. If it is a string
809                     # or not declared, just skip the check
810 0                     if (
811                         isinstance(
812                             self.config.get("dataplane_physical_net"), (tuple, list)
813                         )
814                         and provider_physical_network
815                         not in self.config["dataplane_physical_net"]
816                     ):
817 0                         raise vimconn.VimConnConflictException(
818                             "Invalid parameter 'provider-network:physical-network' "
819                             "for network creation. '{}' is not one of the declared "
820                             "list at VIM_config:dataplane_physical_net".format(
821                                 provider_physical_network
822                             )
823                         )
824
825                 # use the default dataplane_physical_net
826 0                 if not provider_physical_network:
827 0                     provider_physical_network = self.config.get(
828                         "dataplane_physical_net"
829                     )
830
831                     # if it is a non-empty list, use the first value. If it is a string, use the value directly
832 0                     if (
833                         isinstance(provider_physical_network, (tuple, list))
834                         and provider_physical_network
835                     ):
836 0                         provider_physical_network = provider_physical_network[0]
837
838 0                 if not provider_physical_network:
839 0                     raise vimconn.VimConnConflictException(
840                         "missing information needed for underlay networks. Provide "
841                         "'dataplane_physical_net' configuration at VIM or use the NS "
842                         "instantiation parameter 'provider-network.physical-network'"
843                         " for the VLD"
844                     )
845
846 0                 if not self.config.get("multisegment_support"):
847 0                     network_dict[
848                         "provider:physical_network"
849                     ] = provider_physical_network
850
851 0                     if (
852                         provider_network_profile
853                         and "network-type" in provider_network_profile
854                     ):
855 0                         network_dict[
856                             "provider:network_type"
857                         ] = provider_network_profile["network-type"]
858                     else:
859 0                         network_dict["provider:network_type"] = self.config.get(
860                             "dataplane_network_type", "vlan"
861                         )
862
863 0                     if vlan:
864 0                         network_dict["provider:segmentation_id"] = vlan
865                 else:
866                     # Multi-segment case
867 0                     segment_list = []
868 0                     segment1_dict = {
869                         "provider:physical_network": "",
870                         "provider:network_type": "vxlan",
871                     }
872 0                     segment_list.append(segment1_dict)
873 0                     segment2_dict = {
874                         "provider:physical_network": provider_physical_network,
875                         "provider:network_type": "vlan",
876                     }
877
878 0                     if vlan:
879 0                         segment2_dict["provider:segmentation_id"] = vlan
880 0                     elif self.config.get("multisegment_vlan_range"):
881 0                         vlanID = self._generate_multisegment_vlanID()
882 0                         segment2_dict["provider:segmentation_id"] = vlanID
883
884                     # else
885                     #     raise vimconn.VimConnConflictException(
886                     #         "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment
887                     #         network")
888 0                     segment_list.append(segment2_dict)
889 0                     network_dict["segments"] = segment_list
890
891                 # VIO Specific Changes. It needs a concrete VLAN
892 0                 if self.vim_type == "VIO" and vlan is None:
893 0                     if self.config.get("dataplane_net_vlan_range") is None:
894 0                         raise vimconn.VimConnConflictException(
895                             "You must provide 'dataplane_net_vlan_range' in format "
896                             "[start_ID - end_ID] at VIM_config for creating underlay "
897                             "networks"
898                         )
899
900 0                     network_dict["provider:segmentation_id"] = self._generate_vlanID()
901
902 0             network_dict["shared"] = shared
903
904 0             if self.config.get("disable_network_port_security"):
905 0                 network_dict["port_security_enabled"] = False
906
907 0             if self.config.get("neutron_availability_zone_hints"):
908 0                 hints = self.config.get("neutron_availability_zone_hints")
909
910 0                 if isinstance(hints, str):
911 0                     hints = [hints]
912
913 0                 network_dict["availability_zone_hints"] = hints
914
915 0             new_net = self.neutron.create_network({"network": network_dict})
916             # print new_net
917             # create subnetwork, even if there is no profile
918
919 0             if not ip_profile:
920 0                 ip_profile = {}
921
922 0             if not ip_profile.get("subnet_address"):
923                 # Fake subnet is required
924 0                 subnet_rand = random.SystemRandom().randint(0, 255)
925 0                 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
926
927 0             if "ip_version" not in ip_profile:
928 0                 ip_profile["ip_version"] = "IPv4"
929
930 0             subnet = {
931                 "name": net_name + "-subnet",
932                 "network_id": new_net["network"]["id"],
933                 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
934                 "cidr": ip_profile["subnet_address"],
935             }
936
937             # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
938 0             if ip_profile.get("gateway_address"):
939 0                 subnet["gateway_ip"] = ip_profile["gateway_address"]
940             else:
941 0                 subnet["gateway_ip"] = None
942
943 0             if ip_profile.get("dns_address"):
944 0                 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
945
946 0             if "dhcp_enabled" in ip_profile:
947 0                 subnet["enable_dhcp"] = (
948                     False
949                     if ip_profile["dhcp_enabled"] == "false"
950                     or ip_profile["dhcp_enabled"] is False
951                     else True
952                 )
953
954 0             if ip_profile.get("dhcp_start_address"):
955 0                 subnet["allocation_pools"] = []
956 0                 subnet["allocation_pools"].append(dict())
957 0                 subnet["allocation_pools"][0]["start"] = ip_profile[
958                     "dhcp_start_address"
959                 ]
960
961 0             if ip_profile.get("dhcp_count"):
962                 # parts = ip_profile["dhcp_start_address"].split(".")
963                 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
964 0                 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
965 0                 ip_int += ip_profile["dhcp_count"] - 1
966 0                 ip_str = str(netaddr.IPAddress(ip_int))
967 0                 subnet["allocation_pools"][0]["end"] = ip_str
968
969 0             if (
970                 ip_profile.get("ipv6_address_mode")
971                 and ip_profile["ip_version"] != "IPv4"
972             ):
973 0                 subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
974                 # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
975                 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
976 0                 subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
977
978             # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
979 0             self.neutron.create_subnet({"subnet": subnet})
980
981 0             if net_type == "data" and self.config.get("multisegment_support"):
982 0                 if self.config.get("l2gw_support"):
983 0                     l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
984 0                     for l2gw in l2gw_list:
985 0                         l2gw_conn = {
986                             "l2_gateway_id": l2gw["id"],
987                             "network_id": new_net["network"]["id"],
988                             "segmentation_id": str(vlan if vlan is not None else vlanID),
989                         }
990 0                         new_l2gw_conn = self.neutron.create_l2_gateway_connection(
991                             {"l2_gateway_connection": l2gw_conn}
992                         )
993 0                         created_items[
994                             "l2gwconn:"
995                             + str(new_l2gw_conn["l2_gateway_connection"]["id"])
996                         ] = True
997
998 0             return new_net["network"]["id"], created_items
999 0         except Exception as e:
1000             # delete l2gw connections (if any) before deleting the network
1001 0             for k, v in created_items.items():
1002 0                 if not v:  # skip already deleted
1003 0                     continue
1004
1005 0                 try:
1006 0                     k_item, _, k_id = k.partition(":")
1007
1008 0                     if k_item == "l2gwconn":
1009 0                         self.neutron.delete_l2_gateway_connection(k_id)
1010 0                 except Exception as e2:
1011 0                     self.logger.error(
1012                         "Error deleting l2 gateway connection: {}: {}".format(
1013                             type(e2).__name__, e2
1014                         )
1015                     )
1016
1017 0             if new_net:
1018 0                 self.neutron.delete_network(new_net["network"]["id"])
1019
1020 0             self._format_exception(e)
1021
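A sketch of the ip_profile accepted by new_network (all values illustrative; note that the subnet code above splits dns_address on ";"):

    ip_profile = {
        "ip_version": "IPv4",
        "subnet_address": "10.0.0.0/24",
        "gateway_address": "10.0.0.1",
        "dns_address": "8.8.8.8;1.1.1.1",
        "dhcp_enabled": True,
        "dhcp_start_address": "10.0.0.10",
        "dhcp_count": 50,
    }
    net_id, created_items = conn.new_network("mgmt-net", "bridge", ip_profile=ip_profile)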
1022 1     def get_network_list(self, filter_dict={}):
1023         """Obtain tenant networks of VIM
1024         Filter_dict can be:
1025             name: network name
1026             id: network uuid
1027             shared: boolean
1028             tenant_id: tenant
1029             admin_state_up: boolean
1030             status: 'ACTIVE'
1031         Returns the network list of dictionaries
1032         """
1033 0         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
1034
1035 0         try:
1036 0             self._reload_connection()
1037 0             filter_dict_os = filter_dict.copy()
1038
1039 0             if self.api_version3 and "tenant_id" in filter_dict_os:
1040                 # TODO check
1041 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
1042
1043 0             net_dict = self.neutron.list_networks(**filter_dict_os)
1044 0             net_list = net_dict["networks"]
1045 0             self.__net_os2mano(net_list)
1046
1047 0             return net_list
1048 0         except (
1049             neExceptions.ConnectionFailed,
1050             ksExceptions.ClientException,
1051             neExceptions.NeutronException,
1052             ConnectionError,
1053         ) as e:
1054 0             self._format_exception(e)
1055
1056 1     def get_network(self, net_id):
1057         """Obtain details of network from VIM
1058         Returns the network information from a network id"""
1059 0         self.logger.debug(" Getting tenant network %s from VIM", net_id)
1060 0         filter_dict = {"id": net_id}
1061 0         net_list = self.get_network_list(filter_dict)
1062
1063 0         if len(net_list) == 0:
1064 0             raise vimconn.VimConnNotFoundException(
1065                 "Network '{}' not found".format(net_id)
1066             )
1067 0         elif len(net_list) > 1:
1068 0             raise vimconn.VimConnConflictException(
1069                 "Found more than one network with this criteria"
1070             )
1071
1072 0         net = net_list[0]
1073 0         subnets = []
1074 0         for subnet_id in net.get("subnets", ()):
1075 0             try:
1076 0                 subnet = self.neutron.show_subnet(subnet_id)
1077 0             except Exception as e:
1078 0                 self.logger.error(
1079                     "osconnector.get_network(): Error getting subnet %s %s"
1080                     % (net_id, str(e))
1081                 )
1082 0                 subnet = {"id": subnet_id, "fault": str(e)}
1083
1084 0             subnets.append(subnet)
1085
1086 0         net["subnets"] = subnets
1087 0         net["encapsulation"] = net.get("provider:network_type")
1088 0         net["encapsulation_type"] = net.get("provider:network_type")
1089 0         net["segmentation_id"] = net.get("provider:segmentation_id")
1090 0         net["encapsulation_id"] = net.get("provider:segmentation_id")
1091
1092 0         return net
1093
1094 1     def delete_network(self, net_id, created_items=None):
1095         """
1096         Removes a tenant network from VIM and its associated elements
1097         :param net_id: VIM identifier of the network, provided by method new_network
1098         :param created_items: dictionary with extra items to be deleted. provided by method new_network
1099         Returns the network identifier or raises an exception upon error or when network is not found
1100         """
1101 0         self.logger.debug("Deleting network '%s' from VIM", net_id)
1102
1103 0         if created_items is None:
1104 0             created_items = {}
1105
1106 0         try:
1107 0             self._reload_connection()
1108             # delete l2gw connections (if any) before deleting the network
1109 0             for k, v in created_items.items():
1110 0                 if not v:  # skip already deleted
1111 0                     continue
1112
1113 0                 try:
1114 0                     k_item, _, k_id = k.partition(":")
1115 0                     if k_item == "l2gwconn":
1116 0                         self.neutron.delete_l2_gateway_connection(k_id)
1117 0                 except Exception as e:
1118 0                     self.logger.error(
1119                         "Error deleting l2 gateway connection: {}: {}".format(
1120                             type(e).__name__, e
1121                         )
1122                     )
1123
1124             # delete VM ports attached to this network before the network itself
1125 0             ports = self.neutron.list_ports(network_id=net_id)
1126 0             for p in ports["ports"]:
1127 0                 try:
1128 0                     self.neutron.delete_port(p["id"])
1129 0                 except Exception as e:
1130 0                     self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1131
1132 0             self.neutron.delete_network(net_id)
1133
1134 0             return net_id
1135 0         except (
1136             neExceptions.ConnectionFailed,
1137             neExceptions.NetworkNotFoundClient,
1138             neExceptions.NeutronException,
1139             ksExceptions.ClientException,
1140             neExceptions.NeutronException,
1141             ConnectionError,
1142         ) as e:
1143 0             self._format_exception(e)
1144
1145 1     def refresh_nets_status(self, net_list):
1146         """Get the status of the networks
1147         Params: the list of network identifiers
1148         Returns a dictionary with:
1149             net_id:         #VIM id of this network
1150                 status:     #Mandatory. Text with one of:
1151                             #  DELETED (not found at vim)
1152                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1153                             #  OTHER (Vim reported other status not understood)
1154                             #  ERROR (VIM indicates an ERROR status)
1155                             #  ACTIVE, INACTIVE, DOWN (admin down),
1156                             #  BUILD (on building process)
1157                             #
1158                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
1159                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
1160         """
1161 0         net_dict = {}
1162
1163 0         for net_id in net_list:
1164 0             net = {}
1165
1166 0             try:
1167 0                 net_vim = self.get_network(net_id)
1168
1169 0                 if net_vim["status"] in netStatus2manoFormat:
1170 0                     net["status"] = netStatus2manoFormat[net_vim["status"]]
1171                 else:
1172 0                     net["status"] = "OTHER"
1173 0                     net["error_msg"] = "VIM status reported " + net_vim["status"]
1174
1175 0                 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1176 0                     net["status"] = "DOWN"
1177
1178 0                 net["vim_info"] = self.serialize(net_vim)
1179
1180 0                 if net_vim.get("fault"):  # TODO
1181 0                     net["error_msg"] = str(net_vim["fault"])
1182 0             except vimconn.VimConnNotFoundException as e:
1183 0                 self.logger.error("Exception getting net status: %s", str(e))
1184 0                 net["status"] = "DELETED"
1185 0                 net["error_msg"] = str(e)
1186 0             except vimconn.VimConnException as e:
1187 0                 self.logger.error("Exception getting net status: %s", str(e))
1188 0                 net["status"] = "VIM_ERROR"
1189 0                 net["error_msg"] = str(e)
1190 0             net_dict[net_id] = net
1191 0         return net_dict
1192
1193 1     def get_flavor(self, flavor_id):
1194         """Obtain flavor details from the VIM. Returns the flavor dict details"""
1195 0         self.logger.debug("Getting flavor '%s'", flavor_id)
1196
1197 0         try:
1198 0             self._reload_connection()
1199 0             flavor = self.nova.flavors.find(id=flavor_id)
1200             # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1201
1202 0             return flavor.to_dict()
1203 0         except (
1204             nvExceptions.NotFound,
1205             nvExceptions.ClientException,
1206             ksExceptions.ClientException,
1207             ConnectionError,
1208         ) as e:
1209 0             self._format_exception(e)
1210
1211 1     def get_flavor_id_from_data(self, flavor_dict):
1212         """Obtain the flavor id that matches the flavor description
1213         Returns the flavor_id or raises a VimConnNotFoundException
1214         flavor_dict: contains the required ram, vcpus, disk
1215         If 'use_existing_flavors' is set to True at config, the closest flavor that provides the same or more ram,
1216             vcpus and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a
1217             VimConnNotFoundException is raised
1218         """
1219 0         exact_match = False if self.config.get("use_existing_flavors") else True
1220
1221 0         try:
1222 0             self._reload_connection()
1223 0             flavor_candidate_id = None
1224 0             flavor_candidate_data = (10000, 10000, 10000)
1225 0             flavor_target = (
1226                 flavor_dict["ram"],
1227                 flavor_dict["vcpus"],
1228                 flavor_dict["disk"],
1229                 flavor_dict.get("ephemeral", 0),
1230                 flavor_dict.get("swap", 0),
1231             )
1232             # numa=None
1233 0             extended = flavor_dict.get("extended", {})
1234 0             if extended:
1235                 # TODO
1236 0                 raise vimconn.VimConnNotFoundException(
1237                     "Flavor with EPA still not implemented"
1238                 )
1239                 # if len(numas) > 1:
1240                 #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1241                 # numa=numas[0]
1242                 # numas = extended.get("numas")
1243 0             for flavor in self.nova.flavors.list():
1244 0                 epa = flavor.get_keys()
1245
1246 0                 if epa:
1247 0                     continue
1248                     # TODO
1249
1250 0                 flavor_data = (
1251                     flavor.ram,
1252                     flavor.vcpus,
1253                     flavor.disk,
1254                     flavor.ephemeral,
1255                     flavor.swap if isinstance(flavor.swap, int) else 0,
1256                 )
1257 0                 if flavor_data == flavor_target:
1258 0                     return flavor.id
1259 0                 elif (
1260                     not exact_match
1261                     and flavor_target < flavor_data < flavor_candidate_data
1262                 ):
1263 0                     flavor_candidate_id = flavor.id
1264 0                     flavor_candidate_data = flavor_data
1265
1266 0             if not exact_match and flavor_candidate_id:
1267 0                 return flavor_candidate_id
1268
1269 0             raise vimconn.VimConnNotFoundException(
1270                 "Cannot find any flavor matching '{}'".format(flavor_dict)
1271             )
1272 0         except (
1273             nvExceptions.NotFound,
1274             nvExceptions.ClientException,
1275             ksExceptions.ClientException,
1276             ConnectionError,
1277         ) as e:
1278 0             self._format_exception(e)
1279
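    # Illustrative usage sketch (not part of the original module; "conn" is a
    # hypothetical vimconnector instance). Matching compares the 5-tuple
    # (ram, vcpus, disk, ephemeral, swap); ephemeral and swap default to 0:
    #
    #     flavor_id = conn.get_flavor_id_from_data(
    #         {"ram": 4096, "vcpus": 2, "disk": 40}
    #     )
    #     # Exact match unless config "use_existing_flavors" is set, in which case
    #     # the closest flavor providing the same or more resources is returned.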
1280 1     @staticmethod
1281 1     def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1282         """Process resource quota and fill up extra_specs.
1283         Args:
1284             quota       (dict):         Quota of the resource
1285             prefix      (str):          Prefix for the extra-spec keys (e.g. "cpu")
1286             extra_specs (dict):         Dict to be filled and used during flavor creation
1287
1288         """
1289 0         if "limit" in quota:
1290 0             extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1291
1292 0         if "reserve" in quota:
1293 0             extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1294
1295 0         if "shares" in quota:
1296 0             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1297 0             extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1298
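    # Illustrative sketch (not part of the original module): how a quota dict is
    # mapped into flavor extra_specs:
    #
    #     extra_specs = {}
    #     vimconnector.process_resource_quota(
    #         {"limit": 10000, "reserve": 5000, "shares": 2000}, "cpu", extra_specs
    #     )
    #     # extra_specs == {"quota:cpu_limit": 10000,
    #     #                 "quota:cpu_reservation": 5000,
    #     #                 "quota:cpu_shares_level": "custom",
    #     #                 "quota:cpu_shares_share": 2000}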
1299 1     @staticmethod
1300 1     def process_numa_memory(
1301         numa: dict, node_id: Optional[int], extra_specs: dict
1302     ) -> None:
1303         """Set the memory in extra_specs.
1304         Args:
1305             numa        (dict):         A dictionary which includes numa information
1306             node_id     (int):          ID of numa node
1307             extra_specs (dict):         To be filled.
1308
1309         """
1310 1         if not numa.get("memory"):
1311 1             return
1312 1         memory_mb = numa["memory"] * 1024
1313 1         memory = "hw:numa_mem.{}".format(node_id)
1314 1         extra_specs[memory] = int(memory_mb)
1315
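    # Illustrative sketch (not part of the original module): the descriptor gives
    # memory in GB; it is converted to MB for the extra spec:
    #
    #     extra_specs = {}
    #     vimconnector.process_numa_memory({"memory": 2}, 0, extra_specs)
    #     # extra_specs == {"hw:numa_mem.0": 2048}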
1316 1     @staticmethod
1317 1     def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1318         """Set the cpu in extra_specs.
1319         Args:
1320             numa        (dict):         A dictionary which includes numa information
1321             node_id     (int):          ID of numa node
1322             extra_specs (dict):         To be filled.
1323
1324         """
1325 1         if not numa.get("vcpu"):
1326 1             return
1327 1         vcpu = numa["vcpu"]
1328 1         cpu = "hw:numa_cpus.{}".format(node_id)
1329 1         vcpu = ",".join(map(str, vcpu))
1330 1         extra_specs[cpu] = vcpu
1331
1332 1     @staticmethod
1333 1     def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1334         """Fill up extra_specs if numa has paired-threads.
1335         Args:
1336             numa        (dict):         A dictionary which includes numa information
1337             extra_specs (dict):         To be filled.
1338
1339         Returns:
1340             threads     (int):          Number of virtual cpus
1341
1342         """
1343 1         if not numa.get("paired-threads"):
1344 1             return
1345
1346         # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
1347 1         threads = numa["paired-threads"] * 2
1348 1         extra_specs["hw:cpu_thread_policy"] = "require"
1349 1         extra_specs["hw:cpu_policy"] = "dedicated"
1350 1         return threads
1351
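    # Illustrative sketch (not part of the original module): each paired thread
    # accounts for two virtual cpus and forces dedicated pinning:
    #
    #     extra_specs = {}
    #     threads = vimconnector.process_numa_paired_threads(
    #         {"paired-threads": 4}, extra_specs
    #     )
    #     # threads == 8
    #     # extra_specs == {"hw:cpu_thread_policy": "require",
    #     #                 "hw:cpu_policy": "dedicated"}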
1352 1     @staticmethod
1353 1     def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1354         """Fill up extra_specs if numa has cores.
1355         Args:
1356             numa        (dict):         A dictionary which includes numa information
1357             extra_specs (dict):         To be filled.
1358
1359         Returns:
1360             cores       (int):          Number of cores
1361
1362         """
1363         # cpu_thread_policy "isolate" implies that the host must not have an SMT
1364         # architecture, or a non-SMT architecture will be emulated
1365 1         if not numa.get("cores"):
1366 1             return
1367 1         cores = numa["cores"]
1368 1         extra_specs["hw:cpu_thread_policy"] = "isolate"
1369 1         extra_specs["hw:cpu_policy"] = "dedicated"
1370 1         return cores
1371
1372 1     @staticmethod
1373 1     def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1374         """Fill up extra_specs if numa has threads.
1375         Args:
1376             numa        (dict):         A dictionary which includes numa information
1377             extra_specs (dict):         To be filled.
1378
1379         Returns:
1380             threads     (int):          Number of virtual cpus
1381
1382         """
1383         # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1384 1         if not numa.get("threads"):
1385 1             return
1386 1         threads = numa["threads"]
1387 1         extra_specs["hw:cpu_thread_policy"] = "prefer"
1388 1         extra_specs["hw:cpu_policy"] = "dedicated"
1389 1         return threads
1390
1391 1     def _process_numa_parameters_of_flavor(
1392         self, numas: List, extra_specs: Dict
1393     ) -> None:
1394         """Process numa parameters and fill up extra_specs.
1395
1396         Args:
1397             numas   (list):             List of dictionary which includes numa information
1398             extra_specs (dict):         To be filled.
1399
1400         """
1401 1         numa_nodes = len(numas)
1402 1         extra_specs["hw:numa_nodes"] = str(numa_nodes)
1403 1         cpu_cores, cpu_threads = 0, 0
1404
1405 1         if self.vim_type == "VIO":
1406 1             self.process_vio_numa_nodes(numa_nodes, extra_specs)
1407
1408 1         for numa in numas:
1409 1             if "id" in numa:
1410 1                 node_id = numa["id"]
1411                 # overwrite ram and vcpus
1412                 # check if key "memory" is present in numa; else use the ram value of the flavor
1413 1                 self.process_numa_memory(numa, node_id, extra_specs)
1414 1                 self.process_numa_vcpu(numa, node_id, extra_specs)
1415
1416             # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1417 1             extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1418
1419 1             if "paired-threads" in numa:
1420 1                 threads = self.process_numa_paired_threads(numa, extra_specs)
1421 1                 cpu_threads += threads
1422
1423 1             elif "cores" in numa:
1424 1                 cores = self.process_numa_cores(numa, extra_specs)
1425 1                 cpu_cores += cores
1426
1427 1             elif "threads" in numa:
1428 1                 threads = self.process_numa_threads(numa, extra_specs)
1429 1                 cpu_threads += threads
1430
1431 1         if cpu_cores:
1432 1             extra_specs["hw:cpu_cores"] = str(cpu_cores)
1433 1         if cpu_threads:
1434 1             extra_specs["hw:cpu_threads"] = str(cpu_threads)
1435
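    # Illustrative sketch (not part of the original module): combined result of
    # the numa helpers above for a single-node request on a non-VIO VIM:
    #
    #     numas = [{"id": 0, "memory": 4, "vcpu": [0, 1], "cores": 2}]
    #     # resulting extra_specs:
    #     #   {"hw:numa_nodes": "1", "hw:numa_mem.0": 4096,
    #     #    "hw:numa_cpus.0": "0,1", "hw:cpu_sockets": "1",
    #     #    "hw:cpu_thread_policy": "isolate", "hw:cpu_policy": "dedicated",
    #     #    "hw:cpu_cores": "2"}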
1436 1     @staticmethod
1437 1     def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1438         """According to number of numa nodes, updates the extra_specs for VIO.
1439
1440         Args:
1441
1442             numa_nodes      (int):         Number of numa nodes
1443             extra_specs     (dict):        Extra specs dict to be updated
1444
1445         """
1446         # If there are several numas, we do not define specific affinity.
1447 1         extra_specs["vmware:latency_sensitivity_level"] = "high"
1448
1449 1     def _change_flavor_name(
1450         self, name: str, name_suffix: int, flavor_data: dict
1451     ) -> str:
1452         """Change the flavor name if the name already exists.
1453
1454         Args:
1455             name    (str):          Flavor name to be checked
1456             name_suffix (int):      Suffix to be appended to name
1457             flavor_data (dict):     Flavor dict
1458
1459         Returns:
1460             name    (str):          New flavor name to be used
1461
1462         """
1463         # Get used names
1464 1         fl = self.nova.flavors.list()
1465 1         fl_names = [f.name for f in fl]
1466
1467 1         while name in fl_names:
1468 1             name_suffix += 1
1469 1             name = flavor_data["name"] + "-" + str(name_suffix)
1470
1471 1         return name
1472
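    # Illustrative sketch (not part of the original module): if flavors "small"
    # and "small-1" already exist in Nova, the suffix keeps incrementing:
    #
    #     name = self._change_flavor_name("small", 0, {"name": "small"})
    #     # name == "small-2"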
1473 1     def _process_extended_config_of_flavor(
1474         self, extended: dict, extra_specs: dict
1475     ) -> None:
1476         """Process the extended dict to fill up extra_specs.
1477         Args:
1478
1479             extended                    (dict):         Extended (EPA) specification of the flavor
1480             extra_specs                 (dict):         Dict to be filled and used during flavor creation
1481
1482         """
1483 1         quotas = {
1484             "cpu-quota": "cpu",
1485             "mem-quota": "memory",
1486             "vif-quota": "vif",
1487             "disk-io-quota": "disk_io",
1488         }
1489
1490 1         page_sizes = {
1491             "LARGE": "large",
1492             "SMALL": "small",
1493             "SIZE_2MB": "2MB",
1494             "SIZE_1GB": "1GB",
1495             "PREFER_LARGE": "any",
1496         }
1497
1498 1         policies = {
1499             "cpu-pinning-policy": "hw:cpu_policy",
1500             "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1501             "mem-policy": "hw:numa_mempolicy",
1502         }
1503
1504 1         numas = extended.get("numas")
1505 1         if numas:
1506 1             self._process_numa_parameters_of_flavor(numas, extra_specs)
1507
1508 1         for quota, item in quotas.items():
1509 1             if quota in extended.keys():
1510 1                 self.process_resource_quota(extended.get(quota), item, extra_specs)
1511
1512         # Set the mempage size as specified in the descriptor
1513 1         if extended.get("mempage-size"):
1514 1             if extended["mempage-size"] in page_sizes.keys():
1515 1                 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1516             else:
1517                 # Normally, validations in NBI should not allow this condition.
1518 1                 self.logger.debug(
1519                     "Invalid mempage-size %s. Will be ignored",
1520                     extended.get("mempage-size"),
1521                 )
1522
1523 1         for policy, hw_policy in policies.items():
1524 1             if extended.get(policy):
1525 1                 extra_specs[hw_policy] = extended[policy].lower()
1526
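    # Illustrative sketch (not part of the original module): an extended config
    # combining a quota, a mempage size and a pinning policy:
    #
    #     extra_specs = {}
    #     self._process_extended_config_of_flavor(
    #         {"cpu-quota": {"limit": 10000},
    #          "mempage-size": "LARGE",
    #          "cpu-pinning-policy": "DEDICATED"},
    #         extra_specs,
    #     )
    #     # extra_specs == {"quota:cpu_limit": 10000,
    #     #                 "hw:mem_page_size": "large",
    #     #                 "hw:cpu_policy": "dedicated"}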
1527 1     @staticmethod
1528 1     def _get_flavor_details(flavor_data: dict) -> Tuple:
1529         """Returns the details of the flavor.
1530         Args:
1531             flavor_data     (dict):     Dictionary that includes required flavor details
1532
1533         Returns:
1534             ram, vcpus, extra_specs, extended   (tuple):    Main items of required flavor
1535
1536         """
1537 1         return (
1538             flavor_data.get("ram", 64),
1539             flavor_data.get("vcpus", 1),
1540             {},
1541             flavor_data.get("extended"),
1542         )
1543
1544 1     def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
1545         """Adds a tenant flavor to openstack VIM.
1546         If change_name_if_used is True, it will change the name in case of conflict,
1547         because name repetition is not supported.
1548
1549         Args:
1550             flavor_data (dict):             Flavor details to be processed
1551             change_name_if_used (bool):     Change name in case of conflict
1552
1553         Returns:
1554              flavor_id  (str):     flavor identifier
1555
1556         """
1557 1         self.logger.debug("Adding flavor '%s'", str(flavor_data))
1558 1         retry = 0
1559 1         max_retries = 3
1560 1         name_suffix = 0
1561
1562 1         try:
1563 1             name = flavor_data["name"]
1564 1             while retry < max_retries:
1565 1                 retry += 1
1566 1                 try:
1567 1                     self._reload_connection()
1568
1569 1                     if change_name_if_used:
1570 1                         name = self._change_flavor_name(name, name_suffix, flavor_data)
1571
1572 1                     ram, vcpus, extra_specs, extended = self._get_flavor_details(
1573                         flavor_data
1574                     )
1575 1                     if extended:
1576 1                         self._process_extended_config_of_flavor(extended, extra_specs)
1577
1578                     # Create flavor
1579
1580 1                     new_flavor = self.nova.flavors.create(
1581                         name=name,
1582                         ram=ram,
1583                         vcpus=vcpus,
1584                         disk=flavor_data.get("disk", 0),
1585                         ephemeral=flavor_data.get("ephemeral", 0),
1586                         swap=flavor_data.get("swap", 0),
1587                         is_public=flavor_data.get("is_public", True),
1588                     )
1589
1590                     # Add metadata
1591 1                     if extra_specs:
1592 1                         new_flavor.set_keys(extra_specs)
1593
1594 1                     return new_flavor.id
1595
1596 1                 except nvExceptions.Conflict as e:
1597 1                     if change_name_if_used and retry < max_retries:
1598 1                         continue
1599
1600 1                     self._format_exception(e)
1601
1602 1         except (
1603             ksExceptions.ClientException,
1604             nvExceptions.ClientException,
1605             ConnectionError,
1606             KeyError,
1607         ) as e:
1608 1             self._format_exception(e)
1609
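    # Illustrative usage sketch (not part of the original module; "conn" is a
    # hypothetical vimconnector instance):
    #
    #     flavor_id = conn.new_flavor(
    #         {"name": "vnf-flavor", "ram": 2048, "vcpus": 2, "disk": 20}
    #     )
    #     # On a name conflict the creation is retried as "vnf-flavor-1", etc.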
1610 1     def delete_flavor(self, flavor_id):
1611         """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1612 0         try:
1613 0             self._reload_connection()
1614 0             self.nova.flavors.delete(flavor_id)
1615
1616 0             return flavor_id
1617         # except nvExceptions.BadRequest as e:
1618 0         except (
1619             nvExceptions.NotFound,
1620             ksExceptions.ClientException,
1621             nvExceptions.ClientException,
1622             ConnectionError,
1623         ) as e:
1624 0             self._format_exception(e)
1625
1626 1     def new_image(self, image_dict):
1627         """
1628         Adds a tenant image to VIM. image_dict is a dictionary with:
1629             name: name
1630             disk_format: qcow2, vhd, vmdk, raw (by default), ...
1631             location: path or URI
1632             public: "yes" or "no"
1633             metadata: metadata of the image
1634         Returns the image_id
1635         """
1636 0         retry = 0
1637 0         max_retries = 3
1638
1639 0         while retry < max_retries:
1640 0             retry += 1
1641 0             try:
1642 0                 self._reload_connection()
1643
1644                 # determine format: http://docs.openstack.org/developer/glance/formats.html
1645 0                 if "disk_format" in image_dict:
1646 0                     disk_format = image_dict["disk_format"]
1647                 else:  # autodiscover based on extension
1648 0                     if image_dict["location"].endswith(".qcow2"):
1649 0                         disk_format = "qcow2"
1650 0                     elif image_dict["location"].endswith(".vhd"):
1651 0                         disk_format = "vhd"
1652 0                     elif image_dict["location"].endswith(".vmdk"):
1653 0                         disk_format = "vmdk"
1654 0                     elif image_dict["location"].endswith(".vdi"):
1655 0                         disk_format = "vdi"
1656 0                     elif image_dict["location"].endswith(".iso"):
1657 0                         disk_format = "iso"
1658 0                     elif image_dict["location"].endswith(".aki"):
1659 0                         disk_format = "aki"
1660 0                     elif image_dict["location"].endswith(".ari"):
1661 0                         disk_format = "ari"
1662 0                     elif image_dict["location"].endswith(".ami"):
1663 0                         disk_format = "ami"
1664                     else:
1665 0                         disk_format = "raw"
1666
1667 0                 self.logger.debug(
1668                     "new_image: '%s' loading from '%s'",
1669                     image_dict["name"],
1670                     image_dict["location"],
1671                 )
1672 0                 if self.vim_type == "VIO":
1673 0                     container_format = "bare"
1674 0                     if "container_format" in image_dict:
1675 0                         container_format = image_dict["container_format"]
1676
1677 0                     new_image = self.glance.images.create(
1678                         name=image_dict["name"],
1679                         container_format=container_format,
1680                         disk_format=disk_format,
1681                     )
1682                 else:
1683 0                     new_image = self.glance.images.create(name=image_dict["name"])
1684
1685 0                 if image_dict["location"].startswith("http"):
1686                     # TODO there is no method for direct download. It must be downloaded locally with requests
1687 0                     raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1688                 else:  # local path
1689 0                     with open(image_dict["location"]) as fimage:
1690 0                         self.glance.images.upload(new_image.id, fimage)
1691                         # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1692                         #  image_dict.get("public","yes")=="yes",
1693                         #    container_format="bare", data=fimage, disk_format=disk_format)
1694
1695 0                 metadata_to_load = image_dict.get("metadata")
1696
1697                 # TODO "location" is a reserved word in current openstack versions. Fixed for VIO;
1698                 #  please check for openstack
1699 0                 if self.vim_type == "VIO":
1700 0                     metadata_to_load["upload_location"] = image_dict["location"]
1701                 else:
1702 0                     metadata_to_load["location"] = image_dict["location"]
1703
1704 0                 self.glance.images.update(new_image.id, **metadata_to_load)
1705
1706 0                 return new_image.id
1707 0             except (
1708                 nvExceptions.Conflict,
1709                 ksExceptions.ClientException,
1710                 nvExceptions.ClientException,
1711             ) as e:
1712 0                 self._format_exception(e)
1713 0             except (
1714                 HTTPException,
1715                 gl1Exceptions.HTTPException,
1716                 gl1Exceptions.CommunicationError,
1717                 ConnectionError,
1718             ) as e:
1719 0                 if retry < max_retries:  # retry on transient errors; raise on the last attempt
1720 0                     continue
1721
1722 0                 self._format_exception(e)
1723 0             except IOError as e:  # can not open the file
1724 0                 raise vimconn.VimConnConnectionException(
1725                     "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1726                     http_code=vimconn.HTTP_Bad_Request,
1727                 )
1728
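    # Illustrative usage sketch (not part of the original module; "conn" is a
    # hypothetical vimconnector instance). disk_format is autodiscovered from the
    # file extension when not given explicitly:
    #
    #     image_id = conn.new_image(
    #         {"name": "ubuntu-20.04",
    #          "location": "/tmp/ubuntu-20.04.qcow2",  # -> disk_format "qcow2"
    #          "metadata": {"os": "ubuntu"}}
    #     )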
1729 1     def delete_image(self, image_id):
1730         """Deletes a tenant image from openstack VIM. Returns the old id"""
1731 0         try:
1732 0             self._reload_connection()
1733 0             self.glance.images.delete(image_id)
1734
1735 0             return image_id
1736 0         except (
1737             nvExceptions.NotFound,
1738             ksExceptions.ClientException,
1739             nvExceptions.ClientException,
1740             gl1Exceptions.CommunicationError,
1741             gl1Exceptions.HTTPNotFound,
1742             ConnectionError,
1743         ) as e:  # TODO remove
1744 0             self._format_exception(e)
1745
1746 1     def get_image_id_from_path(self, path):
1747         """Get the image id from image path in the VIM database. Returns the image_id"""
1748 0         try:
1749 0             self._reload_connection()
1750 0             images = self.glance.images.list()
1751
1752 0             for image in images:
1753 0                 if image.metadata.get("location") == path:
1754 0                     return image.id
1755
1756 0             raise vimconn.VimConnNotFoundException(
1757                 "image with location '{}' not found".format(path)
1758             )
1759 0         except (
1760             ksExceptions.ClientException,
1761             nvExceptions.ClientException,
1762             gl1Exceptions.CommunicationError,
1763             ConnectionError,
1764         ) as e:
1765 0             self._format_exception(e)
1766
1767 1     def get_image_list(self, filter_dict={}):
1768         """Obtain tenant images from VIM
1769         Filter_dict can be:
1770             id: image id
1771             name: image name
1772             checksum: image checksum
1773         Returns the image list of dictionaries:
1774             [{<the fields at Filter_dict plus some VIM specific>}, ...]
1775             List can be empty
1776         """
1777 0         self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1778
1779 0         try:
1780 0             self._reload_connection()
1781             # filter_dict_os = filter_dict.copy()
1782             # First we filter by the available filter fields: name, id. The others are removed.
1783 0             image_list = self.glance.images.list()
1784 0             filtered_list = []
1785
1786 0             for image in image_list:
1787 0                 try:
1788 0                     if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1789 0                         continue
1790
1791 0                     if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1792 0                         continue
1793
1794 0                     if (
1795                         filter_dict.get("checksum")
1796                         and image["checksum"] != filter_dict["checksum"]
1797                     ):
1798 0                         continue
1799
1800 0                     filtered_list.append(image.copy())
1801 0                 except gl1Exceptions.HTTPNotFound:
1802 0                     pass
1803
1804 0             return filtered_list
1805 0         except (
1806             ksExceptions.ClientException,
1807             nvExceptions.ClientException,
1808             gl1Exceptions.CommunicationError,
1809             ConnectionError,
1810         ) as e:
1811 0             self._format_exception(e)
1812
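    # Illustrative usage sketch (not part of the original module; "conn" is a
    # hypothetical vimconnector instance):
    #
    #     images = conn.get_image_list({"name": "ubuntu-20.04"})
    #     # -> e.g. [{"id": "<image-uuid>", "name": "ubuntu-20.04", ...}]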
1813 1     def __wait_for_vm(self, vm_id, status):
1814         """Wait until the VM is in the desired status and return True.
1815         If the VM enters ERROR status, return False.
1816         If the timeout is reached, raise an exception."""
1817 0         elapsed_time = 0
1818 0         while elapsed_time < server_timeout:
1819 0             vm_status = self.nova.servers.get(vm_id).status
1820
1821 0             if vm_status == status:
1822 0                 return True
1823
1824 0             if vm_status == "ERROR":
1825 0                 return False
1826
1827 0             time.sleep(5)
1828 0             elapsed_time += 5
1829
1830         # if we exceeded the timeout, roll back
1831 0         if elapsed_time >= server_timeout:
1832 0             raise vimconn.VimConnException(
1833                 "Timeout waiting for instance " + vm_id + " to get " + status,
1834                 http_code=vimconn.HTTP_Request_Timeout,
1835             )
1836
1837 1     def _get_openstack_availablity_zones(self):
1838         """
1839         Get the availability zones available in openstack
1840         :return:
1841         """
1842 0         try:
1843 0             openstack_availability_zone = self.nova.availability_zones.list()
1844 0             openstack_availability_zone = [
1845                 str(zone.zoneName)
1846                 for zone in openstack_availability_zone
1847                 if zone.zoneName != "internal"
1848             ]
1849
1850 0             return openstack_availability_zone
1851 0         except Exception:
1852 0             return None
1853
1854 1     def _set_availablity_zones(self):
1855         """
1856         Set the vim availability zone
1857         :return:
1858         """
1859 0         if "availability_zone" in self.config:
1860 0             vim_availability_zones = self.config.get("availability_zone")
1861
1862 0             if isinstance(vim_availability_zones, str):
1863 0                 self.availability_zone = [vim_availability_zones]
1864 0             elif isinstance(vim_availability_zones, list):
1865 0                 self.availability_zone = vim_availability_zones
1866         else:
1867 0             self.availability_zone = self._get_openstack_availablity_zones()
1868
1869 1     def _get_vm_availability_zone(
1870         self, availability_zone_index, availability_zone_list
1871     ):
1872         """
1873         Return the availability zone to be used by the created VM.
1874         :return: The VIM availability zone to be used or None
1875         """
1876 0         if availability_zone_index is None:
1877 0             if not self.config.get("availability_zone"):
1878 0                 return None
1879 0             elif isinstance(self.config.get("availability_zone"), str):
1880 0                 return self.config["availability_zone"]
1881             else:
1882                 # TODO consider using a different parameter at config for default AV and AV list match
1883 0                 return self.config["availability_zone"][0]
1884
1885 0         vim_availability_zones = self.availability_zone
1886         # check if VIM offers enough availability zones as described in the VNFD
1887 0         if vim_availability_zones and len(availability_zone_list) <= len(
1888             vim_availability_zones
1889         ):
1890             # check if all the names of NFV AV match VIM AV names
1891 0             match_by_index = False
1892 0             for av in availability_zone_list:
1893 0                 if av not in vim_availability_zones:
1894 0                     match_by_index = True
1895 0                     break
1896
1897 0             if match_by_index:
1898 0                 return vim_availability_zones[availability_zone_index]
1899             else:
1900 0                 return availability_zone_list[availability_zone_index]
1901         else:
1902 0             raise vimconn.VimConnConflictException(
1903                 "Not enough availability zones at VIM for this deployment"
1904             )
1905
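    # Illustrative sketch (not part of the original module): when not all VNFD
    # zone names match VIM zone names, selection falls back to index matching:
    #
    #     # assuming self.availability_zone == ["nova-a", "nova-b"]
    #     az = self._get_vm_availability_zone(1, ["zone-x", "zone-y"])
    #     # az == "nova-b" (by index, since "zone-x"/"zone-y" are not VIM zones)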
1906 1     def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1907         """Fill up the security_groups in the port_dict.
1908
1909         Args:
1910             net (dict):             Network details
1911             port_dict   (dict):     Port details
1912
1913         """
1914 1         if (
1915             self.config.get("security_groups")
1916             and net.get("port_security") is not False
1917             and not self.config.get("no_port_security_extension")
1918         ):
1919 1             if not self.security_groups_id:
1920 1                 self._get_ids_from_name()
1921
1922 1             port_dict["security_groups"] = self.security_groups_id
1923
1924 1     def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1925         """Fill up the network binding depending on network type in the port_dict.
1926
1927         Args:
1928             net (dict):             Network details
1929             port_dict   (dict):     Port details
1930
1931         """
1932 1         if not net.get("type"):
1933 1             raise vimconn.VimConnException("Type is missing in the network details.")
1934
1935 1         if net["type"] == "virtual":
1936 1             pass
1937
1938         # For VF
1939 1         elif net["type"] == "VF" or net["type"] == "SR-IOV":
1940 1             port_dict["binding:vnic_type"] = "direct"
1941
1942             # VIO specific Changes
1943 1             if self.vim_type == "VIO":
1944                 # Need to create port with port_security_enabled = False and no-security-groups
1945 1                 port_dict["port_security_enabled"] = False
1946 1                 port_dict["provider_security_groups"] = []
1947 1                 port_dict["security_groups"] = []
1948
1949         else:
1950             # For PT PCI-PASSTHROUGH
1951 1             port_dict["binding:vnic_type"] = "direct-physical"
1952
1953 1     @staticmethod
1954 1     def _set_fixed_ip(new_port: dict, net: dict) -> None:
1955         """Set the "ip" parameter in net dictionary.
1956
1957         Args:
1958             new_port    (dict):     Newly created port
1959             net         (dict):     Network details
1960
1961         """
1962 1         fixed_ips = new_port["port"].get("fixed_ips")
1963
1964 1         if fixed_ips:
1965 1             net["ip"] = fixed_ips[0].get("ip_address")
1966         else:
1967 1             net["ip"] = None
1968
1969 1     @staticmethod
1970 1     def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1971         """Fill up the mac_address and fixed_ips in port_dict.
1972
1973         Args:
1974             net (dict):             Network details
1975             port_dict   (dict):     Port details
1976
1977         """
1978 1         if net.get("mac_address"):
1979 1             port_dict["mac_address"] = net["mac_address"]
1980
1981 1         ip_dual_list = []
1982 1         if ip_list := net.get("ip_address"):
1983 1             if not isinstance(ip_list, list):
1984 1                 ip_list = [ip_list]
1985 1             for ip in ip_list:
1986 1                 ip_dict = {"ip_address": ip}
1987 1                 ip_dual_list.append(ip_dict)
1988 1             port_dict["fixed_ips"] = ip_dual_list
1989             # TODO add "subnet_id": <subnet_id>
1990
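    # Illustrative sketch (not part of the original module): a dual-stack request
    # becomes a fixed_ips list in the port dict:
    #
    #     port_dict = {}
    #     vimconnector._prepare_port_dict_mac_ip_addr(
    #         {"mac_address": "fa:16:3e:00:00:01",
    #          "ip_address": ["10.0.0.10", "2001:db8::10"]},
    #         port_dict,
    #     )
    #     # port_dict == {"mac_address": "fa:16:3e:00:00:01",
    #     #               "fixed_ips": [{"ip_address": "10.0.0.10"},
    #     #                             {"ip_address": "2001:db8::10"}]}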
1991 1     def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1992         """Create new port using neutron.
1993
1994         Args:
1995             port_dict   (dict):         Port details
1996             created_items   (dict):     All created items
1997             net (dict):                 Network details
1998
1999         Returns:
2000             new_port    (dict):         Newly created port
2001
2002         """
2003 1         new_port = self.neutron.create_port({"port": port_dict})
2004 1         created_items["port:" + str(new_port["port"]["id"])] = True
2005 1         net["mac_address"] = new_port["port"]["mac_address"]
2006 1         net["vim_id"] = new_port["port"]["id"]
2007
2008 1         return new_port
2009
2010 1     def _create_port(
2011         self, net: dict, name: str, created_items: dict
2012     ) -> Tuple[dict, dict]:
2013         """Create port using net details.
2014
2015         Args:
2016             net (dict):                 Network details
2017             name    (str):              Name to be used as network name if net dict does not include name
2018             created_items   (dict):     All created items
2019
2020         Returns:
2021             new_port, port              Newly created port, port dictionary
2022
2023         """
2024
2025 1         port_dict = {
2026             "network_id": net["net_id"],
2027             "name": net.get("name"),
2028             "admin_state_up": True,
2029         }
2030
2031 1         if not port_dict["name"]:
2032 1             port_dict["name"] = name
2033
2034 1         self._prepare_port_dict_security_groups(net, port_dict)
2035
2036 1         self._prepare_port_dict_binding(net, port_dict)
2037
2038 1         vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
2039
2040 1         new_port = self._create_new_port(port_dict, created_items, net)
2041
2042 1         vimconnector._set_fixed_ip(new_port, net)
2043
2044 1         port = {"port-id": new_port["port"]["id"]}
2045
2046 1         if float(self.nova.api_version.get_string()) >= 2.32:
2047 1             port["tag"] = new_port["port"]["name"]
2048
2049 1         return new_port, port
2050
2051 1     def _prepare_network_for_vminstance(
2052         self,
2053         name: str,
2054         net_list: list,
2055         created_items: dict,
2056         net_list_vim: list,
2057         external_network: list,
2058         no_secured_ports: list,
2059     ) -> None:
2060         """Create port and fill up net dictionary for new VM instance creation.
2061
2062         Args:
2063             name    (str):                  Name of network
2064             net_list    (list):             List of networks
2065             created_items   (dict):         All created items belonging to a VM
2066             net_list_vim    (list):         List of ports
2067             external_network    (list):     List of external-networks
2068             no_secured_ports    (list):     Ports with port security disabled
2069         """
2070
2071 1         self._reload_connection()
2072
2073 1         for net in net_list:
2074             # Skip non-connected iface
2075 1             if not net.get("net_id"):
2076 1                 continue
2077
2078 1             new_port, port = self._create_port(net, name, created_items)
2079
2080 1             net_list_vim.append(port)
2081
2082 1             if net.get("floating_ip", False):
2083 1                 net["exit_on_floating_ip_error"] = True
2084 1                 external_network.append(net)
2085
2086 1             elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2087 1                 net["exit_on_floating_ip_error"] = False
2088 1                 external_network.append(net)
2089 1                 net["floating_ip"] = self.config.get("use_floating_ip")
2090
2091             # If port security is disabled when the port has not yet been attached to the VM, then all VM traffic
2092             # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2093 1             if net.get("port_security") is False and not self.config.get(
2094                 "no_port_security_extension"
2095             ):
2096 1                 no_secured_ports.append(
2097                     (
2098                         new_port["port"]["id"],
2099                         net.get("port_security_disable_strategy"),
2100                     )
2101                 )
2102
2103 1     def _prepare_persistent_root_volumes(
2104         self,
2105         name: str,
2106         vm_av_zone: list,
2107         disk: dict,
2108         base_disk_index: int,
2109         block_device_mapping: dict,
2110         existing_vim_volumes: list,
2111         created_items: dict,
2112     ) -> Optional[str]:
2113         """Prepare persistent root volumes for new VM instance.
2114
2115         Args:
2116             name    (str):                      Name of VM instance
2117             vm_av_zone  (list):                 List of availability zones
2118             disk    (dict):                     Disk details
2119             base_disk_index (int):              Disk index
2120             block_device_mapping    (dict):     Block device details
2121             existing_vim_volumes    (list):     Existing disk details
2122             created_items   (dict):             All created items belonging to the VM
2123
2124         Returns:
2125             boot_volume_id  (str):              ID of boot volume
2126
2127         """
2128         # Disk may include only vim_volume_id or only vim_id.
2129         # Use an existing persistent root volume, found via vim_volume_id or vim_id
2130 1         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2131
2132 1         if disk.get(key_id):
2133 1             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2134 1             existing_vim_volumes.append({"id": disk[key_id]})
2135
2136         else:
2137             # Create persistent root volume
2138 1             volume = self.cinder.volumes.create(
2139                 size=disk["size"],
2140                 name=name + "vd" + chr(base_disk_index),
2141                 imageRef=disk["image_id"],
2142                 # Make sure volume is in the same AZ as the VM to be attached to
2143                 availability_zone=vm_av_zone,
2144             )
2145 1             boot_volume_id = volume.id
2146 1             self.update_block_device_mapping(
2147                 volume=volume,
2148                 block_device_mapping=block_device_mapping,
2149                 base_disk_index=base_disk_index,
2150                 disk=disk,
2151                 created_items=created_items,
2152             )
2153
2154 1             return boot_volume_id
2155
2156 1     @staticmethod
2157 1     def update_block_device_mapping(
2158         volume: object,
2159         block_device_mapping: dict,
2160         base_disk_index: int,
2161         disk: dict,
2162         created_items: dict,
2163     ) -> None:
2164         """Add volume information to block device mapping dict.
2165         Args:
2166             volume  (object):                   Created volume object
2167             block_device_mapping    (dict):     Block device details
2168             base_disk_index (int):              Disk index
2169             disk    (dict):                     Disk details
2170             created_items   (dict):             All created items belonging to the VM
2171         """
2172 1         if not volume:
2173 1             raise vimconn.VimConnException("Volume is empty.")
2174
2175 1         if not hasattr(volume, "id"):
2176 1             raise vimconn.VimConnException(
2177                 "Created volume is not valid, does not have id attribute."
2178             )
2179
2180 1         block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2181 1         if disk.get("multiattach"):  # multiattach volumes do not belong to VDUs
2182 0             return
2183 1         volume_txt = "volume:" + str(volume.id)
2184 1         if disk.get("keep"):
2185 1             volume_txt += ":keep"
2186 1         created_items[volume_txt] = True
2187
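    # Illustrative sketch (not part of the original module): for a created volume
    # at disk index ord("b") with "keep" set:
    #
    #     # block_device_mapping gains {"vdb": <volume.id>}
    #     # created_items gains {"volume:<volume.id>:keep": True}
    #     # multiattach volumes are mapped but not added to created_items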
2188 1     def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:
2189 1         try:
2190 1             volume = self.cinder.volumes.create(
2191                 size=shared_volume_data["size"],
2192                 name=shared_volume_data["name"],
2193                 volume_type="multiattach",
2194             )
2195 1             return (volume.name, volume.id)
2196 0         except (ConnectionError, KeyError) as e:
2197 0             self._format_exception(e)
2198
2199 1     def _prepare_shared_volumes(
2200         self,
2201         name: str,
2202         disk: dict,
2203         base_disk_index: int,
2204         block_device_mapping: dict,
2205         existing_vim_volumes: list,
2206         created_items: dict,
2207     ):
2208 1         volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
2209 1         if volumes.get(disk["name"]):
2210 1             sv_id = volumes[disk["name"]]
2211 1             max_retries = 3
2212 1             vol_status = ""
2213             # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
2214 1             while max_retries:
2215 1                 max_retries -= 1
2216 1                 volume = self.cinder.volumes.get(sv_id)
2217 1                 vol_status = volume.status
2218 1                 if volume.status not in ("in-use", "available"):
2219 0                     time.sleep(5)
2220 0                     continue
2221 1                 self.update_block_device_mapping(
2222                     volume=volume,
2223                     block_device_mapping=block_device_mapping,
2224                     base_disk_index=base_disk_index,
2225                     disk=disk,
2226                     created_items=created_items,
2227                 )
2228 1                 return
2229 0             raise vimconn.VimConnException(
2230                 "Shared volume is not prepared, status is: {}".format(vol_status),
2231                 http_code=vimconn.HTTP_Internal_Server_Error,
2232             )
2233
2234 1     def _prepare_non_root_persistent_volumes(
2235         self,
2236         name: str,
2237         disk: dict,
2238         vm_av_zone: list,
2239         block_device_mapping: dict,
2240         base_disk_index: int,
2241         existing_vim_volumes: list,
2242         created_items: dict,
2243     ) -> None:
2244         """Prepare persistent volumes for new VM instance.
2245
2246         Args:
2247             name    (str):                      Name of VM instance
2248             disk    (dict):                     Disk details
2249             vm_av_zone  (list):                 List of availability zones
2250             block_device_mapping    (dict):     Block device details
2251             base_disk_index (int):              Disk index
2252             existing_vim_volumes    (list):     Existing disk details
2253             created_items   (dict):             All created items belonging to the VM
2254         """
2255         # Non-root persistent volumes
2256         # Disk may include only vim_volume_id or only vim_id.
2257 1         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2258 1         if disk.get(key_id):
2259             # Use existing persistent volume
2260 1             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2261 1             existing_vim_volumes.append({"id": disk[key_id]})
2262         else:
2263 1             volume_name = f"{name}vd{chr(base_disk_index)}"
2264 1             volume = self.cinder.volumes.create(
2265                 size=disk["size"],
2266                 name=volume_name,
2267                 # Make sure volume is in the same AZ as the VM to be attached to
2268                 availability_zone=vm_av_zone,
2269             )
2270 1             self.update_block_device_mapping(
2271                 volume=volume,
2272                 block_device_mapping=block_device_mapping,
2273                 base_disk_index=base_disk_index,
2274                 disk=disk,
2275                 created_items=created_items,
2276             )
2277
2278 1     def _wait_for_created_volumes_availability(
2279         self, elapsed_time: int, created_items: dict
2280     ) -> Optional[int]:
2281         """Wait till created volumes become available.
2282
2283         Args:
2284             elapsed_time    (int):          Time already spent waiting
2285             created_items   (dict):         All created items belonging to the VM
2286
2287         Returns:
2288             elapsed_time    (int):          Time spent while waiting
2289
2290         """
2291 1         while elapsed_time < volume_timeout:
2292 1             for created_item in created_items:
2293 1                 v, volume_id = (
2294                     created_item.split(":")[0],
2295                     created_item.split(":")[1],
2296                 )
2297 1                 if v == "volume":
2298 1                     volume = self.cinder.volumes.get(volume_id)
2299 1                     if (
2300                         volume.volume_type == "multiattach"
2301                         and volume.status == "in-use"
2302                     ):
2303 0                         return elapsed_time
2304 1                     elif volume.status != "available":
2305 1                         break
2306             else:
2307                 # All ready: break from while
2308 1                 break
2309
2310 1             time.sleep(5)
2311 1             elapsed_time += 5
2312
2313 1         return elapsed_time
2314
2315 1     def _wait_for_existing_volumes_availability(
2316         self, elapsed_time: int, existing_vim_volumes: list
2317     ) -> Optional[int]:
2318         """Wait till existing volumes become available.
2319
2320         Args:
2321             elapsed_time    (int):          Time already spent waiting
2322             existing_vim_volumes   (list):  Existing volume details
2323
2324         Returns:
2325             elapsed_time    (int):          Time spent while waiting
2326
2327         """
2328
2329 1         while elapsed_time < volume_timeout:
2330 1             for volume in existing_vim_volumes:
2331 1                 v = self.cinder.volumes.get(volume["id"])
2332 1                 if v.volume_type == "multiattach" and v.status == "in-use":
2333 0                     return elapsed_time
2334 1                 elif v.status != "available":
2335 1                     break
2336             else:  # all ready: break from while
2337 1                 break
2338
2339 1             time.sleep(5)
2340 1             elapsed_time += 5
2341
2342 1         return elapsed_time
2343
2344 1     def _prepare_disk_for_vminstance(
2345         self,
2346         name: str,
2347         existing_vim_volumes: list,
2348         created_items: dict,
2349         vm_av_zone: list,
2350         block_device_mapping: dict,
2351         disk_list: list = None,
2352     ) -> None:
2353         """Prepare all volumes for new VM instance.
2354
2355         Args:
2356             name    (str):                      Name of Instance
2357             existing_vim_volumes    (list):     List of existing volumes
2358             created_items   (dict):             All created items belonging to the VM
2359             vm_av_zone  (list):                 VM availability zone
2360             block_device_mapping (dict):        Block devices to be attached to VM
2361             disk_list   (list):                 List of disks
2362
2363         """
2364         # Create additional volumes in case these are present in disk_list
2365 1         base_disk_index = ord("b")
2366 1         boot_volume_id = None
2367 1         elapsed_time = 0
2368 1         for disk in disk_list:
2369 1             if "image_id" in disk:
2370                 # Root persistent volume
2371 1                 base_disk_index = ord("a")
2372 1                 boot_volume_id = self._prepare_persistent_root_volumes(
2373                     name=name,
2374                     vm_av_zone=vm_av_zone,
2375                     disk=disk,
2376                     base_disk_index=base_disk_index,
2377                     block_device_mapping=block_device_mapping,
2378                     existing_vim_volumes=existing_vim_volumes,
2379                     created_items=created_items,
2380                 )
2381 1             elif disk.get("multiattach"):
2382 0                 self._prepare_shared_volumes(
2383                     name=name,
2384                     disk=disk,
2385                     base_disk_index=base_disk_index,
2386                     block_device_mapping=block_device_mapping,
2387                     existing_vim_volumes=existing_vim_volumes,
2388                     created_items=created_items,
2389                 )
2390             else:
2391                 # Non-root persistent volume
2392 1                 self._prepare_non_root_persistent_volumes(
2393                     name=name,
2394                     disk=disk,
2395                     vm_av_zone=vm_av_zone,
2396                     block_device_mapping=block_device_mapping,
2397                     base_disk_index=base_disk_index,
2398                     existing_vim_volumes=existing_vim_volumes,
2399                     created_items=created_items,
2400                 )
2401 1             base_disk_index += 1
2402
2403         # Wait until created volumes have status "available"
2404 1         elapsed_time = self._wait_for_created_volumes_availability(
2405             elapsed_time, created_items
2406         )
2407         # Wait until existing volumes in vim have status "available"
2408 1         elapsed_time = self._wait_for_existing_volumes_availability(
2409             elapsed_time, existing_vim_volumes
2410         )
2411         # If we exceeded the timeout, roll back
2412 1         if elapsed_time >= volume_timeout:
2413 1             raise vimconn.VimConnException(
2414                 "Timeout creating volumes for instance " + name,
2415                 http_code=vimconn.HTTP_Request_Timeout,
2416             )
2417 1         if boot_volume_id:
2418 1             self.cinder.volumes.set_bootable(boot_volume_id, True)
2419
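    # Illustrative sketch (not part of the original module): device names are
    # assigned alphabetically; a persistent root disk takes "vda", additional
    # disks "vdb", "vdc", and so on:
    #
    #     disk_list = [{"image_id": "<image-uuid>", "size": 10},  # -> vda (boot)
    #                  {"size": 20}]                              # -> vdb
    #     # afterwards the boot volume is flagged bootable via
    #     # self.cinder.volumes.set_bootable(boot_volume_id, True)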
2420 1     def _find_the_external_network_for_floating_ip(self):
2421         """Get the external network ID in order to create a floating IP.
2422
2423         Returns:
2424             pool_id (str):      External network pool ID
2425
2426         """
2427
2428         # Find the external network
2429 1         external_nets = list()
2430
2431 1         for net in self.neutron.list_networks()["networks"]:
2432 1             if net["router:external"]:
2433 1                 external_nets.append(net)
2434
2435 1         if len(external_nets) == 0:
2436 1             raise vimconn.VimConnException(
2437                 "Cannot create floating_ip automatically since "
2438                 "no external network is present",
2439                 http_code=vimconn.HTTP_Conflict,
2440             )
2441
2442 1         if len(external_nets) > 1:
2443 1             raise vimconn.VimConnException(
2444                 "Cannot create floating_ip automatically since "
2445                 "multiple external networks are present",
2446                 http_code=vimconn.HTTP_Conflict,
2447             )
2448
2449         # Pool ID
2450 1         return external_nets[0].get("id")
2451
2452 1     def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2453         """Trigger neutron to create a new floating IP using external network ID.
2454
2455         Args:
2456             param   (dict):             Input parameters to create a floating IP
2457             created_items   (dict):     All created items belonging to the new VM instance
2458
2459         Raises:
2460
2461             VimConnException
2462         """
2463 1         try:
2464 1             self.logger.debug("Creating floating IP")
2465 1             new_floating_ip = self.neutron.create_floatingip(param)
2466 1             free_floating_ip = new_floating_ip["floatingip"]["id"]
2467 1             created_items["floating_ip:" + str(free_floating_ip)] = True
2468
2469 1         except Exception as e:
2470 1             raise vimconn.VimConnException(
2471                 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2472                 http_code=vimconn.HTTP_Conflict,
2473             )
2474
2475 1     def _create_floating_ip(
2476         self, floating_network: dict, server: object, created_items: dict
2477     ) -> None:
2478         """Get the available Pool ID and create a new floating IP.
2479
2480         Args:
2481             floating_network    (dict):         Dict including external network ID
2482             server   (object):                  Server object
2483             created_items   (dict):             All created items belonging to the new VM instance
2484
2485         """
2486
2487         # Pool_id is available
2488 1         if (
2489             isinstance(floating_network["floating_ip"], str)
2490             and floating_network["floating_ip"].lower() != "true"
2491         ):
2492 1             pool_id = floating_network["floating_ip"]
2493
2494         # Find the Pool_id
2495         else:
2496 1             pool_id = self._find_the_external_network_for_floating_ip()
2497
2498 1         param = {
2499             "floatingip": {
2500                 "floating_network_id": pool_id,
2501                 "tenant_id": server.tenant_id,
2502             }
2503         }
2504
2505 1         self._neutron_create_float_ip(param, created_items)
2506
2507 1     def _find_floating_ip(
2508         self,
2509         server: object,
2510         floating_ips: list,
2511         floating_network: dict,
2512     ) -> Optional[str]:
2513         """Find an available free floating IP, if there is one.
2514
2515         Args:
2516             server  (object):                   Server object
2517             floating_ips    (list):             List of floating IPs
2518             floating_network    (dict):         Details of floating network such as ID
2519
2520         Returns:
2521             free_floating_ip    (str):          Free floating ip address
2522
2523         """
2524 1         for fip in floating_ips:
2525 1             if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2526 1                 continue
2527
2528 1             if isinstance(floating_network["floating_ip"], str):
2529 1                 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2530 0                     continue
2531
2532 1             return fip["id"]
2533
2534 1     def _assign_floating_ip(
2535         self, free_floating_ip: str, floating_network: dict
2536     ) -> Dict:
2537         """Assign the free floating ip address to port.
2538
2539         Args:
2540             free_floating_ip    (str):          Floating IP to be assigned
2541             floating_network    (dict):         ID of floating network
2542
2543         Returns:
2544             fip         (dict):         Floating ip details
2545
2546         """
2547         # The vim_id key contains the neutron.port_id
2548 1         self.neutron.update_floatingip(
2549             free_floating_ip,
2550             {"floatingip": {"port_id": floating_network["vim_id"]}},
2551         )
2552         # To handle a race condition, wait 5 seconds so the caller can verify it was not re-assigned to another VM
2553 1         time.sleep(5)
2554
2555 1         return self.neutron.show_floatingip(free_floating_ip)
2556
2557 1     def _get_free_floating_ip(
2558         self, server: object, floating_network: dict
2559     ) -> Optional[str]:
2560         """Get the free floating IP address.
2561
2562         Args:
2563             server  (object):               Server Object
2564             floating_network    (dict):     Floating network details
2565
2566         Returns:
2567             free_floating_ip    (str):      Free floating ip addr
2568
2569         """
2570
2571 1         floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2572
2573         # Randomize
2574 1         random.shuffle(floating_ips)
2575
2576 1         return self._find_floating_ip(server, floating_ips, floating_network)
2577
2578 1     def _prepare_external_network_for_vminstance(
2579         self,
2580         external_network: list,
2581         server: object,
2582         created_items: dict,
2583         vm_start_time: float,
2584     ) -> None:
2585         """Assign floating IP address for VM instance.
2586
2587         Args:
2588             external_network    (list):         ID of External network
2589             server  (object):                   Server Object
2590             created_items   (dict):             All created items belonging to the new VM instance
2591             vm_start_time   (float):            Time as a floating point number expressed in seconds since the epoch, in UTC
2592
2593         Raises:
2594             VimConnException
2595
2596         """
2597 1         for floating_network in external_network:
2598 1             try:
2599 1                 assigned = False
2600 1                 floating_ip_retries = 3
2601                 # In case of RO in HA there can be conflicts: two ROs trying to assign the same floating IP.
2602                 # So retry several times
2603 1                 while not assigned:
2604 1                     free_floating_ip = self._get_free_floating_ip(
2605                         server, floating_network
2606                     )
2607
2608 1                     if not free_floating_ip:
2609 1                         self._create_floating_ip(
2610                             floating_network, server, created_items
2611                         )
2612
2613 1                     try:
2614                         # For race condition ensure not already assigned
2615 1                         fip = self.neutron.show_floatingip(free_floating_ip)
2616
2617 1                         if fip["floatingip"].get("port_id"):
2618 1                             continue
2619
2620                         # Assign floating ip
2621 1                         fip = self._assign_floating_ip(
2622                             free_floating_ip, floating_network
2623                         )
2624
2625 1                         if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2626 1                             self.logger.warning(
2627                                 "floating_ip {} re-assigned to other port".format(
2628                                     free_floating_ip
2629                                 )
2630                             )
2631 1                             continue
2632
2633 1                         self.logger.debug(
2634                             "Assigned floating_ip {} to VM {}".format(
2635                                 free_floating_ip, server.id
2636                             )
2637                         )
2638
2639 1                         assigned = True
2640
2641 1                     except Exception as e:
2642                         # OpenStack needs some time after VM creation to assign an IP, so retry on failure
2643 1                         vm_status = self.nova.servers.get(server.id).status
2644
2645 1                         if vm_status not in ("ACTIVE", "ERROR"):
2646 1                             if time.time() - vm_start_time < server_timeout:
2647 1                                 time.sleep(5)
2648 1                                 continue
2649 1                         elif floating_ip_retries > 0:
2650 1                             floating_ip_retries -= 1
2651 1                             continue
2652
2653 1                         raise vimconn.VimConnException(
2654                             "Cannot create floating_ip: {} {}".format(
2655                                 type(e).__name__, e
2656                             ),
2657                             http_code=vimconn.HTTP_Conflict,
2658                         )
2659
2660 1             except Exception as e:
2661 1                 if not floating_network["exit_on_floating_ip_error"]:
2662 1                     self.logger.error("Cannot create floating_ip. %s", str(e))
2663 1                     continue
2664
2665 1                 raise
2666
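The loop above mixes two retry budgets: while the VM is still building it spends wall-clock time (server_timeout), and once the VM has settled it spends a fixed number of retries. A reduced sketch of that control flow, with assign() and vm_is_settled() as hypothetical stand-ins for the Neutron and Nova calls:

    import time

    def assign_with_retries(assign, vm_is_settled, start_time, timeout=1800, retries=3):
        """Retry assign() until it succeeds, the time budget runs out, or retries are spent."""
        while True:
            try:
                return assign()
            except Exception:
                if not vm_is_settled():
                    # VM still building: wait and spend wall-clock budget, not retries.
                    if time.time() - start_time < timeout:
                        time.sleep(5)
                        continue
                elif retries > 0:
                    retries -= 1  # VM settled: spend one of the fixed retries.
                    continue
                raise
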
2667 1     def _update_port_security_for_vminstance(
2668         self,
2669         no_secured_ports: list,
2670         server: object,
2671     ) -> None:
2672         """Updates the port security according to no_secured_ports list.
2673
2674         Args:
2675             no_secured_ports    (list):     List of ports whose port security will be disabled
2676             server  (object):               Server Object
2677
2678         Raises:
2679             VimConnException
2680
2681         """
2682         # Wait until the VM is active and then disable the port-security
2683 1         if no_secured_ports:
2684 1             self.__wait_for_vm(server.id, "ACTIVE")
2685
2686 1         for port in no_secured_ports:
2687 1             port_update = {
2688                 "port": {"port_security_enabled": False, "security_groups": None}
2689             }
2690
2691 1             if port[1] == "allow-address-pairs":
2692 1                 port_update = {
2693                     "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2694                 }
2695
2696 1             try:
2697 1                 self.neutron.update_port(port[0], port_update)
2698
2699 1             except Exception:
2700 1                 raise vimconn.VimConnException(
2701                     "It was not possible to disable port security for port {}".format(
2702                         port[0]
2703                     )
2704                 )
2705
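For reference, the two Neutron port-update payloads built above, written out as plain dicts (the values are fixed by the method; only the port id passed to neutron.update_port varies):

    # Fully disable port security; security groups must be cleared at the same time.
    disable_all = {"port": {"port_security_enabled": False, "security_groups": None}}

    # "allow-address-pairs" variant: keep port security but accept traffic from any address.
    allow_any = {"port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}}
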
2706 1     def new_vminstance(
2707         self,
2708         name: str,
2709         description: str,
2710         start: bool,
2711         image_id: str,
2712         flavor_id: str,
2713         affinity_group_list: list,
2714         net_list: list,
2715         cloud_config=None,
2716         disk_list=None,
2717         availability_zone_index=None,
2718         availability_zone_list=None,
2719     ) -> tuple:
2720         """Adds a VM instance to VIM.
2721
2722         Args:
2723             name    (str):          name of VM
2724             description (str):      description
2725             start   (bool):         indicates if VM must start or boot in pause mode. Ignored
2726             image_id    (str):      image uuid
2727             flavor_id   (str):      flavor uuid
2728             affinity_group_list (list):     list of affinity groups, each one is a dictionary.Ignore if empty.
2729             net_list    (list):         list of interfaces, each one is a dictionary with:
2730                 name:   name of network
2731                 net_id:     network uuid to connect
2732                 vpci:   virtual PCI address to assign, ignored because OpenStack lacks support #TODO
2733                 model:  interface model, ignored #TODO
2734                 mac_address:    used for SR-IOV ifaces #TODO for other types
2735                 use:    'data', 'bridge',  'mgmt'
2736                 type:   'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2737                 vim_id:     filled/added by this function
2738                 floating_ip:    True/False (or it can be None)
2739                 port_security:  True/False
2740             cloud_config    (dict): (optional) dictionary with:
2741                 key-pairs:      (optional) list of strings with the public key to be inserted to the default user
2742                 users:      (optional) list of users to be inserted, each item is a dict with:
2743                     name:   (mandatory) user name,
2744                     key-pairs: (optional) list of strings with the public key to be inserted to the user
2745                 user-data:  (optional) string is a text script to be passed directly to cloud-init
2746                 config-files:   (optional). List of files to be transferred. Each item is a dict with:
2747                     dest:   (mandatory) string with the destination absolute path
2748                     encoding:   (optional, by default text). Can be one of:
2749                         'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2750                     content :    (mandatory) string with the content of the file
2751                     permissions:    (optional) string with file permissions, typically octal notation '0644'
2752                     owner:  (optional) file owner, string with the format 'owner:group'
2753                 boot-data-drive:    boolean to indicate if user-data must be passed using a boot drive (hard disk)
2754             disk_list:  (optional) list with additional disks to the VM. Each item is a dict with:
2755                 image_id:   (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2756                 size:   (mandatory) string with the size of the disk in GB
2757                 vim_id:  (optional) should use this existing volume id
2758             availability_zone_index:    Index of availability_zone_list to use for this VM. None if no AZ is required
2759             availability_zone_list:     list of availability zones given by user in the VNFD descriptor.  Ignore if
2760                 availability_zone_index is None
2761                 #TODO ip, security groups
2762
2763         Returns:
2764             A tuple with the instance identifier and created_items or raises an exception on error
2765             created_items can be None or a dictionary where this method can include key-values that will be passed to
2766             the methods delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2767             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2768             as not present.
2769
2770         """
2771 1         self.logger.debug(
2772             "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2773             image_id,
2774             flavor_id,
2775             str(net_list),
2776         )
2777
2778 1         try:
2779 1             server = None
2780 1             created_items = {}
2781 1             net_list_vim = []
2782             # list of external networks to be connected to instance, later on used to create floating_ip
2783 1             external_network = []
2784             # List of ports with port-security disabled
2785 1             no_secured_ports = []
2786 1             block_device_mapping = {}
2787 1             existing_vim_volumes = []
2788 1             server_group_id = None
2789 1             scheduller_hints = {}
2790
2791             # Check the Openstack Connection
2792 1             self._reload_connection()
2793
2794             # Prepare network list
2795 1             self._prepare_network_for_vminstance(
2796                 name=name,
2797                 net_list=net_list,
2798                 created_items=created_items,
2799                 net_list_vim=net_list_vim,
2800                 external_network=external_network,
2801                 no_secured_ports=no_secured_ports,
2802             )
2803
2804             # Cloud config
2805 1             config_drive, userdata = self._create_user_data(cloud_config)
2806
2807             # Get availability Zone
2808 1             vm_av_zone = self._get_vm_availability_zone(
2809                 availability_zone_index, availability_zone_list
2810             )
2811
2812 1             if disk_list:
2813                 # Prepare disks
2814 1                 self._prepare_disk_for_vminstance(
2815                     name=name,
2816                     existing_vim_volumes=existing_vim_volumes,
2817                     created_items=created_items,
2818                     vm_av_zone=vm_av_zone,
2819                     block_device_mapping=block_device_mapping,
2820                     disk_list=disk_list,
2821                 )
2822
2823 1             if affinity_group_list:
2824                 # Only the first id in the list will be used (OpenStack restriction)
2825 1                 server_group_id = affinity_group_list[0]["affinity_group_id"]
2826 1                 scheduller_hints["group"] = server_group_id
2827
2828 1             self.logger.debug(
2829                 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2830                 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2831                 "block_device_mapping={}, server_group={})".format(
2832                     name,
2833                     image_id,
2834                     flavor_id,
2835                     net_list_vim,
2836                     self.config.get("security_groups"),
2837                     vm_av_zone,
2838                     self.config.get("keypair"),
2839                     userdata,
2840                     config_drive,
2841                     block_device_mapping,
2842                     server_group_id,
2843                 )
2844             )
2845             # Create VM
2846 1             server = self.nova.servers.create(
2847                 name=name,
2848                 image=image_id,
2849                 flavor=flavor_id,
2850                 nics=net_list_vim,
2851                 security_groups=self.config.get("security_groups"),
2852                 # TODO remove security_groups in future versions; already set at the Neutron port
2853                 availability_zone=vm_av_zone,
2854                 key_name=self.config.get("keypair"),
2855                 userdata=userdata,
2856                 config_drive=config_drive,
2857                 block_device_mapping=block_device_mapping,
2858                 scheduler_hints=scheduller_hints,
2859             )
2860
2861 1             vm_start_time = time.time()
2862
2863 1             self._update_port_security_for_vminstance(no_secured_ports, server)
2864
2865 1             self._prepare_external_network_for_vminstance(
2866                 external_network=external_network,
2867                 server=server,
2868                 created_items=created_items,
2869                 vm_start_time=vm_start_time,
2870             )
2871
2872 1             return server.id, created_items
2873
2874 1         except Exception as e:
2875 1             server_id = None
2876 1             if server:
2877 1                 server_id = server.id
2878
2879 1             try:
2880 1                 created_items = self.remove_keep_tag_from_persistent_volumes(
2881                     created_items
2882                 )
2883
2884 1                 self.delete_vminstance(server_id, created_items)
2885
2886 0             except Exception as e2:
2887 0                 self.logger.error("new_vminstance rollback fail {}".format(e2))
2888
2889 1             self._format_exception(e)
2890
2891 1     @staticmethod
2892 1     def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2893         """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2894
2895         Args:
2896             created_items (dict):       All created items belonging to the VM
2897
2898         Returns:
2899             updated_created_items   (dict):     Dict which does not include keep flag for volumes.
2900
2901         """
2902 1         return {
2903             key.replace(":keep", ""): value for (key, value) in created_items.items()
2904         }
2905
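The rename is a pure key rewrite; a quick illustration with a made-up created_items dict:

    created_items = {"volume:abc:keep": True, "port:p1": True}
    stripped = {k.replace(":keep", ""): v for k, v in created_items.items()}
    assert stripped == {"volume:abc": True, "port:p1": True}
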
2906 1     def get_vminstance(self, vm_id):
2907         """Returns the VM instance information from VIM"""
2908 0         return self._find_nova_server(vm_id)
2909
2910 1     def get_vminstance_console(self, vm_id, console_type="vnc"):
2911         """
2912         Get a console for the virtual machine
2913         Params:
2914             vm_id: uuid of the VM
2915             console_type, can be:
2916                 "novnc" (by default), "xvpvnc" for VNC types,
2917                 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2918         Returns dict with the console parameters:
2919                 protocol: ssh, ftp, http, https, ...
2920                 server:   usually ip address
2921                 port:     the http, ssh, ... port
2922                 suffix:   extra text, e.g. the http path and query string
2923         """
2924 0         self.logger.debug("Getting VM CONSOLE from VIM")
2925
2926 0         try:
2927 0             self._reload_connection()
2928 0             server = self.nova.servers.find(id=vm_id)
2929
2930 0             if console_type is None or console_type == "novnc":
2931 0                 console_dict = server.get_vnc_console("novnc")
2932 0             elif console_type == "xvpvnc":
2933 0                 console_dict = server.get_vnc_console(console_type)
2934 0             elif console_type == "rdp-html5":
2935 0                 console_dict = server.get_rdp_console(console_type)
2936 0             elif console_type == "spice-html5":
2937 0                 console_dict = server.get_spice_console(console_type)
2938             else:
2939 0                 raise vimconn.VimConnException(
2940                     "console type '{}' not allowed".format(console_type),
2941                     http_code=vimconn.HTTP_Bad_Request,
2942                 )
2943
2944 0             console_dict1 = console_dict.get("console")
2945
2946 0             if console_dict1:
2947 0                 console_url = console_dict1.get("url")
2948
2949 0                 if console_url:
2950                     # parse console_url
2951 0                     protocol_index = console_url.find("//")
2952 0                     suffix_index = (
2953                         console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2954                     )
2955 0                     port_index = (
2956                         console_url[protocol_index + 2 : suffix_index].find(":")
2957                         + protocol_index
2958                         + 2
2959                     )
2960
2961 0                     if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2962 0                         return (
2963                             -vimconn.HTTP_Internal_Server_Error,
2964                             "Unexpected response from VIM",
2965                         )
2966
2967 0                     console_dict = {
2968                         "protocol": console_url[0:protocol_index],
2969                         "server": console_url[protocol_index + 2 : port_index],
2970                         "port": console_url[port_index:suffix_index],
2971                         "suffix": console_url[suffix_index + 1 :],
2972                     }
2973 0                     protocol_index += 2
2974
2975 0                     return console_dict
2976 0             raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2977 0         except (
2978             nvExceptions.NotFound,
2979             ksExceptions.ClientException,
2980             nvExceptions.ClientException,
2981             nvExceptions.BadRequest,
2982             ConnectionError,
2983         ) as e:
2984 0             self._format_exception(e)
2985
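The index arithmetic above splits the console URL by hand. For clarity, the same split sketched with the standard library's urllib.parse (an equivalent alternative, not the module's own code; the URL is made up):

    from urllib.parse import urlparse

    url = "http://192.0.2.10:6080/vnc_auto.html?token=abc"
    parts = urlparse(url)
    console = {
        "protocol": parts.scheme,  # "http"
        "server": parts.hostname,  # "192.0.2.10"
        "port": parts.port,        # 6080 (int, or None if absent)
        "suffix": parts.path.lstrip("/") + ("?" + parts.query if parts.query else ""),
    }
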
2986 1     def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2987         """Neutron delete ports by id.
2988         Args:
2989             k_id    (str):      Port id in the VIM
2990         """
2991 1         try:
2992 1             self.neutron.delete_port(k_id)
2993
2994 1         except Exception as e:
2995 1             self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2996
2997 1     def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
2998         """Cinder delete volume by id.
2999         Args:
3000             shared_volume_vim_id    (str):                  ID of shared volume in VIM
3001         """
3002 1         elapsed_time = 0
3003 1         try:
3004 1             while elapsed_time < server_timeout:
3005 1                 vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
3006 1                 if vol_status == "available":
3007 1                     self.cinder.volumes.delete(shared_volume_vim_id)
3008 1                     return True
3009
3010 0                 time.sleep(5)
3011 0                 elapsed_time += 5
3012
3013 0             if elapsed_time >= server_timeout:
3014 0                 raise vimconn.VimConnException(
3015                     "Timeout waiting for volume "
3016                     + shared_volume_vim_id
3017                     + " to be available",
3018                     http_code=vimconn.HTTP_Request_Timeout,
3019                 )
3020
3021 0         except Exception as e:
3022 0             self.logger.error(
3023                 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3024             )
3025 0             self._format_exception(e)
3026
3027 1     def _delete_volumes_by_id_wth_cinder(
3028         self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
3029     ) -> bool:
3030         """Cinder delete volume by id.
3031         Args:
3032             k   (str):                      Full item name in created_items
3033             k_id    (str):                  ID of the volume in VIM
3034             volumes_to_hold (list):          Volumes not to delete
3035             created_items   (dict):         All created items belonging to the VM
3036         """
3037 1         try:
3038 1             if k_id in volumes_to_hold:
3039 1                 return
3040
3041 1             if self.cinder.volumes.get(k_id).status != "available":
3042 1                 return True
3043
3044             else:
3045 1                 self.cinder.volumes.delete(k_id)
3046 1                 created_items[k] = None
3047
3048 1         except Exception as e:
3049 1             self.logger.error(
3050                 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3051             )
3052
3053 1     def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
3054         """Neutron delete floating ip by id.
3055         Args:
3056             k   (str):                      Full item name in created_items
3057             k_id    (str):                  ID of floating ip in VIM
3058             created_items   (dict):         All created items belonging to the VM
3059         """
3060 1         try:
3061 1             self.neutron.delete_floatingip(k_id)
3062 1             created_items[k] = None
3063
3064 1         except Exception as e:
3065 1             self.logger.error(
3066                 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3067             )
3068
3069 1     @staticmethod
3070 1     def _get_item_name_id(k: str) -> Tuple[str, str]:
3071 1         k_item, _, k_id = k.partition(":")
3072 1         return k_item, k_id
3073
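str.partition splits on the first colon only, so ids that themselves contain a colon (such as the ":keep" suffix) stay attached to the id part:

    assert "port:11-22".partition(":")[::2] == ("port", "11-22")
    assert "volume:uuid1:keep".partition(":")[::2] == ("volume", "uuid1:keep")
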
3074 1     def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
3075         """Delete VM ports attached to the networks before deleting virtual machine.
3076         Args:
3077             created_items   (dict):     All created items belonging to the VM
3078         """
3079
3080 1         for k, v in created_items.items():
3081 1             if not v:  # skip already deleted
3082 1                 continue
3083
3084 1             try:
3085 1                 k_item, k_id = self._get_item_name_id(k)
3086 1                 if k_item == "port":
3087 1                     self._delete_ports_by_id_wth_neutron(k_id)
3088
3089 1             except Exception as e:
3090 1                 self.logger.error(
3091                     "Error deleting port: {}: {}".format(type(e).__name__, e)
3092                 )
3093
3094 1     def _delete_created_items(
3095         self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
3096     ) -> bool:
3097         """Delete Volumes and floating ip if they exist in created_items."""
3098 1         for k, v in created_items.items():
3099 1             if not v:  # skip already deleted
3100 1                 continue
3101
3102 1             try:
3103 1                 k_item, k_id = self._get_item_name_id(k)
3104 1                 if k_item == "volume":
3105 1                     unavailable_vol = self._delete_volumes_by_id_wth_cinder(
3106                         k, k_id, volumes_to_hold, created_items
3107                     )
3108
3109 1                     if unavailable_vol:
3110 1                         keep_waiting = True
3111
3112 1                 elif k_item == "floating_ip":
3113 1                     self._delete_floating_ip_by_id(k, k_id, created_items)
3114
3115 1             except Exception as e:
3116 1                 self.logger.error("Error deleting {}: {}".format(k, e))
3117
3118 1         return keep_waiting
3119
3120 1     @staticmethod
3121 1     def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3122         """Remove the volumes which has key flag from created_items
3123
3124         Args:
3125             created_items   (dict):         All created items belongs to VM
3126
3127         Returns:
3128             created_items   (dict):         Persistent volumes eliminated created_items
3129         """
3130 1         return {
3131             key: value
3132             for (key, value) in created_items.items()
3133             if len(key.split(":")) == 2
3134         }
3135
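Keys in created_items carry either two segments ("item:id") or three ("volume:id:keep"); the comprehension above keeps only the two-segment entries. An illustration with hypothetical keys:

    created_items = {"volume:v1:keep": True, "volume:v2": True, "floating_ip:f1": True}
    deletable = {k: v for k, v in created_items.items() if len(k.split(":")) == 2}
    assert deletable == {"volume:v2": True, "floating_ip:f1": True}
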
3136 1     def delete_vminstance(
3137         self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3138     ) -> None:
3139         """Removes a VM instance from VIM. Returns the old identifier.
3140         Args:
3141             vm_id   (str):              Identifier of VM instance
3142             created_items   (dict):     All created items belonging to the VM
3143             volumes_to_hold (list):     Volumes that must not be deleted
3144         """
3145 1         if created_items is None:
3146 1             created_items = {}
3147 1         if volumes_to_hold is None:
3148 1             volumes_to_hold = []
3149
3150 1         try:
3151 1             created_items = self._extract_items_wth_keep_flag_from_created_items(
3152                 created_items
3153             )
3154
3155 1             self._reload_connection()
3156
3157             # Delete VM ports attached to the networks before the virtual machine
3158 1             if created_items:
3159 1                 self._delete_vm_ports_attached_to_network(created_items)
3160
3161 1             if vm_id:
3162 1                 self.nova.servers.delete(vm_id)
3163
3164             # Although detached, volumes should be in 'available' status before deleting.
3165             # We ensure that in this loop.
3166 1             keep_waiting = True
3167 1             elapsed_time = 0
3168
3169 1             while keep_waiting and elapsed_time < volume_timeout:
3170 1                 keep_waiting = False
3171
3172                 # Delete volumes and floating IP.
3173 1                 keep_waiting = self._delete_created_items(
3174                     created_items, volumes_to_hold, keep_waiting
3175                 )
3176
3177 1                 if keep_waiting:
3178 1                     time.sleep(1)
3179 1                     elapsed_time += 1
3180
3181 1         except (
3182             nvExceptions.NotFound,
3183             ksExceptions.ClientException,
3184             nvExceptions.ClientException,
3185             ConnectionError,
3186         ) as e:
3187 0             self._format_exception(e)
3188
3189 1     def refresh_vms_status(self, vm_list):
3190         """Get the status of the virtual machines and their interfaces/ports
3191         Params: the list of VM identifiers
3192         Returns a dictionary with:
3193             vm_id:          #VIM id of this Virtual Machine
3194                 status:     #Mandatory. Text with one of:
3195                             #  DELETED (not found at vim)
3196                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3197                             #  OTHER (Vim reported other status not understood)
3198                             #  ERROR (VIM indicates an ERROR status)
3199                             #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3200                             #  CREATING (in building process), ERROR
3201                             #  ACTIVE:NoMgmtIP (active but none of its interfaces has an IP address
3202                             #
3203                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
3204                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
3205                 interfaces:
3206                  -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
3207                     mac_address:      #Text format XX:XX:XX:XX:XX:XX
3208                     vim_net_id:       #network id where this interface is connected
3209                     vim_interface_id: #interface/port VIM id
3210                     ip_address:       #null, or text with IPv4, IPv6 address
3211                     compute_node:     #identification of compute node where PF,VF interface is allocated
3212                     pci:              #PCI address of the NIC that hosts the PF,VF
3213                     vlan:             #physical VLAN used for VF
3214         """
3215 0         vm_dict = {}
3216 0         self.logger.debug(
3217             "refresh_vms status: Getting tenant VM instance information from VIM"
3218         )
3219
3220 0         for vm_id in vm_list:
3221 0             vm = {}
3222
3223 0             try:
3224 0                 vm_vim = self.get_vminstance(vm_id)
3225
3226 0                 if vm_vim["status"] in vmStatus2manoFormat:
3227 0                     vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3228                 else:
3229 0                     vm["status"] = "OTHER"
3230 0                     vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3231
3232 0                 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3233 0                 vm_vim.pop("user_data", None)
3234 0                 vm["vim_info"] = self.serialize(vm_vim)
3235
3236 0                 vm["interfaces"] = []
3237 0                 if vm_vim.get("fault"):
3238 0                     vm["error_msg"] = str(vm_vim["fault"])
3239
3240                 # get interfaces
3241 0                 try:
3242 0                     self._reload_connection()
3243 0                     port_dict = self.neutron.list_ports(device_id=vm_id)
3244
3245 0                     for port in port_dict["ports"]:
3246 0                         interface = {}
3247 0                         interface["vim_info"] = self.serialize(port)
3248 0                         interface["mac_address"] = port.get("mac_address")
3249 0                         interface["vim_net_id"] = port["network_id"]
3250 0                         interface["vim_interface_id"] = port["id"]
3251                         # check if OS-EXT-SRV-ATTR:host is there,
3252                         # in case of non-admin credentials, it will be missing
3253
3254 0                         if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3255 0                             interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3256
3257 0                         interface["pci"] = None
3258
3259                         # check if binding:profile is there,
3260                         # in case of non-admin credentials, it will be missing
3261 0                         if port.get("binding:profile"):
3262 0                             if port["binding:profile"].get("pci_slot"):
3263                                 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3264                                 #  the slot to 0x00
3265                                 # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
3266                                 #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
3267 0                                 pci = port["binding:profile"]["pci_slot"]
3268                                 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3269 0                                 interface["pci"] = pci
3270
3271 0                         interface["vlan"] = None
3272
3273 0                         if port.get("binding:vif_details"):
3274 0                             interface["vlan"] = port["binding:vif_details"].get("vlan")
3275
3276                         # Get vlan from the network if not present in the port, for old OpenStack versions and
3277                         # cases where the vlan is needed at PT
3278 0                         if not interface["vlan"]:
3279                             # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3280 0                             network = self.neutron.show_network(port["network_id"])
3281
3282 0                             if (
3283                                 network["network"].get("provider:network_type")
3284                                 == "vlan"
3285                             ):
3286                                 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3287 0                                 interface["vlan"] = network["network"].get(
3288                                     "provider:segmentation_id"
3289                                 )
3290
3291 0                         ips = []
3292                         # look for floating ip address
3293 0                         try:
3294 0                             floating_ip_dict = self.neutron.list_floatingips(
3295                                 port_id=port["id"]
3296                             )
3297
3298 0                             if floating_ip_dict.get("floatingips"):
3299 0                                 ips.append(
3300                                     floating_ip_dict["floatingips"][0].get(
3301                                         "floating_ip_address"
3302                                     )
3303                                 )
3304 0                         except Exception:
3305 0                             pass
3306
3307 0                         for subnet in port["fixed_ips"]:
3308 0                             ips.append(subnet["ip_address"])
3309
3310 0                         interface["ip_address"] = ";".join(ips)
3311 0                         vm["interfaces"].append(interface)
3312 0                 except Exception as e:
3313 0                     self.logger.error(
3314                         "Error getting vm interface information {}: {}".format(
3315                             type(e).__name__, e
3316                         ),
3317                         exc_info=True,
3318                     )
3319 0             except vimconn.VimConnNotFoundException as e:
3320 0                 self.logger.error("Exception getting vm status: %s", str(e))
3321 0                 vm["status"] = "DELETED"
3322 0                 vm["error_msg"] = str(e)
3323 0             except vimconn.VimConnException as e:
3324 0                 self.logger.error("Exception getting vm status: %s", str(e))
3325 0                 vm["status"] = "VIM_ERROR"
3326 0                 vm["error_msg"] = str(e)
3327
3328 0             vm_dict[vm_id] = vm
3329
3330 0         return vm_dict
3331
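An illustrative (entirely made-up) return value matching the docstring above:

    {
        "ebd39f37": {  # VIM id of the VM; shortened here for readability
            "status": "ACTIVE",
            "vim_info": "...",  # yaml.safe_dump of the raw server dict
            "interfaces": [
                {
                    "vim_interface_id": "port-uuid",
                    "vim_net_id": "net-uuid",
                    "mac_address": "fa:16:3e:00:00:01",
                    "ip_address": "192.0.2.7;10.0.0.4",  # floating and fixed IPs joined with ';'
                    "compute_node": "compute-0",
                    "pci": None,
                    "vlan": 2104,
                }
            ],
        }
    }
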
3332 1     def action_vminstance(self, vm_id, action_dict, created_items={}):
3333         """Send and action over a VM instance from VIM
3334         Returns None or the console dict if the action was successfully sent to the VIM
3335         """
3336 0         self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3337
3338 0         try:
3339 0             self._reload_connection()
3340 0             server = self.nova.servers.find(id=vm_id)
3341
3342 0             if "start" in action_dict:
3343 0                 if action_dict["start"] == "rebuild":
3344 0                     server.rebuild()
3345                 else:
3346 0                     if server.status == "PAUSED":
3347 0                         server.unpause()
3348 0                     elif server.status == "SUSPENDED":
3349 0                         server.resume()
3350 0                     elif server.status == "SHUTOFF":
3351 0                         server.start()
3352                     else:
3353 0                         self.logger.debug(
3354                             "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3355                         )
3356 0                         raise vimconn.VimConnException(
3357                             "Cannot 'start' instance while it is in active state",
3358                             http_code=vimconn.HTTP_Bad_Request,
3359                         )
3360
3361 0             elif "pause" in action_dict:
3362 0                 server.pause()
3363 0             elif "resume" in action_dict:
3364 0                 server.resume()
3365 0             elif "shutoff" in action_dict or "shutdown" in action_dict:
3366 0                 self.logger.debug("server status %s", server.status)
3367 0                 if server.status == "ACTIVE":
3368 0                     server.stop()
3369                 else:
3370 0                     self.logger.debug("ERROR: VM is not in Active state")
3371 0                     raise vimconn.VimConnException(
3372                         "VM is not in active state, stop operation is not allowed",
3373                         http_code=vimconn.HTTP_Bad_Request,
3374                     )
3375 0             elif "forceOff" in action_dict:
3376 0                 server.stop()  # TODO
3377 0             elif "terminate" in action_dict:
3378 0                 server.delete()
3379 0             elif "createImage" in action_dict:
3380 0                 server.create_image()
3381                 # "path":path_schema,
3382                 # "description":description_schema,
3383                 # "name":name_schema,
3384                 # "metadata":metadata_schema,
3385                 # "imageRef": id_schema,
3386                 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3387 0             elif "rebuild" in action_dict:
3388 0                 server.rebuild(server.image["id"])
3389 0             elif "reboot" in action_dict:
3390 0                 server.reboot()  # reboot_type="SOFT"
3391 0             elif "console" in action_dict:
3392 0                 console_type = action_dict["console"]
3393
3394 0                 if console_type is None or console_type == "novnc":
3395 0                     console_dict = server.get_vnc_console("novnc")
3396 0                 elif console_type == "xvpvnc":
3397 0                     console_dict = server.get_vnc_console(console_type)
3398 0                 elif console_type == "rdp-html5":
3399 0                     console_dict = server.get_rdp_console(console_type)
3400 0                 elif console_type == "spice-html5":
3401 0                     console_dict = server.get_spice_console(console_type)
3402                 else:
3403 0                     raise vimconn.VimConnException(
3404                         "console type '{}' not allowed".format(console_type),
3405                         http_code=vimconn.HTTP_Bad_Request,
3406                     )
3407
3408 0                 try:
3409 0                     console_url = console_dict["console"]["url"]
3410                     # parse console_url
3411 0                     protocol_index = console_url.find("//")
3412 0                     suffix_index = (
3413                         console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3414                     )
3415 0                     port_index = (
3416                         console_url[protocol_index + 2 : suffix_index].find(":")
3417                         + protocol_index
3418                         + 2
3419                     )
3420
3421 0                     if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3422 0                         raise vimconn.VimConnException(
3423                             "Unexpected response from VIM " + str(console_dict)
3424                         )
3425
3426 0                     console_dict2 = {
3427                         "protocol": console_url[0:protocol_index],
3428                         "server": console_url[protocol_index + 2 : port_index],
3429                         "port": int(console_url[port_index + 1 : suffix_index]),
3430                         "suffix": console_url[suffix_index + 1 :],
3431                     }
3432
3433 0                     return console_dict2
3434 0                 except Exception:
3435 0                     raise vimconn.VimConnException(
3436                         "Unexpected response from VIM " + str(console_dict)
3437                     )
3438
3439 0             return None
3440 0         except (
3441             ksExceptions.ClientException,
3442             nvExceptions.ClientException,
3443             nvExceptions.NotFound,
3444             ConnectionError,
3445         ) as e:
3446 0             self._format_exception(e)
3447         # TODO insert exception vimconn.HTTP_Unauthorized
3448
3449     # ###### VIO Specific Changes #########
3450 1     def _generate_vlanID(self):
3451         """
3452         Method to get an unused vlanID
3453             Args:
3454                 None
3455             Returns:
3456                 vlanID
3457         """
3458         # Get used VLAN IDs
3459 0         usedVlanIDs = []
3460 0         networks = self.get_network_list()
3461
3462 0         for net in networks:
3463 0             if net.get("provider:segmentation_id"):
3464 0                 usedVlanIDs.append(net.get("provider:segmentation_id"))
3465
3466 0         used_vlanIDs = set(usedVlanIDs)
3467
3468         # find unused VLAN ID
3469 0         for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3470 0             try:
3471 0                 start_vlanid, end_vlanid = map(
3472                     int, vlanID_range.replace(" ", "").split("-")
3473                 )
3474
3475 0                 for vlanID in range(start_vlanid, end_vlanid + 1):
3476 0                     if vlanID not in used_vlanIDs:
3477 0                         return vlanID
3478 0             except Exception as exp:
3479 0                 raise vimconn.VimConnException(
3480                     "Exception {} occurred while generating VLAN ID.".format(exp)
3481                 )
3482         else:
3483 0             raise vimconn.VimConnConflictException(
3484                 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3485                     self.config.get("dataplane_net_vlan_range")
3486                 )
3487             )
3488
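The range strings are parsed as "start-end" with optional spaces around the dash. A condensed, runnable sketch of the scan above (the range and used-set values are examples):

    def first_unused_vlan(ranges, used):
        """Return the first VLAN ID in the configured ranges that is not in use."""
        for vlan_range in ranges:
            start, end = map(int, vlan_range.replace(" ", "").split("-"))
            for vlan_id in range(start, end + 1):
                if vlan_id not in used:
                    return vlan_id
        raise ValueError("All given VLAN IDs {} are in use".format(ranges))

    assert first_unused_vlan(["3000 - 3002"], {3000, 3001}) == 3002
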
3489 1     def _generate_multisegment_vlanID(self):
3490         """
3491         Method to get an unused vlanID
3492         Args:
3493             None
3494         Returns:
3495             vlanID
3496         """
3497         # Get used VLAN IDs
3498 0         usedVlanIDs = []
3499 0         networks = self.get_network_list()
3500 0         for net in networks:
3501 0             if net.get("provider:network_type") == "vlan" and net.get(
3502                 "provider:segmentation_id"
3503             ):
3504 0                 usedVlanIDs.append(net.get("provider:segmentation_id"))
3505 0             elif net.get("segments"):
3506 0                 for segment in net.get("segments"):
3507 0                     if segment.get("provider:network_type") == "vlan" and segment.get(
3508                         "provider:segmentation_id"
3509                     ):
3510 0                         usedVlanIDs.append(segment.get("provider:segmentation_id"))
3511
3512 0         used_vlanIDs = set(usedVlanIDs)
3513
3514         # find unused VLAN ID
3515 0         for vlanID_range in self.config.get("multisegment_vlan_range"):
3516 0             try:
3517 0                 start_vlanid, end_vlanid = map(
3518                     int, vlanID_range.replace(" ", "").split("-")
3519                 )
3520
3521 0                 for vlanID in range(start_vlanid, end_vlanid + 1):
3522 0                     if vlanID not in used_vlanIDs:
3523 0                         return vlanID
3524 0             except Exception as exp:
3525 0                 raise vimconn.VimConnException(
3526                     "Exception {} occurred while generating VLAN ID.".format(exp)
3527                 )
3528         else:
3529 0             raise vimconn.VimConnConflictException(
3530                 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3531                     self.config.get("multisegment_vlan_range")
3532                 )
3533             )
3534
3535 1     def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3536         """
3537         Method to validate user-given vlanID ranges
3538             Args: input_vlan_range, text_vlan_range
3539             Returns: None
3540         """
3541 0         for vlanID_range in input_vlan_range:
3542 0             vlan_range = vlanID_range.replace(" ", "")
3543             # validate format
3544 0             vlanID_pattern = r"(\d)*-(\d)*$"
3545 0             match_obj = re.match(vlanID_pattern, vlan_range)
3546 0             if not match_obj:
3547 0                 raise vimconn.VimConnConflictException(
3548                     "Invalid VLAN range for {}: {}.You must provide "
3549                     "'{}' in format [start_ID - end_ID].".format(
3550                         text_vlan_range, vlanID_range, text_vlan_range
3551                     )
3552                 )
3553
3554 0             start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3555 0             if start_vlanid <= 0:
3556 0                 raise vimconn.VimConnConflictException(
3557                     "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3558                     "networks valid IDs are 1 to 4094 ".format(
3559                         text_vlan_range, vlanID_range
3560                     )
3561                 )
3562
3563 0             if end_vlanid > 4094:
3564 0                 raise vimconn.VimConnConflictException(
3565                     "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3566                     "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3567                         text_vlan_range, vlanID_range
3568                     )
3569                 )
3570
3571 0             if start_vlanid > end_vlanid:
3572 0                 raise vimconn.VimConnConflictException(
3573                     "Invalid VLAN range for {}: {}. You must provide '{}'"
3574                     " in format start_ID - end_ID and start_ID < end_ID ".format(
3575                         text_vlan_range, vlanID_range, text_vlan_range
3576                     )
3577                 )
3578
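Under the checks above, example range strings would be accepted or rejected as follows (illustrative values):

    # Accepted: "1-4094", "100 - 200"
    # Rejected: "0-100"     (start ID must be greater than zero)
    #           "4000-5000" (end ID above 4094)
    #           "300-200"   (start greater than end)
    #           "100"       (not in start_ID-end_ID form)
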
3579 1     def get_hosts_info(self):
3580         """Get the information of deployed hosts
3581         Returns the hosts content"""
3582 0         if self.debug:
3583 0             print("osconnector: Getting Host info from VIM")
3584
3585 0         try:
3586 0             h_list = []
3587 0             self._reload_connection()
3588 0             hypervisors = self.nova.hypervisors.list()
3589
3590 0             for hype in hypervisors:
3591 0                 h_list.append(hype.to_dict())
3592
3593 0             return 1, {"hosts": h_list}
3594 0         except nvExceptions.NotFound as e:
3595 0             error_value = -vimconn.HTTP_Not_Found
3596 0             error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3597 0         except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3598 0             error_value = -vimconn.HTTP_Bad_Request
3599 0             error_text = (
3600                 type(e).__name__
3601                 + ": "
3602                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3603             )
3604
3605         # TODO insert exception vimconn.HTTP_Unauthorized
3606         # if reaching here is because an exception
3607 0         self.logger.debug("get_hosts_info " + error_text)
3608
3609 0         return error_value, error_text
3610
3611 1     def get_hosts(self, vim_tenant):
3612         """Get the hosts and deployed instances
3613         Returns the hosts content"""
3614 0         r, hype_dict = self.get_hosts_info()
3615
3616 0         if r < 0:
3617 0             return r, hype_dict
3618
3619 0         hypervisors = hype_dict["hosts"]
3620
3621 0         try:
3622 0             servers = self.nova.servers.list()
3623 0             for hype in hypervisors:
3624 0                 for server in servers:
3625 0                     if (
3626                         server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3627                         == hype["hypervisor_hostname"]
3628                     ):
3629 0                         if "vm" in hype:
3630 0                             hype["vm"].append(server.id)
3631                         else:
3632 0                             hype["vm"] = [server.id]
3633
3634 0             return 1, hype_dict
3635 0         except nvExceptions.NotFound as e:
3636 0             error_value = -vimconn.HTTP_Not_Found
3637 0             error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3638 0         except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3639 0             error_value = -vimconn.HTTP_Bad_Request
3640 0             error_text = (
3641                 type(e).__name__
3642                 + ": "
3643                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3644             )
3645
3646         # TODO insert exception vimconn.HTTP_Unauthorized
3647         # if reaching here is because an exception
3648 0         self.logger.debug("get_hosts " + error_text)
3649
3650 0         return error_value, error_text
3651
3652 1     def new_affinity_group(self, affinity_group_data):
3653         """Adds a server group to VIM
3654             affinity_group_data contains a dictionary with information, keys:
3655                 name: name in VIM for the server group
3656                 type: affinity or anti-affinity
3657                 scope: Only nfvi-node allowed
3658         Returns the server group identifier"""
3659 0         self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3660
3661 0         try:
3662 0             name = affinity_group_data["name"]
3663 0             policy = affinity_group_data["type"]
3664
3665 0             self._reload_connection()
3666 0             new_server_group = self.nova.server_groups.create(name, policy)
3667
3668 0             return new_server_group.id
3669 0         except (
3670             ksExceptions.ClientException,
3671             nvExceptions.ClientException,
3672             ConnectionError,
3673             KeyError,
3674         ) as e:
3675 0             self._format_exception(e)
3676
3677 1     def get_affinity_group(self, affinity_group_id):
3678         """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3679 0         self.logger.debug("Getting flavor '%s'", affinity_group_id)
3680 0         try:
3681 0             self._reload_connection()
3682 0             server_group = self.nova.server_groups.find(id=affinity_group_id)
3683
3684 0             return server_group.to_dict()
3685 0         except (
3686             nvExceptions.NotFound,
3687             nvExceptions.ClientException,
3688             ksExceptions.ClientException,
3689             ConnectionError,
3690         ) as e:
3691 0             self._format_exception(e)
3692
3693 1     def delete_affinity_group(self, affinity_group_id):
3694         """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3695 0         self.logger.debug("Getting server group '%s'", affinity_group_id)
3696 0         try:
3697 0             self._reload_connection()
3698 0             self.nova.server_groups.delete(affinity_group_id)
3699
3700 0             return affinity_group_id
3701 0         except (
3702             nvExceptions.NotFound,
3703             ksExceptions.ClientException,
3704             nvExceptions.ClientException,
3705             ConnectionError,
3706         ) as e:
3707 0             self._format_exception(e)
3708
3709 1     def get_vdu_state(self, vm_id, host_is_required=False) -> list:
3710         """Getting the state of a VDU.
3711         Args:
3712             vm_id   (str): ID of an instance
3713             host_is_required    (Boolean): If the VIM account is non-admin, host info does not appear in server_dict;
3714                                            in that case, setting this to True raises KeyError.
3715         Returns:
3716             vdu_data    (list): VDU details including state, flavor, host_info, AZ
3717         """
3718 0         self.logger.debug("Getting the status of VM")
3719 0         self.logger.debug("VIM VM ID %s", vm_id)
3720 0         try:
3721 0             self._reload_connection()
3722 0             server_dict = self._find_nova_server(vm_id)
3723 0             srv_attr = "OS-EXT-SRV-ATTR:host"
3724 0             host_info = (
3725                 server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
3726             )
3727 0             vdu_data = [
3728                 server_dict["status"],
3729                 server_dict["flavor"]["id"],
3730                 host_info,
3731                 server_dict["OS-EXT-AZ:availability_zone"],
3732             ]
3733 0             self.logger.debug("vdu_data %s", vdu_data)
3734 0             return vdu_data
3735
3736 0         except Exception as e:
3737 0             self._format_exception(e)
3738
3739 1     def check_compute_availability(self, host, server_flavor_details):
3740 0         self._reload_connection()
3741 0         hypervisor_search = self.nova.hypervisors.search(
3742             hypervisor_match=host, servers=True
3743         )
3744 0         for hypervisor in hypervisor_search:
3745 0             hypervisor_id = hypervisor.to_dict()["id"]
3746 0             hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
3747 0             hypervisor_dict = hypervisor_details.to_dict()
3748 0             hypervisor_temp = json.dumps(hypervisor_dict)
3749 0             hypervisor_json = json.loads(hypervisor_temp)
3750 0             resources_available = [
3751                 hypervisor_json["free_ram_mb"],
3752                 hypervisor_json["disk_available_least"],
3753                 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
3754             ]
3755 0             compute_available = all(
3756                 x > y for x, y in zip(resources_available, server_flavor_details)
3757             )
3758 0             if compute_available:
3759 0                 return host
3760
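The availability test pairs free resources with the flavor's requirements positionally (RAM in MB, disk in GB, vCPUs) and demands a strict surplus in every dimension; for example:

    resources_available = [8192, 120, 6]   # free_ram_mb, disk_available_least, free vcpus
    server_flavor_details = [4096, 40, 4]  # flavor ram, disk, vcpus
    assert all(x > y for x, y in zip(resources_available, server_flavor_details))
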
3761 1     def check_availability_zone(
3762         self, old_az, server_flavor_details, old_host, host=None
3763     ):
3764 0         self._reload_connection()
3765 0         az_check = {"zone_check": False, "compute_availability": None}
3766 0         aggregates_list = self.nova.aggregates.list()
3767 0         for aggregate in aggregates_list:
3768 0             aggregate_details = aggregate.to_dict()
3769 0             aggregate_temp = json.dumps(aggregate_details)
3770 0             aggregate_json = json.loads(aggregate_temp)
3771 0             if aggregate_json["availability_zone"] == old_az:
3772 0                 hosts_list = aggregate_json["hosts"]
3773 0                 if host is not None:
3774 0                     if host in hosts_list:
3775 0                         az_check["zone_check"] = True
3776 0                         available_compute_id = self.check_compute_availability(
3777                             host, server_flavor_details
3778                         )
3779 0                         if available_compute_id is not None:
3780 0                             az_check["compute_availability"] = available_compute_id
3781                 else:
3782 0                     for check_host in hosts_list:
3783 0                         if check_host != old_host:
3784 0                             available_compute_id = self.check_compute_availability(
3785                                 check_host, server_flavor_details
3786                             )
3787 0                             if available_compute_id is not None:
3788 0                                 az_check["zone_check"] = True
3789 0                                 az_check["compute_availability"] = available_compute_id
3790 0                                 break
3791                     else:
3792 0                         az_check["zone_check"] = True
3793 0         return az_check
3794
3795 1     def migrate_instance(self, vm_id, compute_host=None):
3796         """
3797         Migrate a vdu
3798         param:
3799             vm_id: ID of an instance
3800             compute_host: Host to migrate the vdu to
3801         """
3802 0         self._reload_connection()
3803 0         vm_state = False
3804 0         instance_state = self.get_vdu_state(vm_id, host_is_required=True)
3805 0         server_flavor_id = instance_state[1]
3806 0         server_hypervisor_name = instance_state[2]
3807 0         server_availability_zone = instance_state[3]
3808 0         try:
3809 0             server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3810 0             server_flavor_details = [
3811                 server_flavor["ram"],
3812                 server_flavor["disk"],
3813                 server_flavor["vcpus"],
3814             ]
3815 0             if compute_host == server_hypervisor_name:
3816 0                 raise vimconn.VimConnException(
3817                     "Unable to migrate instance '{}' to the same host '{}'".format(
3818                         vm_id, compute_host
3819                     ),
3820                     http_code=vimconn.HTTP_Bad_Request,
3821                 )
3822 0             az_status = self.check_availability_zone(
3823                 server_availability_zone,
3824                 server_flavor_details,
3825                 server_hypervisor_name,
3826                 compute_host,
3827             )
3828 0             availability_zone_check = az_status["zone_check"]
3829 0             available_compute_id = az_status.get("compute_availability")
3830
3831 0             if availability_zone_check is False:
3832 0                 raise vimconn.VimConnException(
3833                     "Unable to migrate instance '{}' to a different availability zone".format(
3834                         vm_id
3835                     ),
3836                     http_code=vimconn.HTTP_Bad_Request,
3837                 )
3838 0             if available_compute_id is not None:
3839                 # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
3840 0                 self.nova.servers.live_migrate(
3841                     server=vm_id,
3842                     host=available_compute_id,
3843                     block_migration=True,
3844                 )
3845 0                 state = "MIGRATING"
3846                 # live_migrate is asynchronous: wait until the server is ACTIVE
3847                 # again, then re-read which hypervisor it is actually running on
3848 0                 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
3849 0                 changed_compute_host = self.get_vdu_state(
3850                     vm_id, host_is_required=True
3851                 )[2]
3852 0                 if vm_state and changed_compute_host == available_compute_id:
3853 0                     self.logger.debug(
3854                         "Instance '{}' migrated to the new compute host '{}'".format(
3855                             vm_id, changed_compute_host
3856                         )
3857                     )
3858 0                     return state, available_compute_id
3859                 else:
3860 0                     raise vimconn.VimConnException(
3861                         "Migration Failed. Instance '{}' not moved to the new host {}".format(
3862                             vm_id, available_compute_id
3863                         ),
3864                         http_code=vimconn.HTTP_Bad_Request,
3865                     )
3866             else:
3867 0                 raise vimconn.VimConnException(
3868                     "No compute host available, or none with enough resources, to migrate instance '{}'".format(
3869                         vm_id
3870                     ),
3871                     http_code=vimconn.HTTP_Bad_Request,
3872                 )
3873 0         except (
3874             nvExceptions.BadRequest,
3875             nvExceptions.ClientException,
3876             nvExceptions.NotFound,
3877         ) as e:
3878 0             self._format_exception(e)
3879
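A sketch of the calling side, assuming "conn" is an instantiated connector; the UUID and host name are placeholders. On success the method returns the "MIGRATING" state marker together with the host the instance landed on; every failure path raises vimconn.VimConnException.

from osm_ro_plugin import vimconn

# "conn" is assumed to be an instantiated OpenStack vimconnector.
try:
    state, new_host = conn.migrate_instance(
        vm_id="11111111-2222-3333-4444-555555555555",  # placeholder UUID
        compute_host="compute-02",  # omit to let the zone scan pick a host
    )
    print(f"migration finished: state={state}, host={new_host}")
except vimconn.VimConnException as err:
    print(f"migration failed: {err}")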
3880 1     def resize_instance(self, vm_id, new_flavor_id):
3881         """
3882         Resize the VM to the given flavor.
3883
3884         param:
3885             vm_id: ID of the instance
3886             new_flavor_id: ID of the flavor to resize the instance to
3887         Return the status of the resized instance.
3888         """
3889 0         self._reload_connection()
3890 0         self.logger.debug("Resizing the flavor of an instance")
3891 0         instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
3892 0         try:
3893 0             old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
3894 0             new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
3895 0             if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
3896 0                 if old_flavor_disk > new_flavor_disk:
3897 0                     raise nvExceptions.BadRequest(
3898                         400,
3899                         message="Server disk resize failed. Resizing to a flavor with a smaller disk is not allowed",
3900                     )
3901                 else:
3902 0                     self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
3903 0                     vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
3904 0                     if vm_state:
3905 0                         instance_resized_status = self.confirm_resize(
3906                             vm_id, instance_status
3907                         )
3908 0                         return instance_resized_status
3909                     else:
3910 0                         raise nvExceptions.BadRequest(
3911                             409,
3912                             message="Cannot 'resize': vm_state is in ERROR",
3913                         )
3914
3915             else:
3916 0                 self.logger.debug("ERROR: Instance is not in ACTIVE or SHUTOFF state")
3917 0                 raise nvExceptions.BadRequest(
3918                     409,
3919                     message="Cannot 'resize' instance: it is not in ACTIVE or SHUTOFF state",
3920                 )
3921 0         except (
3922             nvExceptions.BadRequest,
3923             nvExceptions.ClientException,
3924             nvExceptions.NotFound,
3925         ) as e:
3926 0             self._format_exception(e)
3927
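A usage sketch under the same assumptions (instantiated "conn", placeholder IDs). The method only proceeds when the instance is ACTIVE or SHUTOFF and the new flavor's disk is at least as large as the old one; it blocks until VERIFY_RESIZE is reached and then confirms the resize.

# Hypothetical IDs; resize_instance() blocks until the resize is confirmed.
status = conn.resize_instance(
    vm_id="11111111-2222-3333-4444-555555555555",
    new_flavor_id="flavor-id-placeholder",  # disk must be >= the current flavor's
)
print(f"status after resize: {status}")  # back to ACTIVE or SHUTOFF on success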
3928 1     def confirm_resize(self, vm_id, instance_state):
3929         """
3930         Confirm the resize of an instance.
3931         param:
3932             vm_id: ID of the instance; instance_state: state to wait for afterwards
3933         """
3934 0         self._reload_connection()
3935 0         self.nova.servers.confirm_resize(server=vm_id)
3936 0         if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3937 0             self.__wait_for_vm(vm_id, instance_state)
3938 0         instance_status = self.get_vdu_state(vm_id)[0]
3939 0         return instance_status
3940
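confirm_resize() finishes the standard Nova resize handshake: resize() leaves the server in VERIFY_RESIZE until the change is either confirmed or reverted. A sketch of that underlying python-novaclient cycle, assuming "nova", "vm_id" and "new_flavor_id" are already in scope:

# The Nova resize handshake that confirm_resize() completes.
nova.servers.resize(server=vm_id, flavor=new_flavor_id)
# ... poll until the server status becomes VERIFY_RESIZE ...
nova.servers.confirm_resize(server=vm_id)  # commit the new flavor
# nova.servers.revert_resize(server=vm_id) would roll back instead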
3941 1     def get_monitoring_data(self):
3942 1         try:
3943 1             self.logger.debug("Getting servers and ports data from Openstack VIMs.")
3944 1             self._reload_connection()
3945 1             all_servers = self.nova.servers.list(detailed=True)
3946 1             try:
3947 1                 for server in all_servers:
3948 1                     if server.flavor.get("original_name"):
3949 1                         server.flavor["id"] = self.nova.flavors.find(
3950                             name=server.flavor["original_name"]
3951                         ).id
3952 0             except nvExceptions.NotFound as e:
3953 0                 self.logger.warning(str(e))
3954 1             all_ports = self.neutron.list_ports()
3955 1             return all_servers, all_ports
3956 1         except (
3957             vimconn.VimConnException,
3958             vimconn.VimConnNotFoundException,
3959             vimconn.VimConnConnectionException,
3960         ) as e:
3961 1             raise vimconn.VimConnException(
3962                 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
3963             )
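A sketch of a monitoring consumer, assuming "conn" is an instantiated connector: the first element is a list of novaclient Server objects, the second is the raw Neutron response, a dict whose "ports" key holds the port list.

# Hypothetical monitoring loop over the returned data.
servers, ports = conn.get_monitoring_data()
for server in servers:       # novaclient Server objects
    print(server.id, server.status, server.flavor.get("id"))
for port in ports["ports"]:  # neutron.list_ports() returns {"ports": [...]}
    print(port["id"], port["status"], port.get("device_id"))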