Code Coverage

Cobertura Coverage Report > RO-VIM-openstack.osm_rovim_openstack > vimconn_openstack.py

Trend

Classes:      100%
Lines:         23%
Conditionals: 100%

File Coverage Summary

Name                  Classes     Lines           Conditionals
vimconn_openstack.py  100% (1/1)  23% (385/1700)  100% (0/0)

Coverage Breakdown by Class

Name                  Lines           Conditionals
vimconn_openstack.py  23% (385/1700)  N/A

Source

RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 #         http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 1 """
22 osconnector implements all the methods to interact with OpenStack using the python clients (keystone, nova, neutron, glance and cinder).
23
24 For the VNF forwarding graph, the OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 1 import copy
34 1 from http.client import HTTPException
35 1 import json
36 1 import logging
37 1 from pprint import pformat
38 1 import random
39 1 import re
40 1 import time
41
42 1 from cinderclient import client as cClient
43 1 from glanceclient import client as glClient
44 1 import glanceclient.exc as gl1Exceptions
45 1 from keystoneauth1 import session
46 1 from keystoneauth1.identity import v2, v3
47 1 import keystoneclient.exceptions as ksExceptions
48 1 import keystoneclient.v2_0.client as ksClient_v2
49 1 import keystoneclient.v3.client as ksClient_v3
50 1 import netaddr
51 1 from neutronclient.common import exceptions as neExceptions
52 1 from neutronclient.neutron import client as neClient
53 1 from novaclient import client as nClient, exceptions as nvExceptions
54 1 from osm_ro_plugin import vimconn
55 1 from requests.exceptions import ConnectionError
56 1 import yaml
57
58 1 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
59 1 __date__ = "$22-sep-2017 23:59:59$"
60
61 """contain the openstack virtual machine status to openmano status"""
62 1 vmStatus2manoFormat = {
63     "ACTIVE": "ACTIVE",
64     "PAUSED": "PAUSED",
65     "SUSPENDED": "SUSPENDED",
66     "SHUTOFF": "INACTIVE",
67     "BUILD": "BUILD",
68     "ERROR": "ERROR",
69     "DELETED": "DELETED",
70 }
71 1 netStatus2manoFormat = {
72     "ACTIVE": "ACTIVE",
73     "PAUSED": "PAUSED",
74     "INACTIVE": "INACTIVE",
75     "BUILD": "BUILD",
76     "ERROR": "ERROR",
77     "DELETED": "DELETED",
78 }
79
80 1 supportedClassificationTypes = ["legacy_flow_classifier"]
81
82 # global variables with the timeout (in seconds) for creating and deleting volumes and servers
83 1 volume_timeout = 1800
84 1 server_timeout = 1800
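
# Usage sketch for the maps above (illustration only): states not present in
# the maps are reported as "OTHER" by the refresh methods further below.
assert vmStatus2manoFormat.get("SHUTOFF", "OTHER") == "INACTIVE"
assert netStatus2manoFormat.get("UNKNOWN_STATE", "OTHER") == "OTHER"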
85
86
87 1 class SafeDumper(yaml.SafeDumper):
88 1     def represent_data(self, data):
89         # OpenStack APIs use custom subclasses of dict, and the YAML safe dumper
90         # is designed not to handle them (see pyyaml issue 142)
91 0         if isinstance(data, dict) and data.__class__ != dict:
92             # A simple solution is to convert those items back to dicts
93 0             data = dict(data.items())
94
95 0         return super(SafeDumper, self).represent_data(data)
96
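# Why represent_data converts dict subclasses (hedged illustration): plain
# yaml.SafeDumper rejects them with a RepresenterError, while the SafeDumper
# above serializes their content. "_AttrDict" is a stand-in for the custom
# dict subclasses returned by the OpenStack client APIs.
class _AttrDict(dict):
    pass


assert yaml.dump(_AttrDict(a=1), Dumper=SafeDumper, default_flow_style=True) == "{a: 1}\n"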
97
98 1 class vimconnector(vimconn.VimConnector):
99 1     def __init__(
100         self,
101         uuid,
102         name,
103         tenant_id,
104         tenant_name,
105         url,
106         url_admin=None,
107         user=None,
108         passwd=None,
109         log_level=None,
110         config={},
111         persistent_info={},
112     ):
113         """Uses the common constructor parameters. In this case
114         'url' is the keystone authorization url and
115         'url_admin' is not used
116         """
117 1         api_version = config.get("APIversion")
118
119 1         if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
120 0             raise vimconn.VimConnException(
121                 "Invalid value '{}' for config:APIversion. "
122                 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
123             )
124
125 1         vim_type = config.get("vim_type")
126
127 1         if vim_type and vim_type not in ("vio", "VIO"):
128 0             raise vimconn.VimConnException(
129                 "Invalid value '{}' for config:vim_type."
130                 "Allowed values are 'vio' or 'VIO'".format(vim_type)
131             )
132
133 1         if config.get("dataplane_net_vlan_range") is not None:
134             # validate vlan ranges provided by user
135 0             self._validate_vlan_ranges(
136                 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
137             )
138
139 1         if config.get("multisegment_vlan_range") is not None:
140             # validate vlan ranges provided by user
141 0             self._validate_vlan_ranges(
142                 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
143             )
144
145 1         vimconn.VimConnector.__init__(
146             self,
147             uuid,
148             name,
149             tenant_id,
150             tenant_name,
151             url,
152             url_admin,
153             user,
154             passwd,
155             log_level,
156             config,
157         )
158
159 1         if self.config.get("insecure") and self.config.get("ca_cert"):
160 0             raise vimconn.VimConnException(
161                 "options insecure and ca_cert are mutually exclusive"
162             )
163
164 1         self.verify = True
165
166 1         if self.config.get("insecure"):
167 0             self.verify = False
168
169 1         if self.config.get("ca_cert"):
170 0             self.verify = self.config.get("ca_cert")
171
172 1         if not url:
173 0             raise TypeError("url param can not be NoneType")
174
175 1         self.persistent_info = persistent_info
176 1         self.availability_zone = persistent_info.get("availability_zone", None)
177 1         self.session = persistent_info.get("session", {"reload_client": True})
178 1         self.my_tenant_id = self.session.get("my_tenant_id")
179 1         self.nova = self.session.get("nova")
180 1         self.neutron = self.session.get("neutron")
181 1         self.cinder = self.session.get("cinder")
182 1         self.glance = self.session.get("glance")
183         # self.glancev1 = self.session.get("glancev1")
184 1         self.keystone = self.session.get("keystone")
185 1         self.api_version3 = self.session.get("api_version3")
186 1         self.vim_type = self.config.get("vim_type")
187
188 1         if self.vim_type:
189 0             self.vim_type = self.vim_type.upper()
190
191 1         if self.config.get("use_internal_endpoint"):
192 0             self.endpoint_type = "internalURL"
193         else:
194 1             self.endpoint_type = None
195
196 1         logging.getLogger("urllib3").setLevel(logging.WARNING)
197 1         logging.getLogger("keystoneauth").setLevel(logging.WARNING)
198 1         logging.getLogger("novaclient").setLevel(logging.WARNING)
199 1         self.logger = logging.getLogger("ro.vim.openstack")
200
201         # allow security_groups to be a list or a single string
202 1         if isinstance(self.config.get("security_groups"), str):
203 0             self.config["security_groups"] = [self.config["security_groups"]]
204
205 1         self.security_groups_id = None
206
207         # ###### VIO Specific Changes #########
208 1         if self.vim_type == "VIO":
209 0             self.logger = logging.getLogger("ro.vim.vio")
210
211 1         if log_level:
212 0             self.logger.setLevel(getattr(logging, log_level))
213
214 1     def __getitem__(self, index):
215         """Get individual parameters.
216         Throws KeyError"""
217 0         if index == "project_domain_id":
218 0             return self.config.get("project_domain_id")
219 0         elif index == "user_domain_id":
220 0             return self.config.get("user_domain_id")
221         else:
222 0             return vimconn.VimConnector.__getitem__(self, index)
223
224 1     def __setitem__(self, index, value):
225         """Set individual parameters and mark the session as dirty, forcing a connection reload.
226         Throws KeyError"""
227 0         if index == "project_domain_id":
228 0             self.config["project_domain_id"] = value
229 0         elif index == "user_domain_id":
230 0             self.config["user_domain_id"] = value
231         else:
232 0             vimconn.VimConnector.__setitem__(self, index, value)
233
234 0         self.session["reload_client"] = True
235
236 1     def serialize(self, value):
237         """Serialization of python basic types.
238
239         In case the value is not serializable, a message will be logged and a
240         simple representation of the data that cannot be converted back to
241         python is returned.
242         """
243 0         if isinstance(value, str):
244 0             return value
245
246 0         try:
247 0             return yaml.dump(
248                 value, Dumper=SafeDumper, default_flow_style=True, width=256
249             )
250 0         except yaml.representer.RepresenterError:
251 0             self.logger.debug(
252                 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
253                 pformat(value),
254                 exc_info=True,
255             )
256
257 0             return str(value)
258
259 1     def _reload_connection(self):
260         """Called before any operation; it checks whether the credentials have changed.
261         Throws keystoneclient.apiclient.exceptions.AuthorizationFailure
262         """
263         # TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-)
264 1         if self.session["reload_client"]:
265 1             if self.config.get("APIversion"):
266 0                 self.api_version3 = (
267                     self.config["APIversion"] == "v3.3"
268                     or self.config["APIversion"] == "3"
269                 )
270             else:  # infer from the auth_url ending, which is either v3 or v2.0
271 1                 self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
272                     "/v3/"
273                 )
274
275 1             self.session["api_version3"] = self.api_version3
276
277 1             if self.api_version3:
278 0                 if self.config.get("project_domain_id") or self.config.get(
279                     "project_domain_name"
280                 ):
281 0                     project_domain_id_default = None
282                 else:
283 0                     project_domain_id_default = "default"
284
285 0                 if self.config.get("user_domain_id") or self.config.get(
286                     "user_domain_name"
287                 ):
288 0                     user_domain_id_default = None
289                 else:
290 0                     user_domain_id_default = "default"
291 0                 auth = v3.Password(
292                     auth_url=self.url,
293                     username=self.user,
294                     password=self.passwd,
295                     project_name=self.tenant_name,
296                     project_id=self.tenant_id,
297                     project_domain_id=self.config.get(
298                         "project_domain_id", project_domain_id_default
299                     ),
300                     user_domain_id=self.config.get(
301                         "user_domain_id", user_domain_id_default
302                     ),
303                     project_domain_name=self.config.get("project_domain_name"),
304                     user_domain_name=self.config.get("user_domain_name"),
305                 )
306             else:
307 1                 auth = v2.Password(
308                     auth_url=self.url,
309                     username=self.user,
310                     password=self.passwd,
311                     tenant_name=self.tenant_name,
312                     tenant_id=self.tenant_id,
313                 )
314
315 1             sess = session.Session(auth=auth, verify=self.verify)
316             # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
317             # Titanium Cloud and StarlingX
318 1             region_name = self.config.get("region_name")
319
320 1             if self.api_version3:
321 0                 self.keystone = ksClient_v3.Client(
322                     session=sess,
323                     endpoint_type=self.endpoint_type,
324                     region_name=region_name,
325                 )
326             else:
327 1                 self.keystone = ksClient_v2.Client(
328                     session=sess, endpoint_type=self.endpoint_type
329                 )
330
331 1             self.session["keystone"] = self.keystone
332             # In order to enable microversion functionality, an explicit microversion must be specified in "config".
333             # This implementation approach is due to the warning message in
334             # https://developer.openstack.org/api-guide/compute/microversions.html
335             # where it is stated that microversion backwards compatibility is not guaranteed and clients should
336             # always require a specific microversion.
337             # To be able to use the "device role tagging" functionality, define "microversion: 2.32" in the datacenter config
338 1             version = self.config.get("microversion")
339
340 1             if not version:
341 1                 version = "2.1"
342
343             # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
344             # Titanium Cloud and StarlingX
345 1             self.nova = self.session["nova"] = nClient.Client(
346                 str(version),
347                 session=sess,
348                 endpoint_type=self.endpoint_type,
349                 region_name=region_name,
350             )
351 1             self.neutron = self.session["neutron"] = neClient.Client(
352                 "2.0",
353                 session=sess,
354                 endpoint_type=self.endpoint_type,
355                 region_name=region_name,
356             )
357 1             self.cinder = self.session["cinder"] = cClient.Client(
358                 2,
359                 session=sess,
360                 endpoint_type=self.endpoint_type,
361                 region_name=region_name,
362             )
363
364 1             try:
365 1                 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
366 1             except Exception:
367 1                 self.logger.error("Cannot get project_id from session", exc_info=True)
368
369 1             if self.endpoint_type == "internalURL":
370 0                 glance_service_id = self.keystone.services.list(name="glance")[0].id
371 0                 glance_endpoint = self.keystone.endpoints.list(
372                     glance_service_id, interface="internal"
373                 )[0].url
374             else:
375 1                 glance_endpoint = None
376
377 1             self.glance = self.session["glance"] = glClient.Client(
378                 2, session=sess, endpoint=glance_endpoint
379             )
380             # using version 1 of glance client in new_image()
381             # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
382             #                                                            endpoint=glance_endpoint)
383 1             self.session["reload_client"] = False
384 1             self.persistent_info["session"] = self.session
385             # add availability zone info inside self.persistent_info
386 1             self._set_availablity_zones()
387 1             self.persistent_info["availability_zone"] = self.availability_zone
388             # force to get again security_groups_ids next time they are needed
389 1             self.security_groups_id = None
390
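    # Configuration sketch for the notes above (keys as documented in the
    # comments; the values are placeholders): enabling "device role tagging"
    # and a distributed-cloud region would look like
    #
    #     config = {"microversion": "2.32", "region_name": "RegionOne"}
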
391 1     def __net_os2mano(self, net_list_dict):
392         """Transform the net from openstack format to mano format
393         net_list_dict can be a list of dict or a single dict"""
394 0         if type(net_list_dict) is dict:
395 0             net_list_ = (net_list_dict,)
396 0         elif type(net_list_dict) is list:
397 0             net_list_ = net_list_dict
398         else:
399 0             raise TypeError("param net_list_dict must be a list or a dictionary")
400 0         for net in net_list_:
401 0             if net.get("provider:network_type") == "vlan":
402 0                 net["type"] = "data"
403             else:
404 0                 net["type"] = "bridge"
405
406 1     def __classification_os2mano(self, class_list_dict):
407         """Transform the openstack format (Flow Classifier) to mano format
408         (Classification) class_list_dict can be a list of dict or a single dict
409         """
410 1         if isinstance(class_list_dict, dict):
411 0             class_list_ = [class_list_dict]
412 1         elif isinstance(class_list_dict, list):
413 1             class_list_ = class_list_dict
414         else:
415 0             raise TypeError("param class_list_dict must be a list or a dictionary")
416 1         for classification in class_list_:
417 1             id = classification.pop("id")
418 1             name = classification.pop("name")
419 1             description = classification.pop("description")
420 1             project_id = classification.pop("project_id")
421 1             tenant_id = classification.pop("tenant_id")
422 1             original_classification = copy.deepcopy(classification)
423 1             classification.clear()
424 1             classification["ctype"] = "legacy_flow_classifier"
425 1             classification["definition"] = original_classification
426 1             classification["id"] = id
427 1             classification["name"] = name
428 1             classification["description"] = description
429 1             classification["project_id"] = project_id
430 1             classification["tenant_id"] = tenant_id
431
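    # Before/after sketch for the transformation above (field values invented
    # for illustration). A Neutron Flow Classifier such as
    #
    #     {"id": "fc1", "name": "http", "description": "", "project_id": "p1",
    #      "tenant_id": "p1", "protocol": "tcp"}
    #
    # becomes the OSM Classification
    #
    #     {"ctype": "legacy_flow_classifier", "definition": {"protocol": "tcp"},
    #      "id": "fc1", "name": "http", "description": "", "project_id": "p1",
    #      "tenant_id": "p1"}
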
432 1     def __sfi_os2mano(self, sfi_list_dict):
433         """Transform the openstack format (Port Pair) to mano format (SFI)
434         sfi_list_dict can be a list of dict or a single dict
435         """
436 1         if isinstance(sfi_list_dict, dict):
437 0             sfi_list_ = [sfi_list_dict]
438 1         elif isinstance(sfi_list_dict, list):
439 1             sfi_list_ = sfi_list_dict
440         else:
441 0             raise TypeError("param sfi_list_dict must be a list or a dictionary")
442
443 1         for sfi in sfi_list_:
444 1             sfi["ingress_ports"] = []
445 1             sfi["egress_ports"] = []
446
447 1             if sfi.get("ingress"):
448 1                 sfi["ingress_ports"].append(sfi["ingress"])
449
450 1             if sfi.get("egress"):
451 1                 sfi["egress_ports"].append(sfi["egress"])
452
453 1             del sfi["ingress"]
454 1             del sfi["egress"]
455 1             params = sfi.get("service_function_parameters")
456 1             sfc_encap = False
457
458 1             if params:
459 1                 correlation = params.get("correlation")
460
461 1                 if correlation:
462 1                     sfc_encap = True
463
464 1             sfi["sfc_encap"] = sfc_encap
465 1             del sfi["service_function_parameters"]
466
467 1     def __sf_os2mano(self, sf_list_dict):
468         """Transform the openstack format (Port Pair Group) to mano format (SF)
469         sf_list_dict can be a list of dict or a single dict
470         """
471 1         if isinstance(sf_list_dict, dict):
472 0             sf_list_ = [sf_list_dict]
473 1         elif isinstance(sf_list_dict, list):
474 1             sf_list_ = sf_list_dict
475         else:
476 0             raise TypeError("param sf_list_dict must be a list or a dictionary")
477
478 1         for sf in sf_list_:
479 1             del sf["port_pair_group_parameters"]
480 1             sf["sfis"] = sf["port_pairs"]
481 1             del sf["port_pairs"]
482
483 1     def __sfp_os2mano(self, sfp_list_dict):
484         """Transform the openstack format (Port Chain) to mano format (SFP)
485         sfp_list_dict can be a list of dict or a single dict
486         """
487 1         if isinstance(sfp_list_dict, dict):
488 0             sfp_list_ = [sfp_list_dict]
489 1         elif isinstance(sfp_list_dict, list):
490 1             sfp_list_ = sfp_list_dict
491         else:
492 0             raise TypeError("param sfp_list_dict must be a list or a dictionary")
493
494 1         for sfp in sfp_list_:
495 1             params = sfp.pop("chain_parameters")
496 1             sfc_encap = False
497
498 1             if params:
499 1                 correlation = params.get("correlation")
500
501 1                 if correlation:
502 1                     sfc_encap = True
503
504 1             sfp["sfc_encap"] = sfc_encap
505 1             sfp["spi"] = sfp.pop("chain_id")
506 1             sfp["classifications"] = sfp.pop("flow_classifiers")
507 1             sfp["service_functions"] = sfp.pop("port_pair_groups")
508
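    # Rename sketch for __sfp_os2mano (values invented): a Neutron Port Chain
    #
    #     {"chain_id": 17, "chain_parameters": {"correlation": "nsh"},
    #      "flow_classifiers": ["fc1"], "port_pair_groups": ["ppg1"]}
    #
    # becomes the OSM SFP
    #
    #     {"sfc_encap": True, "spi": 17, "classifications": ["fc1"],
    #      "service_functions": ["ppg1"]}
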
509     # placeholder for now; read TODO note below
510 1     def _validate_classification(self, type, definition):
511         # only legacy_flow_classifier Type is supported at this point
512 1         return True
513         # TODO(igordcard): this method should be an abstract method of an
514         # abstract Classification class to be implemented by the specific
515         # Types. Also, abstract vimconnector should call the validation
516         # method before the implemented VIM connectors are called.
517
518 1     def _format_exception(self, exception):
519         """Transform a keystone, nova or neutron exception into a vimconn exception, discovering the cause"""
520 0         message_error = str(exception)
521 0         tip = ""
522
523 0         if isinstance(
524             exception,
525             (
526                 neExceptions.NetworkNotFoundClient,
527                 nvExceptions.NotFound,
528                 ksExceptions.NotFound,
529                 gl1Exceptions.HTTPNotFound,
530             ),
531         ):
532 0             raise vimconn.VimConnNotFoundException(
533                 type(exception).__name__ + ": " + message_error
534             )
535 0         elif isinstance(
536             exception,
537             (
538                 HTTPException,
539                 gl1Exceptions.HTTPException,
540                 gl1Exceptions.CommunicationError,
541                 ConnectionError,
542                 ksExceptions.ConnectionError,
543                 neExceptions.ConnectionFailed,
544             ),
545         ):
546 0             if type(exception).__name__ == "SSLError":
547 0                 tip = " (maybe option 'insecure' must be added to the VIM)"
548
549 0             raise vimconn.VimConnConnectionException(
550                 "Invalid URL or credentials{}: {}".format(tip, message_error)
551             )
552 0         elif isinstance(
553             exception,
554             (
555                 KeyError,
556                 nvExceptions.BadRequest,
557                 ksExceptions.BadRequest,
558             ),
559         ):
560 0             raise vimconn.VimConnException(
561                 type(exception).__name__ + ": " + message_error
562             )
563 0         elif isinstance(
564             exception,
565             (
566                 nvExceptions.ClientException,
567                 ksExceptions.ClientException,
568                 neExceptions.NeutronException,
569             ),
570         ):
571 0             raise vimconn.VimConnUnexpectedResponse(
572                 type(exception).__name__ + ": " + message_error
573             )
574 0         elif isinstance(exception, nvExceptions.Conflict):
575 0             raise vimconn.VimConnConflictException(
576                 type(exception).__name__ + ": " + message_error
577             )
578 0         elif isinstance(exception, vimconn.VimConnException):
579 0             raise exception
580         else:  # ()
581 0             self.logger.error("General Exception " + message_error, exc_info=True)
582
583 0             raise vimconn.VimConnConnectionException(
584                 type(exception).__name__ + ": " + message_error
585             )
586
587 1     def _get_ids_from_name(self):
588         """
589         Obtain ids from the names of the tenant and security_groups. Stored at self.security_groups_id
590         :return: None
591         """
592         # get tenant_id if only tenant_name is supplied
593 0         self._reload_connection()
594
595 0         if not self.my_tenant_id:
596 0             raise vimconn.VimConnConnectionException(
597                 "Error getting tenant information from name={} id={}".format(
598                     self.tenant_name, self.tenant_id
599                 )
600             )
601
602 0         if self.config.get("security_groups") and not self.security_groups_id:
603             # convert from name to id
604 0             neutron_sg_list = self.neutron.list_security_groups(
605                 tenant_id=self.my_tenant_id
606             )["security_groups"]
607
608 0             self.security_groups_id = []
609 0             for sg in self.config.get("security_groups"):
610 0                 for neutron_sg in neutron_sg_list:
611 0                     if sg in (neutron_sg["id"], neutron_sg["name"]):
612 0                         self.security_groups_id.append(neutron_sg["id"])
613 0                         break
614                 else:
615 0                     self.security_groups_id = None
616
617 0                     raise vimconn.VimConnConnectionException(
618                         "Not found security group {} for this tenant".format(sg)
619                     )
620
621 1     def check_vim_connectivity(self):
622         # just get network list to check connectivity and credentials
623 0         self.get_network_list(filter_dict={})
624
625 1     def get_tenant_list(self, filter_dict={}):
626         """Obtain tenants of VIM
627         filter_dict can contain the following keys:
628             name: filter by tenant name
629             id: filter by tenant uuid/id
630             <other VIM specific>
631         Returns the tenant list of dictionaries: [{'name': '<name>', 'id': '<id>', ...}, ...]
632         """
633 0         self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
634
635 0         try:
636 0             self._reload_connection()
637
638 0             if self.api_version3:
639 0                 project_class_list = self.keystone.projects.list(
640                     name=filter_dict.get("name")
641                 )
642             else:
643 0                 project_class_list = self.keystone.tenants.findall(**filter_dict)
644
645 0             project_list = []
646
647 0             for project in project_class_list:
648 0                 if filter_dict.get("id") and filter_dict["id"] != project.id:
649 0                     continue
650
651 0                 project_list.append(project.to_dict())
652
653 0             return project_list
654 0         except (
655             ksExceptions.ConnectionError,
656             ksExceptions.ClientException,
657             ConnectionError,
658         ) as e:
659 0             self._format_exception(e)
660
661 1     def new_tenant(self, tenant_name, tenant_description):
662         """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
663 0         self.logger.debug("Adding a new tenant name: %s", tenant_name)
664
665 0         try:
666 0             self._reload_connection()
667
668 0             if self.api_version3:
669 0                 project = self.keystone.projects.create(
670                     tenant_name,
671                     self.config.get("project_domain_id", "default"),
672                     description=tenant_description,
673                     is_domain=False,
674                 )
675             else:
676 0                 project = self.keystone.tenants.create(tenant_name, tenant_description)
677
678 0             return project.id
679 0         except (
680             ksExceptions.ConnectionError,
681             ksExceptions.ClientException,
682             ksExceptions.BadRequest,
683             ConnectionError,
684         ) as e:
685 0             self._format_exception(e)
686
687 1     def delete_tenant(self, tenant_id):
688         """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
689 0         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
690
691 0         try:
692 0             self._reload_connection()
693
694 0             if self.api_version3:
695 0                 self.keystone.projects.delete(tenant_id)
696             else:
697 0                 self.keystone.tenants.delete(tenant_id)
698
699 0             return tenant_id
700 0         except (
701             ksExceptions.ConnectionError,
702             ksExceptions.ClientException,
703             ksExceptions.NotFound,
704             ConnectionError,
705         ) as e:
706 0             self._format_exception(e)
707
708 1     def new_network(
709         self,
710         net_name,
711         net_type,
712         ip_profile=None,
713         shared=False,
714         provider_network_profile=None,
715     ):
716         """Adds a tenant network to VIM
717         Params:
718             'net_name': name of the network
719             'net_type': one of:
720                 'bridge': overlay isolated network
721                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
722                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
723             'ip_profile': is a dict containing the IP parameters of the network
724                 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
725                 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
726                 'gateway_address': (Optional) ip_schema, that is X.X.X.X
727                 'dns_address': (Optional) semicolon separated list of ip_schema, e.g. X.X.X.X[;X.X.X.X]
728                 'dhcp_enabled': True or False
729                 'dhcp_start_address': ip_schema, first IP to grant
730                 'dhcp_count': number of IPs to grant.
731             'shared': whether this network can be seen/used by other tenants/organizations
732             'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
733                                                              physical-network: physnet-label}
734         Returns a tuple with the network identifier and created_items, or raises an exception on error
735             created_items can be None or a dictionary where this method can include key-values that will be passed to
736             the method delete_network. Can be used to store created segments, created l2gw connections, etc.
737             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
738             as not present.
739         """
740 0         self.logger.debug(
741             "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
742         )
743         # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
744
745 0         try:
746 0             vlan = None
747
748 0             if provider_network_profile:
749 0                 vlan = provider_network_profile.get("segmentation-id")
750
751 0             new_net = None
752 0             created_items = {}
753 0             self._reload_connection()
754 0             network_dict = {"name": net_name, "admin_state_up": True}
755
756 0             if net_type in ("data", "ptp"):
757 0                 provider_physical_network = None
758
759 0                 if provider_network_profile and provider_network_profile.get(
760                     "physical-network"
761                 ):
762 0                     provider_physical_network = provider_network_profile.get(
763                         "physical-network"
764                     )
765
766                     # provider-network must be one of the dataplane_physical_net values if this is a list. If it is a string
767                     # or not declared, just skip the check
768 0                     if (
769                         isinstance(
770                             self.config.get("dataplane_physical_net"), (tuple, list)
771                         )
772                         and provider_physical_network
773                         not in self.config["dataplane_physical_net"]
774                     ):
775 0                         raise vimconn.VimConnConflictException(
776                             "Invalid parameter 'provider-network:physical-network' "
777                             "for network creation. '{}' is not one of the declared "
778                             "list at VIM_config:dataplane_physical_net".format(
779                                 provider_physical_network
780                             )
781                         )
782
783                 # use the default dataplane_physical_net
784 0                 if not provider_physical_network:
785 0                     provider_physical_network = self.config.get(
786                         "dataplane_physical_net"
787                     )
788
789                     # if it is a non-empty list, use the first value. If it is a string, use the value directly
790 0                     if (
791                         isinstance(provider_physical_network, (tuple, list))
792                         and provider_physical_network
793                     ):
794 0                         provider_physical_network = provider_physical_network[0]
795
796 0                 if not provider_physical_network:
797 0                     raise vimconn.VimConnConflictException(
798                         "missing information needed for underlay networks. Provide "
799                         "'dataplane_physical_net' configuration at VIM or use the NS "
800                         "instantiation parameter 'provider-network.physical-network'"
801                         " for the VLD"
802                     )
803
804 0                 if not self.config.get("multisegment_support"):
805 0                     network_dict[
806                         "provider:physical_network"
807                     ] = provider_physical_network
808
809 0                     if (
810                         provider_network_profile
811                         and "network-type" in provider_network_profile
812                     ):
813 0                         network_dict[
814                             "provider:network_type"
815                         ] = provider_network_profile["network-type"]
816                     else:
817 0                         network_dict["provider:network_type"] = self.config.get(
818                             "dataplane_network_type", "vlan"
819                         )
820
821 0                     if vlan:
822 0                         network_dict["provider:segmentation_id"] = vlan
823                 else:
824                     # Multi-segment case
825 0                     segment_list = []
826 0                     segment1_dict = {
827                         "provider:physical_network": "",
828                         "provider:network_type": "vxlan",
829                     }
830 0                     segment_list.append(segment1_dict)
831 0                     segment2_dict = {
832                         "provider:physical_network": provider_physical_network,
833                         "provider:network_type": "vlan",
834                     }
835
836 0                     if vlan:
837 0                         segment2_dict["provider:segmentation_id"] = vlan
838 0                     elif self.config.get("multisegment_vlan_range"):
839 0                         vlanID = self._generate_multisegment_vlanID()
840 0                         segment2_dict["provider:segmentation_id"] = vlanID
841
842                     # else
843                     #     raise vimconn.VimConnConflictException(
844                     #         "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
845                     #         network")
846 0                     segment_list.append(segment2_dict)
847 0                     network_dict["segments"] = segment_list
848
849                 # VIO Specific Changes. It needs a concrete VLAN
850 0                 if self.vim_type == "VIO" and vlan is None:
851 0                     if self.config.get("dataplane_net_vlan_range") is None:
852 0                         raise vimconn.VimConnConflictException(
853                             "You must provide 'dataplane_net_vlan_range' in format "
854                             "[start_ID - end_ID] at VIM_config for creating underlay "
855                             "networks"
856                         )
857
858 0                     network_dict["provider:segmentation_id"] = self._generate_vlanID()
859
860 0             network_dict["shared"] = shared
861
862 0             if self.config.get("disable_network_port_security"):
863 0                 network_dict["port_security_enabled"] = False
864
865 0             if self.config.get("neutron_availability_zone_hints"):
866 0                 hints = self.config.get("neutron_availability_zone_hints")
867
868 0                 if isinstance(hints, str):
869 0                     hints = [hints]
870
871 0                 network_dict["availability_zone_hints"] = hints
872
873 0             new_net = self.neutron.create_network({"network": network_dict})
874             # print new_net
875             # create subnetwork, even if there is no profile
876
877 0             if not ip_profile:
878 0                 ip_profile = {}
879
880 0             if not ip_profile.get("subnet_address"):
881                 # Fake subnet is required
882 0                 subnet_rand = random.randint(0, 255)
883 0                 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
884
885 0             if "ip_version" not in ip_profile:
886 0                 ip_profile["ip_version"] = "IPv4"
887
888 0             subnet = {
889                 "name": net_name + "-subnet",
890                 "network_id": new_net["network"]["id"],
891                 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
892                 "cidr": ip_profile["subnet_address"],
893             }
894
895             # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
896 0             if ip_profile.get("gateway_address"):
897 0                 subnet["gateway_ip"] = ip_profile["gateway_address"]
898             else:
899 0                 subnet["gateway_ip"] = None
900
901 0             if ip_profile.get("dns_address"):
902 0                 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
903
904 0             if "dhcp_enabled" in ip_profile:
905 0                 subnet["enable_dhcp"] = (
906                     False
907                     if ip_profile["dhcp_enabled"] == "false"
908                     or ip_profile["dhcp_enabled"] is False
909                     else True
910                 )
911
912 0             if ip_profile.get("dhcp_start_address"):
913 0                 subnet["allocation_pools"] = []
914 0                 subnet["allocation_pools"].append(dict())
915 0                 subnet["allocation_pools"][0]["start"] = ip_profile[
916                     "dhcp_start_address"
917                 ]
918
919 0             if ip_profile.get("dhcp_count"):
920                 # parts = ip_profile["dhcp_start_address"].split(".")
921                 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
922 0                 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
923 0                 ip_int += ip_profile["dhcp_count"] - 1
924 0                 ip_str = str(netaddr.IPAddress(ip_int))
925 0                 subnet["allocation_pools"][0]["end"] = ip_str
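                # worked example (illustrative values): dhcp_start_address
                # "10.0.0.10" with dhcp_count 100 gives ip_int + 99, so the
                # allocation pool ends at "10.0.0.109"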
926
927             # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
928 0             self.neutron.create_subnet({"subnet": subnet})
929
930 0             if net_type == "data" and self.config.get("multisegment_support"):
931 0                 if self.config.get("l2gw_support"):
932 0                     l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
933 0                     for l2gw in l2gw_list:
934 0                         l2gw_conn = {
935                             "l2_gateway_id": l2gw["id"],
936                             "network_id": new_net["network"]["id"],
937                             "segmentation_id": str(vlanID),
938                         }
939 0                         new_l2gw_conn = self.neutron.create_l2_gateway_connection(
940                             {"l2_gateway_connection": l2gw_conn}
941                         )
942 0                         created_items[
943                             "l2gwconn:"
944                             + str(new_l2gw_conn["l2_gateway_connection"]["id"])
945                         ] = True
946
947 0             return new_net["network"]["id"], created_items
948 0         except Exception as e:
949             # delete l2gw connections (if any) before deleting the network
950 0             for k, v in created_items.items():
951 0                 if not v:  # skip already deleted
952 0                     continue
953
954 0                 try:
955 0                     k_item, _, k_id = k.partition(":")
956
957 0                     if k_item == "l2gwconn":
958 0                         self.neutron.delete_l2_gateway_connection(k_id)
959 0                 except Exception as e2:
960 0                     self.logger.error(
961                         "Error deleting l2 gateway connection: {}: {}".format(
962                             type(e2).__name__, e2
963                         )
964                     )
965
966 0             if new_net:
967 0                 self.neutron.delete_network(new_net["network"]["id"])
968
969 0             self._format_exception(e)
970
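    # Call sketch for new_network (illustration only; addresses are invented,
    # and dns_address entries are ";"-separated, as parsed above):
    #
    #     net_id, created_items = vim.new_network(
    #         "mgmt", "bridge",
    #         ip_profile={
    #             "ip_version": "IPv4",
    #             "subnet_address": "10.0.0.0/24",
    #             "gateway_address": "10.0.0.1",
    #             "dns_address": "8.8.8.8;8.8.4.4",
    #             "dhcp_enabled": True,
    #             "dhcp_start_address": "10.0.0.10",
    #             "dhcp_count": 100,
    #         },
    #     )
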
971 1     def get_network_list(self, filter_dict={}):
972         """Obtain tenant networks of VIM
973         Filter_dict can be:
974             name: network name
975             id: network uuid
976             shared: boolean
977             tenant_id: tenant
978             admin_state_up: boolean
979             status: 'ACTIVE'
980         Returns the network list of dictionaries
981         """
982 0         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
983
984 0         try:
985 0             self._reload_connection()
986 0             filter_dict_os = filter_dict.copy()
987
988 0             if self.api_version3 and "tenant_id" in filter_dict_os:
989                 # TODO check
990 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
991
992 0             net_dict = self.neutron.list_networks(**filter_dict_os)
993 0             net_list = net_dict["networks"]
994 0             self.__net_os2mano(net_list)
995
996 0             return net_list
997 0         except (
998             neExceptions.ConnectionFailed,
999             ksExceptions.ClientException,
1000             neExceptions.NeutronException,
1001             ConnectionError,
1002         ) as e:
1003 0             self._format_exception(e)
1004
1005 1     def get_network(self, net_id):
1006         """Obtain details of network from VIM
1007         Returns the network information from a network id"""
1008 0         self.logger.debug(" Getting tenant network %s from VIM", net_id)
1009 0         filter_dict = {"id": net_id}
1010 0         net_list = self.get_network_list(filter_dict)
1011
1012 0         if len(net_list) == 0:
1013 0             raise vimconn.VimConnNotFoundException(
1014                 "Network '{}' not found".format(net_id)
1015             )
1016 0         elif len(net_list) > 1:
1017 0             raise vimconn.VimConnConflictException(
1018                 "Found more than one network with this criteria"
1019             )
1020
1021 0         net = net_list[0]
1022 0         subnets = []
1023 0         for subnet_id in net.get("subnets", ()):
1024 0             try:
1025 0                 subnet = self.neutron.show_subnet(subnet_id)
1026 0             except Exception as e:
1027 0                 self.logger.error(
1028                     "osconnector.get_network(): Error getting subnet %s %s"
1029                     % (net_id, str(e))
1030                 )
1031 0                 subnet = {"id": subnet_id, "fault": str(e)}
1032
1033 0             subnets.append(subnet)
1034
1035 0         net["subnets"] = subnets
1036 0         net["encapsulation"] = net.get("provider:network_type")
1037 0         net["encapsulation_type"] = net.get("provider:network_type")
1038 0         net["segmentation_id"] = net.get("provider:segmentation_id")
1039 0         net["encapsulation_id"] = net.get("provider:segmentation_id")
1040
1041 0         return net
1042
1043 1     def delete_network(self, net_id, created_items=None):
1044         """
1045         Removes a tenant network from VIM and its associated elements
1046         :param net_id: VIM identifier of the network, provided by method new_network
1047         :param created_items: dictionary with extra items to be deleted. provided by method new_network
1048         Returns the network identifier or raises an exception upon error or when network is not found
1049         """
1050 0         self.logger.debug("Deleting network '%s' from VIM", net_id)
1051
1052 0         if created_items is None:
1053 0             created_items = {}
1054
1055 0         try:
1056 0             self._reload_connection()
1057             # delete l2gw connections (if any) before deleting the network
1058 0             for k, v in created_items.items():
1059 0                 if not v:  # skip already deleted
1060 0                     continue
1061
1062 0                 try:
1063 0                     k_item, _, k_id = k.partition(":")
1064 0                     if k_item == "l2gwconn":
1065 0                         self.neutron.delete_l2_gateway_connection(k_id)
1066 0                 except Exception as e:
1067 0                     self.logger.error(
1068                         "Error deleting l2 gateway connection: {}: {}".format(
1069                             type(e).__name__, e
1070                         )
1071                     )
1072
1073             # delete VM ports attached to this network before deleting the network itself
1074 0             ports = self.neutron.list_ports(network_id=net_id)
1075 0             for p in ports["ports"]:
1076 0                 try:
1077 0                     self.neutron.delete_port(p["id"])
1078 0                 except Exception as e:
1079 0                     self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1080
1081 0             self.neutron.delete_network(net_id)
1082
1083 0             return net_id
1084 0         except (
1085             neExceptions.ConnectionFailed,
1086             neExceptions.NetworkNotFoundClient,
1087             neExceptions.NeutronException,
1088             ksExceptions.ClientException,
1089             neExceptions.NeutronException,
1090             ConnectionError,
1091         ) as e:
1092 0             self._format_exception(e)
1093
1094 1     def refresh_nets_status(self, net_list):
1095         """Get the status of the networks
1096         Params: the list of network identifiers
1097         Returns a dictionary with:
1098             net_id:         #VIM id of this network
1099                 status:     #Mandatory. Text with one of:
1100                             #  DELETED (not found at vim)
1101                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1102                             #  OTHER (Vim reported other status not understood)
1103                             #  ERROR (VIM indicates an ERROR status)
1104                             #  ACTIVE, INACTIVE, DOWN (admin down),
1105                             #  BUILD (on building process)
1106                             #
1107                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
1108                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
1109         """
1110 0         net_dict = {}
1111
1112 0         for net_id in net_list:
1113 0             net = {}
1114
1115 0             try:
1116 0                 net_vim = self.get_network(net_id)
1117
1118 0                 if net_vim["status"] in netStatus2manoFormat:
1119 0                     net["status"] = netStatus2manoFormat[net_vim["status"]]
1120                 else:
1121 0                     net["status"] = "OTHER"
1122 0                     net["error_msg"] = "VIM status reported " + net_vim["status"]
1123
1124 0                 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1125 0                     net["status"] = "DOWN"
1126
1127 0                 net["vim_info"] = self.serialize(net_vim)
1128
1129 0                 if net_vim.get("fault"):  # TODO
1130 0                     net["error_msg"] = str(net_vim["fault"])
1131 0             except vimconn.VimConnNotFoundException as e:
1132 0                 self.logger.error("Exception getting net status: %s", str(e))
1133 0                 net["status"] = "DELETED"
1134 0                 net["error_msg"] = str(e)
1135 0             except vimconn.VimConnException as e:
1136 0                 self.logger.error("Exception getting net status: %s", str(e))
1137 0                 net["status"] = "VIM_ERROR"
1138 0                 net["error_msg"] = str(e)
1139 0             net_dict[net_id] = net
1140 0         return net_dict
1141
1142 1     def get_flavor(self, flavor_id):
1143         """Obtain flavor details from the VIM. Returns the flavor dict details"""
1144 0         self.logger.debug("Getting flavor '%s'", flavor_id)
1145
1146 0         try:
1147 0             self._reload_connection()
1148 0             flavor = self.nova.flavors.find(id=flavor_id)
1149             # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1150
1151 0             return flavor.to_dict()
1152 0         except (
1153             nvExceptions.NotFound,
1154             nvExceptions.ClientException,
1155             ksExceptions.ClientException,
1156             ConnectionError,
1157         ) as e:
1158 0             self._format_exception(e)
1159
1160 1     def get_flavor_id_from_data(self, flavor_dict):
1161         """Obtain the flavor id that matches the flavor description
1162         Returns the flavor_id or raises a VimConnNotFoundException
1163         flavor_dict: contains the required ram, vcpus, disk
1164         If 'use_existing_flavors' is set to True at config, the closest flavor that provides the same or more ram, vcpus
1165             and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a
1166             VimConnNotFoundException is raised
1167         """
1168 0         exact_match = False if self.config.get("use_existing_flavors") else True
1169
1170 0         try:
1171 0             self._reload_connection()
1172 0             flavor_candidate_id = None
1173 0             flavor_candidate_data = (10000, 10000, 10000)
1174 0             flavor_target = (
1175                 flavor_dict["ram"],
1176                 flavor_dict["vcpus"],
1177                 flavor_dict["disk"],
1178                 flavor_dict.get("ephemeral", 0),
1179                 flavor_dict.get("swap", 0),
1180             )
1181             # numa=None
1182 0             extended = flavor_dict.get("extended", {})
1183 0             if extended:
1184                 # TODO
1185 0                 raise vimconn.VimConnNotFoundException(
1186                     "Flavor with EPA still not implemented"
1187                 )
1188                 # if len(numas) > 1:
1189                 #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1190                 # numa=numas[0]
1191                 # numas = extended.get("numas")
1192 0             for flavor in self.nova.flavors.list():
1193 0                 epa = flavor.get_keys()
1194
1195 0                 if epa:
1196 0                     continue
1197                     # TODO
1198
1199 0                 flavor_data = (
1200                     flavor.ram,
1201                     flavor.vcpus,
1202                     flavor.disk,
1203                     flavor.ephemeral,
1204                     flavor.swap if isinstance(flavor.swap, int) else 0,
1205                 )
1206 0                 if flavor_data == flavor_target:
1207 0                     return flavor.id
1208 0                 elif (
1209                     not exact_match
1210                     and flavor_target < flavor_data < flavor_candidate_data
1211                 ):
1212 0                     flavor_candidate_id = flavor.id
1213 0                     flavor_candidate_data = flavor_data
1214
1215 0             if not exact_match and flavor_candidate_id:
1216 0                 return flavor_candidate_id
1217
1218 0             raise vimconn.VimConnNotFoundException(
1219                 "Cannot find any flavor matching '{}'".format(flavor_dict)
1220             )
1221 0         except (
1222             nvExceptions.NotFound,
1223             nvExceptions.ClientException,
1224             ksExceptions.ClientException,
1225             ConnectionError,
1226         ) as e:
1227 0             self._format_exception(e)
1228
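    # Call sketch (values invented): an exact match is required unless
    # config["use_existing_flavors"] is set, in which case the closest flavor
    # with the same or more resources is accepted.
    #
    #     flavor_id = vim.get_flavor_id_from_data(
    #         {"ram": 2048, "vcpus": 2, "disk": 10}
    #     )
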
1229 1     def process_resource_quota(self, quota, prefix, extra_specs):
1230         """Translate a quota dict ("limit", "reserve", "shares") into extra_specs entries.
1231         :param prefix: resource prefix for the generated "quota:..." keys, e.g. "cpu"
1232         :param extra_specs: dict updated in place with the generated keys
1233         :return: None
1234         """
1235 0         if "limit" in quota:
1236 0             extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1237
1238 0         if "reserve" in quota:
1239 0             extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1240
1241 0         if "shares" in quota:
1242 0             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1243 0             extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1244
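    # Effect sketch for process_resource_quota (quota values invented):
    #
    #     extra_specs = {}
    #     vim.process_resource_quota({"limit": 1000, "shares": 50}, "cpu", extra_specs)
    #     # extra_specs == {"quota:cpu_limit": 1000,
    #     #                 "quota:cpu_shares_level": "custom",
    #     #                 "quota:cpu_shares_share": 50}
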
1245 1     def new_flavor(self, flavor_data, change_name_if_used=True):
1246         """Adds a tenant flavor to openstack VIM
1247         if change_name_if_used is True, it will change the name in case of conflict, because name repetition is not
1248          supported
1249         Returns the flavor identifier
1250         """
1251 0         self.logger.debug("Adding flavor '%s'", str(flavor_data))
1252 0         retry = 0
1253 0         max_retries = 3
1254 0         name_suffix = 0
1255
1256 0         try:
1257 0             name = flavor_data["name"]
1258 0             while retry < max_retries:
1259 0                 retry += 1
1260 0                 try:
1261 0                     self._reload_connection()
1262
1263 0                     if change_name_if_used:
1264                         # get used names
1265 0                         fl_names = []
1266 0                         fl = self.nova.flavors.list()
1267
1268 0                         for f in fl:
1269 0                             fl_names.append(f.name)
1270
1271 0                         while name in fl_names:
1272 0                             name_suffix += 1
1273 0                             name = flavor_data["name"] + "-" + str(name_suffix)
1274
1275 0                     ram = flavor_data.get("ram", 64)
1276 0                     vcpus = flavor_data.get("vcpus", 1)
1277 0                     extra_specs = {}
1278
1279 0                     extended = flavor_data.get("extended")
1280 0                     if extended:
1281 0                         numas = extended.get("numas")
1282
1283 0                         if numas:
1284 0                             numa_nodes = len(numas)
1285
1286 0                             if numa_nodes > 1:
1287 0                                 return -1, "Cannot add flavor with more than one numa"
1288
1289 0                             extra_specs["hw:numa_nodes"] = str(numa_nodes)
1290 0                             extra_specs["hw:mem_page_size"] = "large"
1291 0                             extra_specs["hw:cpu_policy"] = "dedicated"
1292 0                             extra_specs["hw:numa_mempolicy"] = "strict"
1293
1294 0                             if self.vim_type == "VIO":
1295 0                                 extra_specs[
1296                                     "vmware:extra_config"
1297                                 ] = '{"numa.nodeAffinity":"0"}'
1298 0                                 extra_specs["vmware:latency_sensitivity_level"] = "high"
1299
1300 0                             for numa in numas:
1301                                 # overwrite ram and vcpus
1302                                 # check if key "memory" is present in numa else use ram value at flavor
1303 0                                 if "memory" in numa:
1304 0                                     ram = numa["memory"] * 1024
1305                                 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
1306                                 # implemented/virt-driver-cpu-thread-pinning.html
1307 0                                 extra_specs["hw:cpu_sockets"] = 1
1308
1309 0                                 if "paired-threads" in numa:
1310 0                                     vcpus = numa["paired-threads"] * 2
1311                                     # cpu_thread_policy "require" implies that the compute node must have an
1312                     # SMT architecture
1313 0                                     extra_specs["hw:cpu_thread_policy"] = "require"
1314 0                                     extra_specs["hw:cpu_policy"] = "dedicated"
1315 0                                 elif "cores" in numa:
1316 0                                     vcpus = numa["cores"]
1317                     # cpu_thread_policy "isolate" implies that the host must not have an SMT
1318                     # architecture, or a non-SMT architecture will be emulated
1319 0                                     extra_specs["hw:cpu_thread_policy"] = "isolate"
1320 0                                     extra_specs["hw:cpu_policy"] = "dedicated"
1321 0                                 elif "threads" in numa:
1322 0                                     vcpus = numa["threads"]
1323                                     # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
1324                                     # architecture
1325 0                                     extra_specs["hw:cpu_thread_policy"] = "prefer"
1326 0                                     extra_specs["hw:cpu_policy"] = "dedicated"
1327                                 # for interface in numa.get("interfaces",() ):
1328                                 #     if interface["dedicated"]=="yes":
1329                                 #         raise vimconn.VimConnException("Passthrough interfaces are not supported
1330                                 #         for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
1331                                 #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
1332                                 #      when a way to connect it is available
1333 0                         elif extended.get("cpu-quota"):
1334 0                             self.process_resource_quota(
1335                                 extended.get("cpu-quota"), "cpu", extra_specs
1336                             )
1337
1338 0                         if extended.get("mem-quota"):
1339 0                             self.process_resource_quota(
1340                                 extended.get("mem-quota"), "memory", extra_specs
1341                             )
1342
1343 0                         if extended.get("vif-quota"):
1344 0                             self.process_resource_quota(
1345                                 extended.get("vif-quota"), "vif", extra_specs
1346                             )
1347
1348 0                         if extended.get("disk-io-quota"):
1349 0                             self.process_resource_quota(
1350                                 extended.get("disk-io-quota"), "disk_io", extra_specs
1351                             )
1352
1353                         # Set the mempage size as specified in the descriptor
1354 0                         if extended.get("mempage-size"):
1355 0                             if extended.get("mempage-size") == "LARGE":
1356 0                                 extra_specs["hw:mem_page_size"] = "large"
1357 0                             elif extended.get("mempage-size") == "SMALL":
1358 0                                 extra_specs["hw:mem_page_size"] = "small"
1359 0                             elif extended.get("mempage-size") == "SIZE_2MB":
1360 0                                 extra_specs["hw:mem_page_size"] = "2MB"
1361 0                             elif extended.get("mempage-size") == "SIZE_1GB":
1362 0                                 extra_specs["hw:mem_page_size"] = "1GB"
1363 0                             elif extended.get("mempage-size") == "PREFER_LARGE":
1364 0                                 extra_specs["hw:mem_page_size"] = "any"
1365                             else:
1366                                 # The validations in NBI should make it impossible to reach
1367                                 # this point. If this message is shown, check the validations
1368 0                                 self.logger.debug(
1369                                     "Invalid mempage-size %s. Will be ignored",
1370                                     extended.get("mempage-size"),
1371                                 )
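                    # Illustrative outcome of the "extended" handling above (example values):
                    # a single NUMA node with "paired-threads": 2 would leave extra_specs with,
                    # among others: {"hw:numa_nodes": "1", "hw:cpu_policy": "dedicated",
                    # "hw:cpu_thread_policy": "require", "hw:mem_page_size": "large"}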
1372                     # create flavor
1373 0                     new_flavor = self.nova.flavors.create(
1374                         name=name,
1375                         ram=ram,
1376                         vcpus=vcpus,
1377                         disk=flavor_data.get("disk", 0),
1378                         ephemeral=flavor_data.get("ephemeral", 0),
1379                         swap=flavor_data.get("swap", 0),
1380                         is_public=flavor_data.get("is_public", True),
1381                     )
1382                     # add metadata
1383 0                     if extra_specs:
1384 0                         new_flavor.set_keys(extra_specs)
1385
1386 0                     return new_flavor.id
1387 0                 except nvExceptions.Conflict as e:
1388 0                     if change_name_if_used and retry < max_retries:
1389 0                         continue
1390
1391 0                     self._format_exception(e)
1392         # except nvExceptions.BadRequest as e:
1393 0         except (
1394             ksExceptions.ClientException,
1395             nvExceptions.ClientException,
1396             ConnectionError,
1397             KeyError,
1398         ) as e:
1399 0             self._format_exception(e)
1400
1401 1     def delete_flavor(self, flavor_id):
1402         """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1403 0         try:
1404 0             self._reload_connection()
1405 0             self.nova.flavors.delete(flavor_id)
1406
1407 0             return flavor_id
1408         # except nvExceptions.BadRequest as e:
1409 0         except (
1410             nvExceptions.NotFound,
1411             ksExceptions.ClientException,
1412             nvExceptions.ClientException,
1413             ConnectionError,
1414         ) as e:
1415 0             self._format_exception(e)
1416
1417 1     def new_image(self, image_dict):
1418         """
1419         Adds a tenant image to VIM. image_dict is a dictionary with:
1420             name: name
1421             disk_format: qcow2, vhd, vmdk, raw (by default), ...
1422             location: path or URI
1423             public: "yes" or "no"
1424             metadata: metadata of the image
1425         Returns the image_id
1426         """
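        # Illustrative image_dict sketch (example values only):
        #     {"name": "ubuntu20.04", "disk_format": "qcow2",
        #      "location": "/var/images/ubuntu20.04.qcow2", "metadata": {}}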
1427 0         retry = 0
1428 0         max_retries = 3
1429
1430 0         while retry < max_retries:
1431 0             retry += 1
1432 0             try:
1433 0                 self._reload_connection()
1434
1435                 # determine format  http://docs.openstack.org/developer/glance/formats.html
1436 0                 if "disk_format" in image_dict:
1437 0                     disk_format = image_dict["disk_format"]
1438                 else:  # autodiscover based on extension
1439 0                     if image_dict["location"].endswith(".qcow2"):
1440 0                         disk_format = "qcow2"
1441 0                     elif image_dict["location"].endswith(".vhd"):
1442 0                         disk_format = "vhd"
1443 0                     elif image_dict["location"].endswith(".vmdk"):
1444 0                         disk_format = "vmdk"
1445 0                     elif image_dict["location"].endswith(".vdi"):
1446 0                         disk_format = "vdi"
1447 0                     elif image_dict["location"].endswith(".iso"):
1448 0                         disk_format = "iso"
1449 0                     elif image_dict["location"].endswith(".aki"):
1450 0                         disk_format = "aki"
1451 0                     elif image_dict["location"].endswith(".ari"):
1452 0                         disk_format = "ari"
1453 0                     elif image_dict["location"].endswith(".ami"):
1454 0                         disk_format = "ami"
1455                     else:
1456 0                         disk_format = "raw"
1457
1458 0                 self.logger.debug(
1459                     "new_image: '%s' loading from '%s'",
1460                     image_dict["name"],
1461                     image_dict["location"],
1462                 )
1463 0                 if self.vim_type == "VIO":
1464 0                     container_format = "bare"
1465 0                     if "container_format" in image_dict:
1466 0                         container_format = image_dict["container_format"]
1467
1468 0                     new_image = self.glance.images.create(
1469                         name=image_dict["name"],
1470                         container_format=container_format,
1471                         disk_format=disk_format,
1472                     )
1473                 else:
1474 0                     new_image = self.glance.images.create(name=image_dict["name"])
1475
1476 0                 if image_dict["location"].startswith("http"):
1477                     # TODO there is no method for direct download. It must be downloaded locally with requests
1478 0                     raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1479                 else:  # local path
1480 0                     with open(image_dict["location"]) as fimage:
1481 0                         self.glance.images.upload(new_image.id, fimage)
1482                         # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1483                         #  image_dict.get("public","yes")=="yes",
1484                         #    container_format="bare", data=fimage, disk_format=disk_format)
1485
1486 0                 metadata_to_load = image_dict.get("metadata") or {}  # tolerate a missing "metadata" key
1487
1488                 # TODO "location" is a reserved word for current openstack versions. Fixed for VIO;
1489                 #  please check for openstack
1490 0                 if self.vim_type == "VIO":
1491 0                     metadata_to_load["upload_location"] = image_dict["location"]
1492                 else:
1493 0                     metadata_to_load["location"] = image_dict["location"]
1494
1495 0                 self.glance.images.update(new_image.id, **metadata_to_load)
1496
1497 0                 return new_image.id
1498 0             except (
1499                 nvExceptions.Conflict,
1500                 ksExceptions.ClientException,
1501                 nvExceptions.ClientException,
1502             ) as e:
1503 0                 self._format_exception(e)
1504 0             except (
1505                 HTTPException,
1506                 gl1Exceptions.HTTPException,
1507                 gl1Exceptions.CommunicationError,
1508                 ConnectionError,
1509             ) as e:
1510 0                 if retry < max_retries:  # retry transient errors; raise once retries are exhausted
1511 0                     continue
1512
1513 0                 self._format_exception(e)
1514 0             except IOError as e:  # can not open the file
1515 0                 raise vimconn.VimConnConnectionException(
1516                     "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1517                     http_code=vimconn.HTTP_Bad_Request,
1518                 )
1519
1520 1     def delete_image(self, image_id):
1521         """Deletes a tenant image from openstack VIM. Returns the old id"""
1522 0         try:
1523 0             self._reload_connection()
1524 0             self.glance.images.delete(image_id)
1525
1526 0             return image_id
1527 0         except (
1528             nvExceptions.NotFound,
1529             ksExceptions.ClientException,
1530             nvExceptions.ClientException,
1531             gl1Exceptions.CommunicationError,
1532             gl1Exceptions.HTTPNotFound,
1533             ConnectionError,
1534         ) as e:  # TODO remove
1535 0             self._format_exception(e)
1536
1537 1     def get_image_id_from_path(self, path):
1538         """Get the image id from image path in the VIM database. Returns the image_id"""
1539 0         try:
1540 0             self._reload_connection()
1541 0             images = self.glance.images.list()
1542
1543 0             for image in images:
1544 0                 if image.metadata.get("location") == path:
1545 0                     return image.id
1546
1547 0             raise vimconn.VimConnNotFoundException(
1548                 "image with location '{}' not found".format(path)
1549             )
1550 0         except (
1551             ksExceptions.ClientException,
1552             nvExceptions.ClientException,
1553             gl1Exceptions.CommunicationError,
1554             ConnectionError,
1555         ) as e:
1556 0             self._format_exception(e)
1557
1558 1     def get_image_list(self, filter_dict={}):
1559         """Obtain tenant images from VIM
1560         Filter_dict can be:
1561             id: image id
1562             name: image name
1563             checksum: image checksum
1564         Returns the image list of dictionaries:
1565             [{<the fields at Filter_dict plus some VIM specific>}, ...]
1566             List can be empty
1567         """
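        # Illustrative call (example values):
        #     self.get_image_list({"name": "ubuntu20.04"})
        #     -> [{"name": "ubuntu20.04", "id": "<image-uuid>", "checksum": "<md5>", ...}]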
1568 0         self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1569
1570 0         try:
1571 0             self._reload_connection()
1572             # filter_dict_os = filter_dict.copy()
1573             # First we filter by the available filter fields: name, id. The others are removed.
1574 0             image_list = self.glance.images.list()
1575 0             filtered_list = []
1576
1577 0             for image in image_list:
1578 0                 try:
1579 0                     if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1580 0                         continue
1581
1582 0                     if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1583 0                         continue
1584
1585 0                     if (
1586                         filter_dict.get("checksum")
1587                         and image["checksum"] != filter_dict["checksum"]
1588                     ):
1589 0                         continue
1590
1591 0                     filtered_list.append(image.copy())
1592 0                 except gl1Exceptions.HTTPNotFound:
1593 0                     pass
1594
1595 0             return filtered_list
1596 0         except (
1597             ksExceptions.ClientException,
1598             nvExceptions.ClientException,
1599             gl1Exceptions.CommunicationError,
1600             ConnectionError,
1601         ) as e:
1602 0             self._format_exception(e)
1603
1604 1     def __wait_for_vm(self, vm_id, status):
1605         """Wait until the VM is in the desired status and return True.
1606         If the VM reaches ERROR status, return False.
1607         If the timeout is reached, raise an exception"""
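        # Typical use, as in new_vminstance below: self.__wait_for_vm(server.id, "ACTIVE")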
1608 0         elapsed_time = 0
1609 0         while elapsed_time < server_timeout:
1610 0             vm_status = self.nova.servers.get(vm_id).status
1611
1612 0             if vm_status == status:
1613 0                 return True
1614
1615 0             if vm_status == "ERROR":
1616 0                 return False
1617
1618 0             time.sleep(5)
1619 0             elapsed_time += 5
1620
1621         # if we exceeded the timeout rollback
1622 0         if elapsed_time >= server_timeout:
1623 0             raise vimconn.VimConnException(
1624                 "Timeout waiting for instance " + vm_id + " to get " + status,
1625                 http_code=vimconn.HTTP_Request_Timeout,
1626             )
1627
1628 1     def _get_openstack_availablity_zones(self):
1629         """
1630         Get the availability zones available from openstack
1631         :return:
1632         """
1633 1         try:
1634 1             openstack_availability_zone = self.nova.availability_zones.list()
1635 0             openstack_availability_zone = [
1636                 str(zone.zoneName)
1637                 for zone in openstack_availability_zone
1638                 if zone.zoneName != "internal"
1639             ]
1640
1641 0             return openstack_availability_zone
1642 1         except Exception:
1643 1             return None
1644
1645 1     def _set_availablity_zones(self):
1646         """
1647         Set the VIM availability zone
1648         :return:
1649         """
1650 1         if "availability_zone" in self.config:
1651 0             vim_availability_zones = self.config.get("availability_zone")
1652
1653 0             if isinstance(vim_availability_zones, str):
1654 0                 self.availability_zone = [vim_availability_zones]
1655 0             elif isinstance(vim_availability_zones, list):
1656 0                 self.availability_zone = vim_availability_zones
1657         else:
1658 1             self.availability_zone = self._get_openstack_availablity_zones()
1659
1660 1     def _get_vm_availability_zone(
1661         self, availability_zone_index, availability_zone_list
1662     ):
1663         """
1664         Return the availability zone to be used by the created VM.
1665         :return: The VIM availability zone to be used or None
1666         """
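        # Illustrative behaviour (example zone names): with availability_zone_list
        # ["az-a", "az-b"] and availability_zone_index 1, "az-b" is returned when all the
        # listed names exist at the VIM; otherwise the VIM zone at index 1 is returned.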
1667 0         if availability_zone_index is None:
1668 0             if not self.config.get("availability_zone"):
1669 0                 return None
1670 0             elif isinstance(self.config.get("availability_zone"), str):
1671 0                 return self.config["availability_zone"]
1672             else:
1673                 # TODO consider using a different parameter at config for default AV and AV list match
1674 0                 return self.config["availability_zone"][0]
1675
1676 0         vim_availability_zones = self.availability_zone
1677         # check if the VIM offers enough availability zones as described in the VNFD
1678 0         if vim_availability_zones and len(availability_zone_list) <= len(
1679             vim_availability_zones
1680         ):
1681             # check if all the names of NFV AV match VIM AV names
1682 0             match_by_index = False
1683 0             for av in availability_zone_list:
1684 0                 if av not in vim_availability_zones:
1685 0                     match_by_index = True
1686 0                     break
1687
1688 0             if match_by_index:
1689 0                 return vim_availability_zones[availability_zone_index]
1690             else:
1691 0                 return availability_zone_list[availability_zone_index]
1692         else:
1693 0             raise vimconn.VimConnConflictException(
1694                 "Not enough availability zones at VIM for this deployment"
1695             )
1696
1697 1     def new_vminstance(
1698         self,
1699         name,
1700         description,
1701         start,
1702         image_id,
1703         flavor_id,
1704         affinity_group_list,
1705         net_list,
1706         cloud_config=None,
1707         disk_list=None,
1708         availability_zone_index=None,
1709         availability_zone_list=None,
1710     ):
1711         """Adds a VM instance to VIM
1712         Params:
1713             start: indicates if VM must start or boot in pause mode. Ignored
1714             image_id,flavor_id: image and flavor uuid
1715             affinity_group_list: list of affinity groups, each one is a dictionary.
1716                 Ignore if empty.
1717             net_list: list of interfaces, each one is a dictionary with:
1718                 name:
1719                 net_id: network uuid to connect
1720                 vpci: virtual pci address to assign, ignored because openstack lacks it #TODO
1721                 model: interface model, ignored #TODO
1722                 mac_address: used for  SR-IOV ifaces #TODO for other types
1723                 use: 'data', 'bridge',  'mgmt'
1724                 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
1725                 vim_id: filled/added by this function
1726                 floating_ip: True/False (or it can be None)
1727                 port_security: True/False
1728             'cloud_config': (optional) dictionary with:
1729                 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1730                 'users': (optional) list of users to be inserted, each item is a dict with:
1731                     'name': (mandatory) user name,
1732                     'key-pairs': (optional) list of strings with the public key to be inserted to the user
1733                 'user-data': (optional) string is a text script to be passed directly to cloud-init
1734                 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1735                     'dest': (mandatory) string with the destination absolute path
1736                     'encoding': (optional, by default text). Can be one of:
1737                         'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1738                     'content' (mandatory): string with the content of the file
1739                     'permissions': (optional) string with file permissions, typically octal notation '0644'
1740                     'owner': (optional) file owner, string with the format 'owner:group'
1741                 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1742             'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1743                 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1744                 'size': (mandatory) string with the size of the disk in GB
1745                 'vim_id' (optional) should use this existing volume id
1746             availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
1747             availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
1748                 availability_zone_index is None
1749                 #TODO ip, security groups
1750         Returns a tuple with the instance identifier and created_items or raises an exception on error
1751             created_items can be None or a dictionary where this method can include key-values that will be passed to
1752             the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1753             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1754             as not present.
1755         """
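        # Illustrative minimal call (ids and names are example assumptions):
        #     vm_id, created_items = self.new_vminstance(
        #         name="vnf-vm", description="", start=True,
        #         image_id="<glance-uuid>", flavor_id="<nova-uuid>",
        #         affinity_group_list=[],
        #         net_list=[{"name": "eth0", "net_id": "<neutron-net-uuid>",
        #                    "type": "virtual", "use": "mgmt"}])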
1756 0         self.logger.debug(
1757             "new_vminstance input: image='%s' flavor='%s' nics='%s'",
1758             image_id,
1759             flavor_id,
1760             str(net_list),
1761         )
1762
1763 0         try:
1764 0             server = None
1765 0             created_items = {}
1766             # metadata = {}
1767 0             net_list_vim = []
1768 0             external_network = []
1769             # ^list of external networks to be connected to instance, later on used to create floating_ip
1770 0             no_secured_ports = []  # List of port ids with port-security disabled
1771 0             self._reload_connection()
1772             # metadata_vpci = {}  # For a specific neutron plugin
1773 0             block_device_mapping = None
1774
1775 0             for net in net_list:
1776 0                 if not net.get("net_id"):  # skip non connected iface
1777 0                     continue
1778
1779 0                 port_dict = {
1780                     "network_id": net["net_id"],
1781                     "name": net.get("name"),
1782                     "admin_state_up": True,
1783                 }
1784
1785 0                 if (
1786                     self.config.get("security_groups")
1787                     and net.get("port_security") is not False
1788                     and not self.config.get("no_port_security_extension")
1789                 ):
1790 0                     if not self.security_groups_id:
1791 0                         self._get_ids_from_name()
1792
1793 0                     port_dict["security_groups"] = self.security_groups_id
1794
1795 0                 if net["type"] == "virtual":
1796 0                     pass
1797                     # if "vpci" in net:
1798                     #     metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
1799 0                 elif net["type"] == "VF" or net["type"] == "SR-IOV":  # for VF
1800                     # if "vpci" in net:
1801                     #     if "VF" not in metadata_vpci:
1802                     #         metadata_vpci["VF"]=[]
1803                     #     metadata_vpci["VF"].append([ net["vpci"], "" ])
1804 0                     port_dict["binding:vnic_type"] = "direct"
1805
1806                     # VIO specific Changes
1807 0                     if self.vim_type == "VIO":
1808                         # Need to create port with port_security_enabled = False and no-security-groups
1809 0                         port_dict["port_security_enabled"] = False
1810 0                         port_dict["provider_security_groups"] = []
1811 0                         port_dict["security_groups"] = []
1812                 else:  # For PT PCI-PASSTHROUGH
1813                     # if "vpci" in net:
1814                     #     if "PF" not in metadata_vpci:
1815                     #         metadata_vpci["PF"]=[]
1816                     #     metadata_vpci["PF"].append([ net["vpci"], "" ])
1817 0                     port_dict["binding:vnic_type"] = "direct-physical"
1818
1819 0                 if not port_dict["name"]:
1820 0                     port_dict["name"] = name
1821
1822 0                 if net.get("mac_address"):
1823 0                     port_dict["mac_address"] = net["mac_address"]
1824
1825 0                 if net.get("ip_address"):
1826 0                     port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1827                     # TODO add "subnet_id": <subnet_id>
1828
1829 0                 new_port = self.neutron.create_port({"port": port_dict})
1830 0                 created_items["port:" + str(new_port["port"]["id"])] = True
1831 0                 net["mac_address"] = new_port["port"]["mac_address"]
1832 0                 net["vim_id"] = new_port["port"]["id"]
1833                 # if trying to use a network without a subnetwork, it will return an empty list
1834 0                 fixed_ips = new_port["port"].get("fixed_ips")
1835
1836 0                 if fixed_ips:
1837 0                     net["ip"] = fixed_ips[0].get("ip_address")
1838                 else:
1839 0                     net["ip"] = None
1840
1841 0                 port = {"port-id": new_port["port"]["id"]}
1842 0                 if float(self.nova.api_version.get_string()) >= 2.32:
1843 0                     port["tag"] = new_port["port"]["name"]
1844
1845 0                 net_list_vim.append(port)
1846
1847 0                 if net.get("floating_ip", False):
1848 0                     net["exit_on_floating_ip_error"] = True
1849 0                     external_network.append(net)
1850 0                 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
1851 0                     net["exit_on_floating_ip_error"] = False
1852 0                     external_network.append(net)
1853 0                     net["floating_ip"] = self.config.get("use_floating_ip")
1854
1855                 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
1856                 # is dropped.
1857                 # As a workaround we wait until the VM is active and then disable the port-security
1858 0                 if net.get("port_security") is False and not self.config.get(
1859                     "no_port_security_extension"
1860                 ):
1861 0                     no_secured_ports.append(
1862                         (
1863                             new_port["port"]["id"],
1864                             net.get("port_security_disable_strategy"),
1865                         )
1866                     )
1867
1868             # if metadata_vpci:
1869             #     metadata = {"pci_assignement": json.dumps(metadata_vpci)}
1870             #     if len(metadata["pci_assignement"]) >255:
1871             #         #limit the metadata size
1872             #         #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
1873             #         self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
1874             #         metadata = {}
1875
1876 0             self.logger.debug(
1877                 "name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
1878                 name,
1879                 image_id,
1880                 flavor_id,
1881                 str(net_list_vim),
1882                 description,
1883             )
1884
1885             # cloud config
1886 0             config_drive, userdata = self._create_user_data(cloud_config)
1887
1888             # get availability Zone
1889 0             vm_av_zone = self._get_vm_availability_zone(
1890                 availability_zone_index, availability_zone_list
1891             )
1892
1893             # Create additional volumes in case these are present in disk_list
1894 0             existing_vim_volumes = []
1895 0             base_disk_index = ord("b")
1896 0             boot_volume_id = None
1897 0             if disk_list:
1898 0                 block_device_mapping = {}
1899 0                 for disk in disk_list:
1900 0                     if "image_id" in disk:
1901                         # persistent root volume
1902 0                         base_disk_index = ord("a")
1903 0                         image_id = ""
1904                         # use existing persistent root volume
1905 0                         if disk.get("vim_volume_id"):
1906 0                             block_device_mapping["vd" + chr(base_disk_index)] = disk[
1907                                 "vim_volume_id"
1908                             ]
1909 0                             existing_vim_volumes.append({"id": disk["vim_volume_id"]})
1910                         # use existing persistent root volume
1911 0                         elif disk.get("vim_id"):
1912 0                             block_device_mapping["vd" + chr(base_disk_index)] = disk[
1913                                 "vim_id"
1914                             ]
1915 0                             existing_vim_volumes.append({"id": disk["vim_id"]})
1916                         else:
1917                             # create persistent root volume
1918 0                             volume = self.cinder.volumes.create(
1919                                 size=disk["size"],
1920                                 name=name + "vd" + chr(base_disk_index),
1921                                 imageRef=disk["image_id"],
1922                                 # Make sure volume is in the same AZ as the VM to be attached to
1923                                 availability_zone=vm_av_zone,
1924                             )
1925 0                             boot_volume_id = volume.id
1926 0                             created_items["volume:" + str(volume.id)] = True
1927 0                             block_device_mapping[
1928                                 "vd" + chr(base_disk_index)
1929                             ] = volume.id
1930                     else:
1931                         # non-root persistent volume
1932 0                         key_id = (
1933                             "vim_volume_id"
1934                             if "vim_volume_id" in disk.keys()
1935                             else "vim_id"
1936                         )
1937 0                         if disk.get(key_id):
1938                             # use existing persistent volume
1939 0                             block_device_mapping["vd" + chr(base_disk_index)] = disk[
1940                                 key_id
1941                             ]
1942 0                             existing_vim_volumes.append({"id": disk[key_id]})
1943                         else:
1944                             # create persistent volume
1945 0                             volume = self.cinder.volumes.create(
1946                                 size=disk["size"],
1947                                 name=name + "vd" + chr(base_disk_index),
1948                                 # Make sure volume is in the same AZ as the VM to be attached to
1949                                 availability_zone=vm_av_zone,
1950                             )
1951 0                             created_items["volume:" + str(volume.id)] = True
1952 0                             block_device_mapping[
1953                                 "vd" + chr(base_disk_index)
1954                             ] = volume.id
1955
1956 0                     base_disk_index += 1
1957
1958                 # Wait until created volumes are with status available
1959 0                 elapsed_time = 0
1960 0                 while elapsed_time < volume_timeout:
1961 0                     for created_item in created_items:
1962 0                         v, _, volume_id = created_item.partition(":")
1963 0                         if v == "volume":
1964 0                             if self.cinder.volumes.get(volume_id).status != "available":
1965 0                                 break
1966                     else:  # all ready: break from while
1967 0                         break
1968
1969 0                     time.sleep(5)
1970 0                     elapsed_time += 5
1971
1972                 # Wait until existing volumes in vim are with status available
1973 0                 while elapsed_time < volume_timeout:
1974 0                     for volume in existing_vim_volumes:
1975 0                         if self.cinder.volumes.get(volume["id"]).status != "available":
1976 0                             break
1977                     else:  # all ready: break from while
1978 0                         break
1979
1980 0                     time.sleep(5)
1981 0                     elapsed_time += 5
1982
1983                 # If we exceeded the timeout rollback
1984 0                 if elapsed_time >= volume_timeout:
1985 0                     raise vimconn.VimConnException(
1986                         "Timeout creating volumes for instance " + name,
1987                         http_code=vimconn.HTTP_Request_Timeout,
1988                     )
1989 0                 if boot_volume_id:
1990 0                     self.cinder.volumes.set_bootable(boot_volume_id, True)
1991
1992             # Manage affinity groups/server groups
1993 0             server_group_id = None
1994 0             scheduler_hints = {}
1995
1996 0             if affinity_group_list:
1997                 # Only first id on the list will be used. Openstack restriction
1998 0                 server_group_id = affinity_group_list[0]["affinity_group_id"]
1999 0             scheduler_hints["group"] = server_group_id
2000
2001 0             self.logger.debug(
2002                 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2003                 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2004                 "block_device_mapping={}, server_group={})".format(
2005                     name,
2006                     image_id,
2007                     flavor_id,
2008                     net_list_vim,
2009                     self.config.get("security_groups"),
2010                     vm_av_zone,
2011                     self.config.get("keypair"),
2012                     userdata,
2013                     config_drive,
2014                     block_device_mapping,
2015                     server_group_id,
2016                 )
2017             )
2018 0             server = self.nova.servers.create(
2019                 name=name,
2020                 image=image_id,
2021                 flavor=flavor_id,
2022                 nics=net_list_vim,
2023                 security_groups=self.config.get("security_groups"),
2024                 # TODO remove security_groups in future versions. Already at neutron port
2025                 availability_zone=vm_av_zone,
2026                 key_name=self.config.get("keypair"),
2027                 userdata=userdata,
2028                 config_drive=config_drive,
2029                 block_device_mapping=block_device_mapping,
2030                 scheduler_hints=scheduler_hints,
2031             )  # , description=description)
2032
2033 0             vm_start_time = time.time()
2034             # Previously mentioned workaround to wait until the VM is active and then disable the port-security
2035 0             if no_secured_ports:
2036 0                 self.__wait_for_vm(server.id, "ACTIVE")
2037
2038 0             for port in no_secured_ports:
2039 0                 port_update = {
2040                     "port": {"port_security_enabled": False, "security_groups": None}
2041                 }
2042
2043 0                 if port[1] == "allow-address-pairs":
2044 0                     port_update = {
2045                         "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2046                     }
2047
2048 0                 try:
2049 0                     self.neutron.update_port(port[0], port_update)
2050 0                 except Exception:
2051 0                     raise vimconn.VimConnException(
2052                         "It was not possible to disable port security for port {}".format(
2053                             port[0]
2054                         )
2055                     )
2056
2057             # print "DONE :-)", server
2058
2059             # pool_id = None
2060 0             for floating_network in external_network:
2061 0                 try:
2062 0                     assigned = False
2063 0                     floating_ip_retries = 3
2064                     # In case of RO in HA there can be conflicts (two ROs trying to assign the same
2065                     # floating IP), so retry several times
2066 0                     while not assigned:
2067 0                         floating_ips = self.neutron.list_floatingips().get(
2068                             "floatingips", ()
2069                         )
2070 0                         random.shuffle(floating_ips)  # randomize
2071 0                         for fip in floating_ips:
2072 0                             if (
2073                                 fip.get("port_id")
2074                                 or fip.get("tenant_id") != server.tenant_id
2075                             ):
2076 0                                 continue
2077
2078 0                             if isinstance(floating_network["floating_ip"], str):
2079 0                                 if (
2080                                     fip.get("floating_network_id")
2081                                     != floating_network["floating_ip"]
2082                                 ):
2083 0                                     continue
2084
2085 0                             free_floating_ip = fip["id"]
2086 0                             break
2087                         else:
2088 0                             if (
2089                                 isinstance(floating_network["floating_ip"], str)
2090                                 and floating_network["floating_ip"].lower() != "true"
2091                             ):
2092 0                                 pool_id = floating_network["floating_ip"]
2093                             else:
2094                                 # Find the external network
2095 0                                 external_nets = list()
2096
2097 0                                 for net in self.neutron.list_networks()["networks"]:
2098 0                                     if net["router:external"]:
2099 0                                         external_nets.append(net)
2100
2101 0                                 if len(external_nets) == 0:
2102 0                                     raise vimconn.VimConnException(
2103                                         "Cannot create floating_ip automatically since "
2104                                         "no external network is present",
2105                                         http_code=vimconn.HTTP_Conflict,
2106                                     )
2107
2108 0                                 if len(external_nets) > 1:
2109 0                                     raise vimconn.VimConnException(
2110                                         "Cannot create floating_ip automatically since "
2111                                         "multiple external networks are present",
2112                                         http_code=vimconn.HTTP_Conflict,
2113                                     )
2114
2115 0                                 pool_id = external_nets[0].get("id")
2116
2117 0                             param = {
2118                                 "floatingip": {
2119                                     "floating_network_id": pool_id,
2120                                     "tenant_id": server.tenant_id,
2121                                 }
2122                             }
2123
2124 0                             try:
2125                                 # self.logger.debug("Creating floating IP")
2126 0                                 new_floating_ip = self.neutron.create_floatingip(param)
2127 0                                 free_floating_ip = new_floating_ip["floatingip"]["id"]
2128 0                                 created_items[
2129                                     "floating_ip:" + str(free_floating_ip)
2130                                 ] = True
2131 0                             except Exception as e:
2132 0                                 raise vimconn.VimConnException(
2133                                     type(e).__name__
2134                                     + ": Cannot create new floating_ip "
2135                                     + str(e),
2136                                     http_code=vimconn.HTTP_Conflict,
2137                                 )
2138
2139 0                         try:
2140                             # for race condition ensure not already assigned
2141 0                             fip = self.neutron.show_floatingip(free_floating_ip)
2142
2143 0                             if fip["floatingip"]["port_id"]:
2144 0                                 continue
2145
2146                             # the vim_id key contains the neutron.port_id
2147 0                             self.neutron.update_floatingip(
2148                                 free_floating_ip,
2149                                 {"floatingip": {"port_id": floating_network["vim_id"]}},
2150                             )
2151                             # for race condition ensure not re-assigned to other VM after 5 seconds
2152 0                             time.sleep(5)
2153 0                             fip = self.neutron.show_floatingip(free_floating_ip)
2154
2155 0                             if (
2156                                 fip["floatingip"]["port_id"]
2157                                 != floating_network["vim_id"]
2158                             ):
2159 0                                 self.logger.error(
2160                                     "floating_ip {} re-assigned to other port".format(
2161                                         free_floating_ip
2162                                     )
2163                                 )
2164 0                                 continue
2165
2166 0                             self.logger.debug(
2167                                 "Assigned floating_ip {} to VM {}".format(
2168                                     free_floating_ip, server.id
2169                                 )
2170                             )
2171 0                             assigned = True
2172 0                         except Exception as e:
2173                             # openstack needs some time after VM creation to assign an IP, so retry if it fails
2174 0                             vm_status = self.nova.servers.get(server.id).status
2175
2176 0                             if vm_status not in ("ACTIVE", "ERROR"):
2177 0                                 if time.time() - vm_start_time < server_timeout:
2178 0                                     time.sleep(5)
2179 0                                     continue
2180 0                             elif floating_ip_retries > 0:
2181 0                                 floating_ip_retries -= 1
2182 0                                 continue
2183
2184 0                             raise vimconn.VimConnException(
2185                                 "Cannot create floating_ip: {} {}".format(
2186                                     type(e).__name__, e
2187                                 ),
2188                                 http_code=vimconn.HTTP_Conflict,
2189                             )
2190
2191 0                 except Exception as e:
2192 0                     if not floating_network["exit_on_floating_ip_error"]:
2193 0                         self.logger.error("Cannot create floating_ip. %s", str(e))
2194 0                         continue
2195
2196 0                     raise
2197
2198 0             return server.id, created_items
2199         # except nvExceptions.NotFound as e:
2200         #     error_value=-vimconn.HTTP_Not_Found
2201         #     error_text= "vm instance %s not found" % vm_id
2202         # except TypeError as e:
2203         #     raise vimconn.VimConnException(type(e).__name__ + ": "+  str(e), http_code=vimconn.HTTP_Bad_Request)
2204
2205 0         except Exception as e:
2206 0             server_id = None
2207 0             if server:
2208 0                 server_id = server.id
2209
2210 0             try:
2211 0                 self.delete_vminstance(server_id, created_items)
2212 0             except Exception as e2:
2213 0                 self.logger.error("new_vminstance rollback fail {}".format(e2))
2214
2215 0             self._format_exception(e)
2216
2217 1     def get_vminstance(self, vm_id):
2218         """Returns the VM instance information from VIM"""
2219         # self.logger.debug("Getting VM from VIM")
2220 0         try:
2221 0             self._reload_connection()
2222 0             server = self.nova.servers.find(id=vm_id)
2223             # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2224
2225 0             return server.to_dict()
2226 0         except (
2227             ksExceptions.ClientException,
2228             nvExceptions.ClientException,
2229             nvExceptions.NotFound,
2230             ConnectionError,
2231         ) as e:
2232 0             self._format_exception(e)
2233
2234 1     def get_vminstance_console(self, vm_id, console_type="novnc"):
2235         """
2236         Get a console for the virtual machine
2237         Params:
2238             vm_id: uuid of the VM
2239             console_type, can be:
2240                 "novnc" (by default), "xvpvnc" for VNC types,
2241                 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2242         Returns dict with the console parameters:
2243                 protocol: ssh, ftp, http, https, ...
2244                 server:   usually ip address
2245                 port:     the http, ssh, ... port
2246                 suffix:   extra text, e.g. the http path and query string
2247         """
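        # Illustrative parse (example URL): for "http://controller:6080/vnc_auto.html?token=abc"
        # the slicing below yields roughly:
        #     {"protocol": "http:", "server": "controller", "port": ":6080",
        #      "suffix": "vnc_auto.html?token=abc"}
        # (the ":" characters are kept by the raw index arithmetic)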
2248 0         self.logger.debug("Getting VM CONSOLE from VIM")
2249
2250 0         try:
2251 0             self._reload_connection()
2252 0             server = self.nova.servers.find(id=vm_id)
2253
2254 0             if console_type is None or console_type == "novnc":
2255 0                 console_dict = server.get_vnc_console("novnc")
2256 0             elif console_type == "xvpvnc":
2257 0                 console_dict = server.get_vnc_console(console_type)
2258 0             elif console_type == "rdp-html5":
2259 0                 console_dict = server.get_rdp_console(console_type)
2260 0             elif console_type == "spice-html5":
2261 0                 console_dict = server.get_spice_console(console_type)
2262             else:
2263 0                 raise vimconn.VimConnException(
2264                     "console type '{}' not allowed".format(console_type),
2265                     http_code=vimconn.HTTP_Bad_Request,
2266                 )
2267
2268 0             console_dict1 = console_dict.get("console")
2269
2270 0             if console_dict1:
2271 0                 console_url = console_dict1.get("url")
2272
2273 0                 if console_url:
2274                     # parse console_url
2275 0                     protocol_index = console_url.find("//")
2276 0                     suffix_index = (
2277                         console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2278                     )
2279 0                     port_index = (
2280                         console_url[protocol_index + 2 : suffix_index].find(":")
2281                         + protocol_index
2282                         + 2
2283                     )
2284
2285 0                     if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2286 0                         return (
2287                             -vimconn.HTTP_Internal_Server_Error,
2288                             "Unexpected response from VIM",
2289                         )
2290
2291 0                     console_dict = {
2292                         "protocol": console_url[0:protocol_index],
2293                         "server": console_url[protocol_index + 2 : port_index],
2294                         "port": console_url[port_index:suffix_index],
2295                         "suffix": console_url[suffix_index + 1 :],
2296                     }
2297 0                     protocol_index += 2
2298
2299 0                     return console_dict
2300 0             raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2301 0         except (
2302             nvExceptions.NotFound,
2303             ksExceptions.ClientException,
2304             nvExceptions.ClientException,
2305             nvExceptions.BadRequest,
2306             ConnectionError,
2307         ) as e:
2308 0             self._format_exception(e)
2309
2310 1     def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
2311         """Removes a VM instance from VIM. Returns the old identifier"""
2312         # print "osconnector: Getting VM from VIM"
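        # created_items, as built by new_vminstance, maps "<type>:<uuid>" keys to True, e.g.
        # (illustrative uuids): {"port:1a2b...": True, "volume:3c4d...": True,
        # "floating_ip:5e6f...": True}; volume and floating_ip entries are set to None
        # below once deleted.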
2313 0         if created_items is None:
2314 0             created_items = {}
2315
2316 0         try:
2317 0             self._reload_connection()
2318             # delete VM ports attached to this networks before the virtual machine
2319 0             for k, v in created_items.items():
2320 0                 if not v:  # skip already deleted
2321 0                     continue
2322
2323 0                 try:
2324 0                     k_item, _, k_id = k.partition(":")
2325 0                     if k_item == "port":
2326 0                         port_dict = self.neutron.list_ports()
2327 0                         existing_ports = [
2328                             port["id"] for port in port_dict["ports"]
2329                         ]
2330 0                         if k_id in existing_ports:
2331 0                             self.neutron.delete_port(k_id)
2332 0                 except Exception as e:
2333 0                     self.logger.error(
2334                         "Error deleting port: {}: {}".format(type(e).__name__, e)
2335                     )
2336
2337             # #commented because detaching the volumes makes the servers.delete not work properly ?!?
2338             # #dettach volumes attached
2339             # server = self.nova.servers.get(vm_id)
2340             # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"]   #volume["id"]
2341             # #for volume in volumes_attached_dict:
2342             # #    self.cinder.volumes.detach(volume["id"])
2343
2344 0             if vm_id:
2345 0                 self.nova.servers.delete(vm_id)
2346
2347             # delete volumes. Although detached, they should be in "available" status before
2348             # deleting; we ensure that in this loop
2349 0             keep_waiting = True
2350 0             elapsed_time = 0
2351
2352 0             while keep_waiting and elapsed_time < volume_timeout:
2353 0                 keep_waiting = False
2354
2355 0                 for k, v in created_items.items():
2356 0                     if not v:  # skip already deleted
2357 0                         continue
2358
2359 0                     try:
2360 0                         k_item, _, k_id = k.partition(":")
2361 0                         if k_item == "volume":
2362 0                             if self.cinder.volumes.get(k_id).status != "available":
2363 0                                 keep_waiting = True
2364                             else:
2365 0                                 if not volumes_to_hold or k_id not in volumes_to_hold:  # volumes_to_hold may be None
2366 0                                     self.cinder.volumes.delete(k_id)
2367 0                                     created_items[k] = None
2368 0                         elif k_item == "floating_ip":  # floating ip
2369 0                             self.neutron.delete_floatingip(k_id)
2370 0                             created_items[k] = None
2371
2372 0                     except Exception as e:
2373 0                         self.logger.error("Error deleting {}: {}".format(k, e))
2374
2375 0                 if keep_waiting:
2376 0                     time.sleep(1)
2377 0                     elapsed_time += 1
2378
2379 0             return None
2380 0         except (
2381             nvExceptions.NotFound,
2382             ksExceptions.ClientException,
2383             nvExceptions.ClientException,
2384             ConnectionError,
2385         ) as e:
2386 0             self._format_exception(e)
2387
2388 1     def refresh_vms_status(self, vm_list):
2389         """Get the status of the virtual machines and their interfaces/ports
2390         Params: the list of VM identifiers
2391         Returns a dictionary with:
2392             vm_id:          #VIM id of this Virtual Machine
2393                 status:     #Mandatory. Text with one of:
2394                             #  DELETED (not found at vim)
2395                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2396                             #  OTHER (Vim reported other status not understood)
2397                             #  ERROR (VIM indicates an ERROR status)
2398                             #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2399                             #  CREATING (in building process), ERROR
2400                             #  ACTIVE:NoMgmtIP (active, but none of its interfaces has an IP address)
2401                             #
2402                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
2403                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
2404                 interfaces:
2405                  -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
2406                     mac_address:      #Text format XX:XX:XX:XX:XX:XX
2407                     vim_net_id:       #network id where this interface is connected
2408                     vim_interface_id: #interface/port VIM id
2409                     ip_address:       #null, or text with IPv4, IPv6 address
2410                     compute_node:     #identification of compute node where PF,VF interface is allocated
2411                     pci:              #PCI address of the NIC that hosts the PF,VF
2412                     vlan:             #physical VLAN used for VF
2413         """
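        # For illustration, a plausible entry of the returned dictionary
        # (all ids and addresses below are made up):
        #
        #     {"ca5f-...": {
        #         "status": "ACTIVE",
        #         "vim_info": "...",
        #         "interfaces": [{
        #             "vim_interface_id": "0a1b-...",
        #             "vim_net_id": "9e8d-...",
        #             "mac_address": "fa:16:3e:00:00:01",
        #             "ip_address": "10.0.0.4",
        #             "pci": None,
        #             "vlan": None,
        #         }],
        #     }}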
2414 0         vm_dict = {}
2415 0         self.logger.debug(
2416             "refresh_vms status: Getting tenant VM instance information from VIM"
2417         )
2418
2419 0         for vm_id in vm_list:
2420 0             vm = {}
2421
2422 0             try:
2423 0                 vm_vim = self.get_vminstance(vm_id)
2424
2425 0                 if vm_vim["status"] in vmStatus2manoFormat:
2426 0                     vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
2427                 else:
2428 0                     vm["status"] = "OTHER"
2429 0                     vm["error_msg"] = "VIM status reported " + vm_vim["status"]
2430
2431 0                 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
2432 0                 vm_vim.pop("user_data", None)
2433 0                 vm["vim_info"] = self.serialize(vm_vim)
2434
2435 0                 vm["interfaces"] = []
2436 0                 if vm_vim.get("fault"):
2437 0                     vm["error_msg"] = str(vm_vim["fault"])
2438
2439                 # get interfaces
2440 0                 try:
2441 0                     self._reload_connection()
2442 0                     port_dict = self.neutron.list_ports(device_id=vm_id)
2443
2444 0                     for port in port_dict["ports"]:
2445 0                         interface = {}
2446 0                         interface["vim_info"] = self.serialize(port)
2447 0                         interface["mac_address"] = port.get("mac_address")
2448 0                         interface["vim_net_id"] = port["network_id"]
2449 0                         interface["vim_interface_id"] = port["id"]
2450                         # check if OS-EXT-SRV-ATTR:host is there,
2451                         # in case of non-admin credentials, it will be missing
2452
2453 0                         if vm_vim.get("OS-EXT-SRV-ATTR:host"):
2454 0                             interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
2455
2456 0                         interface["pci"] = None
2457
2458                         # check if binding:profile is there,
2459                         # in case of non-admin credentials, it will be missing
2460 0                         if port.get("binding:profile"):
2461 0                             if port["binding:profile"].get("pci_slot"):
2462                                 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
2463                                 #  the slot to 0x00
2464                                 # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
2465                                 #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
2466 0                                 pci = port["binding:profile"]["pci_slot"]
2467                                 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
2468 0                                 interface["pci"] = pci
2469
2470 0                         interface["vlan"] = None
2471
2472 0                         if port.get("binding:vif_details"):
2473 0                             interface["vlan"] = port["binding:vif_details"].get("vlan")
2474
2475                         # Get the VLAN from the network when it is not present in the port, for old
2476                         # OpenStack versions and for cases where the VLAN is needed at PT (passthrough)
2477 0                         if not interface["vlan"]:
2478                             # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
2479 0                             network = self.neutron.show_network(port["network_id"])
2480
2481 0                             if (
2482                                 network["network"].get("provider:network_type")
2483                                 == "vlan"
2484                             ):
2485                                 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
2486 0                                 interface["vlan"] = network["network"].get(
2487                                     "provider:segmentation_id"
2488                                 )
2489
2490 0                         ips = []
2491                         # look for floating ip address
2492 0                         try:
2493 0                             floating_ip_dict = self.neutron.list_floatingips(
2494                                 port_id=port["id"]
2495                             )
2496
2497 0                             if floating_ip_dict.get("floatingips"):
2498 0                                 ips.append(
2499                                     floating_ip_dict["floatingips"][0].get(
2500                                         "floating_ip_address"
2501                                     )
2502                                 )
2503 0                         except Exception:
2504 0                             pass
2505
2506 0                         for subnet in port["fixed_ips"]:
2507 0                             ips.append(subnet["ip_address"])
2508
2509 0                         interface["ip_address"] = ";".join(ips)
2510 0                         vm["interfaces"].append(interface)
2511 0                 except Exception as e:
2512 0                     self.logger.error(
2513                         "Error getting vm interface information {}: {}".format(
2514                             type(e).__name__, e
2515                         ),
2516                         exc_info=True,
2517                     )
2518 0             except vimconn.VimConnNotFoundException as e:
2519 0                 self.logger.error("Exception getting vm status: %s", str(e))
2520 0                 vm["status"] = "DELETED"
2521 0                 vm["error_msg"] = str(e)
2522 0             except vimconn.VimConnException as e:
2523 0                 self.logger.error("Exception getting vm status: %s", str(e))
2524 0                 vm["status"] = "VIM_ERROR"
2525 0                 vm["error_msg"] = str(e)
2526
2527 0             vm_dict[vm_id] = vm
2528
2529 0         return vm_dict
2530
2531 1     def action_vminstance(self, vm_id, action_dict, created_items={}):
2532         """Send an action over a VM instance to the VIM
2533         Returns None or the console dict if the action was successfully sent to the VIM"""
2534 0         self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
2535
2536 0         try:
2537 0             self._reload_connection()
2538 0             server = self.nova.servers.find(id=vm_id)
2539
2540 0             if "start" in action_dict:
2541 0                 if action_dict["start"] == "rebuild":
2542 0                     server.rebuild()
2543                 else:
2544 0                     if server.status == "PAUSED":
2545 0                         server.unpause()
2546 0                     elif server.status == "SUSPENDED":
2547 0                         server.resume()
2548 0                     elif server.status == "SHUTOFF":
2549 0                         server.start()
2550                     else:
2551 0                         self.logger.debug(
2552                             "ERROR: Instance is not in SHUTOFF/PAUSED/SUSPENDED state"
2553                         )
2554 0                         raise vimconn.VimConnException(
2555                             "Cannot 'start' instance while it is in active state",
2556                             http_code=vimconn.HTTP_Bad_Request,
2557                         )
2558
2559 0             elif "pause" in action_dict:
2560 0                 server.pause()
2561 0             elif "resume" in action_dict:
2562 0                 server.resume()
2563 0             elif "shutoff" in action_dict or "shutdown" in action_dict:
2564 0                 self.logger.debug("server status %s", server.status)
2565 0                 if server.status == "ACTIVE":
2566 0                     server.stop()
2567                 else:
2568 0                     self.logger.debug("ERROR: VM is not in Active state")
2569 0                     raise vimconn.VimConnException(
2570                         "VM is not in active state, stop operation is not allowed",
2571                         http_code=vimconn.HTTP_Bad_Request,
2572                     )
2573 0             elif "forceOff" in action_dict:
2574 0                 server.stop()  # TODO
2575 0             elif "terminate" in action_dict:
2576 0                 server.delete()
2577 0             elif "createImage" in action_dict:
2578 0                 server.create_image()
2579                 # "path":path_schema,
2580                 # "description":description_schema,
2581                 # "name":name_schema,
2582                 # "metadata":metadata_schema,
2583                 # "imageRef": id_schema,
2584                 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
2585 0             elif "rebuild" in action_dict:
2586 0                 server.rebuild(server.image["id"])
2587 0             elif "reboot" in action_dict:
2588 0                 server.reboot()  # reboot_type="SOFT"
2589 0             elif "console" in action_dict:
2590 0                 console_type = action_dict["console"]
2591
2592 0                 if console_type is None or console_type == "novnc":
2593 0                     console_dict = server.get_vnc_console("novnc")
2594 0                 elif console_type == "xvpvnc":
2595 0                     console_dict = server.get_vnc_console(console_type)
2596 0                 elif console_type == "rdp-html5":
2597 0                     console_dict = server.get_rdp_console(console_type)
2598 0                 elif console_type == "spice-html5":
2599 0                     console_dict = server.get_spice_console(console_type)
2600                 else:
2601 0                     raise vimconn.VimConnException(
2602                         "console type '{}' not allowed".format(console_type),
2603                         http_code=vimconn.HTTP_Bad_Request,
2604                     )
2605
2606 0                 try:
2607 0                     console_url = console_dict["console"]["url"]
2608                     # parse console_url
2609 0                     protocol_index = console_url.find("//")
2610 0                     suffix_index = (
2611                         console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2612                     )
2613 0                     port_index = (
2614                         console_url[protocol_index + 2 : suffix_index].find(":")
2615                         + protocol_index
2616                         + 2
2617                     )
2618
2619 0                     if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2620 0                         raise vimconn.VimConnException(
2621                             "Unexpected response from VIM " + str(console_dict)
2622                         )
2623
2624 0                     console_dict2 = {
2625                         "protocol": console_url[0:protocol_index],
2626                         "server": console_url[protocol_index + 2 : port_index],
2627                         "port": int(console_url[port_index + 1 : suffix_index]),
2628                         "suffix": console_url[suffix_index + 1 :],
2629                     }
2630
2631 0                     return console_dict2
2632 0                 except Exception:
2633 0                     raise vimconn.VimConnException(
2634                         "Unexpected response from VIM " + str(console_dict)
2635                     )
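            # A worked example of the parsing above, assuming a noVNC URL shaped
            # like "http://10.0.0.5:6080/vnc_auto.html?token=abc":
            #     protocol_index = 5  -> protocol = "http:"
            #     port_index     = 15 -> server   = "10.0.0.5"
            #     suffix_index   = 20 -> port     = 6080
            #                            suffix   = "vnc_auto.html?token=abc"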
2636
2637 0             return None
2638 0         except (
2639             ksExceptions.ClientException,
2640             nvExceptions.ClientException,
2641             nvExceptions.NotFound,
2642             ConnectionError,
2643         ) as e:
2644 0             self._format_exception(e)
2645         # TODO insert exception vimconn.HTTP_Unauthorized
2646
2647     # ###### VIO Specific Changes #########
2648 1     def _generate_vlanID(self):
2649         """
2650         Method to get unused vlanID
2651             Args:
2652                 None
2653             Returns:
2654                 vlanID
2655         """
2656         # Get used VLAN IDs
2657 0         usedVlanIDs = []
2658 0         networks = self.get_network_list()
2659
2660 0         for net in networks:
2661 0             if net.get("provider:segmentation_id"):
2662 0                 usedVlanIDs.append(net.get("provider:segmentation_id"))
2663
2664 0         used_vlanIDs = set(usedVlanIDs)
2665
2666         # find unused VLAN ID
2667 0         for vlanID_range in self.config.get("dataplane_net_vlan_range"):
2668 0             try:
2669 0                 start_vlanid, end_vlanid = map(
2670                     int, vlanID_range.replace(" ", "").split("-")
2671                 )
2672
2673 0                 for vlanID in range(start_vlanid, end_vlanid + 1):
2674 0                     if vlanID not in used_vlanIDs:
2675 0                         return vlanID
2676 0             except Exception as exp:
2677 0                 raise vimconn.VimConnException(
2678                     "Exception {} occurred while generating VLAN ID.".format(exp)
2679                 )
2680         else:
2681 0             raise vimconn.VimConnConflictException(
2682                 "Unable to create the SR-IOV VLAN network. All given VLAN IDs {} are in use.".format(
2683                     self.config.get("dataplane_net_vlan_range")
2684                 )
2685             )
2686
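    # A short worked example of the range scan above, assuming a hypothetical
    # configuration value:
    #
    #     config["dataplane_net_vlan_range"] = ["3000-3002", "3100-3110"]
    #     used_vlanIDs = {3000, 3001}
    #     # "3000-3002" covers 3000..3002; 3000 and 3001 are taken,
    #     # so _generate_vlanID() returns 3002.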
2687 1     def _generate_multisegment_vlanID(self):
2688         """
2689         Method to get unused vlanID
2690         Args:
2691             None
2692         Returns:
2693             vlanID
2694         """
2695         # Get used VLAN IDs
2696 0         usedVlanIDs = []
2697 0         networks = self.get_network_list()
2698 0         for net in networks:
2699 0             if net.get("provider:network_type") == "vlan" and net.get(
2700                 "provider:segmentation_id"
2701             ):
2702 0                 usedVlanIDs.append(net.get("provider:segmentation_id"))
2703 0             elif net.get("segments"):
2704 0                 for segment in net.get("segments"):
2705 0                     if segment.get("provider:network_type") == "vlan" and segment.get(
2706                         "provider:segmentation_id"
2707                     ):
2708 0                         usedVlanIDs.append(segment.get("provider:segmentation_id"))
2709
2710 0         used_vlanIDs = set(usedVlanIDs)
2711
2712         # find unused VLAN ID
2713 0         for vlanID_range in self.config.get("multisegment_vlan_range"):
2714 0             try:
2715 0                 start_vlanid, end_vlanid = map(
2716                     int, vlanID_range.replace(" ", "").split("-")
2717                 )
2718
2719 0                 for vlanID in range(start_vlanid, end_vlanid + 1):
2720 0                     if vlanID not in used_vlanIDs:
2721 0                         return vlanID
2722 0             except Exception as exp:
2723 0                 raise vimconn.VimConnException(
2724                     "Exception {} occurred while generating VLAN ID.".format(exp)
2725                 )
2726         else:
2727 0             raise vimconn.VimConnConflictException(
2728                 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
2729                     self.config.get("multisegment_vlan_range")
2730                 )
2731             )
2732
2733 1     def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
2734         """
2735         Method to validate user-given vlanID ranges
2736             Args:  None
2737             Returns: None
2738         """
2739 0         for vlanID_range in input_vlan_range:
2740 0             vlan_range = vlanID_range.replace(" ", "")
2741             # validate format
2742 0             vlanID_pattern = r"(\d)*-(\d)*$"
2743 0             match_obj = re.match(vlanID_pattern, vlan_range)
2744 0             if not match_obj:
2745 0                 raise vimconn.VimConnConflictException(
2746                     "Invalid VLAN range for {}: {}. You must provide "
2747                     "'{}' in format [start_ID - end_ID].".format(
2748                         text_vlan_range, vlanID_range, text_vlan_range
2749                     )
2750                 )
2751
2752 0             start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
2753 0             if start_vlanid <= 0:
2754 0                 raise vimconn.VimConnConflictException(
2755                     "Invalid VLAN range for {}: {}. Start ID must be greater than zero. For VLAN "
2756                     "networks valid IDs are 1 to 4094 ".format(
2757                         text_vlan_range, vlanID_range
2758                     )
2759                 )
2760
2761 0             if end_vlanid > 4094:
2762 0                 raise vimconn.VimConnConflictException(
2763                     "Invalid VLAN range for {}: {}. End VLAN ID cannot be "
2764                     "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
2765                         text_vlan_range, vlanID_range
2766                     )
2767                 )
2768
2769 0             if start_vlanid > end_vlanid:
2770 0                 raise vimconn.VimConnConflictException(
2771                     "Invalid VLAN range for {}: {}. You must provide '{}'"
2772                     " in format start_ID - end_ID and start_ID < end_ID ".format(
2773                         text_vlan_range, vlanID_range, text_vlan_range
2774                     )
2775                 )
2776
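    # Illustrative inputs for the validation above (all values hypothetical):
    #
    #     self._validate_vlan_ranges(["1 - 4094"], "dataplane_net_vlan_range")  # passes
    #     self._validate_vlan_ranges(["0-100"], "...")    # rejected: start ID not > 0
    #     self._validate_vlan_ranges(["10-5000"], "...")  # rejected: end ID > 4094
    #     self._validate_vlan_ranges(["20-10"], "...")    # rejected: start_ID > end_ID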
2777     # NOT USED FUNCTIONS
2778
2779 1     def new_external_port(self, port_data):
2780         """Adds an external port to the VIM
2781         Returns the port identifier"""
2782         # TODO openstack if needed
2783 0         return (
2784             -vimconn.HTTP_Internal_Server_Error,
2785             "osconnector.new_external_port() not implemented",
2786         )
2787
2788 1     def connect_port_network(self, port_id, network_id, admin=False):
2789         """Connects an external port to a network
2790         Returns status code of the VIM response"""
2791         # TODO openstack if needed
2792 0         return (
2793             -vimconn.HTTP_Internal_Server_Error,
2794             "osconnector.connect_port_network() not implemented",
2795         )
2796
2797 1     def new_user(self, user_name, user_passwd, tenant_id=None):
2798         """Adds a new user to openstack VIM
2799         Returns the user identifier"""
2800 0         self.logger.debug("osconnector: Adding a new user to VIM")
2801
2802 0         try:
2803 0             self._reload_connection()
2804 0             user = self.keystone.users.create(
2805                 user_name, password=user_passwd, default_project=tenant_id
2806             )
2807             # self.keystone.tenants.add_user(self.k_creds["username"], #role)
2808
2809 0             return user.id
2810 0         except ksExceptions.ConnectionError as e:
2811 0             error_value = -vimconn.HTTP_Bad_Request
2812 0             error_text = (
2813                 type(e).__name__
2814                 + ": "
2815                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2816             )
2817 0         except ksExceptions.ClientException as e:  # TODO remove
2818 0             error_value = -vimconn.HTTP_Bad_Request
2819 0             error_text = (
2820                 type(e).__name__
2821                 + ": "
2822                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2823             )
2824
2825         # TODO insert exception vimconn.HTTP_Unauthorized
2826         # reaching here means an exception was raised
2827 0         self.logger.debug("new_user " + error_text)
2828
2829 0         return error_value, error_text
2830
2831 1     def delete_user(self, user_id):
2832         """Delete a user from openstack VIM
2833         Returns the user identifier"""
2834 0         if self.debug:
2835 0             print("osconnector: Deleting a user from VIM")
2836
2837 0         try:
2838 0             self._reload_connection()
2839 0             self.keystone.users.delete(user_id)
2840
2841 0             return 1, user_id
2842 0         except ksExceptions.ConnectionError as e:
2843 0             error_value = -vimconn.HTTP_Bad_Request
2844 0             error_text = (
2845                 type(e).__name__
2846                 + ": "
2847                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2848             )
2849 0         except ksExceptions.NotFound as e:
2850 0             error_value = -vimconn.HTTP_Not_Found
2851 0             error_text = (
2852                 type(e).__name__
2853                 + ": "
2854                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2855             )
2856 0         except ksExceptions.ClientException as e:  # TODO remove
2857 0             error_value = -vimconn.HTTP_Bad_Request
2858 0             error_text = (
2859                 type(e).__name__
2860                 + ": "
2861                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2862             )
2863
2864         # TODO insert exception vimconn.HTTP_Unauthorized
2865         # reaching here means an exception was raised
2866 0         self.logger.debug("delete_user " + error_text)
2867
2868 0         return error_value, error_text
2869
2870 1     def get_hosts_info(self):
2871         """Get the information of deployed hosts
2872         Returns the hosts content"""
2873 0         if self.debug:
2874 0             print("osconnector: Getting Host info from VIM")
2875
2876 0         try:
2877 0             h_list = []
2878 0             self._reload_connection()
2879 0             hypervisors = self.nova.hypervisors.list()
2880
2881 0             for hype in hypervisors:
2882 0                 h_list.append(hype.to_dict())
2883
2884 0             return 1, {"hosts": h_list}
2885 0         except nvExceptions.NotFound as e:
2886 0             error_value = -vimconn.HTTP_Not_Found
2887 0             error_text = str(e) if len(e.args) == 0 else str(e.args[0])
2888 0         except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
2889 0             error_value = -vimconn.HTTP_Bad_Request
2890 0             error_text = (
2891                 type(e).__name__
2892                 + ": "
2893                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2894             )
2895
2896         # TODO insert exception vimconn.HTTP_Unauthorized
2897         # reaching here means an exception was raised
2898 0         self.logger.debug("get_hosts_info " + error_text)
2899
2900 0         return error_value, error_text
2901
2902 1     def get_hosts(self, vim_tenant):
2903         """Get the hosts and deployed instances
2904         Returns the hosts content"""
2905 0         r, hype_dict = self.get_hosts_info()
2906
2907 0         if r < 0:
2908 0             return r, hype_dict
2909
2910 0         hypervisors = hype_dict["hosts"]
2911
2912 0         try:
2913 0             servers = self.nova.servers.list()
2914 0             for hype in hypervisors:
2915 0                 for server in servers:
2916 0                     if (
2917                         server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
2918                         == hype["hypervisor_hostname"]
2919                     ):
2920 0                         if "vm" in hype:
2921 0                             hype["vm"].append(server.id)
2922                         else:
2923 0                             hype["vm"] = [server.id]
2924
2925 0             return 1, hype_dict
2926 0         except nvExceptions.NotFound as e:
2927 0             error_value = -vimconn.HTTP_Not_Found
2928 0             error_text = str(e) if len(e.args) == 0 else str(e.args[0])
2929 0         except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
2930 0             error_value = -vimconn.HTTP_Bad_Request
2931 0             error_text = (
2932                 type(e).__name__
2933                 + ": "
2934                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2935             )
2936
2937         # TODO insert exception vimconn.HTTP_Unauthorized
2938         # reaching here means an exception was raised
2939 0         self.logger.debug("get_hosts " + error_text)
2940
2941 0         return error_value, error_text
2942
2943 1     def new_classification(self, name, ctype, definition):
2944 1         self.logger.debug(
2945             "Adding a new (Traffic) Classification to VIM, named %s", name
2946         )
2947
2948 1         try:
2949 1             new_class = None
2950 1             self._reload_connection()
2951
2952 1             if ctype not in supportedClassificationTypes:
2953 1                 raise vimconn.VimConnNotSupportedException(
2954                     "OpenStack VIM connector does not support provided "
2955                     "Classification Type {}, supported ones are: {}".format(
2956                         ctype, supportedClassificationTypes
2957                     )
2958                 )
2959
2960 1             if not self._validate_classification(ctype, definition):
2961 0                 raise vimconn.VimConnException(
2962                     "Incorrect Classification definition for the type specified."
2963                 )
2964
2965 1             classification_dict = definition
2966 1             classification_dict["name"] = name
2967 1             new_class = self.neutron.create_sfc_flow_classifier(
2968                 {"flow_classifier": classification_dict}
2969             )
2970
2971 1             return new_class["flow_classifier"]["id"]
2972 1         except (
2973             neExceptions.ConnectionFailed,
2974             ksExceptions.ClientException,
2975             neExceptions.NeutronException,
2976             ConnectionError,
2977         ) as e:
2978 0             self.logger.error("Creation of Classification failed.")
2979 0             self._format_exception(e)
2980
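    # A sketch of a "legacy_flow_classifier" definition as accepted by
    # new_classification() above; the field names follow the networking-sfc
    # flow classifier API, and all values here are made up:
    #
    #     definition = {
    #         "protocol": "tcp",
    #         "source_ip_prefix": "10.0.0.0/24",
    #         "destination_ip_prefix": "10.0.1.0/24",
    #         "destination_port_range_min": 80,
    #         "destination_port_range_max": 80,
    #         "logical_source_port": "0a1b-...",  # Neutron port id
    #     }
    #     class_id = vim_conn.new_classification("http-cl", "legacy_flow_classifier", definition)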
2981 1     def get_classification(self, class_id):
2982 1         self.logger.debug("Getting Classification %s from VIM", class_id)
2983 1         filter_dict = {"id": class_id}
2984 1         class_list = self.get_classification_list(filter_dict)
2985
2986 1         if len(class_list) == 0:
2987 1             raise vimconn.VimConnNotFoundException(
2988                 "Classification '{}' not found".format(class_id)
2989             )
2990 1         elif len(class_list) > 1:
2991 1             raise vimconn.VimConnConflictException(
2992                 "Found more than one Classification with this criteria"
2993             )
2994
2995 1         classification = class_list[0]
2996
2997 1         return classification
2998
2999 1     def get_classification_list(self, filter_dict={}):
3000 1         self.logger.debug(
3001             "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3002         )
3003
3004 1         try:
3005 1             filter_dict_os = filter_dict.copy()
3006 1             self._reload_connection()
3007
3008 1             if self.api_version3 and "tenant_id" in filter_dict_os:
3009 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3010
3011 1             classification_dict = self.neutron.list_sfc_flow_classifiers(
3012                 **filter_dict_os
3013             )
3014 1             classification_list = classification_dict["flow_classifiers"]
3015 1             self.__classification_os2mano(classification_list)
3016
3017 1             return classification_list
3018 0         except (
3019             neExceptions.ConnectionFailed,
3020             ksExceptions.ClientException,
3021             neExceptions.NeutronException,
3022             ConnectionError,
3023         ) as e:
3024 0             self._format_exception(e)
3025
3026 1     def delete_classification(self, class_id):
3027 1         self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3028
3029 1         try:
3030 1             self._reload_connection()
3031 1             self.neutron.delete_sfc_flow_classifier(class_id)
3032
3033 1             return class_id
3034 0         except (
3035             neExceptions.ConnectionFailed,
3036             neExceptions.NeutronException,
3037             ksExceptions.ClientException,
3039             ConnectionError,
3040         ) as e:
3041 0             self._format_exception(e)
3042
3043 1     def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3044 1         self.logger.debug(
3045             "Adding a new Service Function Instance to VIM, named '%s'", name
3046         )
3047
3048 1         try:
3049 1             new_sfi = None
3050 1             self._reload_connection()
3051 1             correlation = None
3052
3053 1             if sfc_encap:
3054 1                 correlation = "nsh"
3055
3056 1             if len(ingress_ports) != 1:
3057 1                 raise vimconn.VimConnNotSupportedException(
3058                     "OpenStack VIM connector can only have 1 ingress port per SFI"
3059                 )
3060
3061 1             if len(egress_ports) != 1:
3062 1                 raise vimconn.VimConnNotSupportedException(
3063                     "OpenStack VIM connector can only have 1 egress port per SFI"
3064                 )
3065
3066 1             sfi_dict = {
3067                 "name": name,
3068                 "ingress": ingress_ports[0],
3069                 "egress": egress_ports[0],
3070                 "service_function_parameters": {"correlation": correlation},
3071             }
3072 1             new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3073
3074 1             return new_sfi["port_pair"]["id"]
3075 1         except (
3076             neExceptions.ConnectionFailed,
3077             ksExceptions.ClientException,
3078             neExceptions.NeutronException,
3079             ConnectionError,
3080         ) as e:
3081 0             if new_sfi:
3082 0                 try:
3083 0                     self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3084 0                 except Exception:
3085 0                     self.logger.error(
3086                         "Creation of Service Function Instance failed, with "
3087                         "subsequent deletion failure as well."
3088                     )
3089
3090 0             self._format_exception(e)
3091
3092 1     def get_sfi(self, sfi_id):
3093 1         self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3094 1         filter_dict = {"id": sfi_id}
3095 1         sfi_list = self.get_sfi_list(filter_dict)
3096
3097 1         if len(sfi_list) == 0:
3098 1             raise vimconn.VimConnNotFoundException(
3099                 "Service Function Instance '{}' not found".format(sfi_id)
3100             )
3101 1         elif len(sfi_list) > 1:
3102 1             raise vimconn.VimConnConflictException(
3103                 "Found more than one Service Function Instance with this criteria"
3104             )
3105
3106 1         sfi = sfi_list[0]
3107
3108 1         return sfi
3109
3110 1     def get_sfi_list(self, filter_dict={}):
3111 1         self.logger.debug(
3112             "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3113         )
3114
3115 1         try:
3116 1             self._reload_connection()
3117 1             filter_dict_os = filter_dict.copy()
3118
3119 1             if self.api_version3 and "tenant_id" in filter_dict_os:
3120 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3121
3122 1             sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3123 1             sfi_list = sfi_dict["port_pairs"]
3124 1             self.__sfi_os2mano(sfi_list)
3125
3126 1             return sfi_list
3127 0         except (
3128             neExceptions.ConnectionFailed,
3129             ksExceptions.ClientException,
3130             neExceptions.NeutronException,
3131             ConnectionError,
3132         ) as e:
3133 0             self._format_exception(e)
3134
3135 1     def delete_sfi(self, sfi_id):
3136 1         self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3137
3138 1         try:
3139 1             self._reload_connection()
3140 1             self.neutron.delete_sfc_port_pair(sfi_id)
3141
3142 1             return sfi_id
3143 0         except (
3144             neExceptions.ConnectionFailed,
3145             neExceptions.NeutronException,
3146             ksExceptions.ClientException,
3148             ConnectionError,
3149         ) as e:
3150 0             self._format_exception(e)
3151
3152 1     def new_sf(self, name, sfis, sfc_encap=True):
3153 1         self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3154
3155 1         try:
3156 1             new_sf = None
3157 1             self._reload_connection()
3158             # correlation = None
3159             # if sfc_encap:
3160             #     correlation = "nsh"
3161
3162 1             for instance in sfis:
3163 1                 sfi = self.get_sfi(instance)
3164
3165 1                 if sfi.get("sfc_encap") != sfc_encap:
3166 1                     raise vimconn.VimConnNotSupportedException(
3167                         "OpenStack VIM connector requires all SFIs of the "
3168                         "same SF to share the same SFC Encapsulation"
3169                     )
3170
3171 1             sf_dict = {"name": name, "port_pairs": sfis}
3172 1             new_sf = self.neutron.create_sfc_port_pair_group(
3173                 {"port_pair_group": sf_dict}
3174             )
3175
3176 1             return new_sf["port_pair_group"]["id"]
3177 1         except (
3178             neExceptions.ConnectionFailed,
3179             ksExceptions.ClientException,
3180             neExceptions.NeutronException,
3181             ConnectionError,
3182         ) as e:
3183 0             if new_sf:
3184 0                 try:
3185 0                     self.neutron.delete_sfc_port_pair_group(
3186                         new_sf["port_pair_group"]["id"]
3187                     )
3188 0                 except Exception:
3189 0                     self.logger.error(
3190                         "Creation of Service Function failed, with "
3191                         "subsequent deletion failure as well."
3192                     )
3193
3194 0             self._format_exception(e)
3195
3196 1     def get_sf(self, sf_id):
3197 1         self.logger.debug("Getting Service Function %s from VIM", sf_id)
3198 1         filter_dict = {"id": sf_id}
3199 1         sf_list = self.get_sf_list(filter_dict)
3200
3201 1         if len(sf_list) == 0:
3202 1             raise vimconn.VimConnNotFoundException(
3203                 "Service Function '{}' not found".format(sf_id)
3204             )
3205 1         elif len(sf_list) > 1:
3206 1             raise vimconn.VimConnConflictException(
3207                 "Found more than one Service Function with this criteria"
3208             )
3209
3210 1         sf = sf_list[0]
3211
3212 1         return sf
3213
3214 1     def get_sf_list(self, filter_dict={}):
3215 1         self.logger.debug(
3216             "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3217         )
3218
3219 1         try:
3220 1             self._reload_connection()
3221 1             filter_dict_os = filter_dict.copy()
3222
3223 1             if self.api_version3 and "tenant_id" in filter_dict_os:
3224 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3225
3226 1             sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3227 1             sf_list = sf_dict["port_pair_groups"]
3228 1             self.__sf_os2mano(sf_list)
3229
3230 1             return sf_list
3231 0         except (
3232             neExceptions.ConnectionFailed,
3233             ksExceptions.ClientException,
3234             neExceptions.NeutronException,
3235             ConnectionError,
3236         ) as e:
3237 0             self._format_exception(e)
3238
3239 1     def delete_sf(self, sf_id):
3240 1         self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3241
3242 1         try:
3243 1             self._reload_connection()
3244 1             self.neutron.delete_sfc_port_pair_group(sf_id)
3245
3246 1             return sf_id
3247 0         except (
3248             neExceptions.ConnectionFailed,
3249             neExceptions.NeutronException,
3250             ksExceptions.ClientException,
3252             ConnectionError,
3253         ) as e:
3254 0             self._format_exception(e)
3255
3256 1     def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3257 1         self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3258
3259 1         try:
3260 1             new_sfp = None
3261 1             self._reload_connection()
3262             # In networking-sfc the MPLS encapsulation is legacy
3263             # should be used when no full SFC Encapsulation is intended
3264 1             correlation = "mpls"
3265
3266 1             if sfc_encap:
3267 1                 correlation = "nsh"
3268
3269 1             sfp_dict = {
3270                 "name": name,
3271                 "flow_classifiers": classifications,
3272                 "port_pair_groups": sfs,
3273                 "chain_parameters": {"correlation": correlation},
3274             }
3275
3276 1             if spi:
3277 1                 sfp_dict["chain_id"] = spi
3278
3279 1             new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3280
3281 1             return new_sfp["port_chain"]["id"]
3282 0         except (
3283             neExceptions.ConnectionFailed,
3284             ksExceptions.ClientException,
3285             neExceptions.NeutronException,
3286             ConnectionError,
3287         ) as e:
3288 0             if new_sfp:
3289 0                 try:
3290 0                     self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3291 0                 except Exception:
3292 0                     self.logger.error(
3293                         "Creation of Service Function Path failed, with "
3294                         "subsequent deletion failure as well."
3295                     )
3296
3297 0             self._format_exception(e)
3298
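    # A minimal end-to-end sketch of building a chain with the methods above
    # (vim_conn and all ids are hypothetical):
    #
    #     class_id = vim_conn.new_classification("c1", "legacy_flow_classifier", definition)
    #     sfi_id = vim_conn.new_sfi("sfi1", [ingress_port], [egress_port], sfc_encap=True)
    #     sf_id = vim_conn.new_sf("sf1", [sfi_id], sfc_encap=True)
    #     sfp_id = vim_conn.new_sfp("sfp1", [class_id], [sf_id], sfc_encap=True)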
3299 1     def get_sfp(self, sfp_id):
3300 1         self.logger.debug("Getting Service Function Path %s from VIM", sfp_id)
3301
3302 1         filter_dict = {"id": sfp_id}
3303 1         sfp_list = self.get_sfp_list(filter_dict)
3304
3305 1         if len(sfp_list) == 0:
3306 1             raise vimconn.VimConnNotFoundException(
3307                 "Service Function Path '{}' not found".format(sfp_id)
3308             )
3309 1         elif len(sfp_list) > 1:
3310 1             raise vimconn.VimConnConflictException(
3311                 "Found more than one Service Function Path with this criteria"
3312             )
3313
3314 1         sfp = sfp_list[0]
3315
3316 1         return sfp
3317
3318 1     def get_sfp_list(self, filter_dict={}):
3319 1         self.logger.debug(
3320             "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3321         )
3322
3323 1         try:
3324 1             self._reload_connection()
3325 1             filter_dict_os = filter_dict.copy()
3326
3327 1             if self.api_version3 and "tenant_id" in filter_dict_os:
3328 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3329
3330 1             sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
3331 1             sfp_list = sfp_dict["port_chains"]
3332 1             self.__sfp_os2mano(sfp_list)
3333
3334 1             return sfp_list
3335 0         except (
3336             neExceptions.ConnectionFailed,
3337             ksExceptions.ClientException,
3338             neExceptions.NeutronException,
3339             ConnectionError,
3340         ) as e:
3341 0             self._format_exception(e)
3342
3343 1     def delete_sfp(self, sfp_id):
3344 1         self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
3345
3346 1         try:
3347 1             self._reload_connection()
3348 1             self.neutron.delete_sfc_port_chain(sfp_id)
3349
3350 1             return sfp_id
3351 0         except (
3352             neExceptions.ConnectionFailed,
3353             neExceptions.NeutronException,
3354             ksExceptions.ClientException,
3356             ConnectionError,
3357         ) as e:
3358 0             self._format_exception(e)
3359
3360 1     def refresh_sfps_status(self, sfp_list):
3361         """Get the status of the service function path
3362         Params: the list of sfp identifiers
3363         Returns a dictionary with:
3364             vm_id:          #VIM id of this service function path
3365                 status:     #Mandatory. Text with one of:
3366                             #  DELETED (not found at vim)
3367                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3368                             #  OTHER (Vim reported other status not understood)
3369                             #  ERROR (VIM indicates an ERROR status)
3370                             #  ACTIVE,
3371                             #  CREATING (on building process)
3372                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
3373                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
3374         """
3375 0         sfp_dict = {}
3376 0         self.logger.debug(
3377             "refresh_sfps status: Getting tenant SFP information from VIM"
3378         )
3379
3380 0         for sfp_id in sfp_list:
3381 0             sfp = {}
3382
3383 0             try:
3384 0                 sfp_vim = self.get_sfp(sfp_id)
3385
3386 0                 if sfp_vim["spi"]:
3387 0                     sfp["status"] = vmStatus2manoFormat["ACTIVE"]
3388                 else:
3389 0                     sfp["status"] = "OTHER"
3390 0                     sfp["error_msg"] = "VIM status reported " + sfp["status"]
3391
3392 0                 sfp["vim_info"] = self.serialize(sfp_vim)
3393
3394 0                 if sfp_vim.get("fault"):
3395 0                     sfp["error_msg"] = str(sfp_vim["fault"])
3396 0             except vimconn.VimConnNotFoundException as e:
3397 0                 self.logger.error("Exception getting sfp status: %s", str(e))
3398 0                 sfp["status"] = "DELETED"
3399 0                 sfp["error_msg"] = str(e)
3400 0             except vimconn.VimConnException as e:
3401 0                 self.logger.error("Exception getting sfp status: %s", str(e))
3402 0                 sfp["status"] = "VIM_ERROR"
3403 0                 sfp["error_msg"] = str(e)
3404
3405 0             sfp_dict[sfp_id] = sfp
3406
3407 0         return sfp_dict
3408
3409 1     def refresh_sfis_status(self, sfi_list):
3410         """Get the status of the service function instances
3411         Params: the list of sfi identifiers
3412         Returns a dictionary with:
3413             vm_id:          #VIM id of this service function instance
3414                 status:     #Mandatory. Text with one of:
3415                             #  DELETED (not found at vim)
3416                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3417                             #  OTHER (Vim reported other status not understood)
3418                             #  ERROR (VIM indicates an ERROR status)
3419                             #  ACTIVE,
3420                             #  CREATING (on building process)
3421                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
3422                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
3423         """
3424 0         sfi_dict = {}
3425 0         self.logger.debug(
3426             "refresh_sfis status: Getting tenant sfi information from VIM"
3427         )
3428
3429 0         for sfi_id in sfi_list:
3430 0             sfi = {}
3431
3432 0             try:
3433 0                 sfi_vim = self.get_sfi(sfi_id)
3434
3435 0                 if sfi_vim:
3436 0                     sfi["status"] = vmStatus2manoFormat["ACTIVE"]
3437                 else:
3438 0                     sfi["status"] = "OTHER"
3439 0                     sfi["error_msg"] = "VIM status reported " + sfi["status"]
3440
3441 0                 sfi["vim_info"] = self.serialize(sfi_vim)
3442
3443 0                 if sfi_vim.get("fault"):
3444 0                     sfi["error_msg"] = str(sfi_vim["fault"])
3445 0             except vimconn.VimConnNotFoundException as e:
3446 0                 self.logger.error("Exception getting sfi status: %s", str(e))
3447 0                 sfi["status"] = "DELETED"
3448 0                 sfi["error_msg"] = str(e)
3449 0             except vimconn.VimConnException as e:
3450 0                 self.logger.error("Exception getting sfi status: %s", str(e))
3451 0                 sfi["status"] = "VIM_ERROR"
3452 0                 sfi["error_msg"] = str(e)
3453
3454 0             sfi_dict[sfi_id] = sfi
3455
3456 0         return sfi_dict
3457
3458 1     def refresh_sfs_status(self, sf_list):
3459         """Get the status of the service functions
3460         Params: the list of sf identifiers
3461         Returns a dictionary with:
3462             vm_id:          #VIM id of this service function
3463                 status:     #Mandatory. Text with one of:
3464                             #  DELETED (not found at vim)
3465                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3466                             #  OTHER (Vim reported other status not understood)
3467                             #  ERROR (VIM indicates an ERROR status)
3468                             #  ACTIVE,
3469                             #  CREATING (on building process)
3470                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
3471                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
3472         """
3473 0         sf_dict = {}
3474 0         self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
3475
3476 0         for sf_id in sf_list:
3477 0             sf = {}
3478
3479 0             try:
3480 0                 sf_vim = self.get_sf(sf_id)
3481
3482 0                 if sf_vim:
3483 0                     sf["status"] = vmStatus2manoFormat["ACTIVE"]
3484                 else:
3485 0                     sf["status"] = "OTHER"
3486 0                     sf["error_msg"] = "VIM status reported " + sf["status"]
3487
3488 0                 sf["vim_info"] = self.serialize(sf_vim)
3489
3490 0                 if sf_vim.get("fault"):
3491 0                     sf["error_msg"] = str(sf_vim["fault"])
3492 0             except vimconn.VimConnNotFoundException as e:
3493 0                 self.logger.error("Exception getting sf status: %s", str(e))
3494 0                 sf["status"] = "DELETED"
3495 0                 sf["error_msg"] = str(e)
3496 0             except vimconn.VimConnException as e:
3497 0                 self.logger.error("Exception getting sf status: %s", str(e))
3498 0                 sf["status"] = "VIM_ERROR"
3499 0                 sf["error_msg"] = str(e)
3500
3501 0             sf_dict[sf_id] = sf
3502
3503 0         return sf_dict
3504
3505 1     def refresh_classifications_status(self, classification_list):
3506         """Get the status of the classifications
3507         Params: the list of classification identifiers
3508         Returns a dictionary with:
3509             vm_id:          #VIM id of this classifier
3510                 status:     #Mandatory. Text with one of:
3511                             #  DELETED (not found at vim)
3512                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3513                             #  OTHER (Vim reported other status not understood)
3514                             #  ERROR (VIM indicates an ERROR status)
3515                             #  ACTIVE,
3516                             #  CREATING (on building process)
3517                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
3518                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
3519         """
3520 0         classification_dict = {}
3521 0         self.logger.debug(
3522             "refresh_classifications status: Getting tenant classification information from VIM"
3523         )
3524
3525 0         for classification_id in classification_list:
3526 0             classification = {}
3527
3528 0             try:
3529 0                 classification_vim = self.get_classification(classification_id)
3530
3531 0                 if classification_vim:
3532 0                     classification["status"] = vmStatus2manoFormat["ACTIVE"]
3533                 else:
3534 0                     classification["status"] = "OTHER"
3535 0                     classification["error_msg"] = (
3536                         "VIM status reported " + classification["status"]
3537                     )
3538
3539 0                 classification["vim_info"] = self.serialize(classification_vim)
3540
3541 0                 if classification_vim.get("fault"):
3542 0                     classification["error_msg"] = str(classification_vim["fault"])
3543 0             except vimconn.VimConnNotFoundException as e:
3544 0                 self.logger.error("Exception getting classification status: %s", str(e))
3545 0                 classification["status"] = "DELETED"
3546 0                 classification["error_msg"] = str(e)
3547 0             except vimconn.VimConnException as e:
3548 0                 self.logger.error("Exception getting classification status: %s", str(e))
3549 0                 classification["status"] = "VIM_ERROR"
3550 0                 classification["error_msg"] = str(e)
3551
3552 0             classification_dict[classification_id] = classification
3553
3554 0         return classification_dict
3555
3556 1     def new_affinity_group(self, affinity_group_data):
3557         """Adds a server group to VIM
3558             affinity_group_data contains a dictionary with information, keys:
3559                 name: name in VIM for the server group
3560                 type: affinity or anti-affinity
3561                 scope: Only nfvi-node allowed
3562         Returns the server group identifier"""
3563 0         self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3564
3565 0         try:
3566 0             name = affinity_group_data["name"]
3567 0             policy = affinity_group_data["type"]
3568
3569 0             self._reload_connection()
3570 0             new_server_group = self.nova.server_groups.create(name, policy)
3571
3572 0             return new_server_group.id
3573 0         except (
3574             ksExceptions.ClientException,
3575             nvExceptions.ClientException,
3576             ConnectionError,
3577             KeyError,
3578         ) as e:
3579 0             self._format_exception(e)
3580
3581 1     def get_affinity_group(self, affinity_group_id):
3582         """Obtain server group details from the VIM. Returns the server group details as a dict"""
3583 0         self.logger.debug("Getting server group '%s'", affinity_group_id)
3584 0         try:
3585 0             self._reload_connection()
3586 0             server_group = self.nova.server_groups.find(id=affinity_group_id)
3587
3588 0             return server_group.to_dict()
3589 0         except (
3590             nvExceptions.NotFound,
3591             nvExceptions.ClientException,
3592             ksExceptions.ClientException,
3593             ConnectionError,
3594         ) as e:
3595 0             self._format_exception(e)
3596
3597 1     def delete_affinity_group(self, affinity_group_id):
3598         """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3599 0         self.logger.debug("Deleting server group '%s'", affinity_group_id)
3600 0         try:
3601 0             self._reload_connection()
3602 0             self.nova.server_groups.delete(affinity_group_id)
3603
3604 0             return affinity_group_id
3605 0         except (
3606             nvExceptions.NotFound,
3607             ksExceptions.ClientException,
3608             nvExceptions.ClientException,
3609             ConnectionError,
3610         ) as e:
3611 0             self._format_exception(e)
3612
3613 1     def get_vdu_state(self, vm_id):
3614         """
3615         Getting the state of a vdu
3616         param:
3617             vm_id: ID of an instance
3618         """
3619 0         self.logger.debug("Getting the status of VM")
3620 0         self.logger.debug("VIM VM ID %s", vm_id)
3621 0         self._reload_connection()
3622 0         server = self.nova.servers.find(id=vm_id)
3623 0         server_dict = server.to_dict()
3624 0         vdu_data = [
3625             server_dict["status"],
3626             server_dict["flavor"]["id"],
3627             server_dict["OS-EXT-SRV-ATTR:host"],
3628             server_dict["OS-EXT-AZ:availability_zone"],
3629         ]
3630 0         self.logger.debug("vdu_data %s", vdu_data)
3631 0         return vdu_data
3632
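    # Example of the list returned by get_vdu_state() (values are illustrative):
    #
    #     ["ACTIVE", "flavor-uuid", "compute-0", "nova"]
    #     # [status, flavor id, hypervisor host, availability zone]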
3633 1     def check_compute_availability(self, host, server_flavor_details):
3634 0         self._reload_connection()
3635 0         hypervisor_search = self.nova.hypervisors.search(
3636             hypervisor_match=host, servers=True
3637         )
3638 0         for hypervisor in hypervisor_search:
3639 0             hypervisor_id = hypervisor.to_dict()["id"]
3640 0             hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
3641 0             hypervisor_dict = hypervisor_details.to_dict()
3642 0             hypervisor_temp = json.dumps(hypervisor_dict)
3643 0             hypervisor_json = json.loads(hypervisor_temp)
3644 0             resources_available = [
3645                 hypervisor_json["free_ram_mb"],
3646                 hypervisor_json["disk_available_least"],
3647                 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
3648             ]
3649 0             compute_available = all(
3650                 x > y for x, y in zip(resources_available, server_flavor_details)
3651             )
3652 0             if compute_available:
3653 0                 return host
3654
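    # A small worked example of the capacity check above (numbers invented):
    #
    #     resources_available = [8192, 80, 6]    # free RAM MB, free disk GB, free vcpus
    #     server_flavor_details = [4096, 40, 2]  # flavor ram, disk, vcpus
    #     all(x > y for x, y in zip(resources_available, server_flavor_details))
    #     # -> True, so this host is returned as having enough capacity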
3655 1     def check_availability_zone(
3656         self, old_az, server_flavor_details, old_host, host=None
3657     ):
3658 0         self._reload_connection()
3659 0         az_check = {"zone_check": False, "compute_availability": None}
3660 0         aggregates_list = self.nova.aggregates.list()
3661 0         for aggregate in aggregates_list:
3662             # to_dict() already returns a plain dict containing the
3663             # availability_zone and hosts fields; no JSON round-trip needed
3664 0             aggregate_json = aggregate.to_dict()
3665 0             if aggregate_json["availability_zone"] == old_az:
3666 0                 hosts_list = aggregate_json["hosts"]
3667 0                 if host is not None:
3668 0                     if host in hosts_list:
3669 0                         az_check["zone_check"] = True
3670 0                         available_compute_id = self.check_compute_availability(
3671                             host, server_flavor_details
3672                         )
3673 0                         if available_compute_id is not None:
3674 0                             az_check["compute_availability"] = available_compute_id
3675                 else:
3676 0                     for check_host in hosts_list:
3677 0                         if check_host != old_host:
3678 0                             available_compute_id = self.check_compute_availability(
3679                                 check_host, server_flavor_details
3680                             )
3681 0                             if available_compute_id is not None:
3682 0                                 az_check["zone_check"] = True
3683 0                                 az_check["compute_availability"] = available_compute_id
3684 0                                 break
3685                     else:
3686 0                         az_check["zone_check"] = True
3687 0         return az_check
3688
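# The dict returned by check_availability_zone() drives migrate_instance():
# "zone_check" reports whether the requested target host (or, when no host is
# given, the instance's old availability zone) was found, and
# "compute_availability" carries a host with enough free resources, or None.
# A hypothetical call, with flavor details passed as [ram_mb, disk_gb, vcpus]:
#
#     az = vim.check_availability_zone("nova", [4096, 40, 2], "compute-1")
#     if az["zone_check"] and az["compute_availability"]:
#         target_host = az["compute_availability"]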
3689 1     def migrate_instance(self, vm_id, compute_host=None):
3690         """
3691         Migrate a vdu
3692         param:
3693             vm_id: ID of an instance
3694             compute_host: Host to migrate the vdu to
3695         """
3696 0         self._reload_connection()
3697 0         vm_state = False
3698 0         instance_state = self.get_vdu_state(vm_id)
3699 0         server_flavor_id = instance_state[1]
3700 0         server_hypervisor_name = instance_state[2]
3701 0         server_availability_zone = instance_state[3]
3702 0         try:
3703 0             server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3704 0             server_flavor_details = [
3705                 server_flavor["ram"],
3706                 server_flavor["disk"],
3707                 server_flavor["vcpus"],
3708             ]
3709 0             if compute_host == server_hypervisor_name:
3710 0                 raise vimconn.VimConnException(
3711                     "Unable to migrate instance '{}' to the same host '{}'".format(
3712                         vm_id, compute_host
3713                     ),
3714                     http_code=vimconn.HTTP_Bad_Request,
3715                 )
3716 0             az_status = self.check_availability_zone(
3717                 server_availability_zone,
3718                 server_flavor_details,
3719                 server_hypervisor_name,
3720                 compute_host,
3721             )
3722 0             availability_zone_check = az_status["zone_check"]
3723 0             available_compute_id = az_status.get("compute_availability")
3724
3725 0             if availability_zone_check is False:
3726 0                 raise vimconn.VimConnException(
3727                     "Unable to migrate instance '{}' to a different availability zone".format(
3728                         vm_id
3729                     ),
3730                     http_code=vimconn.HTTP_Bad_Request,
3731                 )
3732 0             if available_compute_id is not None:
3733 0                 self.nova.servers.live_migrate(
3734                     server=vm_id,
3735                     host=available_compute_id,
3736                     block_migration=True,
3737                     disk_over_commit=False,
3738                 )
3739 0                 state = "MIGRATING"
3740                 # live_migrate() is asynchronous: wait for the server to be
3741                 # ACTIVE again, then read back the compute host it landed on
3742 0                 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
3743 0                 changed_compute_host = self.get_vdu_state(vm_id)[2]
3744 0                 if vm_state and changed_compute_host == available_compute_id:
3745 0                     self.logger.debug(
3746                         "Instance '{}' migrated to the new compute host '{}'".format(
3747                             vm_id, changed_compute_host
3748                         )
3749                     )
3750 0                     return state, available_compute_id
3751                 else:
3752 0                     raise vimconn.VimConnException(
3753                         "Migration Failed. Instance '{}' not moved to the new host {}".format(
3754                             vm_id, available_compute_id
3755                         ),
3756                         http_code=vimconn.HTTP_Bad_Request,
3757                     )
3758             else:
3759 0                 raise vimconn.VimConnException(
3760                     "Compute '{}' not available or does not have enough resources to migrate the instance".format(
3761                         compute_host
3762                     ),
3763                     http_code=vimconn.HTTP_Bad_Request,
3764                 )
3765 0         except (
3766             nvExceptions.BadRequest,
3767             nvExceptions.ClientException,
3768             nvExceptions.NotFound,
3769         ) as e:
3770 0             self._format_exception(e)
3771
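# A minimal usage sketch (assuming `vim` is an instantiated connector and the
# instance is ACTIVE): migrate_instance() live-migrates with block migration
# and returns the transient state plus the chosen host on success.
#
#     state, new_host = vim.migrate_instance(vm_id, compute_host="compute-2")
#     # state == "MIGRATING"; the method has already waited for ACTIVE and
#     # verified the instance landed on new_host before returning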
3772 1     def resize_instance(self, vm_id, new_flavor_id):
3773         """
3774         Resize the VM based on the given
3775         flavor details.
3776         param:
3777             vm_id : ID of an instance
3778             new_flavor_id : ID of the flavor to resize to
3779         Returns the status of the resized instance
3780         """
3781 0         self._reload_connection()
3782 0         self.logger.debug("Resizing instance '%s' to flavor '%s'", vm_id, new_flavor_id)
3783 0         instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
3784 0         old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
3785 0         new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
3786 0         try:
3787 0             if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
3788 0                 if old_flavor_disk > new_flavor_disk:
3789 0                     raise nvExceptions.BadRequest(
3790                         400,
3791                         message="Server disk resize failed. Resize to lower disk flavor is not allowed",
3792                     )
3793                 else:
3794 0                     self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
3795 0                     vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
3796 0                     if vm_state:
3797 0                         instance_resized_status = self.confirm_resize(vm_id)
3798 0                         return instance_resized_status
3799                     else:
3800 0                         raise nvExceptions.BadRequest(
3801                             409,
3802                             message="Cannot 'resize': instance did not reach VERIFY_RESIZE state",
3803                         )
3804
3805             else:
3806 0                 self.logger.debug("ERROR: Instance is not in ACTIVE or SHUTOFF state")
3807 0                 raise nvExceptions.BadRequest(
3808                     409,
3809                     message="Cannot 'resize' instance: it is not in ACTIVE or SHUTOFF state",
3810                 )
3811 0         except (
3812             nvExceptions.BadRequest,
3813             nvExceptions.ClientException,
3814             nvExceptions.NotFound,
3815         ) as e:
3816 0             self._format_exception(e)
3817
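# A hypothetical resize flow (assuming `vim` and a larger-disk flavor id):
# resize_instance() refuses to shrink the disk, waits for VERIFY_RESIZE and
# then confirms via confirm_resize(), so the caller only sees the final status.
#
#     final_status = vim.resize_instance(vm_id, new_flavor_id)
#     # final_status is the server status after the resize is confirmed,
#     # normally "ACTIVE"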
3818 1     def confirm_resize(self, vm_id):
3819         """
3820         Confirm the resize of an instance; returns the resulting status
3821         param:
3822             vm_id: ID of an instance
3823         """
3824 0         self._reload_connection()
3825 0         self.nova.servers.confirm_resize(server=vm_id)
3826 0         if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3827 0             self.__wait_for_vm(vm_id, "ACTIVE")
3828 0         instance_status = self.get_vdu_state(vm_id)[0]
3829 0         return instance_status