Code Coverage

Cobertura Coverage Report > RO-VIM-openstack.osm_rovim_openstack >

vimconn_openstack.py

Trend

File Coverage summary

Name | Classes | Lines | Conditionals
vimconn_openstack.py
100%
1/1
33%
601/1843
100%
0/0

Coverage Breakdown by Class

Name | Lines | Conditionals
vimconn_openstack.py
33%
601/1843
N/A

Source

RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 #         http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 1 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 1 import copy
34 1 from http.client import HTTPException
35 1 import json
36 1 import logging
37 1 from pprint import pformat
38 1 import random
39 1 import re
40 1 import time
41 1 from typing import Dict, List, Optional, Tuple
42
43 1 from cinderclient import client as cClient
44 1 from glanceclient import client as glClient
45 1 import glanceclient.exc as gl1Exceptions
46 1 from keystoneauth1 import session
47 1 from keystoneauth1.identity import v2, v3
48 1 import keystoneclient.exceptions as ksExceptions
49 1 import keystoneclient.v2_0.client as ksClient_v2
50 1 import keystoneclient.v3.client as ksClient_v3
51 1 import netaddr
52 1 from neutronclient.common import exceptions as neExceptions
53 1 from neutronclient.neutron import client as neClient
54 1 from novaclient import client as nClient, exceptions as nvExceptions
55 1 from osm_ro_plugin import vimconn
56 1 from requests.exceptions import ConnectionError
57 1 import yaml
58
59 1 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 1 __date__ = "$22-sep-2017 23:59:59$"
61
62 """contain the openstack virtual machine status to openmano status"""
63 1 vmStatus2manoFormat = {
64     "ACTIVE": "ACTIVE",
65     "PAUSED": "PAUSED",
66     "SUSPENDED": "SUSPENDED",
67     "SHUTOFF": "INACTIVE",
68     "BUILD": "BUILD",
69     "ERROR": "ERROR",
70     "DELETED": "DELETED",
71 }
72 1 netStatus2manoFormat = {
73     "ACTIVE": "ACTIVE",
74     "PAUSED": "PAUSED",
75     "INACTIVE": "INACTIVE",
76     "BUILD": "BUILD",
77     "ERROR": "ERROR",
78     "DELETED": "DELETED",
79 }
80
81 1 supportedClassificationTypes = ["legacy_flow_classifier"]
82
83 # global var to have a timeout creating and deleting volumes
84 1 volume_timeout = 1800
85 1 server_timeout = 1800
86
87
88 1 class SafeDumper(yaml.SafeDumper):
89 1     def represent_data(self, data):
90         # Openstack APIs use custom subclasses of dict and YAML safe dumper
91         # is designed to not handle that (reference issue 142 of pyyaml)
92 0         if isinstance(data, dict) and data.__class__ != dict:
93             # A simple solution is to convert those items back to dicts
94 0             data = dict(data.items())
95
96 0         return super(SafeDumper, self).represent_data(data)
97
98
99 1 class vimconnector(vimconn.VimConnector):
100 1     def __init__(
101         self,
102         uuid,
103         name,
104         tenant_id,
105         tenant_name,
106         url,
107         url_admin=None,
108         user=None,
109         passwd=None,
110         log_level=None,
111         config={},
112         persistent_info={},
113     ):
114         """using common constructor parameters. In this case
115         'url' is the keystone authorization url,
116         'url_admin' is not use
117         """
118 1         api_version = config.get("APIversion")
119
120 1         if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 0             raise vimconn.VimConnException(
122                 "Invalid value '{}' for config:APIversion. "
123                 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124             )
125
126 1         vim_type = config.get("vim_type")
127
128 1         if vim_type and vim_type not in ("vio", "VIO"):
129 0             raise vimconn.VimConnException(
130                 "Invalid value '{}' for config:vim_type."
131                 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132             )
133
134 1         if config.get("dataplane_net_vlan_range") is not None:
135             # validate vlan ranges provided by user
136 0             self._validate_vlan_ranges(
137                 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138             )
139
140 1         if config.get("multisegment_vlan_range") is not None:
141             # validate vlan ranges provided by user
142 0             self._validate_vlan_ranges(
143                 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144             )
145
146 1         vimconn.VimConnector.__init__(
147             self,
148             uuid,
149             name,
150             tenant_id,
151             tenant_name,
152             url,
153             url_admin,
154             user,
155             passwd,
156             log_level,
157             config,
158         )
159
160 1         if self.config.get("insecure") and self.config.get("ca_cert"):
161 0             raise vimconn.VimConnException(
162                 "options insecure and ca_cert are mutually exclusive"
163             )
164
165 1         self.verify = True
166
167 1         if self.config.get("insecure"):
168 0             self.verify = False
169
170 1         if self.config.get("ca_cert"):
171 0             self.verify = self.config.get("ca_cert")
172
173 1         if not url:
174 0             raise TypeError("url param can not be NoneType")
175
176 1         self.persistent_info = persistent_info
177 1         self.availability_zone = persistent_info.get("availability_zone", None)
178 1         self.session = persistent_info.get("session", {"reload_client": True})
179 1         self.my_tenant_id = self.session.get("my_tenant_id")
180 1         self.nova = self.session.get("nova")
181 1         self.neutron = self.session.get("neutron")
182 1         self.cinder = self.session.get("cinder")
183 1         self.glance = self.session.get("glance")
184         # self.glancev1 = self.session.get("glancev1")
185 1         self.keystone = self.session.get("keystone")
186 1         self.api_version3 = self.session.get("api_version3")
187 1         self.vim_type = self.config.get("vim_type")
188
189 1         if self.vim_type:
190 0             self.vim_type = self.vim_type.upper()
191
192 1         if self.config.get("use_internal_endpoint"):
193 0             self.endpoint_type = "internalURL"
194         else:
195 1             self.endpoint_type = None
196
197 1         logging.getLogger("urllib3").setLevel(logging.WARNING)
198 1         logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 1         logging.getLogger("novaclient").setLevel(logging.WARNING)
200 1         self.logger = logging.getLogger("ro.vim.openstack")
201
202         # allow security_groups to be a list or a single string
203 1         if isinstance(self.config.get("security_groups"), str):
204 0             self.config["security_groups"] = [self.config["security_groups"]]
205
206 1         self.security_groups_id = None
207
208         # ###### VIO Specific Changes #########
209 1         if self.vim_type == "VIO":
210 0             self.logger = logging.getLogger("ro.vim.vio")
211
212 1         if log_level:
213 0             self.logger.setLevel(getattr(logging, log_level))
214
215 1     def __getitem__(self, index):
216         """Get individuals parameters.
217         Throw KeyError"""
218 0         if index == "project_domain_id":
219 0             return self.config.get("project_domain_id")
220 0         elif index == "user_domain_id":
221 0             return self.config.get("user_domain_id")
222         else:
223 0             return vimconn.VimConnector.__getitem__(self, index)
224
225 1     def __setitem__(self, index, value):
226         """Set individuals parameters and it is marked as dirty so to force connection reload.
227         Throw KeyError"""
228 0         if index == "project_domain_id":
229 0             self.config["project_domain_id"] = value
230 0         elif index == "user_domain_id":
231 0             self.config["user_domain_id"] = value
232         else:
233 0             vimconn.VimConnector.__setitem__(self, index, value)
234
235 0         self.session["reload_client"] = True
236
237 1     def serialize(self, value):
238         """Serialization of python basic types.
239
240         In the case value is not serializable a message will be logged and a
241         simple representation of the data that cannot be converted back to
242         python is returned.
243         """
244 0         if isinstance(value, str):
245 0             return value
246
247 0         try:
248 0             return yaml.dump(
249                 value, Dumper=SafeDumper, default_flow_style=True, width=256
250             )
251 0         except yaml.representer.RepresenterError:
252 0             self.logger.debug(
253                 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254                 pformat(value),
255                 exc_info=True,
256             )
257
258 0             return str(value)
259
260 1     def _reload_connection(self):
261         """Called before any operation, it check if credentials has changed
262         Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
263         """
264         # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
265 0         if self.session["reload_client"]:
266 0             if self.config.get("APIversion"):
267 0                 self.api_version3 = (
268                     self.config["APIversion"] == "v3.3"
269                     or self.config["APIversion"] == "3"
270                 )
271             else:  # get from ending auth_url that end with v3 or with v2.0
272 0                 self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
273                     "/v3/"
274                 )
275
276 0             self.session["api_version3"] = self.api_version3
277
278 0             if self.api_version3:
279 0                 if self.config.get("project_domain_id") or self.config.get(
280                     "project_domain_name"
281                 ):
282 0                     project_domain_id_default = None
283                 else:
284 0                     project_domain_id_default = "default"
285
286 0                 if self.config.get("user_domain_id") or self.config.get(
287                     "user_domain_name"
288                 ):
289 0                     user_domain_id_default = None
290                 else:
291 0                     user_domain_id_default = "default"
292 0                 auth = v3.Password(
293                     auth_url=self.url,
294                     username=self.user,
295                     password=self.passwd,
296                     project_name=self.tenant_name,
297                     project_id=self.tenant_id,
298                     project_domain_id=self.config.get(
299                         "project_domain_id", project_domain_id_default
300                     ),
301                     user_domain_id=self.config.get(
302                         "user_domain_id", user_domain_id_default
303                     ),
304                     project_domain_name=self.config.get("project_domain_name"),
305                     user_domain_name=self.config.get("user_domain_name"),
306                 )
307             else:
308 0                 auth = v2.Password(
309                     auth_url=self.url,
310                     username=self.user,
311                     password=self.passwd,
312                     tenant_name=self.tenant_name,
313                     tenant_id=self.tenant_id,
314                 )
315
316 0             sess = session.Session(auth=auth, verify=self.verify)
317             # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318             # Titanium cloud and StarlingX
319 0             region_name = self.config.get("region_name")
320
321 0             if self.api_version3:
322 0                 self.keystone = ksClient_v3.Client(
323                     session=sess,
324                     endpoint_type=self.endpoint_type,
325                     region_name=region_name,
326                 )
327             else:
328 0                 self.keystone = ksClient_v2.Client(
329                     session=sess, endpoint_type=self.endpoint_type
330                 )
331
332 0             self.session["keystone"] = self.keystone
333             # In order to enable microversion functionality an explicit microversion must be specified in "config".
334             # This implementation approach is due to the warning message in
335             # https://developer.openstack.org/api-guide/compute/microversions.html
336             # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337             # always require an specific microversion.
338             # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
339 0             version = self.config.get("microversion")
340
341 0             if not version:
342 0                 version = "2.1"
343
344             # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345             # Titanium cloud and StarlingX
346 0             self.nova = self.session["nova"] = nClient.Client(
347                 str(version),
348                 session=sess,
349                 endpoint_type=self.endpoint_type,
350                 region_name=region_name,
351             )
352 0             self.neutron = self.session["neutron"] = neClient.Client(
353                 "2.0",
354                 session=sess,
355                 endpoint_type=self.endpoint_type,
356                 region_name=region_name,
357             )
358
359 0             if sess.get_all_version_data(service_type="volumev2"):
360 0                 self.cinder = self.session["cinder"] = cClient.Client(
361                     2,
362                     session=sess,
363                     endpoint_type=self.endpoint_type,
364                     region_name=region_name,
365                 )
366             else:
367 0                 self.cinder = self.session["cinder"] = cClient.Client(
368                     3,
369                     session=sess,
370                     endpoint_type=self.endpoint_type,
371                     region_name=region_name,
372                 )
373
374 0             try:
375 0                 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
376 0             except Exception:
377 0                 self.logger.error("Cannot get project_id from session", exc_info=True)
378
379 0             if self.endpoint_type == "internalURL":
380 0                 glance_service_id = self.keystone.services.list(name="glance")[0].id
381 0                 glance_endpoint = self.keystone.endpoints.list(
382                     glance_service_id, interface="internal"
383                 )[0].url
384             else:
385 0                 glance_endpoint = None
386
387 0             self.glance = self.session["glance"] = glClient.Client(
388                 2, session=sess, endpoint=glance_endpoint
389             )
390             # using version 1 of glance client in new_image()
391             # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
392             #                                                            endpoint=glance_endpoint)
393 0             self.session["reload_client"] = False
394 0             self.persistent_info["session"] = self.session
395             # add availablity zone info inside  self.persistent_info
396 0             self._set_availablity_zones()
397 0             self.persistent_info["availability_zone"] = self.availability_zone
398             # force to get again security_groups_ids next time they are needed
399 0             self.security_groups_id = None
400
401 1     def __net_os2mano(self, net_list_dict):
402         """Transform the net openstack format to mano format
403         net_list_dict can be a list of dict or a single dict"""
404 0         if type(net_list_dict) is dict:
405 0             net_list_ = (net_list_dict,)
406 0         elif type(net_list_dict) is list:
407 0             net_list_ = net_list_dict
408         else:
409 0             raise TypeError("param net_list_dict must be a list or a dictionary")
410 0         for net in net_list_:
411 0             if net.get("provider:network_type") == "vlan":
412 0                 net["type"] = "data"
413             else:
414 0                 net["type"] = "bridge"
415
416 1     def __classification_os2mano(self, class_list_dict):
417         """Transform the openstack format (Flow Classifier) to mano format
418         (Classification) class_list_dict can be a list of dict or a single dict
419         """
420 0         if isinstance(class_list_dict, dict):
421 0             class_list_ = [class_list_dict]
422 0         elif isinstance(class_list_dict, list):
423 0             class_list_ = class_list_dict
424         else:
425 0             raise TypeError("param class_list_dict must be a list or a dictionary")
426 0         for classification in class_list_:
427 0             id = classification.pop("id")
428 0             name = classification.pop("name")
429 0             description = classification.pop("description")
430 0             project_id = classification.pop("project_id")
431 0             tenant_id = classification.pop("tenant_id")
432 0             original_classification = copy.deepcopy(classification)
433 0             classification.clear()
434 0             classification["ctype"] = "legacy_flow_classifier"
435 0             classification["definition"] = original_classification
436 0             classification["id"] = id
437 0             classification["name"] = name
438 0             classification["description"] = description
439 0             classification["project_id"] = project_id
440 0             classification["tenant_id"] = tenant_id
441
442 1     def __sfi_os2mano(self, sfi_list_dict):
443         """Transform the openstack format (Port Pair) to mano format (SFI)
444         sfi_list_dict can be a list of dict or a single dict
445         """
446 0         if isinstance(sfi_list_dict, dict):
447 0             sfi_list_ = [sfi_list_dict]
448 0         elif isinstance(sfi_list_dict, list):
449 0             sfi_list_ = sfi_list_dict
450         else:
451 0             raise TypeError("param sfi_list_dict must be a list or a dictionary")
452
453 0         for sfi in sfi_list_:
454 0             sfi["ingress_ports"] = []
455 0             sfi["egress_ports"] = []
456
457 0             if sfi.get("ingress"):
458 0                 sfi["ingress_ports"].append(sfi["ingress"])
459
460 0             if sfi.get("egress"):
461 0                 sfi["egress_ports"].append(sfi["egress"])
462
463 0             del sfi["ingress"]
464 0             del sfi["egress"]
465 0             params = sfi.get("service_function_parameters")
466 0             sfc_encap = False
467
468 0             if params:
469 0                 correlation = params.get("correlation")
470
471 0                 if correlation:
472 0                     sfc_encap = True
473
474 0             sfi["sfc_encap"] = sfc_encap
475 0             del sfi["service_function_parameters"]
476
477 1     def __sf_os2mano(self, sf_list_dict):
478         """Transform the openstack format (Port Pair Group) to mano format (SF)
479         sf_list_dict can be a list of dict or a single dict
480         """
481 0         if isinstance(sf_list_dict, dict):
482 0             sf_list_ = [sf_list_dict]
483 0         elif isinstance(sf_list_dict, list):
484 0             sf_list_ = sf_list_dict
485         else:
486 0             raise TypeError("param sf_list_dict must be a list or a dictionary")
487
488 0         for sf in sf_list_:
489 0             del sf["port_pair_group_parameters"]
490 0             sf["sfis"] = sf["port_pairs"]
491 0             del sf["port_pairs"]
492
493 1     def __sfp_os2mano(self, sfp_list_dict):
494         """Transform the openstack format (Port Chain) to mano format (SFP)
495         sfp_list_dict can be a list of dict or a single dict
496         """
497 0         if isinstance(sfp_list_dict, dict):
498 0             sfp_list_ = [sfp_list_dict]
499 0         elif isinstance(sfp_list_dict, list):
500 0             sfp_list_ = sfp_list_dict
501         else:
502 0             raise TypeError("param sfp_list_dict must be a list or a dictionary")
503
504 0         for sfp in sfp_list_:
505 0             params = sfp.pop("chain_parameters")
506 0             sfc_encap = False
507
508 0             if params:
509 0                 correlation = params.get("correlation")
510
511 0                 if correlation:
512 0                     sfc_encap = True
513
514 0             sfp["sfc_encap"] = sfc_encap
515 0             sfp["spi"] = sfp.pop("chain_id")
516 0             sfp["classifications"] = sfp.pop("flow_classifiers")
517 0             sfp["service_functions"] = sfp.pop("port_pair_groups")
518
519     # placeholder for now; read TODO note below
520 1     def _validate_classification(self, type, definition):
521         # only legacy_flow_classifier Type is supported at this point
522 0         return True
523         # TODO(igordcard): this method should be an abstract method of an
524         # abstract Classification class to be implemented by the specific
525         # Types. Also, abstract vimconnector should call the validation
526         # method before the implemented VIM connectors are called.
527
528 1     def _format_exception(self, exception):
529         """Transform a keystone, nova, neutron  exception into a vimconn exception discovering the cause"""
530 0         message_error = str(exception)
531 0         tip = ""
532
533 0         if isinstance(
534             exception,
535             (
536                 neExceptions.NetworkNotFoundClient,
537                 nvExceptions.NotFound,
538                 ksExceptions.NotFound,
539                 gl1Exceptions.HTTPNotFound,
540             ),
541         ):
542 0             raise vimconn.VimConnNotFoundException(
543                 type(exception).__name__ + ": " + message_error
544             )
545 0         elif isinstance(
546             exception,
547             (
548                 HTTPException,
549                 gl1Exceptions.HTTPException,
550                 gl1Exceptions.CommunicationError,
551                 ConnectionError,
552                 ksExceptions.ConnectionError,
553                 neExceptions.ConnectionFailed,
554             ),
555         ):
556 0             if type(exception).__name__ == "SSLError":
557 0                 tip = " (maybe option 'insecure' must be added to the VIM)"
558
559 0             raise vimconn.VimConnConnectionException(
560                 "Invalid URL or credentials{}: {}".format(tip, message_error)
561             )
562 0         elif isinstance(
563             exception,
564             (
565                 KeyError,
566                 nvExceptions.BadRequest,
567                 ksExceptions.BadRequest,
568             ),
569         ):
570 0             raise vimconn.VimConnException(
571                 type(exception).__name__ + ": " + message_error
572             )
573 0         elif isinstance(
574             exception,
575             (
576                 nvExceptions.ClientException,
577                 ksExceptions.ClientException,
578                 neExceptions.NeutronException,
579             ),
580         ):
581 0             raise vimconn.VimConnUnexpectedResponse(
582                 type(exception).__name__ + ": " + message_error
583             )
584 0         elif isinstance(exception, nvExceptions.Conflict):
585 0             raise vimconn.VimConnConflictException(
586                 type(exception).__name__ + ": " + message_error
587             )
588 0         elif isinstance(exception, vimconn.VimConnException):
589 0             raise exception
590         else:  # ()
591 0             self.logger.error("General Exception " + message_error, exc_info=True)
592
593 0             raise vimconn.VimConnConnectionException(
594                 type(exception).__name__ + ": " + message_error
595             )
596
597 1     def _get_ids_from_name(self):
598         """
599          Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
600         :return: None
601         """
602         # get tenant_id if only tenant_name is supplied
603 0         self._reload_connection()
604
605 0         if not self.my_tenant_id:
606 0             raise vimconn.VimConnConnectionException(
607                 "Error getting tenant information from name={} id={}".format(
608                     self.tenant_name, self.tenant_id
609                 )
610             )
611
612 0         if self.config.get("security_groups") and not self.security_groups_id:
613             # convert from name to id
614 0             neutron_sg_list = self.neutron.list_security_groups(
615                 tenant_id=self.my_tenant_id
616             )["security_groups"]
617
618 0             self.security_groups_id = []
619 0             for sg in self.config.get("security_groups"):
620 0                 for neutron_sg in neutron_sg_list:
621 0                     if sg in (neutron_sg["id"], neutron_sg["name"]):
622 0                         self.security_groups_id.append(neutron_sg["id"])
623 0                         break
624                 else:
625 0                     self.security_groups_id = None
626
627 0                     raise vimconn.VimConnConnectionException(
628                         "Not found security group {} for this tenant".format(sg)
629                     )
630
631 1     def check_vim_connectivity(self):
632         # just get network list to check connectivity and credentials
633 0         self.get_network_list(filter_dict={})
634
635 1     def get_tenant_list(self, filter_dict={}):
636         """Obtain tenants of VIM
637         filter_dict can contain the following keys:
638             name: filter by tenant name
639             id: filter by tenant uuid/id
640             <other VIM specific>
641         Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
642         """
643 0         self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
644
645 0         try:
646 0             self._reload_connection()
647
648 0             if self.api_version3:
649 0                 project_class_list = self.keystone.projects.list(
650                     name=filter_dict.get("name")
651                 )
652             else:
653 0                 project_class_list = self.keystone.tenants.findall(**filter_dict)
654
655 0             project_list = []
656
657 0             for project in project_class_list:
658 0                 if filter_dict.get("id") and filter_dict["id"] != project.id:
659 0                     continue
660
661 0                 project_list.append(project.to_dict())
662
663 0             return project_list
664 0         except (
665             ksExceptions.ConnectionError,
666             ksExceptions.ClientException,
667             ConnectionError,
668         ) as e:
669 0             self._format_exception(e)
670
671 1     def new_tenant(self, tenant_name, tenant_description):
672         """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
673 0         self.logger.debug("Adding a new tenant name: %s", tenant_name)
674
675 0         try:
676 0             self._reload_connection()
677
678 0             if self.api_version3:
679 0                 project = self.keystone.projects.create(
680                     tenant_name,
681                     self.config.get("project_domain_id", "default"),
682                     description=tenant_description,
683                     is_domain=False,
684                 )
685             else:
686 0                 project = self.keystone.tenants.create(tenant_name, tenant_description)
687
688 0             return project.id
689 0         except (
690             ksExceptions.ConnectionError,
691             ksExceptions.ClientException,
692             ksExceptions.BadRequest,
693             ConnectionError,
694         ) as e:
695 0             self._format_exception(e)
696
697 1     def delete_tenant(self, tenant_id):
698         """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
699 0         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
700
701 0         try:
702 0             self._reload_connection()
703
704 0             if self.api_version3:
705 0                 self.keystone.projects.delete(tenant_id)
706             else:
707 0                 self.keystone.tenants.delete(tenant_id)
708
709 0             return tenant_id
710 0         except (
711             ksExceptions.ConnectionError,
712             ksExceptions.ClientException,
713             ksExceptions.NotFound,
714             ConnectionError,
715         ) as e:
716 0             self._format_exception(e)
717
718 1     def new_network(
719         self,
720         net_name,
721         net_type,
722         ip_profile=None,
723         shared=False,
724         provider_network_profile=None,
725     ):
726         """Adds a tenant network to VIM
727         Params:
728             'net_name': name of the network
729             'net_type': one of:
730                 'bridge': overlay isolated network
731                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
732                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
733             'ip_profile': is a dict containing the IP parameters of the network
734                 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
735                 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
736                 'gateway_address': (Optional) ip_schema, that is X.X.X.X
737                 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
738                 'dhcp_enabled': True or False
739                 'dhcp_start_address': ip_schema, first IP to grant
740                 'dhcp_count': number of IPs to grant.
741             'shared': if this network can be seen/use by other tenants/organization
742             'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
743                                                              physical-network: physnet-label}
744         Returns a tuple with the network identifier and created_items, or raises an exception on error
745             created_items can be None or a dictionary where this method can include key-values that will be passed to
746             the method delete_network. Can be used to store created segments, created l2gw connections, etc.
747             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
748             as not present.
749         """
750 0         self.logger.debug(
751             "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
752         )
753         # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
754
755 0         try:
756 0             vlan = None
757
758 0             if provider_network_profile:
759 0                 vlan = provider_network_profile.get("segmentation-id")
760
761 0             new_net = None
762 0             created_items = {}
763 0             self._reload_connection()
764 0             network_dict = {"name": net_name, "admin_state_up": True}
765
766 0             if net_type in ("data", "ptp") or provider_network_profile:
767 0                 provider_physical_network = None
768
769 0                 if provider_network_profile and provider_network_profile.get(
770                     "physical-network"
771                 ):
772 0                     provider_physical_network = provider_network_profile.get(
773                         "physical-network"
774                     )
775
776                     # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
777                     # or not declared, just ignore the checking
778 0                     if (
779                         isinstance(
780                             self.config.get("dataplane_physical_net"), (tuple, list)
781                         )
782                         and provider_physical_network
783                         not in self.config["dataplane_physical_net"]
784                     ):
785 0                         raise vimconn.VimConnConflictException(
786                             "Invalid parameter 'provider-network:physical-network' "
787                             "for network creation. '{}' is not one of the declared "
788                             "list at VIM_config:dataplane_physical_net".format(
789                                 provider_physical_network
790                             )
791                         )
792
793                 # use the default dataplane_physical_net
794 0                 if not provider_physical_network:
795 0                     provider_physical_network = self.config.get(
796                         "dataplane_physical_net"
797                     )
798
799                     # if it is non empty list, use the first value. If it is a string use the value directly
800 0                     if (
801                         isinstance(provider_physical_network, (tuple, list))
802                         and provider_physical_network
803                     ):
804 0                         provider_physical_network = provider_physical_network[0]
805
806 0                 if not provider_physical_network:
807 0                     raise vimconn.VimConnConflictException(
808                         "missing information needed for underlay networks. Provide "
809                         "'dataplane_physical_net' configuration at VIM or use the NS "
810                         "instantiation parameter 'provider-network.physical-network'"
811                         " for the VLD"
812                     )
813
814 0                 if not self.config.get("multisegment_support"):
815 0                     network_dict[
816                         "provider:physical_network"
817                     ] = provider_physical_network
818
819 0                     if (
820                         provider_network_profile
821                         and "network-type" in provider_network_profile
822                     ):
823 0                         network_dict[
824                             "provider:network_type"
825                         ] = provider_network_profile["network-type"]
826                     else:
827 0                         network_dict["provider:network_type"] = self.config.get(
828                             "dataplane_network_type", "vlan"
829                         )
830
831 0                     if vlan:
832 0                         network_dict["provider:segmentation_id"] = vlan
833                 else:
834                     # Multi-segment case
835 0                     segment_list = []
836 0                     segment1_dict = {
837                         "provider:physical_network": "",
838                         "provider:network_type": "vxlan",
839                     }
840 0                     segment_list.append(segment1_dict)
841 0                     segment2_dict = {
842                         "provider:physical_network": provider_physical_network,
843                         "provider:network_type": "vlan",
844                     }
845
846 0                     if vlan:
847 0                         segment2_dict["provider:segmentation_id"] = vlan
848 0                     elif self.config.get("multisegment_vlan_range"):
849 0                         vlanID = self._generate_multisegment_vlanID()
850 0                         segment2_dict["provider:segmentation_id"] = vlanID
851
852                     # else
853                     #     raise vimconn.VimConnConflictException(
854                     #         "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
855                     #         network")
856 0                     segment_list.append(segment2_dict)
857 0                     network_dict["segments"] = segment_list
858
859                 # VIO Specific Changes. It needs a concrete VLAN
860 0                 if self.vim_type == "VIO" and vlan is None:
861 0                     if self.config.get("dataplane_net_vlan_range") is None:
862 0                         raise vimconn.VimConnConflictException(
863                             "You must provide 'dataplane_net_vlan_range' in format "
864                             "[start_ID - end_ID] at VIM_config for creating underlay "
865                             "networks"
866                         )
867
868 0                     network_dict["provider:segmentation_id"] = self._generate_vlanID()
869
870 0             network_dict["shared"] = shared
871
872 0             if self.config.get("disable_network_port_security"):
873 0                 network_dict["port_security_enabled"] = False
874
875 0             if self.config.get("neutron_availability_zone_hints"):
876 0                 hints = self.config.get("neutron_availability_zone_hints")
877
878 0                 if isinstance(hints, str):
879 0                     hints = [hints]
880
881 0                 network_dict["availability_zone_hints"] = hints
882
883 0             new_net = self.neutron.create_network({"network": network_dict})
884             # print new_net
885             # create subnetwork, even if there is no profile
886
887 0             if not ip_profile:
888 0                 ip_profile = {}
889
890 0             if not ip_profile.get("subnet_address"):
891                 # Fake subnet is required
892 0                 subnet_rand = random.randint(0, 255)
893 0                 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
894
895 0             if "ip_version" not in ip_profile:
896 0                 ip_profile["ip_version"] = "IPv4"
897
898 0             subnet = {
899                 "name": net_name + "-subnet",
900                 "network_id": new_net["network"]["id"],
901                 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
902                 "cidr": ip_profile["subnet_address"],
903             }
904
905             # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
906 0             if ip_profile.get("gateway_address"):
907 0                 subnet["gateway_ip"] = ip_profile["gateway_address"]
908             else:
909 0                 subnet["gateway_ip"] = None
910
911 0             if ip_profile.get("dns_address"):
912 0                 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
913
914 0             if "dhcp_enabled" in ip_profile:
915 0                 subnet["enable_dhcp"] = (
916                     False
917                     if ip_profile["dhcp_enabled"] == "false"
918                     or ip_profile["dhcp_enabled"] is False
919                     else True
920                 )
921
922 0             if ip_profile.get("dhcp_start_address"):
923 0                 subnet["allocation_pools"] = []
924 0                 subnet["allocation_pools"].append(dict())
925 0                 subnet["allocation_pools"][0]["start"] = ip_profile[
926                     "dhcp_start_address"
927                 ]
928
929 0             if ip_profile.get("dhcp_count"):
930                 # parts = ip_profile["dhcp_start_address"].split(".")
931                 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
932 0                 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
933 0                 ip_int += ip_profile["dhcp_count"] - 1
934 0                 ip_str = str(netaddr.IPAddress(ip_int))
935 0                 subnet["allocation_pools"][0]["end"] = ip_str
936
937             # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
938 0             self.neutron.create_subnet({"subnet": subnet})
939
940 0             if net_type == "data" and self.config.get("multisegment_support"):
941 0                 if self.config.get("l2gw_support"):
942 0                     l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
943 0                     for l2gw in l2gw_list:
944 0                         l2gw_conn = {
945                             "l2_gateway_id": l2gw["id"],
946                             "network_id": new_net["network"]["id"],
947                             "segmentation_id": str(vlanID),
948                         }
949 0                         new_l2gw_conn = self.neutron.create_l2_gateway_connection(
950                             {"l2_gateway_connection": l2gw_conn}
951                         )
952 0                         created_items[
953                             "l2gwconn:"
954                             + str(new_l2gw_conn["l2_gateway_connection"]["id"])
955                         ] = True
956
957 0             return new_net["network"]["id"], created_items
958 0         except Exception as e:
959             # delete l2gw connections (if any) before deleting the network
960 0             for k, v in created_items.items():
961 0                 if not v:  # skip already deleted
962 0                     continue
963
964 0                 try:
965 0                     k_item, _, k_id = k.partition(":")
966
967 0                     if k_item == "l2gwconn":
968 0                         self.neutron.delete_l2_gateway_connection(k_id)
969 0                 except Exception as e2:
970 0                     self.logger.error(
971                         "Error deleting l2 gateway connection: {}: {}".format(
972                             type(e2).__name__, e2
973                         )
974                     )
975
976 0             if new_net:
977 0                 self.neutron.delete_network(new_net["network"]["id"])
978
979 0             self._format_exception(e)
980
981 1     def get_network_list(self, filter_dict={}):
982         """Obtain tenant networks of VIM
983         Filter_dict can be:
984             name: network name
985             id: network uuid
986             shared: boolean
987             tenant_id: tenant
988             admin_state_up: boolean
989             status: 'ACTIVE'
990         Returns the network list of dictionaries
991         """
992 0         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
993
994 0         try:
995 0             self._reload_connection()
996 0             filter_dict_os = filter_dict.copy()
997
998 0             if self.api_version3 and "tenant_id" in filter_dict_os:
999                 # TODO check
1000 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
1001
1002 0             net_dict = self.neutron.list_networks(**filter_dict_os)
1003 0             net_list = net_dict["networks"]
1004 0             self.__net_os2mano(net_list)
1005
1006 0             return net_list
1007 0         except (
1008             neExceptions.ConnectionFailed,
1009             ksExceptions.ClientException,
1010             neExceptions.NeutronException,
1011             ConnectionError,
1012         ) as e:
1013 0             self._format_exception(e)
1014
1015 1     def get_network(self, net_id):
1016         """Obtain details of network from VIM
1017         Returns the network information from a network id"""
1018 0         self.logger.debug(" Getting tenant network %s from VIM", net_id)
1019 0         filter_dict = {"id": net_id}
1020 0         net_list = self.get_network_list(filter_dict)
1021
1022 0         if len(net_list) == 0:
1023 0             raise vimconn.VimConnNotFoundException(
1024                 "Network '{}' not found".format(net_id)
1025             )
1026 0         elif len(net_list) > 1:
1027 0             raise vimconn.VimConnConflictException(
1028                 "Found more than one network with this criteria"
1029             )
1030
1031 0         net = net_list[0]
1032 0         subnets = []
1033 0         for subnet_id in net.get("subnets", ()):
1034 0             try:
1035 0                 subnet = self.neutron.show_subnet(subnet_id)
1036 0             except Exception as e:
1037 0                 self.logger.error(
1038                     "osconnector.get_network(): Error getting subnet %s %s"
1039                     % (net_id, str(e))
1040                 )
1041 0                 subnet = {"id": subnet_id, "fault": str(e)}
1042
1043 0             subnets.append(subnet)
1044
1045 0         net["subnets"] = subnets
1046 0         net["encapsulation"] = net.get("provider:network_type")
1047 0         net["encapsulation_type"] = net.get("provider:network_type")
1048 0         net["segmentation_id"] = net.get("provider:segmentation_id")
1049 0         net["encapsulation_id"] = net.get("provider:segmentation_id")
1050
1051 0         return net
1052
1053 1     def delete_network(self, net_id, created_items=None):
1054         """
1055         Removes a tenant network from VIM and its associated elements
1056         :param net_id: VIM identifier of the network, provided by method new_network
1057         :param created_items: dictionary with extra items to be deleted. provided by method new_network
1058         Returns the network identifier or raises an exception upon error or when network is not found
1059         """
1060 0         self.logger.debug("Deleting network '%s' from VIM", net_id)
1061
1062 0         if created_items is None:
1063 0             created_items = {}
1064
1065 0         try:
1066 0             self._reload_connection()
1067             # delete l2gw connections (if any) before deleting the network
1068 0             for k, v in created_items.items():
1069 0                 if not v:  # skip already deleted
1070 0                     continue
1071
1072 0                 try:
1073 0                     k_item, _, k_id = k.partition(":")
1074 0                     if k_item == "l2gwconn":
1075 0                         self.neutron.delete_l2_gateway_connection(k_id)
1076 0                 except Exception as e:
1077 0                     self.logger.error(
1078                         "Error deleting l2 gateway connection: {}: {}".format(
1079                             type(e).__name__, e
1080                         )
1081                     )
1082
1083             # delete VM ports attached to this networks before the network
1084 0             ports = self.neutron.list_ports(network_id=net_id)
1085 0             for p in ports["ports"]:
1086 0                 try:
1087 0                     self.neutron.delete_port(p["id"])
1088 0                 except Exception as e:
1089 0                     self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1090
1091 0             self.neutron.delete_network(net_id)
1092
1093 0             return net_id
1094 0         except (
1095             neExceptions.ConnectionFailed,
1096             neExceptions.NetworkNotFoundClient,
1097             neExceptions.NeutronException,
1098             ksExceptions.ClientException,
1099             neExceptions.NeutronException,
1100             ConnectionError,
1101         ) as e:
1102 0             self._format_exception(e)
1103
1104 1     def refresh_nets_status(self, net_list):
1105         """Get the status of the networks
1106         Params: the list of network identifiers
1107         Returns a dictionary with:
1108             net_id:         #VIM id of this network
1109                 status:     #Mandatory. Text with one of:
1110                             #  DELETED (not found at vim)
1111                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1112                             #  OTHER (Vim reported other status not understood)
1113                             #  ERROR (VIM indicates an ERROR status)
1114                             #  ACTIVE, INACTIVE, DOWN (admin down),
1115                             #  BUILD (on building process)
1116                             #
1117                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
1118                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
1119         """
1120 0         net_dict = {}
1121
1122 0         for net_id in net_list:
1123 0             net = {}
1124
1125 0             try:
1126 0                 net_vim = self.get_network(net_id)
1127
1128 0                 if net_vim["status"] in netStatus2manoFormat:
1129 0                     net["status"] = netStatus2manoFormat[net_vim["status"]]
1130                 else:
1131 0                     net["status"] = "OTHER"
1132 0                     net["error_msg"] = "VIM status reported " + net_vim["status"]
1133
1134 0                 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1135 0                     net["status"] = "DOWN"
1136
1137 0                 net["vim_info"] = self.serialize(net_vim)
1138
1139 0                 if net_vim.get("fault"):  # TODO
1140 0                     net["error_msg"] = str(net_vim["fault"])
1141 0             except vimconn.VimConnNotFoundException as e:
1142 0                 self.logger.error("Exception getting net status: %s", str(e))
1143 0                 net["status"] = "DELETED"
1144 0                 net["error_msg"] = str(e)
1145 0             except vimconn.VimConnException as e:
1146 0                 self.logger.error("Exception getting net status: %s", str(e))
1147 0                 net["status"] = "VIM_ERROR"
1148 0                 net["error_msg"] = str(e)
1149 0             net_dict[net_id] = net
1150 0         return net_dict
1151
1152 1     def get_flavor(self, flavor_id):
1153         """Obtain flavor details from the  VIM. Returns the flavor dict details"""
1154 0         self.logger.debug("Getting flavor '%s'", flavor_id)
1155
1156 0         try:
1157 0             self._reload_connection()
1158 0             flavor = self.nova.flavors.find(id=flavor_id)
1159             # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1160
1161 0             return flavor.to_dict()
1162 0         except (
1163             nvExceptions.NotFound,
1164             nvExceptions.ClientException,
1165             ksExceptions.ClientException,
1166             ConnectionError,
1167         ) as e:
1168 0             self._format_exception(e)
1169
1170 1     def get_flavor_id_from_data(self, flavor_dict):
1171         """Obtain flavor id that match the flavor description
1172         Returns the flavor_id or raises a vimconnNotFoundException
1173         flavor_dict: contains the required ram, vcpus, disk
1174         If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1175             and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1176             vimconnNotFoundException is raised
1177         """
1178 0         exact_match = False if self.config.get("use_existing_flavors") else True
1179
1180 0         try:
1181 0             self._reload_connection()
1182 0             flavor_candidate_id = None
1183 0             flavor_candidate_data = (10000, 10000, 10000)
1184 0             flavor_target = (
1185                 flavor_dict["ram"],
1186                 flavor_dict["vcpus"],
1187                 flavor_dict["disk"],
1188                 flavor_dict.get("ephemeral", 0),
1189                 flavor_dict.get("swap", 0),
1190             )
1191             # numa=None
1192 0             extended = flavor_dict.get("extended", {})
1193 0             if extended:
1194                 # TODO
1195 0                 raise vimconn.VimConnNotFoundException(
1196                     "Flavor with EPA still not implemented"
1197                 )
1198                 # if len(numas) > 1:
1199                 #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1200                 # numa=numas[0]
1201                 # numas = extended.get("numas")
1202 0             for flavor in self.nova.flavors.list():
1203 0                 epa = flavor.get_keys()
1204
1205 0                 if epa:
1206 0                     continue
1207                     # TODO
1208
1209 0                 flavor_data = (
1210                     flavor.ram,
1211                     flavor.vcpus,
1212                     flavor.disk,
1213                     flavor.ephemeral,
1214                     flavor.swap if isinstance(flavor.swap, int) else 0,
1215                 )
1216 0                 if flavor_data == flavor_target:
1217 0                     return flavor.id
1218 0                 elif (
1219                     not exact_match
1220                     and flavor_target < flavor_data < flavor_candidate_data
1221                 ):
1222 0                     flavor_candidate_id = flavor.id
1223 0                     flavor_candidate_data = flavor_data
1224
1225 0             if not exact_match and flavor_candidate_id:
1226 0                 return flavor_candidate_id
1227
1228 0             raise vimconn.VimConnNotFoundException(
1229                 "Cannot find any flavor matching '{}'".format(flavor_dict)
1230             )
1231 0         except (
1232             nvExceptions.NotFound,
1233             nvExceptions.ClientException,
1234             ksExceptions.ClientException,
1235             ConnectionError,
1236         ) as e:
1237 0             self._format_exception(e)
1238
1239 1     @staticmethod
1240 1     def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1241         """Process resource quota and fill up extra_specs.
1242         Args:
1243             quota       (dict):         Keeping the quota of resurces
1244             prefix      (str)           Prefix
1245             extra_specs (dict)          Dict to be filled to be used during flavor creation
1246
1247         """
1248 0         if "limit" in quota:
1249 0             extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1250
1251 0         if "reserve" in quota:
1252 0             extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1253
1254 0         if "shares" in quota:
1255 0             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1256 0             extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1257
1258 1     @staticmethod
1259 1     def process_numa_memory(
1260         numa: dict, node_id: Optional[int], extra_specs: dict
1261     ) -> None:
1262         """Set the memory in extra_specs.
1263         Args:
1264             numa        (dict):         A dictionary which includes numa information
1265             node_id     (int):          ID of numa node
1266             extra_specs (dict):         To be filled.
1267
1268         """
1269 1         if not numa.get("memory"):
1270 1             return
1271 1         memory_mb = numa["memory"] * 1024
1272 1         memory = "hw:numa_mem.{}".format(node_id)
1273 1         extra_specs[memory] = int(memory_mb)
1274
1275 1     @staticmethod
1276 1     def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1277         """Set the cpu in extra_specs.
1278         Args:
1279             numa        (dict):         A dictionary which includes numa information
1280             node_id     (int):          ID of numa node
1281             extra_specs (dict):         To be filled.
1282
1283         """
1284 1         if not numa.get("vcpu"):
1285 1             return
1286 1         vcpu = numa["vcpu"]
1287 1         cpu = "hw:numa_cpus.{}".format(node_id)
1288 1         vcpu = ",".join(map(str, vcpu))
1289 1         extra_specs[cpu] = vcpu
1290
1291 1     @staticmethod
1292 1     def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1293         """Fill up extra_specs if numa has paired-threads.
1294         Args:
1295             numa        (dict):         A dictionary which includes numa information
1296             extra_specs (dict):         To be filled.
1297
1298         Returns:
1299             threads       (int)           Number of virtual cpus
1300
1301         """
1302 1         if not numa.get("paired-threads"):
1303 1             return
1304
1305         # cpu_thread_policy "require" implies that compute node must have an STM architecture
1306 1         threads = numa["paired-threads"] * 2
1307 1         extra_specs["hw:cpu_thread_policy"] = "require"
1308 1         extra_specs["hw:cpu_policy"] = "dedicated"
1309 1         return threads
1310
1311 1     @staticmethod
1312 1     def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1313         """Fill up extra_specs if numa has cores.
1314         Args:
1315             numa        (dict):         A dictionary which includes numa information
1316             extra_specs (dict):         To be filled.
1317
1318         Returns:
1319             cores       (int)           Number of virtual cpus
1320
1321         """
1322         # cpu_thread_policy "isolate" implies that the host must not have an SMT
1323         # architecture, or a non-SMT architecture will be emulated
1324 1         if not numa.get("cores"):
1325 1             return
1326 1         cores = numa["cores"]
1327 1         extra_specs["hw:cpu_thread_policy"] = "isolate"
1328 1         extra_specs["hw:cpu_policy"] = "dedicated"
1329 1         return cores
1330
1331 1     @staticmethod
1332 1     def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1333         """Fill up extra_specs if numa has threads.
1334         Args:
1335             numa        (dict):         A dictionary which includes numa information
1336             extra_specs (dict):         To be filled.
1337
1338         Returns:
1339             threads       (int)           Number of virtual cpus
1340
1341         """
1342         # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1343 1         if not numa.get("threads"):
1344 1             return
1345 1         threads = numa["threads"]
1346 1         extra_specs["hw:cpu_thread_policy"] = "prefer"
1347 1         extra_specs["hw:cpu_policy"] = "dedicated"
1348 1         return threads
1349
1350 1     def _process_numa_parameters_of_flavor(
1351         self, numas: List, extra_specs: Dict
1352     ) -> None:
1353         """Process numa parameters and fill up extra_specs.
1354
1355         Args:
1356             numas   (list):             List of dictionary which includes numa information
1357             extra_specs (dict):         To be filled.
1358
1359         """
1360 1         numa_nodes = len(numas)
1361 1         extra_specs["hw:numa_nodes"] = str(numa_nodes)
1362 1         cpu_cores, cpu_threads = 0, 0
1363
1364 1         if self.vim_type == "VIO":
1365 1             self.process_vio_numa_nodes(numa_nodes, extra_specs)
1366
1367 1         for numa in numas:
1368 1             if "id" in numa:
1369 1                 node_id = numa["id"]
1370                 # overwrite ram and vcpus
1371                 # check if key "memory" is present in numa else use ram value at flavor
1372 1                 self.process_numa_memory(numa, node_id, extra_specs)
1373 1                 self.process_numa_vcpu(numa, node_id, extra_specs)
1374
1375             # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1376 1             extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1377
1378 1             if "paired-threads" in numa:
1379 1                 threads = self.process_numa_paired_threads(numa, extra_specs)
1380 1                 cpu_threads += threads
1381
1382 1             elif "cores" in numa:
1383 1                 cores = self.process_numa_cores(numa, extra_specs)
1384 1                 cpu_cores += cores
1385
1386 1             elif "threads" in numa:
1387 1                 threads = self.process_numa_threads(numa, extra_specs)
1388 1                 cpu_threads += threads
1389
1390 1         if cpu_cores:
1391 1             extra_specs["hw:cpu_cores"] = str(cpu_cores)
1392 1         if cpu_threads:
1393 1             extra_specs["hw:cpu_threads"] = str(cpu_threads)
1394
1395 1     @staticmethod
1396 1     def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1397         """According to number of numa nodes, updates the extra_specs for VIO.
1398
1399         Args:
1400
1401             numa_nodes      (int):         List keeps the numa node numbers
1402             extra_specs     (dict):        Extra specs dict to be updated
1403
1404         """
1405         # If there are several numas, we do not define specific affinity.
1406 1         extra_specs["vmware:latency_sensitivity_level"] = "high"
1407
1408 1     def _change_flavor_name(
1409         self, name: str, name_suffix: int, flavor_data: dict
1410     ) -> str:
1411         """Change the flavor name if the name already exists.
1412
1413         Args:
1414             name    (str):          Flavor name to be checked
1415             name_suffix (int):      Suffix to be appended to name
1416             flavor_data (dict):     Flavor dict
1417
1418         Returns:
1419             name    (str):          New flavor name to be used
1420
1421         """
1422         # Get used names
1423 1         fl = self.nova.flavors.list()
1424 1         fl_names = [f.name for f in fl]
1425
1426 1         while name in fl_names:
1427 1             name_suffix += 1
1428 1             name = flavor_data["name"] + "-" + str(name_suffix)
1429
1430 1         return name
1431
1432 1     def _process_extended_config_of_flavor(
1433         self, extended: dict, extra_specs: dict
1434     ) -> None:
1435         """Process the extended dict to fill up extra_specs.
1436         Args:
1437
1438             extended                    (dict):         Keeping the extra specification of flavor
1439             extra_specs                 (dict)          Dict to be filled to be used during flavor creation
1440
1441         """
1442 1         quotas = {
1443             "cpu-quota": "cpu",
1444             "mem-quota": "memory",
1445             "vif-quota": "vif",
1446             "disk-io-quota": "disk_io",
1447         }
1448
1449 1         page_sizes = {
1450             "LARGE": "large",
1451             "SMALL": "small",
1452             "SIZE_2MB": "2MB",
1453             "SIZE_1GB": "1GB",
1454             "PREFER_LARGE": "any",
1455         }
1456
1457 1         policies = {
1458             "cpu-pinning-policy": "hw:cpu_policy",
1459             "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1460             "mem-policy": "hw:numa_mempolicy",
1461         }
1462
1463 1         numas = extended.get("numas")
1464 1         if numas:
1465 1             self._process_numa_parameters_of_flavor(numas, extra_specs)
1466
1467 1         for quota, item in quotas.items():
1468 1             if quota in extended.keys():
1469 1                 self.process_resource_quota(extended.get(quota), item, extra_specs)
1470
1471         # Set the mempage size as specified in the descriptor
1472 1         if extended.get("mempage-size"):
1473 1             if extended["mempage-size"] in page_sizes.keys():
1474 1                 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1475             else:
1476                 # Normally, validations in NBI should not allow to this condition.
1477 1                 self.logger.debug(
1478                     "Invalid mempage-size %s. Will be ignored",
1479                     extended.get("mempage-size"),
1480                 )
1481
1482 1         for policy, hw_policy in policies.items():
1483 1             if extended.get(policy):
1484 1                 extra_specs[hw_policy] = extended[policy].lower()
1485
1486 1     @staticmethod
1487 1     def _get_flavor_details(flavor_data: dict) -> Tuple:
1488         """Returns the details of flavor
1489         Args:
1490             flavor_data     (dict):     Dictionary that includes required flavor details
1491
1492         Returns:
1493             ram, vcpus, extra_specs, extended   (tuple):    Main items of required flavor
1494
1495         """
1496 1         return (
1497             flavor_data.get("ram", 64),
1498             flavor_data.get("vcpus", 1),
1499             {},
1500             flavor_data.get("extended"),
1501         )
1502
    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        if change_name_if_used is True, it will change name in case of conflict,
        because it is not supported name repetition.

        Args:
            flavor_data (dict):             Flavor details to be processed
            change_name_if_used (bool):     Change name in case of conflict

        Returns:
             flavor_id  (str):     flavor identifier

        Raises:
            Whatever _format_exception() raises for nova/keystone client
            errors, connection errors or a missing "name" key (KeyError).
        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    # Pick a non-conflicting name up front; on a Conflict we
                    # loop again and a new suffix is computed.
                    if change_name_if_used:
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )
                    if extended:
                        self._process_extended_config_of_flavor(extended, extra_specs)

                    # Create flavor

                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata (extra specs computed above)
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    # Name collision: retry with a new suffix while attempts
                    # remain; otherwise translate and raise.
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)

        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
1568
1569 1     def delete_flavor(self, flavor_id):
1570         """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1571 0         try:
1572 0             self._reload_connection()
1573 0             self.nova.flavors.delete(flavor_id)
1574
1575 0             return flavor_id
1576         # except nvExceptions.BadRequest as e:
1577 0         except (
1578             nvExceptions.NotFound,
1579             ksExceptions.ClientException,
1580             nvExceptions.ClientException,
1581             ConnectionError,
1582         ) as e:
1583 0             self._format_exception(e)
1584
1585 1     def new_image(self, image_dict):
1586         """
1587         Adds a tenant image to VIM. imge_dict is a dictionary with:
1588             name: name
1589             disk_format: qcow2, vhd, vmdk, raw (by default), ...
1590             location: path or URI
1591             public: "yes" or "no"
1592             metadata: metadata of the image
1593         Returns the image_id
1594         """
1595 0         retry = 0
1596 0         max_retries = 3
1597
1598 0         while retry < max_retries:
1599 0             retry += 1
1600 0             try:
1601 0                 self._reload_connection()
1602
1603                 # determine format  http://docs.openstack.org/developer/glance/formats.html
1604 0                 if "disk_format" in image_dict:
1605 0                     disk_format = image_dict["disk_format"]
1606                 else:  # autodiscover based on extension
1607 0                     if image_dict["location"].endswith(".qcow2"):
1608 0                         disk_format = "qcow2"
1609 0                     elif image_dict["location"].endswith(".vhd"):
1610 0                         disk_format = "vhd"
1611 0                     elif image_dict["location"].endswith(".vmdk"):
1612 0                         disk_format = "vmdk"
1613 0                     elif image_dict["location"].endswith(".vdi"):
1614 0                         disk_format = "vdi"
1615 0                     elif image_dict["location"].endswith(".iso"):
1616 0                         disk_format = "iso"
1617 0                     elif image_dict["location"].endswith(".aki"):
1618 0                         disk_format = "aki"
1619 0                     elif image_dict["location"].endswith(".ari"):
1620 0                         disk_format = "ari"
1621 0                     elif image_dict["location"].endswith(".ami"):
1622 0                         disk_format = "ami"
1623                     else:
1624 0                         disk_format = "raw"
1625
1626 0                 self.logger.debug(
1627                     "new_image: '%s' loading from '%s'",
1628                     image_dict["name"],
1629                     image_dict["location"],
1630                 )
1631 0                 if self.vim_type == "VIO":
1632 0                     container_format = "bare"
1633 0                     if "container_format" in image_dict:
1634 0                         container_format = image_dict["container_format"]
1635
1636 0                     new_image = self.glance.images.create(
1637                         name=image_dict["name"],
1638                         container_format=container_format,
1639                         disk_format=disk_format,
1640                     )
1641                 else:
1642 0                     new_image = self.glance.images.create(name=image_dict["name"])
1643
1644 0                 if image_dict["location"].startswith("http"):
1645                     # TODO there is not a method to direct download. It must be downloaded locally with requests
1646 0                     raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1647                 else:  # local path
1648 0                     with open(image_dict["location"]) as fimage:
1649 0                         self.glance.images.upload(new_image.id, fimage)
1650                         # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1651                         #  image_dict.get("public","yes")=="yes",
1652                         #    container_format="bare", data=fimage, disk_format=disk_format)
1653
1654 0                 metadata_to_load = image_dict.get("metadata")
1655
1656                 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1657                 #  for openstack
1658 0                 if self.vim_type == "VIO":
1659 0                     metadata_to_load["upload_location"] = image_dict["location"]
1660                 else:
1661 0                     metadata_to_load["location"] = image_dict["location"]
1662
1663 0                 self.glance.images.update(new_image.id, **metadata_to_load)
1664
1665 0                 return new_image.id
1666 0             except (
1667                 nvExceptions.Conflict,
1668                 ksExceptions.ClientException,
1669                 nvExceptions.ClientException,
1670             ) as e:
1671 0                 self._format_exception(e)
1672 0             except (
1673                 HTTPException,
1674                 gl1Exceptions.HTTPException,
1675                 gl1Exceptions.CommunicationError,
1676                 ConnectionError,
1677             ) as e:
1678 0                 if retry == max_retries:
1679 0                     continue
1680
1681 0                 self._format_exception(e)
1682 0             except IOError as e:  # can not open the file
1683 0                 raise vimconn.VimConnConnectionException(
1684                     "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1685                     http_code=vimconn.HTTP_Bad_Request,
1686                 )
1687
1688 1     def delete_image(self, image_id):
1689         """Deletes a tenant image from openstack VIM. Returns the old id"""
1690 0         try:
1691 0             self._reload_connection()
1692 0             self.glance.images.delete(image_id)
1693
1694 0             return image_id
1695 0         except (
1696             nvExceptions.NotFound,
1697             ksExceptions.ClientException,
1698             nvExceptions.ClientException,
1699             gl1Exceptions.CommunicationError,
1700             gl1Exceptions.HTTPNotFound,
1701             ConnectionError,
1702         ) as e:  # TODO remove
1703 0             self._format_exception(e)
1704
1705 1     def get_image_id_from_path(self, path):
1706         """Get the image id from image path in the VIM database. Returns the image_id"""
1707 0         try:
1708 0             self._reload_connection()
1709 0             images = self.glance.images.list()
1710
1711 0             for image in images:
1712 0                 if image.metadata.get("location") == path:
1713 0                     return image.id
1714
1715 0             raise vimconn.VimConnNotFoundException(
1716                 "image with location '{}' not found".format(path)
1717             )
1718 0         except (
1719             ksExceptions.ClientException,
1720             nvExceptions.ClientException,
1721             gl1Exceptions.CommunicationError,
1722             ConnectionError,
1723         ) as e:
1724 0             self._format_exception(e)
1725
1726 1     def get_image_list(self, filter_dict={}):
1727         """Obtain tenant images from VIM
1728         Filter_dict can be:
1729             id: image id
1730             name: image name
1731             checksum: image checksum
1732         Returns the image list of dictionaries:
1733             [{<the fields at Filter_dict plus some VIM specific>}, ...]
1734             List can be empty
1735         """
1736 0         self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1737
1738 0         try:
1739 0             self._reload_connection()
1740             # filter_dict_os = filter_dict.copy()
1741             # First we filter by the available filter fields: name, id. The others are removed.
1742 0             image_list = self.glance.images.list()
1743 0             filtered_list = []
1744
1745 0             for image in image_list:
1746 0                 try:
1747 0                     if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1748 0                         continue
1749
1750 0                     if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1751 0                         continue
1752
1753 0                     if (
1754                         filter_dict.get("checksum")
1755                         and image["checksum"] != filter_dict["checksum"]
1756                     ):
1757 0                         continue
1758
1759 0                     filtered_list.append(image.copy())
1760 0                 except gl1Exceptions.HTTPNotFound:
1761 0                     pass
1762
1763 0             return filtered_list
1764 0         except (
1765             ksExceptions.ClientException,
1766             nvExceptions.ClientException,
1767             gl1Exceptions.CommunicationError,
1768             ConnectionError,
1769         ) as e:
1770 0             self._format_exception(e)
1771
1772 1     def __wait_for_vm(self, vm_id, status):
1773         """wait until vm is in the desired status and return True.
1774         If the VM gets in ERROR status, return false.
1775         If the timeout is reached generate an exception"""
1776 0         elapsed_time = 0
1777 0         while elapsed_time < server_timeout:
1778 0             vm_status = self.nova.servers.get(vm_id).status
1779
1780 0             if vm_status == status:
1781 0                 return True
1782
1783 0             if vm_status == "ERROR":
1784 0                 return False
1785
1786 0             time.sleep(5)
1787 0             elapsed_time += 5
1788
1789         # if we exceeded the timeout rollback
1790 0         if elapsed_time >= server_timeout:
1791 0             raise vimconn.VimConnException(
1792                 "Timeout waiting for instance " + vm_id + " to get " + status,
1793                 http_code=vimconn.HTTP_Request_Timeout,
1794             )
1795
1796 1     def _get_openstack_availablity_zones(self):
1797         """
1798         Get from openstack availability zones available
1799         :return:
1800         """
1801 0         try:
1802 0             openstack_availability_zone = self.nova.availability_zones.list()
1803 0             openstack_availability_zone = [
1804                 str(zone.zoneName)
1805                 for zone in openstack_availability_zone
1806                 if zone.zoneName != "internal"
1807             ]
1808
1809 0             return openstack_availability_zone
1810 0         except Exception:
1811 0             return None
1812
1813 1     def _set_availablity_zones(self):
1814         """
1815         Set vim availablity zone
1816         :return:
1817         """
1818 0         if "availability_zone" in self.config:
1819 0             vim_availability_zones = self.config.get("availability_zone")
1820
1821 0             if isinstance(vim_availability_zones, str):
1822 0                 self.availability_zone = [vim_availability_zones]
1823 0             elif isinstance(vim_availability_zones, list):
1824 0                 self.availability_zone = vim_availability_zones
1825         else:
1826 0             self.availability_zone = self._get_openstack_availablity_zones()
1827
1828 1     def _get_vm_availability_zone(
1829         self, availability_zone_index, availability_zone_list
1830     ):
1831         """
1832         Return thge availability zone to be used by the created VM.
1833         :return: The VIM availability zone to be used or None
1834         """
1835 0         if availability_zone_index is None:
1836 0             if not self.config.get("availability_zone"):
1837 0                 return None
1838 0             elif isinstance(self.config.get("availability_zone"), str):
1839 0                 return self.config["availability_zone"]
1840             else:
1841                 # TODO consider using a different parameter at config for default AV and AV list match
1842 0                 return self.config["availability_zone"][0]
1843
1844 0         vim_availability_zones = self.availability_zone
1845         # check if VIM offer enough availability zones describe in the VNFD
1846 0         if vim_availability_zones and len(availability_zone_list) <= len(
1847             vim_availability_zones
1848         ):
1849             # check if all the names of NFV AV match VIM AV names
1850 0             match_by_index = False
1851 0             for av in availability_zone_list:
1852 0                 if av not in vim_availability_zones:
1853 0                     match_by_index = True
1854 0                     break
1855
1856 0             if match_by_index:
1857 0                 return vim_availability_zones[availability_zone_index]
1858             else:
1859 0                 return availability_zone_list[availability_zone_index]
1860         else:
1861 0             raise vimconn.VimConnConflictException(
1862                 "No enough availability zones at VIM for this deployment"
1863             )
1864
1865 1     def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1866         """Fill up the security_groups in the port_dict.
1867
1868         Args:
1869             net (dict):             Network details
1870             port_dict   (dict):     Port details
1871
1872         """
1873 1         if (
1874             self.config.get("security_groups")
1875             and net.get("port_security") is not False
1876             and not self.config.get("no_port_security_extension")
1877         ):
1878 1             if not self.security_groups_id:
1879 1                 self._get_ids_from_name()
1880
1881 1             port_dict["security_groups"] = self.security_groups_id
1882
1883 1     def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1884         """Fill up the network binding depending on network type in the port_dict.
1885
1886         Args:
1887             net (dict):             Network details
1888             port_dict   (dict):     Port details
1889
1890         """
1891 1         if not net.get("type"):
1892 1             raise vimconn.VimConnException("Type is missing in the network details.")
1893
1894 1         if net["type"] == "virtual":
1895 1             pass
1896
1897         # For VF
1898 1         elif net["type"] == "VF" or net["type"] == "SR-IOV":
1899 1             port_dict["binding:vnic_type"] = "direct"
1900
1901             # VIO specific Changes
1902 1             if self.vim_type == "VIO":
1903                 # Need to create port with port_security_enabled = False and no-security-groups
1904 1                 port_dict["port_security_enabled"] = False
1905 1                 port_dict["provider_security_groups"] = []
1906 1                 port_dict["security_groups"] = []
1907
1908         else:
1909             # For PT PCI-PASSTHROUGH
1910 1             port_dict["binding:vnic_type"] = "direct-physical"
1911
1912 1     @staticmethod
1913 1     def _set_fixed_ip(new_port: dict, net: dict) -> None:
1914         """Set the "ip" parameter in net dictionary.
1915
1916         Args:
1917             new_port    (dict):     New created port
1918             net         (dict):     Network details
1919
1920         """
1921 1         fixed_ips = new_port["port"].get("fixed_ips")
1922
1923 1         if fixed_ips:
1924 1             net["ip"] = fixed_ips[0].get("ip_address")
1925         else:
1926 1             net["ip"] = None
1927
1928 1     @staticmethod
1929 1     def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1930         """Fill up the mac_address and fixed_ips in port_dict.
1931
1932         Args:
1933             net (dict):             Network details
1934             port_dict   (dict):     Port details
1935
1936         """
1937 1         if net.get("mac_address"):
1938 1             port_dict["mac_address"] = net["mac_address"]
1939
1940 1         if net.get("ip_address"):
1941 1             port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1942             # TODO add "subnet_id": <subnet_id>
1943
1944 1     def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1945         """Create new port using neutron.
1946
1947         Args:
1948             port_dict   (dict):         Port details
1949             created_items   (dict):     All created items
1950             net (dict):                 Network details
1951
1952         Returns:
1953             new_port    (dict):         New created port
1954
1955         """
1956 1         new_port = self.neutron.create_port({"port": port_dict})
1957 1         created_items["port:" + str(new_port["port"]["id"])] = True
1958 1         net["mac_adress"] = new_port["port"]["mac_address"]
1959 1         net["vim_id"] = new_port["port"]["id"]
1960
1961 1         return new_port
1962
1963 1     def _create_port(
1964         self, net: dict, name: str, created_items: dict
1965     ) -> Tuple[dict, dict]:
1966         """Create port using net details.
1967
1968         Args:
1969             net (dict):                 Network details
1970             name    (str):              Name to be used as network name if net dict does not include name
1971             created_items   (dict):     All created items
1972
1973         Returns:
1974             new_port, port              New created port, port dictionary
1975
1976         """
1977
1978 1         port_dict = {
1979             "network_id": net["net_id"],
1980             "name": net.get("name"),
1981             "admin_state_up": True,
1982         }
1983
1984 1         if not port_dict["name"]:
1985 1             port_dict["name"] = name
1986
1987 1         self._prepare_port_dict_security_groups(net, port_dict)
1988
1989 1         self._prepare_port_dict_binding(net, port_dict)
1990
1991 1         vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1992
1993 1         new_port = self._create_new_port(port_dict, created_items, net)
1994
1995 1         vimconnector._set_fixed_ip(new_port, net)
1996
1997 1         port = {"port-id": new_port["port"]["id"]}
1998
1999 1         if float(self.nova.api_version.get_string()) >= 2.32:
2000 1             port["tag"] = new_port["port"]["name"]
2001
2002 1         return new_port, port
2003
2004 1     def _prepare_network_for_vminstance(
2005         self,
2006         name: str,
2007         net_list: list,
2008         created_items: dict,
2009         net_list_vim: list,
2010         external_network: list,
2011         no_secured_ports: list,
2012     ) -> None:
2013         """Create port and fill up net dictionary for new VM instance creation.
2014
2015         Args:
2016             name    (str):                  Name of network
2017             net_list    (list):             List of networks
2018             created_items   (dict):         All created items belongs to a VM
2019             net_list_vim    (list):         List of ports
2020             external_network    (list):     List of external-networks
2021             no_secured_ports    (list):     Port security disabled ports
2022         """
2023
2024 1         self._reload_connection()
2025
2026 1         for net in net_list:
2027             # Skip non-connected iface
2028 1             if not net.get("net_id"):
2029 1                 continue
2030
2031 1             new_port, port = self._create_port(net, name, created_items)
2032
2033 1             net_list_vim.append(port)
2034
2035 1             if net.get("floating_ip", False):
2036 1                 net["exit_on_floating_ip_error"] = True
2037 1                 external_network.append(net)
2038
2039 1             elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2040 1                 net["exit_on_floating_ip_error"] = False
2041 1                 external_network.append(net)
2042 1                 net["floating_ip"] = self.config.get("use_floating_ip")
2043
2044             # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2045             # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2046 1             if net.get("port_security") is False and not self.config.get(
2047                 "no_port_security_extension"
2048             ):
2049 1                 no_secured_ports.append(
2050                     (
2051                         new_port["port"]["id"],
2052                         net.get("port_security_disable_strategy"),
2053                     )
2054                 )
2055
2056 1     def _prepare_persistent_root_volumes(
2057         self,
2058         name: str,
2059         vm_av_zone: list,
2060         disk: dict,
2061         base_disk_index: int,
2062         block_device_mapping: dict,
2063         existing_vim_volumes: list,
2064         created_items: dict,
2065     ) -> Optional[str]:
2066         """Prepare persistent root volumes for new VM instance.
2067
2068         Args:
2069             name    (str):                      Name of VM instance
2070             vm_av_zone  (list):                 List of availability zones
2071             disk    (dict):                     Disk details
2072             base_disk_index (int):              Disk index
2073             block_device_mapping    (dict):     Block device details
2074             existing_vim_volumes    (list):     Existing disk details
2075             created_items   (dict):             All created items belongs to VM
2076
2077         Returns:
2078             boot_volume_id  (str):              ID of boot volume
2079
2080         """
2081         # Disk may include only vim_volume_id or only vim_id."
2082         # Use existing persistent root volume finding with volume_id or vim_id
2083 1         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2084
2085 1         if disk.get(key_id):
2086 1             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2087 1             existing_vim_volumes.append({"id": disk[key_id]})
2088
2089         else:
2090             # Create persistent root volume
2091 1             volume = self.cinder.volumes.create(
2092                 size=disk["size"],
2093                 name=name + "vd" + chr(base_disk_index),
2094                 imageRef=disk["image_id"],
2095                 # Make sure volume is in the same AZ as the VM to be attached to
2096                 availability_zone=vm_av_zone,
2097             )
2098 1             boot_volume_id = volume.id
2099 1             self.update_block_device_mapping(
2100                 volume=volume,
2101                 block_device_mapping=block_device_mapping,
2102                 base_disk_index=base_disk_index,
2103                 disk=disk,
2104                 created_items=created_items,
2105             )
2106
2107 1             return boot_volume_id
2108
2109 1     @staticmethod
2110 1     def update_block_device_mapping(
2111         volume: object,
2112         block_device_mapping: dict,
2113         base_disk_index: int,
2114         disk: dict,
2115         created_items: dict,
2116     ) -> None:
2117         """Add volume information to block device mapping dict.
2118         Args:
2119             volume  (object):                   Created volume object
2120             block_device_mapping    (dict):     Block device details
2121             base_disk_index (int):              Disk index
2122             disk    (dict):                     Disk details
2123             created_items   (dict):             All created items belongs to VM
2124         """
2125 1         if not volume:
2126 1             raise vimconn.VimConnException("Volume is empty.")
2127
2128 1         if not hasattr(volume, "id"):
2129 1             raise vimconn.VimConnException(
2130                 "Created volume is not valid, does not have id attribute."
2131             )
2132
2133 1         volume_txt = "volume:" + str(volume.id)
2134 1         if disk.get("keep"):
2135 1             volume_txt += ":keep"
2136 1         created_items[volume_txt] = True
2137 1         block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2138
2139 1     def _prepare_non_root_persistent_volumes(
2140         self,
2141         name: str,
2142         disk: dict,
2143         vm_av_zone: list,
2144         block_device_mapping: dict,
2145         base_disk_index: int,
2146         existing_vim_volumes: list,
2147         created_items: dict,
2148     ) -> None:
2149         """Prepare persistent volumes for new VM instance.
2150
2151         Args:
2152             name    (str):                      Name of VM instance
2153             disk    (dict):                     Disk details
2154             vm_av_zone  (list):                 List of availability zones
2155             block_device_mapping    (dict):     Block device details
2156             base_disk_index (int):              Disk index
2157             existing_vim_volumes    (list):     Existing disk details
2158             created_items   (dict):             All created items belongs to VM
2159         """
2160         # Non-root persistent volumes
2161         # Disk may include only vim_volume_id or only vim_id."
2162 1         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2163
2164 1         if disk.get(key_id):
2165             # Use existing persistent volume
2166 1             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2167 1             existing_vim_volumes.append({"id": disk[key_id]})
2168
2169         else:
2170             # Create persistent volume
2171 1             volume = self.cinder.volumes.create(
2172                 size=disk["size"],
2173                 name=name + "vd" + chr(base_disk_index),
2174                 # Make sure volume is in the same AZ as the VM to be attached to
2175                 availability_zone=vm_av_zone,
2176             )
2177 1             self.update_block_device_mapping(
2178                 volume=volume,
2179                 block_device_mapping=block_device_mapping,
2180                 base_disk_index=base_disk_index,
2181                 disk=disk,
2182                 created_items=created_items,
2183             )
2184
2185 1     def _wait_for_created_volumes_availability(
2186         self, elapsed_time: int, created_items: dict
2187     ) -> Optional[int]:
2188         """Wait till created volumes become available.
2189
2190         Args:
2191             elapsed_time    (int):          Passed time while waiting
2192             created_items   (dict):         All created items belongs to VM
2193
2194         Returns:
2195             elapsed_time    (int):          Time spent while waiting
2196
2197         """
2198
2199 1         while elapsed_time < volume_timeout:
2200 1             for created_item in created_items:
2201 1                 v, volume_id = (
2202                     created_item.split(":")[0],
2203                     created_item.split(":")[1],
2204                 )
2205 1                 if v == "volume":
2206 1                     if self.cinder.volumes.get(volume_id).status != "available":
2207 1                         break
2208             else:
2209                 # All ready: break from while
2210 1                 break
2211
2212 1             time.sleep(5)
2213 1             elapsed_time += 5
2214
2215 1         return elapsed_time
2216
2217 1     def _wait_for_existing_volumes_availability(
2218         self, elapsed_time: int, existing_vim_volumes: list
2219     ) -> Optional[int]:
2220         """Wait till existing volumes become available.
2221
2222         Args:
2223             elapsed_time    (int):          Passed time while waiting
2224             existing_vim_volumes   (list):  Existing volume details
2225
2226         Returns:
2227             elapsed_time    (int):          Time spent while waiting
2228
2229         """
2230
2231 1         while elapsed_time < volume_timeout:
2232 1             for volume in existing_vim_volumes:
2233 1                 if self.cinder.volumes.get(volume["id"]).status != "available":
2234 1                     break
2235             else:  # all ready: break from while
2236 1                 break
2237
2238 1             time.sleep(5)
2239 1             elapsed_time += 5
2240
2241 1         return elapsed_time
2242
2243 1     def _prepare_disk_for_vminstance(
2244         self,
2245         name: str,
2246         existing_vim_volumes: list,
2247         created_items: dict,
2248         vm_av_zone: list,
2249         block_device_mapping: dict,
2250         disk_list: list = None,
2251     ) -> None:
2252         """Prepare all volumes for new VM instance.
2253
2254         Args:
2255             name    (str):                      Name of Instance
2256             existing_vim_volumes    (list):     List of existing volumes
2257             created_items   (dict):             All created items belongs to VM
2258             vm_av_zone  (list):                 VM availability zone
2259             block_device_mapping (dict):        Block devices to be attached to VM
2260             disk_list   (list):                 List of disks
2261
2262         """
2263         # Create additional volumes in case these are present in disk_list
2264 1         base_disk_index = ord("b")
2265 1         boot_volume_id = None
2266 1         elapsed_time = 0
2267
2268 1         for disk in disk_list:
2269 1             if "image_id" in disk:
2270                 # Root persistent volume
2271 1                 base_disk_index = ord("a")
2272 1                 boot_volume_id = self._prepare_persistent_root_volumes(
2273                     name=name,
2274                     vm_av_zone=vm_av_zone,
2275                     disk=disk,
2276                     base_disk_index=base_disk_index,
2277                     block_device_mapping=block_device_mapping,
2278                     existing_vim_volumes=existing_vim_volumes,
2279                     created_items=created_items,
2280                 )
2281             else:
2282                 # Non-root persistent volume
2283 1                 self._prepare_non_root_persistent_volumes(
2284                     name=name,
2285                     disk=disk,
2286                     vm_av_zone=vm_av_zone,
2287                     block_device_mapping=block_device_mapping,
2288                     base_disk_index=base_disk_index,
2289                     existing_vim_volumes=existing_vim_volumes,
2290                     created_items=created_items,
2291                 )
2292 1             base_disk_index += 1
2293
2294         # Wait until created volumes are with status available
2295 1         elapsed_time = self._wait_for_created_volumes_availability(
2296             elapsed_time, created_items
2297         )
2298         # Wait until existing volumes in vim are with status available
2299 1         elapsed_time = self._wait_for_existing_volumes_availability(
2300             elapsed_time, existing_vim_volumes
2301         )
2302         # If we exceeded the timeout rollback
2303 1         if elapsed_time >= volume_timeout:
2304 1             raise vimconn.VimConnException(
2305                 "Timeout creating volumes for instance " + name,
2306                 http_code=vimconn.HTTP_Request_Timeout,
2307             )
2308 1         if boot_volume_id:
2309 1             self.cinder.volumes.set_bootable(boot_volume_id, True)
2310
2311 1     def _find_the_external_network_for_floating_ip(self):
2312         """Get the external network ip in order to create floating IP.
2313
2314         Returns:
2315             pool_id (str):      External network pool ID
2316
2317         """
2318
2319         # Find the external network
2320 1         external_nets = list()
2321
2322 1         for net in self.neutron.list_networks()["networks"]:
2323 1             if net["router:external"]:
2324 1                 external_nets.append(net)
2325
2326 1         if len(external_nets) == 0:
2327 1             raise vimconn.VimConnException(
2328                 "Cannot create floating_ip automatically since "
2329                 "no external network is present",
2330                 http_code=vimconn.HTTP_Conflict,
2331             )
2332
2333 1         if len(external_nets) > 1:
2334 1             raise vimconn.VimConnException(
2335                 "Cannot create floating_ip automatically since "
2336                 "multiple external networks are present",
2337                 http_code=vimconn.HTTP_Conflict,
2338             )
2339
2340         # Pool ID
2341 1         return external_nets[0].get("id")
2342
2343 1     def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2344         """Trigger neutron to create a new floating IP using external network ID.
2345
2346         Args:
2347             param   (dict):             Input parameters to create a floating IP
2348             created_items   (dict):     All created items belongs to new VM instance
2349
2350         Raises:
2351
2352             VimConnException
2353         """
2354 1         try:
2355 1             self.logger.debug("Creating floating IP")
2356 1             new_floating_ip = self.neutron.create_floatingip(param)
2357 1             free_floating_ip = new_floating_ip["floatingip"]["id"]
2358 1             created_items["floating_ip:" + str(free_floating_ip)] = True
2359
2360 1         except Exception as e:
2361 1             raise vimconn.VimConnException(
2362                 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2363                 http_code=vimconn.HTTP_Conflict,
2364             )
2365
2366 1     def _create_floating_ip(
2367         self, floating_network: dict, server: object, created_items: dict
2368     ) -> None:
2369         """Get the available Pool ID and create a new floating IP.
2370
2371         Args:
2372             floating_network    (dict):         Dict including external network ID
2373             server   (object):                  Server object
2374             created_items   (dict):             All created items belongs to new VM instance
2375
2376         """
2377
2378         # Pool_id is available
2379 1         if (
2380             isinstance(floating_network["floating_ip"], str)
2381             and floating_network["floating_ip"].lower() != "true"
2382         ):
2383 1             pool_id = floating_network["floating_ip"]
2384
2385         # Find the Pool_id
2386         else:
2387 1             pool_id = self._find_the_external_network_for_floating_ip()
2388
2389 1         param = {
2390             "floatingip": {
2391                 "floating_network_id": pool_id,
2392                 "tenant_id": server.tenant_id,
2393             }
2394         }
2395
2396 1         self._neutron_create_float_ip(param, created_items)
2397
2398 1     def _find_floating_ip(
2399         self,
2400         server: object,
2401         floating_ips: list,
2402         floating_network: dict,
2403     ) -> Optional[str]:
2404         """Find the available free floating IPs if there are.
2405
2406         Args:
2407             server  (object):                   Server object
2408             floating_ips    (list):             List of floating IPs
2409             floating_network    (dict):         Details of floating network such as ID
2410
2411         Returns:
2412             free_floating_ip    (str):          Free floating ip address
2413
2414         """
2415 1         for fip in floating_ips:
2416 1             if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2417 1                 continue
2418
2419 1             if isinstance(floating_network["floating_ip"], str):
2420 1                 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2421 0                     continue
2422
2423 1             return fip["id"]
2424
2425 1     def _assign_floating_ip(
2426         self, free_floating_ip: str, floating_network: dict
2427     ) -> Dict:
2428         """Assign the free floating ip address to port.
2429
2430         Args:
2431             free_floating_ip    (str):          Floating IP to be assigned
2432             floating_network    (dict):         ID of floating network
2433
2434         Returns:
2435             fip (dict)          (dict):         Floating ip details
2436
2437         """
2438         # The vim_id key contains the neutron.port_id
2439 1         self.neutron.update_floatingip(
2440             free_floating_ip,
2441             {"floatingip": {"port_id": floating_network["vim_id"]}},
2442         )
2443         # For race condition ensure not re-assigned to other VM after 5 seconds
2444 1         time.sleep(5)
2445
2446 1         return self.neutron.show_floatingip(free_floating_ip)
2447
2448 1     def _get_free_floating_ip(
2449         self, server: object, floating_network: dict
2450     ) -> Optional[str]:
2451         """Get the free floating IP address.
2452
2453         Args:
2454             server  (object):               Server Object
2455             floating_network    (dict):     Floating network details
2456
2457         Returns:
2458             free_floating_ip    (str):      Free floating ip addr
2459
2460         """
2461
2462 1         floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2463
2464         # Randomize
2465 1         random.shuffle(floating_ips)
2466
2467 1         return self._find_floating_ip(server, floating_ips, floating_network)
2468
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        Args:
            external_network    (list):         ID of External network
            server  (object):                   Server Object
            created_items   (dict):             All created items belongs to new VM instance
            vm_start_time   (float):            Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException

        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network
                    )

                    if not free_floating_ip:
                        # NOTE(review): free_floating_ip stays None after creating a
                        # new IP, so show_floatingip(None) below raises into the
                        # except branch; the next while pass then picks up the newly
                        # created IP. This consumes one retry — confirm intended.
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            # Taken by someone else in the meantime: pick another one
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        # Re-read result detects a concurrent re-assignment to
                        # another port (RO-in-HA race)
                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            # VM still building: keep retrying until server_timeout
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            # VM settled: only a bounded number of retries left
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # Per-network opt-out: log and continue with the next network
                # unless the descriptor demanded a hard failure for this one
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2557
2558 1     def _update_port_security_for_vminstance(
2559         self,
2560         no_secured_ports: list,
2561         server: object,
2562     ) -> None:
2563         """Updates the port security according to no_secured_ports list.
2564
2565         Args:
2566             no_secured_ports    (list):     List of ports that security will be disabled
2567             server  (object):               Server Object
2568
2569         Raises:
2570             VimConnException
2571
2572         """
2573         # Wait until the VM is active and then disable the port-security
2574 1         if no_secured_ports:
2575 1             self.__wait_for_vm(server.id, "ACTIVE")
2576
2577 1         for port in no_secured_ports:
2578 1             port_update = {
2579                 "port": {"port_security_enabled": False, "security_groups": None}
2580             }
2581
2582 1             if port[1] == "allow-address-pairs":
2583 1                 port_update = {
2584                     "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2585                 }
2586
2587 1             try:
2588 1                 self.neutron.update_port(port[0], port_update)
2589
2590 1             except Exception:
2591 1                 raise vimconn.VimConnException(
2592                     "It was not possible to disable port security for port {}".format(
2593                         port[0]
2594                     )
2595                 )
2596
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name    (str):          name of VM
            description (str):      description
            start   (bool):         indicates if VM must start or boot in pause mode. Ignored
            image_id    (str)       image uuid
            flavor_id   (str)       flavor uuid
            affinity_group_list (list):     list of affinity groups, each one is a dictionary.Ignore if empty.
            net_list    (list):         list of interfaces, each one is a dictionary with:
                name:   name of network
                net_id:     network uuid to connect
                vpci:   virtual vcpi to assign, ignored because openstack lack #TODO
                model:  interface model, ignored #TODO
                mac_address:    used for  SR-IOV ifaces #TODO for other types
                use:    'data', 'bridge',  'mgmt'
                type:   'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id:     filled/added by this function
                floating_ip:    True/False (or it can be None)
                port_security:  True/False
            cloud_config    (dict): (optional) dictionary with:
                key-pairs:      (optional) list of strings with the public key to be inserted to the default user
                users:      (optional) list of users to be inserted, each item is a dict with:
                    name:   (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data:  (optional) string is a text script to be passed directly to cloud-init
                config-files:   (optional). List of files to be transferred. Each item is a dict with:
                    dest:   (mandatory) string with the destination absolute path
                    encoding:   (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content :    (mandatory) string with the content of the file
                    permissions:    (optional) string with file permissions, typically octal notation '0644'
                    owner:  (optional) file owner, string with the format 'owner:group'
                boot-data-drive:    boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list:  (optional) list with additional disks to the VM. Each item is a dict with:
                image_id:   (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size:   (mandatory) string with the size of the disk in GB
                vim_id:  (optional) should use this existing volume id
            availability_zone_index:    Index of availability_zone_list to use for this this VM. None if not AV required
            availability_zone_list:     list of availability zones given by user in the VNFD descriptor.  Ignore if
                availability_zone_index is None
                #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.

        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )

        try:
            server = None
            created_items = {}
            net_list_vim = []
            # list of external networks to be connected to instance, later on used to create floating_ip
            external_network = []
            # List of ports with port-security disabled
            no_secured_ports = []
            block_device_mapping = {}
            existing_vim_volumes = []
            server_group_id = None
            # NOTE: "scheduller_hints" (sic) is forwarded to nova as scheduler_hints
            scheduller_hints = {}

            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            if disk_list:
                # Prepare disks
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    vm_av_zone=vm_av_zone,
                    block_device_mapping=block_device_mapping,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )

            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            # Start of the floating-ip timeout budget used while waiting for the VM
            vm_start_time = time.time()

            self._update_port_security_for_vminstance(no_secured_ports, server)

            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items

        except Exception as e:
            # Rollback: delete the half-created VM and everything tracked in
            # created_items (including keep-flagged volumes), then re-raise as
            # a vimconn exception via _format_exception
            server_id = None
            if server:
                server_id = server.id

            try:
                created_items = self.remove_keep_tag_from_persistent_volumes(
                    created_items
                )

                self.delete_vminstance(server_id, created_items)

            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
2782
2783 1     @staticmethod
2784 1     def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2785         """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2786
2787         Args:
2788             created_items (dict):       All created items belongs to VM
2789
2790         Returns:
2791             updated_created_items   (dict):     Dict which does not include keep flag for volumes.
2792
2793         """
2794 1         return {
2795             key.replace(":keep", ""): value for (key, value) in created_items.items()
2796         }
2797
2798 1     def get_vminstance(self, vm_id):
2799         """Returns the VM instance information from VIM"""
2800         # self.logger.debug("Getting VM from VIM")
2801 0         try:
2802 0             self._reload_connection()
2803 0             server = self.nova.servers.find(id=vm_id)
2804             # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2805
2806 0             return server.to_dict()
2807 0         except (
2808             ksExceptions.ClientException,
2809             nvExceptions.ClientException,
2810             nvExceptions.NotFound,
2811             ConnectionError,
2812         ) as e:
2813 0             self._format_exception(e)
2814
    def get_vminstance_console(self, vm_id, console_type="vnc"):
        """
        Get a console for the virtual machine
        Params:
            vm_id: uuid of the VM
            console_type, can be:
                "novnc" (by default), "xvpvnc" for VNC types,
                "rdp-html5" for RDP types, "spice-html5" for SPICE types
        Returns dict with the console parameters:
                protocol: ssh, ftp, http, https, ...
                server:   usually ip address
                port:     the http, ssh, ... port
                suffix:   extra text, e.g. the http path and query string
        """
        self.logger.debug("Getting VM CONSOLE from VIM")

        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)

            # Map the requested console type to the matching nova call
            if console_type is None or console_type == "novnc":
                console_dict = server.get_vnc_console("novnc")
            elif console_type == "xvpvnc":
                console_dict = server.get_vnc_console(console_type)
            elif console_type == "rdp-html5":
                console_dict = server.get_rdp_console(console_type)
            elif console_type == "spice-html5":
                console_dict = server.get_spice_console(console_type)
            else:
                raise vimconn.VimConnException(
                    "console type '{}' not allowed".format(console_type),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            console_dict1 = console_dict.get("console")

            if console_dict1:
                console_url = console_dict1.get("url")

                if console_url:
                    # parse console_url as "<protocol>://<server>:<port>/<suffix>"
                    protocol_index = console_url.find("//")
                    suffix_index = (
                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                    )
                    port_index = (
                        console_url[protocol_index + 2 : suffix_index].find(":")
                        + protocol_index
                        + 2
                    )

                    # NOTE(review): after the offset additions above, a failed
                    # find() (-1) does not necessarily leave these negative, so
                    # this guard may miss malformed URLs — confirm against the
                    # URL shapes nova actually returns.
                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                        return (
                            -vimconn.HTTP_Internal_Server_Error,
                            "Unexpected response from VIM",
                        )

                    # The "port" slice starts at the ":" separator, so the value
                    # keeps the leading colon (e.g. ":6080")
                    console_dict = {
                        "protocol": console_url[0:protocol_index],
                        "server": console_url[protocol_index + 2 : port_index],
                        "port": console_url[port_index:suffix_index],
                        "suffix": console_url[suffix_index + 1 :],
                    }
                    # NOTE(review): this increment is never read before the return
                    # below — looks like dead code left from a refactor.
                    protocol_index += 2

                    return console_dict
            raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)
2890
2891 1     def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2892         """Neutron delete ports by id.
2893         Args:
2894             k_id    (str):      Port id in the VIM
2895         """
2896 1         try:
2897 1             port_dict = self.neutron.list_ports()
2898 1             existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
2899
2900 1             if k_id in existing_ports:
2901 1                 self.neutron.delete_port(k_id)
2902
2903 1         except Exception as e:
2904 1             self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2905
2906 1     def _delete_volumes_by_id_wth_cinder(
2907         self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2908     ) -> bool:
2909         """Cinder delete volume by id.
2910         Args:
2911             k   (str):                      Full item name in created_items
2912             k_id    (str):                  ID of floating ip in VIM
2913             volumes_to_hold (list):          Volumes not to delete
2914             created_items   (dict):         All created items belongs to VM
2915         """
2916 1         try:
2917 1             if k_id in volumes_to_hold:
2918 1                 return
2919
2920 1             if self.cinder.volumes.get(k_id).status != "available":
2921 1                 return True
2922
2923             else:
2924 1                 self.cinder.volumes.delete(k_id)
2925 1                 created_items[k] = None
2926
2927 1         except Exception as e:
2928 1             self.logger.error(
2929                 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2930             )
2931
2932 1     def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2933         """Neutron delete floating ip by id.
2934         Args:
2935             k   (str):                      Full item name in created_items
2936             k_id    (str):                  ID of floating ip in VIM
2937             created_items   (dict):         All created items belongs to VM
2938         """
2939 1         try:
2940 1             self.neutron.delete_floatingip(k_id)
2941 1             created_items[k] = None
2942
2943 1         except Exception as e:
2944 1             self.logger.error(
2945                 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2946             )
2947
2948 1     @staticmethod
2949 1     def _get_item_name_id(k: str) -> Tuple[str, str]:
2950 1         k_item, _, k_id = k.partition(":")
2951 1         return k_item, k_id
2952
2953 1     def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2954         """Delete VM ports attached to the networks before deleting virtual machine.
2955         Args:
2956             created_items   (dict):     All created items belongs to VM
2957         """
2958
2959 1         for k, v in created_items.items():
2960 1             if not v:  # skip already deleted
2961 1                 continue
2962
2963 1             try:
2964 1                 k_item, k_id = self._get_item_name_id(k)
2965 1                 if k_item == "port":
2966 1                     self._delete_ports_by_id_wth_neutron(k_id)
2967
2968 1             except Exception as e:
2969 1                 self.logger.error(
2970                     "Error deleting port: {}: {}".format(type(e).__name__, e)
2971                 )
2972
2973 1     def _delete_created_items(
2974         self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2975     ) -> bool:
2976         """Delete Volumes and floating ip if they exist in created_items."""
2977 1         for k, v in created_items.items():
2978 1             if not v:  # skip already deleted
2979 1                 continue
2980
2981 1             try:
2982 1                 k_item, k_id = self._get_item_name_id(k)
2983
2984 1                 if k_item == "volume":
2985 1                     unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2986                         k, k_id, volumes_to_hold, created_items
2987                     )
2988
2989 1                     if unavailable_vol:
2990 1                         keep_waiting = True
2991
2992 1                 elif k_item == "floating_ip":
2993 1                     self._delete_floating_ip_by_id(k, k_id, created_items)
2994
2995 1             except Exception as e:
2996 1                 self.logger.error("Error deleting {}: {}".format(k, e))
2997
2998 1         return keep_waiting
2999
3000 1     @staticmethod
3001 1     def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3002         """Remove the volumes which has key flag from created_items
3003
3004         Args:
3005             created_items   (dict):         All created items belongs to VM
3006
3007         Returns:
3008             created_items   (dict):         Persistent volumes eliminated created_items
3009         """
3010 1         return {
3011             key: value
3012             for (key, value) in created_items.items()
3013             if len(key.split(":")) == 2
3014         }
3015
3016 1     def delete_vminstance(
3017         self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3018     ) -> None:
3019         """Removes a VM instance from VIM. Returns the old identifier.
3020         Args:
3021             vm_id   (str):              Identifier of VM instance
3022             created_items   (dict):     All created items belongs to VM
3023             volumes_to_hold (list):     Volumes_to_hold
3024         """
3025 1         if created_items is None:
3026 1             created_items = {}
3027 1         if volumes_to_hold is None:
3028 1             volumes_to_hold = []
3029
3030 1         try:
3031 1             created_items = self._extract_items_wth_keep_flag_from_created_items(
3032                 created_items
3033             )
3034
3035 1             self._reload_connection()
3036
3037             # Delete VM ports attached to the networks before the virtual machine
3038 1             if created_items:
3039 1                 self._delete_vm_ports_attached_to_network(created_items)
3040
3041 1             if vm_id:
3042 1                 self.nova.servers.delete(vm_id)
3043
3044             # Although having detached, volumes should have in active status before deleting.
3045             # We ensure in this loop
3046 1             keep_waiting = True
3047 1             elapsed_time = 0
3048
3049 1             while keep_waiting and elapsed_time < volume_timeout:
3050 1                 keep_waiting = False
3051
3052                 # Delete volumes and floating IP.
3053 1                 keep_waiting = self._delete_created_items(
3054                     created_items, volumes_to_hold, keep_waiting
3055                 )
3056
3057 1                 if keep_waiting:
3058 1                     time.sleep(1)
3059 1                     elapsed_time += 1
3060
3061 1         except (
3062             nvExceptions.NotFound,
3063             ksExceptions.ClientException,
3064             nvExceptions.ClientException,
3065             ConnectionError,
3066         ) as e:
3067 0             self._format_exception(e)
3068
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this Virtual Machine
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                            #  CREATING (on building process), ERROR
                            #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
                    vim_net_id:       #network id where this interface is connected
                    vim_interface_id: #interface/port VIM id
                    ip_address:       #null, or text with IPv4, IPv6 address
                    compute_node:     #identification of compute node where PF,VF interface is allocated
                    pci:              #PCI address of the NIC that hosts the PF,VF
                    vlan:             #physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        # One entry per requested VM; a failure on one VM does not stop the rest.
        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                # Map the VIM-native status to the MANO vocabulary; anything
                # unknown is reported as OTHER with the raw status in error_msg.
                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # user_data can be large and is not useful in the serialized
                # vim_info, so drop both spellings before serializing.
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                #  the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            # Floating IPs are optional; best-effort lookup only.
                            pass

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    # Interface harvesting is best-effort: log and keep the
                    # VM-level status already collected.
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
3211
3212 1     def action_vminstance(self, vm_id, action_dict, created_items={}):
3213         """Send and action over a VM instance from VIM
3214         Returns None or the console dict if the action was successfully sent to the VIM
3215         """
3216 0         self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3217
3218 0         try:
3219 0             self._reload_connection()
3220 0             server = self.nova.servers.find(id=vm_id)
3221
3222 0             if "start" in action_dict:
3223 0                 if action_dict["start"] == "rebuild":
3224 0                     server.rebuild()
3225                 else:
3226 0                     if server.status == "PAUSED":
3227 0                         server.unpause()
3228 0                     elif server.status == "SUSPENDED":
3229 0                         server.resume()
3230 0                     elif server.status == "SHUTOFF":
3231 0                         server.start()
3232                     else:
3233 0                         self.logger.debug(
3234                             "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3235                         )
3236 0                         raise vimconn.VimConnException(
3237                             "Cannot 'start' instance while it is in active state",
3238                             http_code=vimconn.HTTP_Bad_Request,
3239                         )
3240
3241 0             elif "pause" in action_dict:
3242 0                 server.pause()
3243 0             elif "resume" in action_dict:
3244 0                 server.resume()
3245 0             elif "shutoff" in action_dict or "shutdown" in action_dict:
3246 0                 self.logger.debug("server status %s", server.status)
3247 0                 if server.status == "ACTIVE":
3248 0                     server.stop()
3249                 else:
3250 0                     self.logger.debug("ERROR: VM is not in Active state")
3251 0                     raise vimconn.VimConnException(
3252                         "VM is not in active state, stop operation is not allowed",
3253                         http_code=vimconn.HTTP_Bad_Request,
3254                     )
3255 0             elif "forceOff" in action_dict:
3256 0                 server.stop()  # TODO
3257 0             elif "terminate" in action_dict:
3258 0                 server.delete()
3259 0             elif "createImage" in action_dict:
3260 0                 server.create_image()
3261                 # "path":path_schema,
3262                 # "description":description_schema,
3263                 # "name":name_schema,
3264                 # "metadata":metadata_schema,
3265                 # "imageRef": id_schema,
3266                 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3267 0             elif "rebuild" in action_dict:
3268 0                 server.rebuild(server.image["id"])
3269 0             elif "reboot" in action_dict:
3270 0                 server.reboot()  # reboot_type="SOFT"
3271 0             elif "console" in action_dict:
3272 0                 console_type = action_dict["console"]
3273
3274 0                 if console_type is None or console_type == "novnc":
3275 0                     console_dict = server.get_vnc_console("novnc")
3276 0                 elif console_type == "xvpvnc":
3277 0                     console_dict = server.get_vnc_console(console_type)
3278 0                 elif console_type == "rdp-html5":
3279 0                     console_dict = server.get_rdp_console(console_type)
3280 0                 elif console_type == "spice-html5":
3281 0                     console_dict = server.get_spice_console(console_type)
3282                 else:
3283 0                     raise vimconn.VimConnException(
3284                         "console type '{}' not allowed".format(console_type),
3285                         http_code=vimconn.HTTP_Bad_Request,
3286                     )
3287
3288 0                 try:
3289 0                     console_url = console_dict["console"]["url"]
3290                     # parse console_url
3291 0                     protocol_index = console_url.find("//")
3292 0                     suffix_index = (
3293                         console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3294                     )
3295 0                     port_index = (
3296                         console_url[protocol_index + 2 : suffix_index].find(":")
3297                         + protocol_index
3298                         + 2
3299                     )
3300
3301 0                     if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3302 0                         raise vimconn.VimConnException(
3303                             "Unexpected response from VIM " + str(console_dict)
3304                         )
3305
3306 0                     console_dict2 = {
3307                         "protocol": console_url[0:protocol_index],
3308                         "server": console_url[protocol_index + 2 : port_index],
3309                         "port": int(console_url[port_index + 1 : suffix_index]),
3310                         "suffix": console_url[suffix_index + 1 :],
3311                     }
3312
3313 0                     return console_dict2
3314 0                 except Exception:
3315 0                     raise vimconn.VimConnException(
3316                         "Unexpected response from VIM " + str(console_dict)
3317                     )
3318
3319 0             return None
3320 0         except (
3321             ksExceptions.ClientException,
3322             nvExceptions.ClientException,
3323             nvExceptions.NotFound,
3324             ConnectionError,
3325         ) as e:
3326 0             self._format_exception(e)
3327         # TODO insert exception vimconn.HTTP_Unauthorized
3328
3329     # ###### VIO Specific Changes #########
3330 1     def _generate_vlanID(self):
3331         """
3332         Method to get unused vlanID
3333             Args:
3334                 None
3335             Returns:
3336                 vlanID
3337         """
3338         # Get used VLAN IDs
3339 0         usedVlanIDs = []
3340 0         networks = self.get_network_list()
3341
3342 0         for net in networks:
3343 0             if net.get("provider:segmentation_id"):
3344 0                 usedVlanIDs.append(net.get("provider:segmentation_id"))
3345
3346 0         used_vlanIDs = set(usedVlanIDs)
3347
3348         # find unused VLAN ID
3349 0         for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3350 0             try:
3351 0                 start_vlanid, end_vlanid = map(
3352                     int, vlanID_range.replace(" ", "").split("-")
3353                 )
3354
3355 0                 for vlanID in range(start_vlanid, end_vlanid + 1):
3356 0                     if vlanID not in used_vlanIDs:
3357 0                         return vlanID
3358 0             except Exception as exp:
3359 0                 raise vimconn.VimConnException(
3360                     "Exception {} occurred while generating VLAN ID.".format(exp)
3361                 )
3362         else:
3363 0             raise vimconn.VimConnConflictException(
3364                 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3365                     self.config.get("dataplane_net_vlan_range")
3366                 )
3367             )
3368
3369 1     def _generate_multisegment_vlanID(self):
3370         """
3371         Method to get unused vlanID
3372         Args:
3373             None
3374         Returns:
3375             vlanID
3376         """
3377         # Get used VLAN IDs
3378 0         usedVlanIDs = []
3379 0         networks = self.get_network_list()
3380 0         for net in networks:
3381 0             if net.get("provider:network_type") == "vlan" and net.get(
3382                 "provider:segmentation_id"
3383             ):
3384 0                 usedVlanIDs.append(net.get("provider:segmentation_id"))
3385 0             elif net.get("segments"):
3386 0                 for segment in net.get("segments"):
3387 0                     if segment.get("provider:network_type") == "vlan" and segment.get(
3388                         "provider:segmentation_id"
3389                     ):
3390 0                         usedVlanIDs.append(segment.get("provider:segmentation_id"))
3391
3392 0         used_vlanIDs = set(usedVlanIDs)
3393
3394         # find unused VLAN ID
3395 0         for vlanID_range in self.config.get("multisegment_vlan_range"):
3396 0             try:
3397 0                 start_vlanid, end_vlanid = map(
3398                     int, vlanID_range.replace(" ", "").split("-")
3399                 )
3400
3401 0                 for vlanID in range(start_vlanid, end_vlanid + 1):
3402 0                     if vlanID not in used_vlanIDs:
3403 0                         return vlanID
3404 0             except Exception as exp:
3405 0                 raise vimconn.VimConnException(
3406                     "Exception {} occurred while generating VLAN ID.".format(exp)
3407                 )
3408         else:
3409 0             raise vimconn.VimConnConflictException(
3410                 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3411                     self.config.get("multisegment_vlan_range")
3412                 )
3413             )
3414
3415 1     def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3416         """
3417         Method to validate user given vlanID ranges
3418             Args:  None
3419             Returns: None
3420         """
3421 0         for vlanID_range in input_vlan_range:
3422 0             vlan_range = vlanID_range.replace(" ", "")
3423             # validate format
3424 0             vlanID_pattern = r"(\d)*-(\d)*$"
3425 0             match_obj = re.match(vlanID_pattern, vlan_range)
3426 0             if not match_obj:
3427 0                 raise vimconn.VimConnConflictException(
3428                     "Invalid VLAN range for {}: {}.You must provide "
3429                     "'{}' in format [start_ID - end_ID].".format(
3430                         text_vlan_range, vlanID_range, text_vlan_range
3431                     )
3432                 )
3433
3434 0             start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3435 0             if start_vlanid <= 0:
3436 0                 raise vimconn.VimConnConflictException(
3437                     "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3438                     "networks valid IDs are 1 to 4094 ".format(
3439                         text_vlan_range, vlanID_range
3440                     )
3441                 )
3442
3443 0             if end_vlanid > 4094:
3444 0                 raise vimconn.VimConnConflictException(
3445                     "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3446                     "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3447                         text_vlan_range, vlanID_range
3448                     )
3449                 )
3450
3451 0             if start_vlanid > end_vlanid:
3452 0                 raise vimconn.VimConnConflictException(
3453                     "Invalid VLAN range for {}: {}. You must provide '{}'"
3454                     " in format start_ID - end_ID and start_ID < end_ID ".format(
3455                         text_vlan_range, vlanID_range, text_vlan_range
3456                     )
3457                 )
3458
3459     # NOT USED FUNCTIONS
3460
3461 1     def new_external_port(self, port_data):
3462         """Adds a external port to VIM
3463         Returns the port identifier"""
3464         # TODO openstack if needed
3465 0         return (
3466             -vimconn.HTTP_Internal_Server_Error,
3467             "osconnector.new_external_port() not implemented",
3468         )
3469
3470 1     def connect_port_network(self, port_id, network_id, admin=False):
3471         """Connects a external port to a network
3472         Returns status code of the VIM response"""
3473         # TODO openstack if needed
3474 0         return (
3475             -vimconn.HTTP_Internal_Server_Error,
3476             "osconnector.connect_port_network() not implemented",
3477         )
3478
3479 1     def new_user(self, user_name, user_passwd, tenant_id=None):
3480         """Adds a new user to openstack VIM
3481         Returns the user identifier"""
3482 0         self.logger.debug("osconnector: Adding a new user to VIM")
3483
3484 0         try:
3485 0             self._reload_connection()
3486 0             user = self.keystone.users.create(
3487                 user_name, password=user_passwd, default_project=tenant_id
3488             )
3489             # self.keystone.tenants.add_user(self.k_creds["username"], #role)
3490
3491 0             return user.id
3492 0         except ksExceptions.ConnectionError as e:
3493 0             error_value = -vimconn.HTTP_Bad_Request
3494 0             error_text = (
3495                 type(e).__name__
3496                 + ": "
3497                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3498             )
3499 0         except ksExceptions.ClientException as e:  # TODO remove
3500 0             error_value = -vimconn.HTTP_Bad_Request
3501 0             error_text = (
3502                 type(e).__name__
3503                 + ": "
3504                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3505             )
3506
3507         # TODO insert exception vimconn.HTTP_Unauthorized
3508         # if reaching here is because an exception
3509 0         self.logger.debug("new_user " + error_text)
3510
3511 0         return error_value, error_text
3512
3513 1     def delete_user(self, user_id):
3514         """Delete a user from openstack VIM
3515         Returns the user identifier"""
3516 0         if self.debug:
3517 0             print("osconnector: Deleting  a  user from VIM")
3518
3519 0         try:
3520 0             self._reload_connection()
3521 0             self.keystone.users.delete(user_id)
3522
3523 0             return 1, user_id
3524 0         except ksExceptions.ConnectionError as e:
3525 0             error_value = -vimconn.HTTP_Bad_Request
3526 0             error_text = (
3527                 type(e).__name__
3528                 + ": "
3529                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3530             )
3531 0         except ksExceptions.NotFound as e:
3532 0             error_value = -vimconn.HTTP_Not_Found
3533 0             error_text = (
3534                 type(e).__name__
3535                 + ": "
3536                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3537             )
3538 0         except ksExceptions.ClientException as e:  # TODO remove
3539 0             error_value = -vimconn.HTTP_Bad_Request
3540 0             error_text = (
3541                 type(e).__name__
3542                 + ": "
3543                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3544             )
3545
3546         # TODO insert exception vimconn.HTTP_Unauthorized
3547         # if reaching here is because an exception
3548 0         self.logger.debug("delete_tenant " + error_text)
3549
3550 0         return error_value, error_text
3551
3552 1     def get_hosts_info(self):
3553         """Get the information of deployed hosts
3554         Returns the hosts content"""
3555 0         if self.debug:
3556 0             print("osconnector: Getting Host info from VIM")
3557
3558 0         try:
3559 0             h_list = []
3560 0             self._reload_connection()
3561 0             hypervisors = self.nova.hypervisors.list()
3562
3563 0             for hype in hypervisors:
3564 0                 h_list.append(hype.to_dict())
3565
3566 0             return 1, {"hosts": h_list}
3567 0         except nvExceptions.NotFound as e:
3568 0             error_value = -vimconn.HTTP_Not_Found
3569 0             error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3570 0         except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3571 0             error_value = -vimconn.HTTP_Bad_Request
3572 0             error_text = (
3573                 type(e).__name__
3574                 + ": "
3575                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3576             )
3577
3578         # TODO insert exception vimconn.HTTP_Unauthorized
3579         # if reaching here is because an exception
3580 0         self.logger.debug("get_hosts_info " + error_text)
3581
3582 0         return error_value, error_text
3583
3584 1     def get_hosts(self, vim_tenant):
3585         """Get the hosts and deployed instances
3586         Returns the hosts content"""
3587 0         r, hype_dict = self.get_hosts_info()
3588
3589 0         if r < 0:
3590 0             return r, hype_dict
3591
3592 0         hypervisors = hype_dict["hosts"]
3593
3594 0         try:
3595 0             servers = self.nova.servers.list()
3596 0             for hype in hypervisors:
3597 0                 for server in servers:
3598 0                     if (
3599                         server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3600                         == hype["hypervisor_hostname"]
3601                     ):
3602 0                         if "vm" in hype:
3603 0                             hype["vm"].append(server.id)
3604                         else:
3605 0                             hype["vm"] = [server.id]
3606
3607 0             return 1, hype_dict
3608 0         except nvExceptions.NotFound as e:
3609 0             error_value = -vimconn.HTTP_Not_Found
3610 0             error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3611 0         except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3612 0             error_value = -vimconn.HTTP_Bad_Request
3613 0             error_text = (
3614                 type(e).__name__
3615                 + ": "
3616                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3617             )
3618
3619         # TODO insert exception vimconn.HTTP_Unauthorized
3620         # if reaching here is because an exception
3621 0         self.logger.debug("get_hosts " + error_text)
3622
3623 0         return error_value, error_text
3624
3625 1     def new_classification(self, name, ctype, definition):
3626 0         self.logger.debug(
3627             "Adding a new (Traffic) Classification to VIM, named %s", name
3628         )
3629
3630 0         try:
3631 0             new_class = None
3632 0             self._reload_connection()
3633
3634 0             if ctype not in supportedClassificationTypes:
3635 0                 raise vimconn.VimConnNotSupportedException(
3636                     "OpenStack VIM connector does not support provided "
3637                     "Classification Type {}, supported ones are: {}".format(
3638                         ctype, supportedClassificationTypes
3639                     )
3640                 )
3641
3642 0             if not self._validate_classification(ctype, definition):
3643 0                 raise vimconn.VimConnException(
3644                     "Incorrect Classification definition for the type specified."
3645                 )
3646
3647 0             classification_dict = definition
3648 0             classification_dict["name"] = name
3649 0             new_class = self.neutron.create_sfc_flow_classifier(
3650                 {"flow_classifier": classification_dict}
3651             )
3652
3653 0             return new_class["flow_classifier"]["id"]
3654 0         except (
3655             neExceptions.ConnectionFailed,
3656             ksExceptions.ClientException,
3657             neExceptions.NeutronException,
3658             ConnectionError,
3659         ) as e:
3660 0             self.logger.error("Creation of Classification failed.")
3661 0             self._format_exception(e)
3662
3663 1     def get_classification(self, class_id):
3664 0         self.logger.debug(" Getting Classification %s from VIM", class_id)
3665 0         filter_dict = {"id": class_id}
3666 0         class_list = self.get_classification_list(filter_dict)
3667
3668 0         if len(class_list) == 0:
3669 0             raise vimconn.VimConnNotFoundException(
3670                 "Classification '{}' not found".format(class_id)
3671             )
3672 0         elif len(class_list) > 1:
3673 0             raise vimconn.VimConnConflictException(
3674                 "Found more than one Classification with this criteria"
3675             )
3676
3677 0         classification = class_list[0]
3678
3679 0         return classification
3680
3681 1     def get_classification_list(self, filter_dict={}):
3682 0         self.logger.debug(
3683             "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3684         )
3685
3686 0         try:
3687 0             filter_dict_os = filter_dict.copy()
3688 0             self._reload_connection()
3689
3690 0             if self.api_version3 and "tenant_id" in filter_dict_os:
3691 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3692
3693 0             classification_dict = self.neutron.list_sfc_flow_classifiers(
3694                 **filter_dict_os
3695             )
3696 0             classification_list = classification_dict["flow_classifiers"]
3697 0             self.__classification_os2mano(classification_list)
3698
3699 0             return classification_list
3700 0         except (
3701             neExceptions.ConnectionFailed,
3702             ksExceptions.ClientException,
3703             neExceptions.NeutronException,
3704             ConnectionError,
3705         ) as e:
3706 0             self._format_exception(e)
3707
3708 1     def delete_classification(self, class_id):
3709 0         self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3710
3711 0         try:
3712 0             self._reload_connection()
3713 0             self.neutron.delete_sfc_flow_classifier(class_id)
3714
3715 0             return class_id
3716 0         except (
3717             neExceptions.ConnectionFailed,
3718             neExceptions.NeutronException,
3719             ksExceptions.ClientException,
3720             neExceptions.NeutronException,
3721             ConnectionError,
3722         ) as e:
3723 0             self._format_exception(e)
3724
3725 1     def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3726 0         self.logger.debug(
3727             "Adding a new Service Function Instance to VIM, named '%s'", name
3728         )
3729
3730 0         try:
3731 0             new_sfi = None
3732 0             self._reload_connection()
3733 0             correlation = None
3734
3735 0             if sfc_encap:
3736 0                 correlation = "nsh"
3737
3738 0             if len(ingress_ports) != 1:
3739 0                 raise vimconn.VimConnNotSupportedException(
3740                     "OpenStack VIM connector can only have 1 ingress port per SFI"
3741                 )
3742
3743 0             if len(egress_ports) != 1:
3744 0                 raise vimconn.VimConnNotSupportedException(
3745                     "OpenStack VIM connector can only have 1 egress port per SFI"
3746                 )
3747
3748 0             sfi_dict = {
3749                 "name": name,
3750                 "ingress": ingress_ports[0],
3751                 "egress": egress_ports[0],
3752                 "service_function_parameters": {"correlation": correlation},
3753             }
3754 0             new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3755
3756 0             return new_sfi["port_pair"]["id"]
3757 0         except (
3758             neExceptions.ConnectionFailed,
3759             ksExceptions.ClientException,
3760             neExceptions.NeutronException,
3761             ConnectionError,
3762         ) as e:
3763 0             if new_sfi:
3764 0                 try:
3765 0                     self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3766 0                 except Exception:
3767 0                     self.logger.error(
3768                         "Creation of Service Function Instance failed, with "
3769                         "subsequent deletion failure as well."
3770                     )
3771
3772 0             self._format_exception(e)
3773
3774 1     def get_sfi(self, sfi_id):
3775 0         self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3776 0         filter_dict = {"id": sfi_id}
3777 0         sfi_list = self.get_sfi_list(filter_dict)
3778
3779 0         if len(sfi_list) == 0:
3780 0             raise vimconn.VimConnNotFoundException(
3781                 "Service Function Instance '{}' not found".format(sfi_id)
3782             )
3783 0         elif len(sfi_list) > 1:
3784 0             raise vimconn.VimConnConflictException(
3785                 "Found more than one Service Function Instance with this criteria"
3786             )
3787
3788 0         sfi = sfi_list[0]
3789
3790 0         return sfi
3791
3792 1     def get_sfi_list(self, filter_dict={}):
3793 0         self.logger.debug(
3794             "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3795         )
3796
3797 0         try:
3798 0             self._reload_connection()
3799 0             filter_dict_os = filter_dict.copy()
3800
3801 0             if self.api_version3 and "tenant_id" in filter_dict_os:
3802 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3803
3804 0             sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3805 0             sfi_list = sfi_dict["port_pairs"]
3806 0             self.__sfi_os2mano(sfi_list)
3807
3808 0             return sfi_list
3809 0         except (
3810             neExceptions.ConnectionFailed,
3811             ksExceptions.ClientException,
3812             neExceptions.NeutronException,
3813             ConnectionError,
3814         ) as e:
3815 0             self._format_exception(e)
3816
3817 1     def delete_sfi(self, sfi_id):
3818 0         self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3819
3820 0         try:
3821 0             self._reload_connection()
3822 0             self.neutron.delete_sfc_port_pair(sfi_id)
3823
3824 0             return sfi_id
3825 0         except (
3826             neExceptions.ConnectionFailed,
3827             neExceptions.NeutronException,
3828             ksExceptions.ClientException,
3829             neExceptions.NeutronException,
3830             ConnectionError,
3831         ) as e:
3832 0             self._format_exception(e)
3833
3834 1     def new_sf(self, name, sfis, sfc_encap=True):
3835 0         self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3836
3837 0         try:
3838 0             new_sf = None
3839 0             self._reload_connection()
3840             # correlation = None
3841             # if sfc_encap:
3842             #     correlation = "nsh"
3843
3844 0             for instance in sfis:
3845 0                 sfi = self.get_sfi(instance)
3846
3847 0                 if sfi.get("sfc_encap") != sfc_encap:
3848 0                     raise vimconn.VimConnNotSupportedException(
3849                         "OpenStack VIM connector requires all SFIs of the "
3850                         "same SF to share the same SFC Encapsulation"
3851                     )
3852
3853 0             sf_dict = {"name": name, "port_pairs": sfis}
3854 0             new_sf = self.neutron.create_sfc_port_pair_group(
3855                 {"port_pair_group": sf_dict}
3856             )
3857
3858 0             return new_sf["port_pair_group"]["id"]
3859 0         except (
3860             neExceptions.ConnectionFailed,
3861             ksExceptions.ClientException,
3862             neExceptions.NeutronException,
3863             ConnectionError,
3864         ) as e:
3865 0             if new_sf:
3866 0                 try:
3867 0                     self.neutron.delete_sfc_port_pair_group(
3868                         new_sf["port_pair_group"]["id"]
3869                     )
3870 0                 except Exception:
3871 0                     self.logger.error(
3872                         "Creation of Service Function failed, with "
3873                         "subsequent deletion failure as well."
3874                     )
3875
3876 0             self._format_exception(e)
3877
3878 1     def get_sf(self, sf_id):
3879 0         self.logger.debug("Getting Service Function %s from VIM", sf_id)
3880 0         filter_dict = {"id": sf_id}
3881 0         sf_list = self.get_sf_list(filter_dict)
3882
3883 0         if len(sf_list) == 0:
3884 0             raise vimconn.VimConnNotFoundException(
3885                 "Service Function '{}' not found".format(sf_id)
3886             )
3887 0         elif len(sf_list) > 1:
3888 0             raise vimconn.VimConnConflictException(
3889                 "Found more than one Service Function with this criteria"
3890             )
3891
3892 0         sf = sf_list[0]
3893
3894 0         return sf
3895
3896 1     def get_sf_list(self, filter_dict={}):
3897 0         self.logger.debug(
3898             "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3899         )
3900
3901 0         try:
3902 0             self._reload_connection()
3903 0             filter_dict_os = filter_dict.copy()
3904
3905 0             if self.api_version3 and "tenant_id" in filter_dict_os:
3906 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3907
3908 0             sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3909 0             sf_list = sf_dict["port_pair_groups"]
3910 0             self.__sf_os2mano(sf_list)
3911
3912 0             return sf_list
3913 0         except (
3914             neExceptions.ConnectionFailed,
3915             ksExceptions.ClientException,
3916             neExceptions.NeutronException,
3917             ConnectionError,
3918         ) as e:
3919 0             self._format_exception(e)
3920
3921 1     def delete_sf(self, sf_id):
3922 0         self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3923
3924 0         try:
3925 0             self._reload_connection()
3926 0             self.neutron.delete_sfc_port_pair_group(sf_id)
3927
3928 0             return sf_id
3929 0         except (
3930             neExceptions.ConnectionFailed,
3931             neExceptions.NeutronException,
3932             ksExceptions.ClientException,
3933             neExceptions.NeutronException,
3934             ConnectionError,
3935         ) as e:
3936 0             self._format_exception(e)
3937
3938 1     def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3939 0         self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3940
3941 0         try:
3942 0             new_sfp = None
3943 0             self._reload_connection()
3944             # In networking-sfc the MPLS encapsulation is legacy
3945             # should be used when no full SFC Encapsulation is intended
3946 0             correlation = "mpls"
3947
3948 0             if sfc_encap:
3949 0                 correlation = "nsh"
3950
3951 0             sfp_dict = {
3952                 "name": name,
3953                 "flow_classifiers": classifications,
3954                 "port_pair_groups": sfs,
3955                 "chain_parameters": {"correlation": correlation},
3956             }
3957
3958 0             if spi:
3959 0                 sfp_dict["chain_id"] = spi
3960
3961 0             new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3962
3963 0             return new_sfp["port_chain"]["id"]
3964 0         except (
3965             neExceptions.ConnectionFailed,
3966             ksExceptions.ClientException,
3967             neExceptions.NeutronException,
3968             ConnectionError,
3969         ) as e:
3970 0             if new_sfp:
3971 0                 try:
3972 0                     self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3973 0                 except Exception:
3974 0                     self.logger.error(
3975                         "Creation of Service Function Path failed, with "
3976                         "subsequent deletion failure as well."
3977                     )
3978
3979 0             self._format_exception(e)
3980
3981 1     def get_sfp(self, sfp_id):
3982 0         self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
3983
3984 0         filter_dict = {"id": sfp_id}
3985 0         sfp_list = self.get_sfp_list(filter_dict)
3986
3987 0         if len(sfp_list) == 0:
3988 0             raise vimconn.VimConnNotFoundException(
3989                 "Service Function Path '{}' not found".format(sfp_id)
3990             )
3991 0         elif len(sfp_list) > 1:
3992 0             raise vimconn.VimConnConflictException(
3993                 "Found more than one Service Function Path with this criteria"
3994             )
3995
3996 0         sfp = sfp_list[0]
3997
3998 0         return sfp
3999
4000 1     def get_sfp_list(self, filter_dict={}):
4001 0         self.logger.debug(
4002             "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
4003         )
4004
4005 0         try:
4006 0             self._reload_connection()
4007 0             filter_dict_os = filter_dict.copy()
4008
4009 0             if self.api_version3 and "tenant_id" in filter_dict_os:
4010 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
4011
4012 0             sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
4013 0             sfp_list = sfp_dict["port_chains"]
4014 0             self.__sfp_os2mano(sfp_list)
4015
4016 0             return sfp_list
4017 0         except (
4018             neExceptions.ConnectionFailed,
4019             ksExceptions.ClientException,
4020             neExceptions.NeutronException,
4021             ConnectionError,
4022         ) as e:
4023 0             self._format_exception(e)
4024
4025 1     def delete_sfp(self, sfp_id):
4026 0         self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
4027
4028 0         try:
4029 0             self._reload_connection()
4030 0             self.neutron.delete_sfc_port_chain(sfp_id)
4031
4032 0             return sfp_id
4033 0         except (
4034             neExceptions.ConnectionFailed,
4035             neExceptions.NeutronException,
4036             ksExceptions.ClientException,
4037             neExceptions.NeutronException,
4038             ConnectionError,
4039         ) as e:
4040 0             self._format_exception(e)
4041
4042 1     def refresh_sfps_status(self, sfp_list):
4043         """Get the status of the service function path
4044         Params: the list of sfp identifiers
4045         Returns a dictionary with:
4046             vm_id:          #VIM id of this service function path
4047                 status:     #Mandatory. Text with one of:
4048                             #  DELETED (not found at vim)
4049                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4050                             #  OTHER (Vim reported other status not understood)
4051                             #  ERROR (VIM indicates an ERROR status)
4052                             #  ACTIVE,
4053                             #  CREATING (on building process)
4054                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
4055                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)F
4056         """
4057 0         sfp_dict = {}
4058 0         self.logger.debug(
4059             "refresh_sfps status: Getting tenant SFP information from VIM"
4060         )
4061
4062 0         for sfp_id in sfp_list:
4063 0             sfp = {}
4064
4065 0             try:
4066 0                 sfp_vim = self.get_sfp(sfp_id)
4067
4068 0                 if sfp_vim["spi"]:
4069 0                     sfp["status"] = vmStatus2manoFormat["ACTIVE"]
4070                 else:
4071 0                     sfp["status"] = "OTHER"
4072 0                     sfp["error_msg"] = "VIM status reported " + sfp["status"]
4073
4074 0                 sfp["vim_info"] = self.serialize(sfp_vim)
4075
4076 0                 if sfp_vim.get("fault"):
4077 0                     sfp["error_msg"] = str(sfp_vim["fault"])
4078 0             except vimconn.VimConnNotFoundException as e:
4079 0                 self.logger.error("Exception getting sfp status: %s", str(e))
4080 0                 sfp["status"] = "DELETED"
4081 0                 sfp["error_msg"] = str(e)
4082 0             except vimconn.VimConnException as e:
4083 0                 self.logger.error("Exception getting sfp status: %s", str(e))
4084 0                 sfp["status"] = "VIM_ERROR"
4085 0                 sfp["error_msg"] = str(e)
4086
4087 0             sfp_dict[sfp_id] = sfp
4088
4089 0         return sfp_dict
4090
4091 1     def refresh_sfis_status(self, sfi_list):
4092         """Get the status of the service function instances
4093         Params: the list of sfi identifiers
4094         Returns a dictionary with:
4095             vm_id:          #VIM id of this service function instance
4096                 status:     #Mandatory. Text with one of:
4097                             #  DELETED (not found at vim)
4098                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4099                             #  OTHER (Vim reported other status not understood)
4100                             #  ERROR (VIM indicates an ERROR status)
4101                             #  ACTIVE,
4102                             #  CREATING (on building process)
4103                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
4104                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
4105         """
4106 0         sfi_dict = {}
4107 0         self.logger.debug(
4108             "refresh_sfis status: Getting tenant sfi information from VIM"
4109         )
4110
4111 0         for sfi_id in sfi_list:
4112 0             sfi = {}
4113
4114 0             try:
4115 0                 sfi_vim = self.get_sfi(sfi_id)
4116
4117 0                 if sfi_vim:
4118 0                     sfi["status"] = vmStatus2manoFormat["ACTIVE"]
4119                 else:
4120 0                     sfi["status"] = "OTHER"
4121 0                     sfi["error_msg"] = "VIM status reported " + sfi["status"]
4122
4123 0                 sfi["vim_info"] = self.serialize(sfi_vim)
4124
4125 0                 if sfi_vim.get("fault"):
4126 0                     sfi["error_msg"] = str(sfi_vim["fault"])
4127 0             except vimconn.VimConnNotFoundException as e:
4128 0                 self.logger.error("Exception getting sfi status: %s", str(e))
4129 0                 sfi["status"] = "DELETED"
4130 0                 sfi["error_msg"] = str(e)
4131 0             except vimconn.VimConnException as e:
4132 0                 self.logger.error("Exception getting sfi status: %s", str(e))
4133 0                 sfi["status"] = "VIM_ERROR"
4134 0                 sfi["error_msg"] = str(e)
4135
4136 0             sfi_dict[sfi_id] = sfi
4137
4138 0         return sfi_dict
4139
4140 1     def refresh_sfs_status(self, sf_list):
4141         """Get the status of the service functions
4142         Params: the list of sf identifiers
4143         Returns a dictionary with:
4144             vm_id:          #VIM id of this service function
4145                 status:     #Mandatory. Text with one of:
4146                             #  DELETED (not found at vim)
4147                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4148                             #  OTHER (Vim reported other status not understood)
4149                             #  ERROR (VIM indicates an ERROR status)
4150                             #  ACTIVE,
4151                             #  CREATING (on building process)
4152                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
4153                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
4154         """
4155 0         sf_dict = {}
4156 0         self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
4157
4158 0         for sf_id in sf_list:
4159 0             sf = {}
4160
4161 0             try:
4162 0                 sf_vim = self.get_sf(sf_id)
4163
4164 0                 if sf_vim:
4165 0                     sf["status"] = vmStatus2manoFormat["ACTIVE"]
4166                 else:
4167 0                     sf["status"] = "OTHER"
4168 0                     sf["error_msg"] = "VIM status reported " + sf_vim["status"]
4169
4170 0                 sf["vim_info"] = self.serialize(sf_vim)
4171
4172 0                 if sf_vim.get("fault"):
4173 0                     sf["error_msg"] = str(sf_vim["fault"])
4174 0             except vimconn.VimConnNotFoundException as e:
4175 0                 self.logger.error("Exception getting sf status: %s", str(e))
4176 0                 sf["status"] = "DELETED"
4177 0                 sf["error_msg"] = str(e)
4178 0             except vimconn.VimConnException as e:
4179 0                 self.logger.error("Exception getting sf status: %s", str(e))
4180 0                 sf["status"] = "VIM_ERROR"
4181 0                 sf["error_msg"] = str(e)
4182
4183 0             sf_dict[sf_id] = sf
4184
4185 0         return sf_dict
4186
4187 1     def refresh_classifications_status(self, classification_list):
4188         """Get the status of the classifications
4189         Params: the list of classification identifiers
4190         Returns a dictionary with:
4191             vm_id:          #VIM id of this classifier
4192                 status:     #Mandatory. Text with one of:
4193                             #  DELETED (not found at vim)
4194                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4195                             #  OTHER (Vim reported other status not understood)
4196                             #  ERROR (VIM indicates an ERROR status)
4197                             #  ACTIVE,
4198                             #  CREATING (on building process)
4199                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
4200                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
4201         """
4202 0         classification_dict = {}
4203 0         self.logger.debug(
4204             "refresh_classifications status: Getting tenant classification information from VIM"
4205         )
4206
4207 0         for classification_id in classification_list:
4208 0             classification = {}
4209
4210 0             try:
4211 0                 classification_vim = self.get_classification(classification_id)
4212
4213 0                 if classification_vim:
4214 0                     classification["status"] = vmStatus2manoFormat["ACTIVE"]
4215                 else:
4216 0                     classification["status"] = "OTHER"
4217 0                     classification["error_msg"] = (
4218                         "VIM status reported " + classification["status"]
4219                     )
4220
4221 0                 classification["vim_info"] = self.serialize(classification_vim)
4222
4223 0                 if classification_vim.get("fault"):
4224 0                     classification["error_msg"] = str(classification_vim["fault"])
4225 0             except vimconn.VimConnNotFoundException as e:
4226 0                 self.logger.error("Exception getting classification status: %s", str(e))
4227 0                 classification["status"] = "DELETED"
4228 0                 classification["error_msg"] = str(e)
4229 0             except vimconn.VimConnException as e:
4230 0                 self.logger.error("Exception getting classification status: %s", str(e))
4231 0                 classification["status"] = "VIM_ERROR"
4232 0                 classification["error_msg"] = str(e)
4233
4234 0             classification_dict[classification_id] = classification
4235
4236 0         return classification_dict
4237
4238 1     def new_affinity_group(self, affinity_group_data):
4239         """Adds a server group to VIM
4240             affinity_group_data contains a dictionary with information, keys:
4241                 name: name in VIM for the server group
4242                 type: affinity or anti-affinity
4243                 scope: Only nfvi-node allowed
4244         Returns the server group identifier"""
4245 0         self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
4246
4247 0         try:
4248 0             name = affinity_group_data["name"]
4249 0             policy = affinity_group_data["type"]
4250
4251 0             self._reload_connection()
4252 0             new_server_group = self.nova.server_groups.create(name, policy)
4253
4254 0             return new_server_group.id
4255 0         except (
4256             ksExceptions.ClientException,
4257             nvExceptions.ClientException,
4258             ConnectionError,
4259             KeyError,
4260         ) as e:
4261 0             self._format_exception(e)
4262
4263 1     def get_affinity_group(self, affinity_group_id):
4264         """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4265 0         self.logger.debug("Getting flavor '%s'", affinity_group_id)
4266 0         try:
4267 0             self._reload_connection()
4268 0             server_group = self.nova.server_groups.find(id=affinity_group_id)
4269
4270 0             return server_group.to_dict()
4271 0         except (
4272             nvExceptions.NotFound,
4273             nvExceptions.ClientException,
4274             ksExceptions.ClientException,
4275             ConnectionError,
4276         ) as e:
4277 0             self._format_exception(e)
4278
4279 1     def delete_affinity_group(self, affinity_group_id):
4280         """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4281 0         self.logger.debug("Getting server group '%s'", affinity_group_id)
4282 0         try:
4283 0             self._reload_connection()
4284 0             self.nova.server_groups.delete(affinity_group_id)
4285
4286 0             return affinity_group_id
4287 0         except (
4288             nvExceptions.NotFound,
4289             ksExceptions.ClientException,
4290             nvExceptions.ClientException,
4291             ConnectionError,
4292         ) as e:
4293 0             self._format_exception(e)
4294
4295 1     def get_vdu_state(self, vm_id):
4296         """
4297         Getting the state of a vdu
4298         param:
4299             vm_id: ID of an instance
4300         """
4301 0         self.logger.debug("Getting the status of VM")
4302 0         self.logger.debug("VIM VM ID %s", vm_id)
4303 0         self._reload_connection()
4304 0         server = self.nova.servers.find(id=vm_id)
4305 0         server_dict = server.to_dict()
4306 0         vdu_data = [
4307             server_dict["status"],
4308             server_dict["flavor"]["id"],
4309             server_dict["OS-EXT-SRV-ATTR:host"],
4310             server_dict["OS-EXT-AZ:availability_zone"],
4311         ]
4312 0         self.logger.debug("vdu_data %s", vdu_data)
4313 0         return vdu_data
4314
4315 1     def check_compute_availability(self, host, server_flavor_details):
4316 0         self._reload_connection()
4317 0         hypervisor_search = self.nova.hypervisors.search(
4318             hypervisor_match=host, servers=True
4319         )
4320 0         for hypervisor in hypervisor_search:
4321 0             hypervisor_id = hypervisor.to_dict()["id"]
4322 0             hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4323 0             hypervisor_dict = hypervisor_details.to_dict()
4324 0             hypervisor_temp = json.dumps(hypervisor_dict)
4325 0             hypervisor_json = json.loads(hypervisor_temp)
4326 0             resources_available = [
4327                 hypervisor_json["free_ram_mb"],
4328                 hypervisor_json["disk_available_least"],
4329                 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
4330             ]
4331 0             compute_available = all(
4332                 x > y for x, y in zip(resources_available, server_flavor_details)
4333             )
4334 0             if compute_available:
4335 0                 return host
4336
4337 1     def check_availability_zone(
4338         self, old_az, server_flavor_details, old_host, host=None
4339     ):
4340 0         self._reload_connection()
4341 0         az_check = {"zone_check": False, "compute_availability": None}
4342 0         aggregates_list = self.nova.aggregates.list()
4343 0         for aggregate in aggregates_list:
4344 0             aggregate_details = aggregate.to_dict()
4345 0             aggregate_temp = json.dumps(aggregate_details)
4346 0             aggregate_json = json.loads(aggregate_temp)
4347 0             if aggregate_json["availability_zone"] == old_az:
4348 0                 hosts_list = aggregate_json["hosts"]
4349 0                 if host is not None:
4350 0                     if host in hosts_list:
4351 0                         az_check["zone_check"] = True
4352 0                         available_compute_id = self.check_compute_availability(
4353                             host, server_flavor_details
4354                         )
4355 0                         if available_compute_id is not None:
4356 0                             az_check["compute_availability"] = available_compute_id
4357                 else:
4358 0                     for check_host in hosts_list:
4359 0                         if check_host != old_host:
4360 0                             available_compute_id = self.check_compute_availability(
4361                                 check_host, server_flavor_details
4362                             )
4363 0                             if available_compute_id is not None:
4364 0                                 az_check["zone_check"] = True
4365 0                                 az_check["compute_availability"] = available_compute_id
4366 0                                 break
4367                     else:
4368 0                         az_check["zone_check"] = True
4369 0         return az_check
4370
4371 1     def migrate_instance(self, vm_id, compute_host=None):
4372         """
4373         Migrate a vdu
4374         param:
4375             vm_id: ID of an instance
4376             compute_host: Host to migrate the vdu to
4377         """
4378 0         self._reload_connection()
4379 0         vm_state = False
4380 0         instance_state = self.get_vdu_state(vm_id)
4381 0         server_flavor_id = instance_state[1]
4382 0         server_hypervisor_name = instance_state[2]
4383 0         server_availability_zone = instance_state[3]
4384 0         try:
4385 0             server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
4386 0             server_flavor_details = [
4387                 server_flavor["ram"],
4388                 server_flavor["disk"],
4389                 server_flavor["vcpus"],
4390             ]
4391 0             if compute_host == server_hypervisor_name:
4392 0                 raise vimconn.VimConnException(
4393                     "Unable to migrate instance '{}' to the same host '{}'".format(
4394                         vm_id, compute_host
4395                     ),
4396                     http_code=vimconn.HTTP_Bad_Request,
4397                 )
4398 0             az_status = self.check_availability_zone(
4399                 server_availability_zone,
4400                 server_flavor_details,
4401                 server_hypervisor_name,
4402                 compute_host,
4403             )
4404 0             availability_zone_check = az_status["zone_check"]
4405 0             available_compute_id = az_status.get("compute_availability")
4406
4407 0             if availability_zone_check is False:
4408 0                 raise vimconn.VimConnException(
4409                     "Unable to migrate instance '{}' to a different availability zone".format(
4410                         vm_id
4411                     ),
4412                     http_code=vimconn.HTTP_Bad_Request,
4413                 )
4414 0             if available_compute_id is not None:
4415 0                 self.nova.servers.live_migrate(
4416                     server=vm_id,
4417                     host=available_compute_id,
4418                     block_migration=True,
4419                     disk_over_commit=False,
4420                 )
4421 0                 state = "MIGRATING"
4422 0                 changed_compute_host = ""
4423 0                 if state == "MIGRATING":
4424 0                     vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
4425 0                     changed_compute_host = self.get_vdu_state(vm_id)[2]
4426 0                 if vm_state and changed_compute_host == available_compute_id:
4427 0                     self.logger.debug(
4428                         "Instance '{}' migrated to the new compute host '{}'".format(
4429                             vm_id, changed_compute_host
4430                         )
4431                     )
4432 0                     return state, available_compute_id
4433                 else:
4434 0                     raise vimconn.VimConnException(
4435                         "Migration Failed. Instance '{}' not moved to the new host {}".format(
4436                             vm_id, available_compute_id
4437                         ),
4438                         http_code=vimconn.HTTP_Bad_Request,
4439                     )
4440             else:
4441 0                 raise vimconn.VimConnException(
4442                     "Compute '{}' not available or does not have enough resources to migrate the instance".format(
4443                         available_compute_id
4444                     ),
4445                     http_code=vimconn.HTTP_Bad_Request,
4446                 )
4447 0         except (
4448             nvExceptions.BadRequest,
4449             nvExceptions.ClientException,
4450             nvExceptions.NotFound,
4451         ) as e:
4452 0             self._format_exception(e)
4453
4454 1     def resize_instance(self, vm_id, new_flavor_id):
4455         """
4456         For resizing the vm based on the given
4457         flavor details
4458         param:
4459             vm_id : ID of an instance
4460             new_flavor_id : Flavor id to be resized
4461         Return the status of a resized instance
4462         """
4463 0         self._reload_connection()
4464 0         self.logger.debug("resize the flavor of an instance")
4465 0         instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
4466 0         old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
4467 0         new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
4468 0         try:
4469 0             if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
4470 0                 if old_flavor_disk > new_flavor_disk:
4471 0                     raise nvExceptions.BadRequest(
4472                         400,
4473                         message="Server disk resize failed. Resize to lower disk flavor is not allowed",
4474                     )
4475                 else:
4476 0                     self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
4477 0                     vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
4478 0                     if vm_state:
4479 0                         instance_resized_status = self.confirm_resize(vm_id)
4480 0                         return instance_resized_status
4481                     else:
4482 0                         raise nvExceptions.BadRequest(
4483                             409,
4484                             message="Cannot 'resize' vm_state is in ERROR",
4485                         )
4486
4487             else:
4488 0                 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
4489 0                 raise nvExceptions.BadRequest(
4490                     409,
4491                     message="Cannot 'resize' instance while it is in vm_state resized",
4492                 )
4493 0         except (
4494             nvExceptions.BadRequest,
4495             nvExceptions.ClientException,
4496             nvExceptions.NotFound,
4497         ) as e:
4498 0             self._format_exception(e)
4499
4500 1     def confirm_resize(self, vm_id):
4501         """
4502         Confirm the resize of an instance
4503         param:
4504             vm_id: ID of an instance
4505         """
4506 0         self._reload_connection()
4507 0         self.nova.servers.confirm_resize(server=vm_id)
4508 0         if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4509 0             self.__wait_for_vm(vm_id, "ACTIVE")
4510 0         instance_status = self.get_vdu_state(vm_id)[0]
4511 0         return instance_status
4512
4513 1     def get_monitoring_data(self):
4514 1         try:
4515 1             self.logger.debug("Getting servers and ports data from Openstack VIMs.")
4516 1             self._reload_connection()
4517 1             all_servers = self.nova.servers.list(detailed=True)
4518 1             all_ports = self.neutron.list_ports()
4519 1             return all_servers, all_ports
4520 1         except (
4521             vimconn.VimConnException,
4522             vimconn.VimConnNotFoundException,
4523             vimconn.VimConnConnectionException,
4524         ) as e:
4525 1             raise vimconn.VimConnException(
4526                 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
4527             )