Code Coverage

Cobertura Coverage Report > RO-VIM-openstack.osm_rovim_openstack >

vimconn_openstack.py

Trend

File Coverage summary

Name | Classes | Lines | Conditionals
vimconn_openstack.py
100%
1/1
45%
819/1821
100%
0/0

Coverage Breakdown by Class

Name | Lines | Conditionals
vimconn_openstack.py
45%
819/1821
N/A

Source

RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 #         http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 1 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 1 import copy
34 1 from http.client import HTTPException
35 1 import json
36 1 import logging
37 1 from pprint import pformat
38 1 import random
39 1 import re
40 1 import time
41 1 from typing import Dict, List, Optional, Tuple
42
43 1 from cinderclient import client as cClient
44 1 from glanceclient import client as glClient
45 1 import glanceclient.exc as gl1Exceptions
46 1 from keystoneauth1 import session
47 1 from keystoneauth1.identity import v2, v3
48 1 import keystoneclient.exceptions as ksExceptions
49 1 import keystoneclient.v2_0.client as ksClient_v2
50 1 import keystoneclient.v3.client as ksClient_v3
51 1 import netaddr
52 1 from neutronclient.common import exceptions as neExceptions
53 1 from neutronclient.neutron import client as neClient
54 1 from novaclient import client as nClient, exceptions as nvExceptions
55 1 from osm_ro_plugin import vimconn
56 1 from requests.exceptions import ConnectionError
57 1 import yaml
58
59 1 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 1 __date__ = "$22-sep-2017 23:59:59$"
61
62 """contain the openstack virtual machine status to openmano status"""
63 1 vmStatus2manoFormat = {
64     "ACTIVE": "ACTIVE",
65     "PAUSED": "PAUSED",
66     "SUSPENDED": "SUSPENDED",
67     "SHUTOFF": "INACTIVE",
68     "BUILD": "BUILD",
69     "ERROR": "ERROR",
70     "DELETED": "DELETED",
71 }
72 1 netStatus2manoFormat = {
73     "ACTIVE": "ACTIVE",
74     "PAUSED": "PAUSED",
75     "INACTIVE": "INACTIVE",
76     "BUILD": "BUILD",
77     "ERROR": "ERROR",
78     "DELETED": "DELETED",
79 }
80
81 1 supportedClassificationTypes = ["legacy_flow_classifier"]
82
83 # global var to have a timeout creating and deleting volumes
84 1 volume_timeout = 1800
85 1 server_timeout = 1800
86
87
88 1 class SafeDumper(yaml.SafeDumper):
89 1     def represent_data(self, data):
90         # Openstack APIs use custom subclasses of dict and YAML safe dumper
91         # is designed to not handle that (reference issue 142 of pyyaml)
92 0         if isinstance(data, dict) and data.__class__ != dict:
93             # A simple solution is to convert those items back to dicts
94 0             data = dict(data.items())
95
96 0         return super(SafeDumper, self).represent_data(data)
97
98
99 1 class vimconnector(vimconn.VimConnector):
100 1     def __init__(
101         self,
102         uuid,
103         name,
104         tenant_id,
105         tenant_name,
106         url,
107         url_admin=None,
108         user=None,
109         passwd=None,
110         log_level=None,
111         config={},
112         persistent_info={},
113     ):
114         """using common constructor parameters. In this case
115         'url' is the keystone authorization url,
116         'url_admin' is not use
117         """
118 1         api_version = config.get("APIversion")
119
120 1         if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 0             raise vimconn.VimConnException(
122                 "Invalid value '{}' for config:APIversion. "
123                 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124             )
125
126 1         vim_type = config.get("vim_type")
127
128 1         if vim_type and vim_type not in ("vio", "VIO"):
129 0             raise vimconn.VimConnException(
130                 "Invalid value '{}' for config:vim_type."
131                 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132             )
133
134 1         if config.get("dataplane_net_vlan_range") is not None:
135             # validate vlan ranges provided by user
136 0             self._validate_vlan_ranges(
137                 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138             )
139
140 1         if config.get("multisegment_vlan_range") is not None:
141             # validate vlan ranges provided by user
142 0             self._validate_vlan_ranges(
143                 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144             )
145
146 1         vimconn.VimConnector.__init__(
147             self,
148             uuid,
149             name,
150             tenant_id,
151             tenant_name,
152             url,
153             url_admin,
154             user,
155             passwd,
156             log_level,
157             config,
158         )
159
160 1         if self.config.get("insecure") and self.config.get("ca_cert"):
161 0             raise vimconn.VimConnException(
162                 "options insecure and ca_cert are mutually exclusive"
163             )
164
165 1         self.verify = True
166
167 1         if self.config.get("insecure"):
168 0             self.verify = False
169
170 1         if self.config.get("ca_cert"):
171 0             self.verify = self.config.get("ca_cert")
172
173 1         if not url:
174 0             raise TypeError("url param can not be NoneType")
175
176 1         self.persistent_info = persistent_info
177 1         self.availability_zone = persistent_info.get("availability_zone", None)
178 1         self.session = persistent_info.get("session", {"reload_client": True})
179 1         self.my_tenant_id = self.session.get("my_tenant_id")
180 1         self.nova = self.session.get("nova")
181 1         self.neutron = self.session.get("neutron")
182 1         self.cinder = self.session.get("cinder")
183 1         self.glance = self.session.get("glance")
184         # self.glancev1 = self.session.get("glancev1")
185 1         self.keystone = self.session.get("keystone")
186 1         self.api_version3 = self.session.get("api_version3")
187 1         self.vim_type = self.config.get("vim_type")
188
189 1         if self.vim_type:
190 0             self.vim_type = self.vim_type.upper()
191
192 1         if self.config.get("use_internal_endpoint"):
193 0             self.endpoint_type = "internalURL"
194         else:
195 1             self.endpoint_type = None
196
197 1         logging.getLogger("urllib3").setLevel(logging.WARNING)
198 1         logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 1         logging.getLogger("novaclient").setLevel(logging.WARNING)
200 1         self.logger = logging.getLogger("ro.vim.openstack")
201
202         # allow security_groups to be a list or a single string
203 1         if isinstance(self.config.get("security_groups"), str):
204 0             self.config["security_groups"] = [self.config["security_groups"]]
205
206 1         self.security_groups_id = None
207
208         # ###### VIO Specific Changes #########
209 1         if self.vim_type == "VIO":
210 0             self.logger = logging.getLogger("ro.vim.vio")
211
212 1         if log_level:
213 0             self.logger.setLevel(getattr(logging, log_level))
214
215 1     def __getitem__(self, index):
216         """Get individuals parameters.
217         Throw KeyError"""
218 0         if index == "project_domain_id":
219 0             return self.config.get("project_domain_id")
220 0         elif index == "user_domain_id":
221 0             return self.config.get("user_domain_id")
222         else:
223 0             return vimconn.VimConnector.__getitem__(self, index)
224
225 1     def __setitem__(self, index, value):
226         """Set individuals parameters and it is marked as dirty so to force connection reload.
227         Throw KeyError"""
228 0         if index == "project_domain_id":
229 0             self.config["project_domain_id"] = value
230 0         elif index == "user_domain_id":
231 0             self.config["user_domain_id"] = value
232         else:
233 0             vimconn.VimConnector.__setitem__(self, index, value)
234
235 0         self.session["reload_client"] = True
236
237 1     def serialize(self, value):
238         """Serialization of python basic types.
239
240         In the case value is not serializable a message will be logged and a
241         simple representation of the data that cannot be converted back to
242         python is returned.
243         """
244 0         if isinstance(value, str):
245 0             return value
246
247 0         try:
248 0             return yaml.dump(
249                 value, Dumper=SafeDumper, default_flow_style=True, width=256
250             )
251 0         except yaml.representer.RepresenterError:
252 0             self.logger.debug(
253                 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254                 pformat(value),
255                 exc_info=True,
256             )
257
258 0             return str(value)
259
260 1     def _reload_connection(self):
261         """Called before any operation, it check if credentials has changed
262         Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
263         """
264         # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
265 1         if self.session["reload_client"]:
266 1             if self.config.get("APIversion"):
267 0                 self.api_version3 = (
268                     self.config["APIversion"] == "v3.3"
269                     or self.config["APIversion"] == "3"
270                 )
271             else:  # get from ending auth_url that end with v3 or with v2.0
272 1                 self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
273                     "/v3/"
274                 )
275
276 1             self.session["api_version3"] = self.api_version3
277
278 1             if self.api_version3:
279 0                 if self.config.get("project_domain_id") or self.config.get(
280                     "project_domain_name"
281                 ):
282 0                     project_domain_id_default = None
283                 else:
284 0                     project_domain_id_default = "default"
285
286 0                 if self.config.get("user_domain_id") or self.config.get(
287                     "user_domain_name"
288                 ):
289 0                     user_domain_id_default = None
290                 else:
291 0                     user_domain_id_default = "default"
292 0                 auth = v3.Password(
293                     auth_url=self.url,
294                     username=self.user,
295                     password=self.passwd,
296                     project_name=self.tenant_name,
297                     project_id=self.tenant_id,
298                     project_domain_id=self.config.get(
299                         "project_domain_id", project_domain_id_default
300                     ),
301                     user_domain_id=self.config.get(
302                         "user_domain_id", user_domain_id_default
303                     ),
304                     project_domain_name=self.config.get("project_domain_name"),
305                     user_domain_name=self.config.get("user_domain_name"),
306                 )
307             else:
308 1                 auth = v2.Password(
309                     auth_url=self.url,
310                     username=self.user,
311                     password=self.passwd,
312                     tenant_name=self.tenant_name,
313                     tenant_id=self.tenant_id,
314                 )
315
316 1             sess = session.Session(auth=auth, verify=self.verify)
317             # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318             # Titanium cloud and StarlingX
319 1             region_name = self.config.get("region_name")
320
321 1             if self.api_version3:
322 0                 self.keystone = ksClient_v3.Client(
323                     session=sess,
324                     endpoint_type=self.endpoint_type,
325                     region_name=region_name,
326                 )
327             else:
328 1                 self.keystone = ksClient_v2.Client(
329                     session=sess, endpoint_type=self.endpoint_type
330                 )
331
332 1             self.session["keystone"] = self.keystone
333             # In order to enable microversion functionality an explicit microversion must be specified in "config".
334             # This implementation approach is due to the warning message in
335             # https://developer.openstack.org/api-guide/compute/microversions.html
336             # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337             # always require an specific microversion.
338             # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
339 1             version = self.config.get("microversion")
340
341 1             if not version:
342 1                 version = "2.1"
343
344             # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345             # Titanium cloud and StarlingX
346 1             self.nova = self.session["nova"] = nClient.Client(
347                 str(version),
348                 session=sess,
349                 endpoint_type=self.endpoint_type,
350                 region_name=region_name,
351             )
352 1             self.neutron = self.session["neutron"] = neClient.Client(
353                 "2.0",
354                 session=sess,
355                 endpoint_type=self.endpoint_type,
356                 region_name=region_name,
357             )
358 1             self.cinder = self.session["cinder"] = cClient.Client(
359                 2,
360                 session=sess,
361                 endpoint_type=self.endpoint_type,
362                 region_name=region_name,
363             )
364
365 1             try:
366 1                 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
367 1             except Exception:
368 1                 self.logger.error("Cannot get project_id from session", exc_info=True)
369
370 1             if self.endpoint_type == "internalURL":
371 0                 glance_service_id = self.keystone.services.list(name="glance")[0].id
372 0                 glance_endpoint = self.keystone.endpoints.list(
373                     glance_service_id, interface="internal"
374                 )[0].url
375             else:
376 1                 glance_endpoint = None
377
378 1             self.glance = self.session["glance"] = glClient.Client(
379                 2, session=sess, endpoint=glance_endpoint
380             )
381             # using version 1 of glance client in new_image()
382             # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
383             #                                                            endpoint=glance_endpoint)
384 1             self.session["reload_client"] = False
385 1             self.persistent_info["session"] = self.session
386             # add availablity zone info inside  self.persistent_info
387 1             self._set_availablity_zones()
388 1             self.persistent_info["availability_zone"] = self.availability_zone
389             # force to get again security_groups_ids next time they are needed
390 1             self.security_groups_id = None
391
392 1     def __net_os2mano(self, net_list_dict):
393         """Transform the net openstack format to mano format
394         net_list_dict can be a list of dict or a single dict"""
395 0         if type(net_list_dict) is dict:
396 0             net_list_ = (net_list_dict,)
397 0         elif type(net_list_dict) is list:
398 0             net_list_ = net_list_dict
399         else:
400 0             raise TypeError("param net_list_dict must be a list or a dictionary")
401 0         for net in net_list_:
402 0             if net.get("provider:network_type") == "vlan":
403 0                 net["type"] = "data"
404             else:
405 0                 net["type"] = "bridge"
406
407 1     def __classification_os2mano(self, class_list_dict):
408         """Transform the openstack format (Flow Classifier) to mano format
409         (Classification) class_list_dict can be a list of dict or a single dict
410         """
411 1         if isinstance(class_list_dict, dict):
412 0             class_list_ = [class_list_dict]
413 1         elif isinstance(class_list_dict, list):
414 1             class_list_ = class_list_dict
415         else:
416 0             raise TypeError("param class_list_dict must be a list or a dictionary")
417 1         for classification in class_list_:
418 1             id = classification.pop("id")
419 1             name = classification.pop("name")
420 1             description = classification.pop("description")
421 1             project_id = classification.pop("project_id")
422 1             tenant_id = classification.pop("tenant_id")
423 1             original_classification = copy.deepcopy(classification)
424 1             classification.clear()
425 1             classification["ctype"] = "legacy_flow_classifier"
426 1             classification["definition"] = original_classification
427 1             classification["id"] = id
428 1             classification["name"] = name
429 1             classification["description"] = description
430 1             classification["project_id"] = project_id
431 1             classification["tenant_id"] = tenant_id
432
433 1     def __sfi_os2mano(self, sfi_list_dict):
434         """Transform the openstack format (Port Pair) to mano format (SFI)
435         sfi_list_dict can be a list of dict or a single dict
436         """
437 1         if isinstance(sfi_list_dict, dict):
438 0             sfi_list_ = [sfi_list_dict]
439 1         elif isinstance(sfi_list_dict, list):
440 1             sfi_list_ = sfi_list_dict
441         else:
442 0             raise TypeError("param sfi_list_dict must be a list or a dictionary")
443
444 1         for sfi in sfi_list_:
445 1             sfi["ingress_ports"] = []
446 1             sfi["egress_ports"] = []
447
448 1             if sfi.get("ingress"):
449 1                 sfi["ingress_ports"].append(sfi["ingress"])
450
451 1             if sfi.get("egress"):
452 1                 sfi["egress_ports"].append(sfi["egress"])
453
454 1             del sfi["ingress"]
455 1             del sfi["egress"]
456 1             params = sfi.get("service_function_parameters")
457 1             sfc_encap = False
458
459 1             if params:
460 1                 correlation = params.get("correlation")
461
462 1                 if correlation:
463 1                     sfc_encap = True
464
465 1             sfi["sfc_encap"] = sfc_encap
466 1             del sfi["service_function_parameters"]
467
468 1     def __sf_os2mano(self, sf_list_dict):
469         """Transform the openstack format (Port Pair Group) to mano format (SF)
470         sf_list_dict can be a list of dict or a single dict
471         """
472 1         if isinstance(sf_list_dict, dict):
473 0             sf_list_ = [sf_list_dict]
474 1         elif isinstance(sf_list_dict, list):
475 1             sf_list_ = sf_list_dict
476         else:
477 0             raise TypeError("param sf_list_dict must be a list or a dictionary")
478
479 1         for sf in sf_list_:
480 1             del sf["port_pair_group_parameters"]
481 1             sf["sfis"] = sf["port_pairs"]
482 1             del sf["port_pairs"]
483
484 1     def __sfp_os2mano(self, sfp_list_dict):
485         """Transform the openstack format (Port Chain) to mano format (SFP)
486         sfp_list_dict can be a list of dict or a single dict
487         """
488 1         if isinstance(sfp_list_dict, dict):
489 0             sfp_list_ = [sfp_list_dict]
490 1         elif isinstance(sfp_list_dict, list):
491 1             sfp_list_ = sfp_list_dict
492         else:
493 0             raise TypeError("param sfp_list_dict must be a list or a dictionary")
494
495 1         for sfp in sfp_list_:
496 1             params = sfp.pop("chain_parameters")
497 1             sfc_encap = False
498
499 1             if params:
500 1                 correlation = params.get("correlation")
501
502 1                 if correlation:
503 1                     sfc_encap = True
504
505 1             sfp["sfc_encap"] = sfc_encap
506 1             sfp["spi"] = sfp.pop("chain_id")
507 1             sfp["classifications"] = sfp.pop("flow_classifiers")
508 1             sfp["service_functions"] = sfp.pop("port_pair_groups")
509
510     # placeholder for now; read TODO note below
511 1     def _validate_classification(self, type, definition):
512         # only legacy_flow_classifier Type is supported at this point
513 1         return True
514         # TODO(igordcard): this method should be an abstract method of an
515         # abstract Classification class to be implemented by the specific
516         # Types. Also, abstract vimconnector should call the validation
517         # method before the implemented VIM connectors are called.
518
519 1     def _format_exception(self, exception):
520         """Transform a keystone, nova, neutron  exception into a vimconn exception discovering the cause"""
521 0         message_error = str(exception)
522 0         tip = ""
523
524 0         if isinstance(
525             exception,
526             (
527                 neExceptions.NetworkNotFoundClient,
528                 nvExceptions.NotFound,
529                 ksExceptions.NotFound,
530                 gl1Exceptions.HTTPNotFound,
531             ),
532         ):
533 0             raise vimconn.VimConnNotFoundException(
534                 type(exception).__name__ + ": " + message_error
535             )
536 0         elif isinstance(
537             exception,
538             (
539                 HTTPException,
540                 gl1Exceptions.HTTPException,
541                 gl1Exceptions.CommunicationError,
542                 ConnectionError,
543                 ksExceptions.ConnectionError,
544                 neExceptions.ConnectionFailed,
545             ),
546         ):
547 0             if type(exception).__name__ == "SSLError":
548 0                 tip = " (maybe option 'insecure' must be added to the VIM)"
549
550 0             raise vimconn.VimConnConnectionException(
551                 "Invalid URL or credentials{}: {}".format(tip, message_error)
552             )
553 0         elif isinstance(
554             exception,
555             (
556                 KeyError,
557                 nvExceptions.BadRequest,
558                 ksExceptions.BadRequest,
559             ),
560         ):
561 0             raise vimconn.VimConnException(
562                 type(exception).__name__ + ": " + message_error
563             )
564 0         elif isinstance(
565             exception,
566             (
567                 nvExceptions.ClientException,
568                 ksExceptions.ClientException,
569                 neExceptions.NeutronException,
570             ),
571         ):
572 0             raise vimconn.VimConnUnexpectedResponse(
573                 type(exception).__name__ + ": " + message_error
574             )
575 0         elif isinstance(exception, nvExceptions.Conflict):
576 0             raise vimconn.VimConnConflictException(
577                 type(exception).__name__ + ": " + message_error
578             )
579 0         elif isinstance(exception, vimconn.VimConnException):
580 0             raise exception
581         else:  # ()
582 0             self.logger.error("General Exception " + message_error, exc_info=True)
583
584 0             raise vimconn.VimConnConnectionException(
585                 type(exception).__name__ + ": " + message_error
586             )
587
588 1     def _get_ids_from_name(self):
589         """
590          Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
591         :return: None
592         """
593         # get tenant_id if only tenant_name is supplied
594 0         self._reload_connection()
595
596 0         if not self.my_tenant_id:
597 0             raise vimconn.VimConnConnectionException(
598                 "Error getting tenant information from name={} id={}".format(
599                     self.tenant_name, self.tenant_id
600                 )
601             )
602
603 0         if self.config.get("security_groups") and not self.security_groups_id:
604             # convert from name to id
605 0             neutron_sg_list = self.neutron.list_security_groups(
606                 tenant_id=self.my_tenant_id
607             )["security_groups"]
608
609 0             self.security_groups_id = []
610 0             for sg in self.config.get("security_groups"):
611 0                 for neutron_sg in neutron_sg_list:
612 0                     if sg in (neutron_sg["id"], neutron_sg["name"]):
613 0                         self.security_groups_id.append(neutron_sg["id"])
614 0                         break
615                 else:
616 0                     self.security_groups_id = None
617
618 0                     raise vimconn.VimConnConnectionException(
619                         "Not found security group {} for this tenant".format(sg)
620                     )
621
622 1     def check_vim_connectivity(self):
623         # just get network list to check connectivity and credentials
624 0         self.get_network_list(filter_dict={})
625
626 1     def get_tenant_list(self, filter_dict={}):
627         """Obtain tenants of VIM
628         filter_dict can contain the following keys:
629             name: filter by tenant name
630             id: filter by tenant uuid/id
631             <other VIM specific>
632         Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
633         """
634 0         self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
635
636 0         try:
637 0             self._reload_connection()
638
639 0             if self.api_version3:
640 0                 project_class_list = self.keystone.projects.list(
641                     name=filter_dict.get("name")
642                 )
643             else:
644 0                 project_class_list = self.keystone.tenants.findall(**filter_dict)
645
646 0             project_list = []
647
648 0             for project in project_class_list:
649 0                 if filter_dict.get("id") and filter_dict["id"] != project.id:
650 0                     continue
651
652 0                 project_list.append(project.to_dict())
653
654 0             return project_list
655 0         except (
656             ksExceptions.ConnectionError,
657             ksExceptions.ClientException,
658             ConnectionError,
659         ) as e:
660 0             self._format_exception(e)
661
662 1     def new_tenant(self, tenant_name, tenant_description):
663         """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
664 0         self.logger.debug("Adding a new tenant name: %s", tenant_name)
665
666 0         try:
667 0             self._reload_connection()
668
669 0             if self.api_version3:
670 0                 project = self.keystone.projects.create(
671                     tenant_name,
672                     self.config.get("project_domain_id", "default"),
673                     description=tenant_description,
674                     is_domain=False,
675                 )
676             else:
677 0                 project = self.keystone.tenants.create(tenant_name, tenant_description)
678
679 0             return project.id
680 0         except (
681             ksExceptions.ConnectionError,
682             ksExceptions.ClientException,
683             ksExceptions.BadRequest,
684             ConnectionError,
685         ) as e:
686 0             self._format_exception(e)
687
688 1     def delete_tenant(self, tenant_id):
689         """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
690 0         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
691
692 0         try:
693 0             self._reload_connection()
694
695 0             if self.api_version3:
696 0                 self.keystone.projects.delete(tenant_id)
697             else:
698 0                 self.keystone.tenants.delete(tenant_id)
699
700 0             return tenant_id
701 0         except (
702             ksExceptions.ConnectionError,
703             ksExceptions.ClientException,
704             ksExceptions.NotFound,
705             ConnectionError,
706         ) as e:
707 0             self._format_exception(e)
708
709 1     def new_network(
710         self,
711         net_name,
712         net_type,
713         ip_profile=None,
714         shared=False,
715         provider_network_profile=None,
716     ):
717         """Adds a tenant network to VIM
718         Params:
719             'net_name': name of the network
720             'net_type': one of:
721                 'bridge': overlay isolated network
722                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
723                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
724             'ip_profile': is a dict containing the IP parameters of the network
725                 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
726                 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
727                 'gateway_address': (Optional) ip_schema, that is X.X.X.X
728                 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
729                 'dhcp_enabled': True or False
730                 'dhcp_start_address': ip_schema, first IP to grant
731                 'dhcp_count': number of IPs to grant.
732             'shared': if this network can be seen/use by other tenants/organization
733             'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
734                                                              physical-network: physnet-label}
735         Returns a tuple with the network identifier and created_items, or raises an exception on error
736             created_items can be None or a dictionary where this method can include key-values that will be passed to
737             the method delete_network. Can be used to store created segments, created l2gw connections, etc.
738             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
739             as not present.
740         """
741 0         self.logger.debug(
742             "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
743         )
744         # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
745
746 0         try:
747 0             vlan = None
748
749 0             if provider_network_profile:
750 0                 vlan = provider_network_profile.get("segmentation-id")
751
752 0             new_net = None
753 0             created_items = {}
754 0             self._reload_connection()
755 0             network_dict = {"name": net_name, "admin_state_up": True}
756
757 0             if net_type in ("data", "ptp") or provider_network_profile:
758 0                 provider_physical_network = None
759
760 0                 if provider_network_profile and provider_network_profile.get(
761                     "physical-network"
762                 ):
763 0                     provider_physical_network = provider_network_profile.get(
764                         "physical-network"
765                     )
766
767                     # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
768                     # or not declared, just ignore the checking
769 0                     if (
770                         isinstance(
771                             self.config.get("dataplane_physical_net"), (tuple, list)
772                         )
773                         and provider_physical_network
774                         not in self.config["dataplane_physical_net"]
775                     ):
776 0                         raise vimconn.VimConnConflictException(
777                             "Invalid parameter 'provider-network:physical-network' "
778                             "for network creation. '{}' is not one of the declared "
779                             "list at VIM_config:dataplane_physical_net".format(
780                                 provider_physical_network
781                             )
782                         )
783
784                 # use the default dataplane_physical_net
785 0                 if not provider_physical_network:
786 0                     provider_physical_network = self.config.get(
787                         "dataplane_physical_net"
788                     )
789
790                     # if it is non empty list, use the first value. If it is a string use the value directly
791 0                     if (
792                         isinstance(provider_physical_network, (tuple, list))
793                         and provider_physical_network
794                     ):
795 0                         provider_physical_network = provider_physical_network[0]
796
797 0                 if not provider_physical_network:
798 0                     raise vimconn.VimConnConflictException(
799                         "missing information needed for underlay networks. Provide "
800                         "'dataplane_physical_net' configuration at VIM or use the NS "
801                         "instantiation parameter 'provider-network.physical-network'"
802                         " for the VLD"
803                     )
804
805 0                 if not self.config.get("multisegment_support"):
806 0                     network_dict[
807                         "provider:physical_network"
808                     ] = provider_physical_network
809
810 0                     if (
811                         provider_network_profile
812                         and "network-type" in provider_network_profile
813                     ):
814 0                         network_dict[
815                             "provider:network_type"
816                         ] = provider_network_profile["network-type"]
817                     else:
818 0                         network_dict["provider:network_type"] = self.config.get(
819                             "dataplane_network_type", "vlan"
820                         )
821
822 0                     if vlan:
823 0                         network_dict["provider:segmentation_id"] = vlan
824                 else:
825                     # Multi-segment case
826 0                     segment_list = []
827 0                     segment1_dict = {
828                         "provider:physical_network": "",
829                         "provider:network_type": "vxlan",
830                     }
831 0                     segment_list.append(segment1_dict)
832 0                     segment2_dict = {
833                         "provider:physical_network": provider_physical_network,
834                         "provider:network_type": "vlan",
835                     }
836
837 0                     if vlan:
838 0                         segment2_dict["provider:segmentation_id"] = vlan
839 0                     elif self.config.get("multisegment_vlan_range"):
840 0                         vlanID = self._generate_multisegment_vlanID()
841 0                         segment2_dict["provider:segmentation_id"] = vlanID
842
843                     # else
844                     #     raise vimconn.VimConnConflictException(
845                     #         "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
846                     #         network")
847 0                     segment_list.append(segment2_dict)
848 0                     network_dict["segments"] = segment_list
849
850                 # VIO Specific Changes. It needs a concrete VLAN
851 0                 if self.vim_type == "VIO" and vlan is None:
852 0                     if self.config.get("dataplane_net_vlan_range") is None:
853 0                         raise vimconn.VimConnConflictException(
854                             "You must provide 'dataplane_net_vlan_range' in format "
855                             "[start_ID - end_ID] at VIM_config for creating underlay "
856                             "networks"
857                         )
858
859 0                     network_dict["provider:segmentation_id"] = self._generate_vlanID()
860
861 0             network_dict["shared"] = shared
862
863 0             if self.config.get("disable_network_port_security"):
864 0                 network_dict["port_security_enabled"] = False
865
866 0             if self.config.get("neutron_availability_zone_hints"):
867 0                 hints = self.config.get("neutron_availability_zone_hints")
868
869 0                 if isinstance(hints, str):
870 0                     hints = [hints]
871
872 0                 network_dict["availability_zone_hints"] = hints
873
874 0             new_net = self.neutron.create_network({"network": network_dict})
875             # print new_net
876             # create subnetwork, even if there is no profile
877
878 0             if not ip_profile:
879 0                 ip_profile = {}
880
881 0             if not ip_profile.get("subnet_address"):
882                 # Fake subnet is required
883 0                 subnet_rand = random.randint(0, 255)
884 0                 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
885
886 0             if "ip_version" not in ip_profile:
887 0                 ip_profile["ip_version"] = "IPv4"
888
889 0             subnet = {
890                 "name": net_name + "-subnet",
891                 "network_id": new_net["network"]["id"],
892                 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
893                 "cidr": ip_profile["subnet_address"],
894             }
895
896             # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
897 0             if ip_profile.get("gateway_address"):
898 0                 subnet["gateway_ip"] = ip_profile["gateway_address"]
899             else:
900 0                 subnet["gateway_ip"] = None
901
902 0             if ip_profile.get("dns_address"):
903 0                 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
904
905 0             if "dhcp_enabled" in ip_profile:
906 0                 subnet["enable_dhcp"] = (
907                     False
908                     if ip_profile["dhcp_enabled"] == "false"
909                     or ip_profile["dhcp_enabled"] is False
910                     else True
911                 )
912
913 0             if ip_profile.get("dhcp_start_address"):
914 0                 subnet["allocation_pools"] = []
915 0                 subnet["allocation_pools"].append(dict())
916 0                 subnet["allocation_pools"][0]["start"] = ip_profile[
917                     "dhcp_start_address"
918                 ]
919
920 0             if ip_profile.get("dhcp_count"):
921                 # parts = ip_profile["dhcp_start_address"].split(".")
922                 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
923 0                 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
924 0                 ip_int += ip_profile["dhcp_count"] - 1
925 0                 ip_str = str(netaddr.IPAddress(ip_int))
926 0                 subnet["allocation_pools"][0]["end"] = ip_str
927
928             # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
929 0             self.neutron.create_subnet({"subnet": subnet})
930
931 0             if net_type == "data" and self.config.get("multisegment_support"):
932 0                 if self.config.get("l2gw_support"):
933 0                     l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
934 0                     for l2gw in l2gw_list:
935 0                         l2gw_conn = {
936                             "l2_gateway_id": l2gw["id"],
937                             "network_id": new_net["network"]["id"],
938                             "segmentation_id": str(vlanID),
939                         }
940 0                         new_l2gw_conn = self.neutron.create_l2_gateway_connection(
941                             {"l2_gateway_connection": l2gw_conn}
942                         )
943 0                         created_items[
944                             "l2gwconn:"
945                             + str(new_l2gw_conn["l2_gateway_connection"]["id"])
946                         ] = True
947
948 0             return new_net["network"]["id"], created_items
949 0         except Exception as e:
950             # delete l2gw connections (if any) before deleting the network
951 0             for k, v in created_items.items():
952 0                 if not v:  # skip already deleted
953 0                     continue
954
955 0                 try:
956 0                     k_item, _, k_id = k.partition(":")
957
958 0                     if k_item == "l2gwconn":
959 0                         self.neutron.delete_l2_gateway_connection(k_id)
960 0                 except Exception as e2:
961 0                     self.logger.error(
962                         "Error deleting l2 gateway connection: {}: {}".format(
963                             type(e2).__name__, e2
964                         )
965                     )
966
967 0             if new_net:
968 0                 self.neutron.delete_network(new_net["network"]["id"])
969
970 0             self._format_exception(e)
971
972 1     def get_network_list(self, filter_dict={}):
973         """Obtain tenant networks of VIM
974         Filter_dict can be:
975             name: network name
976             id: network uuid
977             shared: boolean
978             tenant_id: tenant
979             admin_state_up: boolean
980             status: 'ACTIVE'
981         Returns the network list of dictionaries
982         """
983 0         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
984
985 0         try:
986 0             self._reload_connection()
987 0             filter_dict_os = filter_dict.copy()
988
989 0             if self.api_version3 and "tenant_id" in filter_dict_os:
990                 # TODO check
991 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
992
993 0             net_dict = self.neutron.list_networks(**filter_dict_os)
994 0             net_list = net_dict["networks"]
995 0             self.__net_os2mano(net_list)
996
997 0             return net_list
998 0         except (
999             neExceptions.ConnectionFailed,
1000             ksExceptions.ClientException,
1001             neExceptions.NeutronException,
1002             ConnectionError,
1003         ) as e:
1004 0             self._format_exception(e)
1005
1006 1     def get_network(self, net_id):
1007         """Obtain details of network from VIM
1008         Returns the network information from a network id"""
1009 0         self.logger.debug(" Getting tenant network %s from VIM", net_id)
1010 0         filter_dict = {"id": net_id}
1011 0         net_list = self.get_network_list(filter_dict)
1012
1013 0         if len(net_list) == 0:
1014 0             raise vimconn.VimConnNotFoundException(
1015                 "Network '{}' not found".format(net_id)
1016             )
1017 0         elif len(net_list) > 1:
1018 0             raise vimconn.VimConnConflictException(
1019                 "Found more than one network with this criteria"
1020             )
1021
1022 0         net = net_list[0]
1023 0         subnets = []
1024 0         for subnet_id in net.get("subnets", ()):
1025 0             try:
1026 0                 subnet = self.neutron.show_subnet(subnet_id)
1027 0             except Exception as e:
1028 0                 self.logger.error(
1029                     "osconnector.get_network(): Error getting subnet %s %s"
1030                     % (net_id, str(e))
1031                 )
1032 0                 subnet = {"id": subnet_id, "fault": str(e)}
1033
1034 0             subnets.append(subnet)
1035
1036 0         net["subnets"] = subnets
1037 0         net["encapsulation"] = net.get("provider:network_type")
1038 0         net["encapsulation_type"] = net.get("provider:network_type")
1039 0         net["segmentation_id"] = net.get("provider:segmentation_id")
1040 0         net["encapsulation_id"] = net.get("provider:segmentation_id")
1041
1042 0         return net
1043
1044 1     def delete_network(self, net_id, created_items=None):
1045         """
1046         Removes a tenant network from VIM and its associated elements
1047         :param net_id: VIM identifier of the network, provided by method new_network
1048         :param created_items: dictionary with extra items to be deleted. provided by method new_network
1049         Returns the network identifier or raises an exception upon error or when network is not found
1050         """
1051 0         self.logger.debug("Deleting network '%s' from VIM", net_id)
1052
1053 0         if created_items is None:
1054 0             created_items = {}
1055
1056 0         try:
1057 0             self._reload_connection()
1058             # delete l2gw connections (if any) before deleting the network
1059 0             for k, v in created_items.items():
1060 0                 if not v:  # skip already deleted
1061 0                     continue
1062
1063 0                 try:
1064 0                     k_item, _, k_id = k.partition(":")
1065 0                     if k_item == "l2gwconn":
1066 0                         self.neutron.delete_l2_gateway_connection(k_id)
1067 0                 except Exception as e:
1068 0                     self.logger.error(
1069                         "Error deleting l2 gateway connection: {}: {}".format(
1070                             type(e).__name__, e
1071                         )
1072                     )
1073
1074             # delete VM ports attached to this networks before the network
1075 0             ports = self.neutron.list_ports(network_id=net_id)
1076 0             for p in ports["ports"]:
1077 0                 try:
1078 0                     self.neutron.delete_port(p["id"])
1079 0                 except Exception as e:
1080 0                     self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1081
1082 0             self.neutron.delete_network(net_id)
1083
1084 0             return net_id
1085 0         except (
1086             neExceptions.ConnectionFailed,
1087             neExceptions.NetworkNotFoundClient,
1088             neExceptions.NeutronException,
1089             ksExceptions.ClientException,
1090             neExceptions.NeutronException,
1091             ConnectionError,
1092         ) as e:
1093 0             self._format_exception(e)
1094
1095 1     def refresh_nets_status(self, net_list):
1096         """Get the status of the networks
1097         Params: the list of network identifiers
1098         Returns a dictionary with:
1099             net_id:         #VIM id of this network
1100                 status:     #Mandatory. Text with one of:
1101                             #  DELETED (not found at vim)
1102                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1103                             #  OTHER (Vim reported other status not understood)
1104                             #  ERROR (VIM indicates an ERROR status)
1105                             #  ACTIVE, INACTIVE, DOWN (admin down),
1106                             #  BUILD (on building process)
1107                             #
1108                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
1109                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
1110         """
1111 0         net_dict = {}
1112
1113 0         for net_id in net_list:
1114 0             net = {}
1115
1116 0             try:
1117 0                 net_vim = self.get_network(net_id)
1118
1119 0                 if net_vim["status"] in netStatus2manoFormat:
1120 0                     net["status"] = netStatus2manoFormat[net_vim["status"]]
1121                 else:
1122 0                     net["status"] = "OTHER"
1123 0                     net["error_msg"] = "VIM status reported " + net_vim["status"]
1124
1125 0                 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1126 0                     net["status"] = "DOWN"
1127
1128 0                 net["vim_info"] = self.serialize(net_vim)
1129
1130 0                 if net_vim.get("fault"):  # TODO
1131 0                     net["error_msg"] = str(net_vim["fault"])
1132 0             except vimconn.VimConnNotFoundException as e:
1133 0                 self.logger.error("Exception getting net status: %s", str(e))
1134 0                 net["status"] = "DELETED"
1135 0                 net["error_msg"] = str(e)
1136 0             except vimconn.VimConnException as e:
1137 0                 self.logger.error("Exception getting net status: %s", str(e))
1138 0                 net["status"] = "VIM_ERROR"
1139 0                 net["error_msg"] = str(e)
1140 0             net_dict[net_id] = net
1141 0         return net_dict
1142
1143 1     def get_flavor(self, flavor_id):
1144         """Obtain flavor details from the  VIM. Returns the flavor dict details"""
1145 0         self.logger.debug("Getting flavor '%s'", flavor_id)
1146
1147 0         try:
1148 0             self._reload_connection()
1149 0             flavor = self.nova.flavors.find(id=flavor_id)
1150             # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1151
1152 0             return flavor.to_dict()
1153 0         except (
1154             nvExceptions.NotFound,
1155             nvExceptions.ClientException,
1156             ksExceptions.ClientException,
1157             ConnectionError,
1158         ) as e:
1159 0             self._format_exception(e)
1160
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that match the flavor description
        Returns the flavor_id or raises a vimconnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
            and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
            vimconnNotFoundException is raised
        """
        # exact_match False -> "use_existing_flavors": accept the closest flavor
        # offering at least the requested resources, instead of strict equality.
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            # Sentinel "worst so far" candidate used in the lexicographic
            # comparison below.
            # NOTE(review): this is a 3-tuple while flavor_data is a 5-tuple;
            # Python tuple comparison is element-wise (lexicographic), so the
            # expression still evaluates, but "flavor_target < flavor_data" does
            # NOT guarantee every field is >= the target — confirm intended.
            flavor_candidate_data = (10000, 10000, 10000)
            # Target resources of the requested flavor, in comparison order:
            # (ram, vcpus, disk, ephemeral, swap)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO: EPA (numa) matching against existing flavors not supported
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
                # if len(numas) > 1:
                #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
                # numa=numas[0]
                # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                if epa:
                    # flavors carrying extra_specs (EPA) are skipped entirely
                    continue
                    # TODO

                # Same ordering as flavor_target; non-int swap ("" when unset)
                # is normalized to 0.
                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    # exact match always wins, regardless of exact_match flag
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    # keep the lexicographically smallest flavor above target
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
1229
1230 1     @staticmethod
1231 1     def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1232         """Process resource quota and fill up extra_specs.
1233         Args:
1234             quota       (dict):         Keeping the quota of resurces
1235             prefix      (str)           Prefix
1236             extra_specs (dict)          Dict to be filled to be used during flavor creation
1237
1238         """
1239 0         if "limit" in quota:
1240 0             extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1241
1242 0         if "reserve" in quota:
1243 0             extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1244
1245 0         if "shares" in quota:
1246 0             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1247 0             extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1248
1249 1     @staticmethod
1250 1     def process_numa_memory(
1251         numa: dict, node_id: Optional[int], extra_specs: dict
1252     ) -> None:
1253         """Set the memory in extra_specs.
1254         Args:
1255             numa        (dict):         A dictionary which includes numa information
1256             node_id     (int):          ID of numa node
1257             extra_specs (dict):         To be filled.
1258
1259         """
1260 1         if not numa.get("memory"):
1261 1             return
1262 1         memory_mb = numa["memory"] * 1024
1263 1         memory = "hw:numa_mem.{}".format(node_id)
1264 1         extra_specs[memory] = int(memory_mb)
1265
1266 1     @staticmethod
1267 1     def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1268         """Set the cpu in extra_specs.
1269         Args:
1270             numa        (dict):         A dictionary which includes numa information
1271             node_id     (int):          ID of numa node
1272             extra_specs (dict):         To be filled.
1273
1274         """
1275 1         if not numa.get("vcpu"):
1276 1             return
1277 1         vcpu = numa["vcpu"]
1278 1         cpu = "hw:numa_cpus.{}".format(node_id)
1279 1         vcpu = ",".join(map(str, vcpu))
1280 1         extra_specs[cpu] = vcpu
1281
1282 1     @staticmethod
1283 1     def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1284         """Fill up extra_specs if numa has paired-threads.
1285         Args:
1286             numa        (dict):         A dictionary which includes numa information
1287             extra_specs (dict):         To be filled.
1288
1289         Returns:
1290             threads       (int)           Number of virtual cpus
1291
1292         """
1293 1         if not numa.get("paired-threads"):
1294 1             return
1295
1296         # cpu_thread_policy "require" implies that compute node must have an STM architecture
1297 1         threads = numa["paired-threads"] * 2
1298 1         extra_specs["hw:cpu_thread_policy"] = "require"
1299 1         extra_specs["hw:cpu_policy"] = "dedicated"
1300 1         return threads
1301
1302 1     @staticmethod
1303 1     def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1304         """Fill up extra_specs if numa has cores.
1305         Args:
1306             numa        (dict):         A dictionary which includes numa information
1307             extra_specs (dict):         To be filled.
1308
1309         Returns:
1310             cores       (int)           Number of virtual cpus
1311
1312         """
1313         # cpu_thread_policy "isolate" implies that the host must not have an SMT
1314         # architecture, or a non-SMT architecture will be emulated
1315 1         if not numa.get("cores"):
1316 1             return
1317 1         cores = numa["cores"]
1318 1         extra_specs["hw:cpu_thread_policy"] = "isolate"
1319 1         extra_specs["hw:cpu_policy"] = "dedicated"
1320 1         return cores
1321
1322 1     @staticmethod
1323 1     def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1324         """Fill up extra_specs if numa has threads.
1325         Args:
1326             numa        (dict):         A dictionary which includes numa information
1327             extra_specs (dict):         To be filled.
1328
1329         Returns:
1330             threads       (int)           Number of virtual cpus
1331
1332         """
1333         # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1334 1         if not numa.get("threads"):
1335 1             return
1336 1         threads = numa["threads"]
1337 1         extra_specs["hw:cpu_thread_policy"] = "prefer"
1338 1         extra_specs["hw:cpu_policy"] = "dedicated"
1339 1         return threads
1340
1341 1     def _process_numa_parameters_of_flavor(
1342         self, numas: List, extra_specs: Dict
1343     ) -> None:
1344         """Process numa parameters and fill up extra_specs.
1345
1346         Args:
1347             numas   (list):             List of dictionary which includes numa information
1348             extra_specs (dict):         To be filled.
1349
1350         """
1351 1         numa_nodes = len(numas)
1352 1         extra_specs["hw:numa_nodes"] = str(numa_nodes)
1353 1         cpu_cores, cpu_threads = 0, 0
1354
1355 1         if self.vim_type == "VIO":
1356 1             self.process_vio_numa_nodes(numa_nodes, extra_specs)
1357
1358 1         for numa in numas:
1359 1             if "id" in numa:
1360 1                 node_id = numa["id"]
1361                 # overwrite ram and vcpus
1362                 # check if key "memory" is present in numa else use ram value at flavor
1363 1                 self.process_numa_memory(numa, node_id, extra_specs)
1364 1                 self.process_numa_vcpu(numa, node_id, extra_specs)
1365
1366             # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1367 1             extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1368
1369 1             if "paired-threads" in numa:
1370 1                 threads = self.process_numa_paired_threads(numa, extra_specs)
1371 1                 cpu_threads += threads
1372
1373 1             elif "cores" in numa:
1374 1                 cores = self.process_numa_cores(numa, extra_specs)
1375 1                 cpu_cores += cores
1376
1377 1             elif "threads" in numa:
1378 1                 threads = self.process_numa_threads(numa, extra_specs)
1379 1                 cpu_threads += threads
1380
1381 1         if cpu_cores:
1382 1             extra_specs["hw:cpu_cores"] = str(cpu_cores)
1383 1         if cpu_threads:
1384 1             extra_specs["hw:cpu_threads"] = str(cpu_threads)
1385
1386 1     @staticmethod
1387 1     def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1388         """According to number of numa nodes, updates the extra_specs for VIO.
1389
1390         Args:
1391
1392             numa_nodes      (int):         List keeps the numa node numbers
1393             extra_specs     (dict):        Extra specs dict to be updated
1394
1395         """
1396         # If there are several numas, we do not define specific affinity.
1397 1         extra_specs["vmware:latency_sensitivity_level"] = "high"
1398
1399 1     def _change_flavor_name(
1400         self, name: str, name_suffix: int, flavor_data: dict
1401     ) -> str:
1402         """Change the flavor name if the name already exists.
1403
1404         Args:
1405             name    (str):          Flavor name to be checked
1406             name_suffix (int):      Suffix to be appended to name
1407             flavor_data (dict):     Flavor dict
1408
1409         Returns:
1410             name    (str):          New flavor name to be used
1411
1412         """
1413         # Get used names
1414 1         fl = self.nova.flavors.list()
1415 1         fl_names = [f.name for f in fl]
1416
1417 1         while name in fl_names:
1418 1             name_suffix += 1
1419 1             name = flavor_data["name"] + "-" + str(name_suffix)
1420
1421 1         return name
1422
1423 1     def _process_extended_config_of_flavor(
1424         self, extended: dict, extra_specs: dict
1425     ) -> None:
1426         """Process the extended dict to fill up extra_specs.
1427         Args:
1428
1429             extended                    (dict):         Keeping the extra specification of flavor
1430             extra_specs                 (dict)          Dict to be filled to be used during flavor creation
1431
1432         """
1433 1         quotas = {
1434             "cpu-quota": "cpu",
1435             "mem-quota": "memory",
1436             "vif-quota": "vif",
1437             "disk-io-quota": "disk_io",
1438         }
1439
1440 1         page_sizes = {
1441             "LARGE": "large",
1442             "SMALL": "small",
1443             "SIZE_2MB": "2MB",
1444             "SIZE_1GB": "1GB",
1445             "PREFER_LARGE": "any",
1446         }
1447
1448 1         policies = {
1449             "cpu-pinning-policy": "hw:cpu_policy",
1450             "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1451             "mem-policy": "hw:numa_mempolicy",
1452         }
1453
1454 1         numas = extended.get("numas")
1455 1         if numas:
1456 1             self._process_numa_parameters_of_flavor(numas, extra_specs)
1457
1458 1         for quota, item in quotas.items():
1459 1             if quota in extended.keys():
1460 1                 self.process_resource_quota(extended.get(quota), item, extra_specs)
1461
1462         # Set the mempage size as specified in the descriptor
1463 1         if extended.get("mempage-size"):
1464 1             if extended["mempage-size"] in page_sizes.keys():
1465 1                 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1466             else:
1467                 # Normally, validations in NBI should not allow to this condition.
1468 1                 self.logger.debug(
1469                     "Invalid mempage-size %s. Will be ignored",
1470                     extended.get("mempage-size"),
1471                 )
1472
1473 1         for policy, hw_policy in policies.items():
1474 1             if extended.get(policy):
1475 1                 extra_specs[hw_policy] = extended[policy].lower()
1476
1477 1     @staticmethod
1478 1     def _get_flavor_details(flavor_data: dict) -> Tuple:
1479         """Returns the details of flavor
1480         Args:
1481             flavor_data     (dict):     Dictionary that includes required flavor details
1482
1483         Returns:
1484             ram, vcpus, extra_specs, extended   (tuple):    Main items of required flavor
1485
1486         """
1487 1         return (
1488             flavor_data.get("ram", 64),
1489             flavor_data.get("vcpus", 1),
1490             {},
1491             flavor_data.get("extended"),
1492         )
1493
    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        if change_name_if_used is True, it will change name in case of conflict,
        because it is not supported name repetition.

        Args:
            flavor_data (dict):             Flavor details to be processed
            change_name_if_used (bool):     Change name in case of conflict

        Returns:
             flavor_id  (str):     flavor identifier

        Raises:
            Translated VIM exception (via _format_exception) on client errors,
            connection errors or a missing "name" key in flavor_data.
        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        # Up to max_retries creation attempts; a new name is computed on each
        # retry when change_name_if_used is set
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        # Pick a name not already present at the VIM
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )
                    if extended:
                        # Fill extra_specs with EPA data (NUMA, quotas,
                        # mempage size, pinning policies)
                        self._process_extended_config_of_flavor(extended, extra_specs)

                    # Create flavor

                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    # Name clash: loop again (a fresh name will be computed)
                    # unless renaming is disabled or retries are exhausted
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)

        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
1559
1560 1     def delete_flavor(self, flavor_id):
1561         """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1562 0         try:
1563 0             self._reload_connection()
1564 0             self.nova.flavors.delete(flavor_id)
1565
1566 0             return flavor_id
1567         # except nvExceptions.BadRequest as e:
1568 0         except (
1569             nvExceptions.NotFound,
1570             ksExceptions.ClientException,
1571             nvExceptions.ClientException,
1572             ConnectionError,
1573         ) as e:
1574 0             self._format_exception(e)
1575
1576 1     def new_image(self, image_dict):
1577         """
1578         Adds a tenant image to VIM. imge_dict is a dictionary with:
1579             name: name
1580             disk_format: qcow2, vhd, vmdk, raw (by default), ...
1581             location: path or URI
1582             public: "yes" or "no"
1583             metadata: metadata of the image
1584         Returns the image_id
1585         """
1586 0         retry = 0
1587 0         max_retries = 3
1588
1589 0         while retry < max_retries:
1590 0             retry += 1
1591 0             try:
1592 0                 self._reload_connection()
1593
1594                 # determine format  http://docs.openstack.org/developer/glance/formats.html
1595 0                 if "disk_format" in image_dict:
1596 0                     disk_format = image_dict["disk_format"]
1597                 else:  # autodiscover based on extension
1598 0                     if image_dict["location"].endswith(".qcow2"):
1599 0                         disk_format = "qcow2"
1600 0                     elif image_dict["location"].endswith(".vhd"):
1601 0                         disk_format = "vhd"
1602 0                     elif image_dict["location"].endswith(".vmdk"):
1603 0                         disk_format = "vmdk"
1604 0                     elif image_dict["location"].endswith(".vdi"):
1605 0                         disk_format = "vdi"
1606 0                     elif image_dict["location"].endswith(".iso"):
1607 0                         disk_format = "iso"
1608 0                     elif image_dict["location"].endswith(".aki"):
1609 0                         disk_format = "aki"
1610 0                     elif image_dict["location"].endswith(".ari"):
1611 0                         disk_format = "ari"
1612 0                     elif image_dict["location"].endswith(".ami"):
1613 0                         disk_format = "ami"
1614                     else:
1615 0                         disk_format = "raw"
1616
1617 0                 self.logger.debug(
1618                     "new_image: '%s' loading from '%s'",
1619                     image_dict["name"],
1620                     image_dict["location"],
1621                 )
1622 0                 if self.vim_type == "VIO":
1623 0                     container_format = "bare"
1624 0                     if "container_format" in image_dict:
1625 0                         container_format = image_dict["container_format"]
1626
1627 0                     new_image = self.glance.images.create(
1628                         name=image_dict["name"],
1629                         container_format=container_format,
1630                         disk_format=disk_format,
1631                     )
1632                 else:
1633 0                     new_image = self.glance.images.create(name=image_dict["name"])
1634
1635 0                 if image_dict["location"].startswith("http"):
1636                     # TODO there is not a method to direct download. It must be downloaded locally with requests
1637 0                     raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1638                 else:  # local path
1639 0                     with open(image_dict["location"]) as fimage:
1640 0                         self.glance.images.upload(new_image.id, fimage)
1641                         # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1642                         #  image_dict.get("public","yes")=="yes",
1643                         #    container_format="bare", data=fimage, disk_format=disk_format)
1644
1645 0                 metadata_to_load = image_dict.get("metadata")
1646
1647                 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1648                 #  for openstack
1649 0                 if self.vim_type == "VIO":
1650 0                     metadata_to_load["upload_location"] = image_dict["location"]
1651                 else:
1652 0                     metadata_to_load["location"] = image_dict["location"]
1653
1654 0                 self.glance.images.update(new_image.id, **metadata_to_load)
1655
1656 0                 return new_image.id
1657 0             except (
1658                 nvExceptions.Conflict,
1659                 ksExceptions.ClientException,
1660                 nvExceptions.ClientException,
1661             ) as e:
1662 0                 self._format_exception(e)
1663 0             except (
1664                 HTTPException,
1665                 gl1Exceptions.HTTPException,
1666                 gl1Exceptions.CommunicationError,
1667                 ConnectionError,
1668             ) as e:
1669 0                 if retry == max_retries:
1670 0                     continue
1671
1672 0                 self._format_exception(e)
1673 0             except IOError as e:  # can not open the file
1674 0                 raise vimconn.VimConnConnectionException(
1675                     "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1676                     http_code=vimconn.HTTP_Bad_Request,
1677                 )
1678
1679 1     def delete_image(self, image_id):
1680         """Deletes a tenant image from openstack VIM. Returns the old id"""
1681 0         try:
1682 0             self._reload_connection()
1683 0             self.glance.images.delete(image_id)
1684
1685 0             return image_id
1686 0         except (
1687             nvExceptions.NotFound,
1688             ksExceptions.ClientException,
1689             nvExceptions.ClientException,
1690             gl1Exceptions.CommunicationError,
1691             gl1Exceptions.HTTPNotFound,
1692             ConnectionError,
1693         ) as e:  # TODO remove
1694 0             self._format_exception(e)
1695
1696 1     def get_image_id_from_path(self, path):
1697         """Get the image id from image path in the VIM database. Returns the image_id"""
1698 0         try:
1699 0             self._reload_connection()
1700 0             images = self.glance.images.list()
1701
1702 0             for image in images:
1703 0                 if image.metadata.get("location") == path:
1704 0                     return image.id
1705
1706 0             raise vimconn.VimConnNotFoundException(
1707                 "image with location '{}' not found".format(path)
1708             )
1709 0         except (
1710             ksExceptions.ClientException,
1711             nvExceptions.ClientException,
1712             gl1Exceptions.CommunicationError,
1713             ConnectionError,
1714         ) as e:
1715 0             self._format_exception(e)
1716
1717 1     def get_image_list(self, filter_dict={}):
1718         """Obtain tenant images from VIM
1719         Filter_dict can be:
1720             id: image id
1721             name: image name
1722             checksum: image checksum
1723         Returns the image list of dictionaries:
1724             [{<the fields at Filter_dict plus some VIM specific>}, ...]
1725             List can be empty
1726         """
1727 0         self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1728
1729 0         try:
1730 0             self._reload_connection()
1731             # filter_dict_os = filter_dict.copy()
1732             # First we filter by the available filter fields: name, id. The others are removed.
1733 0             image_list = self.glance.images.list()
1734 0             filtered_list = []
1735
1736 0             for image in image_list:
1737 0                 try:
1738 0                     if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1739 0                         continue
1740
1741 0                     if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1742 0                         continue
1743
1744 0                     if (
1745                         filter_dict.get("checksum")
1746                         and image["checksum"] != filter_dict["checksum"]
1747                     ):
1748 0                         continue
1749
1750 0                     filtered_list.append(image.copy())
1751 0                 except gl1Exceptions.HTTPNotFound:
1752 0                     pass
1753
1754 0             return filtered_list
1755 0         except (
1756             ksExceptions.ClientException,
1757             nvExceptions.ClientException,
1758             gl1Exceptions.CommunicationError,
1759             ConnectionError,
1760         ) as e:
1761 0             self._format_exception(e)
1762
1763 1     def __wait_for_vm(self, vm_id, status):
1764         """wait until vm is in the desired status and return True.
1765         If the VM gets in ERROR status, return false.
1766         If the timeout is reached generate an exception"""
1767 0         elapsed_time = 0
1768 0         while elapsed_time < server_timeout:
1769 0             vm_status = self.nova.servers.get(vm_id).status
1770
1771 0             if vm_status == status:
1772 0                 return True
1773
1774 0             if vm_status == "ERROR":
1775 0                 return False
1776
1777 0             time.sleep(5)
1778 0             elapsed_time += 5
1779
1780         # if we exceeded the timeout rollback
1781 0         if elapsed_time >= server_timeout:
1782 0             raise vimconn.VimConnException(
1783                 "Timeout waiting for instance " + vm_id + " to get " + status,
1784                 http_code=vimconn.HTTP_Request_Timeout,
1785             )
1786
1787 1     def _get_openstack_availablity_zones(self):
1788         """
1789         Get from openstack availability zones available
1790         :return:
1791         """
1792 1         try:
1793 1             openstack_availability_zone = self.nova.availability_zones.list()
1794 0             openstack_availability_zone = [
1795                 str(zone.zoneName)
1796                 for zone in openstack_availability_zone
1797                 if zone.zoneName != "internal"
1798             ]
1799
1800 0             return openstack_availability_zone
1801 1         except Exception:
1802 1             return None
1803
1804 1     def _set_availablity_zones(self):
1805         """
1806         Set vim availablity zone
1807         :return:
1808         """
1809 1         if "availability_zone" in self.config:
1810 0             vim_availability_zones = self.config.get("availability_zone")
1811
1812 0             if isinstance(vim_availability_zones, str):
1813 0                 self.availability_zone = [vim_availability_zones]
1814 0             elif isinstance(vim_availability_zones, list):
1815 0                 self.availability_zone = vim_availability_zones
1816         else:
1817 1             self.availability_zone = self._get_openstack_availablity_zones()
1818
1819 1     def _get_vm_availability_zone(
1820         self, availability_zone_index, availability_zone_list
1821     ):
1822         """
1823         Return thge availability zone to be used by the created VM.
1824         :return: The VIM availability zone to be used or None
1825         """
1826 0         if availability_zone_index is None:
1827 0             if not self.config.get("availability_zone"):
1828 0                 return None
1829 0             elif isinstance(self.config.get("availability_zone"), str):
1830 0                 return self.config["availability_zone"]
1831             else:
1832                 # TODO consider using a different parameter at config for default AV and AV list match
1833 0                 return self.config["availability_zone"][0]
1834
1835 0         vim_availability_zones = self.availability_zone
1836         # check if VIM offer enough availability zones describe in the VNFD
1837 0         if vim_availability_zones and len(availability_zone_list) <= len(
1838             vim_availability_zones
1839         ):
1840             # check if all the names of NFV AV match VIM AV names
1841 0             match_by_index = False
1842 0             for av in availability_zone_list:
1843 0                 if av not in vim_availability_zones:
1844 0                     match_by_index = True
1845 0                     break
1846
1847 0             if match_by_index:
1848 0                 return vim_availability_zones[availability_zone_index]
1849             else:
1850 0                 return availability_zone_list[availability_zone_index]
1851         else:
1852 0             raise vimconn.VimConnConflictException(
1853                 "No enough availability zones at VIM for this deployment"
1854             )
1855
1856 1     def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1857         """Fill up the security_groups in the port_dict.
1858
1859         Args:
1860             net (dict):             Network details
1861             port_dict   (dict):     Port details
1862
1863         """
1864 1         if (
1865             self.config.get("security_groups")
1866             and net.get("port_security") is not False
1867             and not self.config.get("no_port_security_extension")
1868         ):
1869 1             if not self.security_groups_id:
1870 1                 self._get_ids_from_name()
1871
1872 1             port_dict["security_groups"] = self.security_groups_id
1873
1874 1     def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1875         """Fill up the network binding depending on network type in the port_dict.
1876
1877         Args:
1878             net (dict):             Network details
1879             port_dict   (dict):     Port details
1880
1881         """
1882 1         if not net.get("type"):
1883 1             raise vimconn.VimConnException("Type is missing in the network details.")
1884
1885 1         if net["type"] == "virtual":
1886 1             pass
1887
1888         # For VF
1889 1         elif net["type"] == "VF" or net["type"] == "SR-IOV":
1890 1             port_dict["binding:vnic_type"] = "direct"
1891
1892             # VIO specific Changes
1893 1             if self.vim_type == "VIO":
1894                 # Need to create port with port_security_enabled = False and no-security-groups
1895 1                 port_dict["port_security_enabled"] = False
1896 1                 port_dict["provider_security_groups"] = []
1897 1                 port_dict["security_groups"] = []
1898
1899         else:
1900             # For PT PCI-PASSTHROUGH
1901 1             port_dict["binding:vnic_type"] = "direct-physical"
1902
1903 1     @staticmethod
1904 1     def _set_fixed_ip(new_port: dict, net: dict) -> None:
1905         """Set the "ip" parameter in net dictionary.
1906
1907         Args:
1908             new_port    (dict):     New created port
1909             net         (dict):     Network details
1910
1911         """
1912 1         fixed_ips = new_port["port"].get("fixed_ips")
1913
1914 1         if fixed_ips:
1915 1             net["ip"] = fixed_ips[0].get("ip_address")
1916         else:
1917 1             net["ip"] = None
1918
1919 1     @staticmethod
1920 1     def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1921         """Fill up the mac_address and fixed_ips in port_dict.
1922
1923         Args:
1924             net (dict):             Network details
1925             port_dict   (dict):     Port details
1926
1927         """
1928 1         if net.get("mac_address"):
1929 1             port_dict["mac_address"] = net["mac_address"]
1930
1931 1         if net.get("ip_address"):
1932 1             port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1933             # TODO add "subnet_id": <subnet_id>
1934
1935 1     def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1936         """Create new port using neutron.
1937
1938         Args:
1939             port_dict   (dict):         Port details
1940             created_items   (dict):     All created items
1941             net (dict):                 Network details
1942
1943         Returns:
1944             new_port    (dict):         New created port
1945
1946         """
1947 1         new_port = self.neutron.create_port({"port": port_dict})
1948 1         created_items["port:" + str(new_port["port"]["id"])] = True
1949 1         net["mac_adress"] = new_port["port"]["mac_address"]
1950 1         net["vim_id"] = new_port["port"]["id"]
1951
1952 1         return new_port
1953
1954 1     def _create_port(
1955         self, net: dict, name: str, created_items: dict
1956     ) -> Tuple[dict, dict]:
1957         """Create port using net details.
1958
1959         Args:
1960             net (dict):                 Network details
1961             name    (str):              Name to be used as network name if net dict does not include name
1962             created_items   (dict):     All created items
1963
1964         Returns:
1965             new_port, port              New created port, port dictionary
1966
1967         """
1968
1969 1         port_dict = {
1970             "network_id": net["net_id"],
1971             "name": net.get("name"),
1972             "admin_state_up": True,
1973         }
1974
1975 1         if not port_dict["name"]:
1976 1             port_dict["name"] = name
1977
1978 1         self._prepare_port_dict_security_groups(net, port_dict)
1979
1980 1         self._prepare_port_dict_binding(net, port_dict)
1981
1982 1         vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1983
1984 1         new_port = self._create_new_port(port_dict, created_items, net)
1985
1986 1         vimconnector._set_fixed_ip(new_port, net)
1987
1988 1         port = {"port-id": new_port["port"]["id"]}
1989
1990 1         if float(self.nova.api_version.get_string()) >= 2.32:
1991 1             port["tag"] = new_port["port"]["name"]
1992
1993 1         return new_port, port
1994
    def _prepare_network_for_vminstance(
        self,
        name: str,
        net_list: list,
        created_items: dict,
        net_list_vim: list,
        external_network: list,
        no_secured_ports: list,
    ) -> None:
        """Create port and fill up net dictionary for new VM instance creation.

        The net_list entries, net_list_vim, external_network and
        no_secured_ports are all mutated in place for the caller.

        Args:
            name    (str):                  Name of network
            net_list    (list):             List of networks
            created_items   (dict):         All created items belongs to a VM
            net_list_vim    (list):         List of ports
            external_network    (list):     List of external-networks
            no_secured_ports    (list):     Port security disabled ports
        """

        self._reload_connection()

        for net in net_list:
            # Skip non-connected iface
            if not net.get("net_id"):
                continue

            new_port, port = self._create_port(net, name, created_items)

            net_list_vim.append(port)

            # Nets explicitly requesting a floating ip must fail if none can
            # be assigned; mgmt nets only get one when configured, best effort
            if net.get("floating_ip", False):
                net["exit_on_floating_ip_error"] = True
                external_network.append(net)

            # NOTE(review): assumes every connected net carries a "use" key —
            # confirm against callers
            elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
                net["exit_on_floating_ip_error"] = False
                external_network.append(net)
                net["floating_ip"] = self.config.get("use_floating_ip")

            # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
            # is dropped. As a workaround we wait until the VM is active and then disable the port-security
            if net.get("port_security") is False and not self.config.get(
                "no_port_security_extension"
            ):
                no_secured_ports.append(
                    (
                        new_port["port"]["id"],
                        net.get("port_security_disable_strategy"),
                    )
                )
2046
2047 1     def _prepare_persistent_root_volumes(
2048         self,
2049         name: str,
2050         vm_av_zone: list,
2051         disk: dict,
2052         base_disk_index: int,
2053         block_device_mapping: dict,
2054         existing_vim_volumes: list,
2055         created_items: dict,
2056     ) -> Optional[str]:
2057         """Prepare persistent root volumes for new VM instance.
2058
2059         Args:
2060             name    (str):                      Name of VM instance
2061             vm_av_zone  (list):                 List of availability zones
2062             disk    (dict):                     Disk details
2063             base_disk_index (int):              Disk index
2064             block_device_mapping    (dict):     Block device details
2065             existing_vim_volumes    (list):     Existing disk details
2066             created_items   (dict):             All created items belongs to VM
2067
2068         Returns:
2069             boot_volume_id  (str):              ID of boot volume
2070
2071         """
2072         # Disk may include only vim_volume_id or only vim_id."
2073         # Use existing persistent root volume finding with volume_id or vim_id
2074 1         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2075
2076 1         if disk.get(key_id):
2077 1             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2078 1             existing_vim_volumes.append({"id": disk[key_id]})
2079
2080         else:
2081             # Create persistent root volume
2082 1             volume = self.cinder.volumes.create(
2083                 size=disk["size"],
2084                 name=name + "vd" + chr(base_disk_index),
2085                 imageRef=disk["image_id"],
2086                 # Make sure volume is in the same AZ as the VM to be attached to
2087                 availability_zone=vm_av_zone,
2088             )
2089 1             boot_volume_id = volume.id
2090 1             created_items["volume:" + str(volume.id)] = True
2091 1             block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2092
2093 1             return boot_volume_id
2094
2095 1     def _prepare_non_root_persistent_volumes(
2096         self,
2097         name: str,
2098         disk: dict,
2099         vm_av_zone: list,
2100         block_device_mapping: dict,
2101         base_disk_index: int,
2102         existing_vim_volumes: list,
2103         created_items: dict,
2104     ) -> None:
2105         """Prepare persistent volumes for new VM instance.
2106
2107         Args:
2108             name    (str):                      Name of VM instance
2109             disk    (dict):                     Disk details
2110             vm_av_zone  (list):                 List of availability zones
2111             block_device_mapping    (dict):     Block device details
2112             base_disk_index (int):              Disk index
2113             existing_vim_volumes    (list):     Existing disk details
2114             created_items   (dict):             All created items belongs to VM
2115         """
2116         # Non-root persistent volumes
2117         # Disk may include only vim_volume_id or only vim_id."
2118 1         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2119
2120 1         if disk.get(key_id):
2121             # Use existing persistent volume
2122 1             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2123 1             existing_vim_volumes.append({"id": disk[key_id]})
2124
2125         else:
2126             # Create persistent volume
2127 1             volume = self.cinder.volumes.create(
2128                 size=disk["size"],
2129                 name=name + "vd" + chr(base_disk_index),
2130                 # Make sure volume is in the same AZ as the VM to be attached to
2131                 availability_zone=vm_av_zone,
2132             )
2133 1             created_items["volume:" + str(volume.id)] = True
2134 1             block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2135
2136 1     def _wait_for_created_volumes_availability(
2137         self, elapsed_time: int, created_items: dict
2138     ) -> Optional[int]:
2139         """Wait till created volumes become available.
2140
2141         Args:
2142             elapsed_time    (int):          Passed time while waiting
2143             created_items   (dict):         All created items belongs to VM
2144
2145         Returns:
2146             elapsed_time    (int):          Time spent while waiting
2147
2148         """
2149
2150 1         while elapsed_time < volume_timeout:
2151 1             for created_item in created_items:
2152 1                 v, _, volume_id = created_item.partition(":")
2153 1                 if v == "volume":
2154 1                     if self.cinder.volumes.get(volume_id).status != "available":
2155 1                         break
2156             else:
2157                 # All ready: break from while
2158 1                 break
2159
2160 1             time.sleep(5)
2161 1             elapsed_time += 5
2162
2163 1         return elapsed_time
2164
2165 1     def _wait_for_existing_volumes_availability(
2166         self, elapsed_time: int, existing_vim_volumes: list
2167     ) -> Optional[int]:
2168         """Wait till existing volumes become available.
2169
2170         Args:
2171             elapsed_time    (int):          Passed time while waiting
2172             existing_vim_volumes   (list):  Existing volume details
2173
2174         Returns:
2175             elapsed_time    (int):          Time spent while waiting
2176
2177         """
2178
2179 1         while elapsed_time < volume_timeout:
2180 1             for volume in existing_vim_volumes:
2181 1                 if self.cinder.volumes.get(volume["id"]).status != "available":
2182 1                     break
2183             else:  # all ready: break from while
2184 1                 break
2185
2186 1             time.sleep(5)
2187 1             elapsed_time += 5
2188
2189 1         return elapsed_time
2190
2191 1     def _prepare_disk_for_vminstance(
2192         self,
2193         name: str,
2194         existing_vim_volumes: list,
2195         created_items: dict,
2196         vm_av_zone: list,
2197         block_device_mapping: dict,
2198         disk_list: list = None,
2199     ) -> None:
2200         """Prepare all volumes for new VM instance.
2201
2202         Args:
2203             name    (str):                      Name of Instance
2204             existing_vim_volumes    (list):     List of existing volumes
2205             created_items   (dict):             All created items belongs to VM
2206             vm_av_zone  (list):                 VM availability zone
2207             block_device_mapping (dict):        Block devices to be attached to VM
2208             disk_list   (list):                 List of disks
2209
2210         """
2211         # Create additional volumes in case these are present in disk_list
2212 1         base_disk_index = ord("b")
2213 1         boot_volume_id = None
2214 1         elapsed_time = 0
2215
2216 1         for disk in disk_list:
2217 1             if "image_id" in disk:
2218                 # Root persistent volume
2219 1                 base_disk_index = ord("a")
2220 1                 boot_volume_id = self._prepare_persistent_root_volumes(
2221                     name=name,
2222                     vm_av_zone=vm_av_zone,
2223                     disk=disk,
2224                     base_disk_index=base_disk_index,
2225                     block_device_mapping=block_device_mapping,
2226                     existing_vim_volumes=existing_vim_volumes,
2227                     created_items=created_items,
2228                 )
2229             else:
2230                 # Non-root persistent volume
2231 1                 self._prepare_non_root_persistent_volumes(
2232                     name=name,
2233                     disk=disk,
2234                     vm_av_zone=vm_av_zone,
2235                     block_device_mapping=block_device_mapping,
2236                     base_disk_index=base_disk_index,
2237                     existing_vim_volumes=existing_vim_volumes,
2238                     created_items=created_items,
2239                 )
2240 1             base_disk_index += 1
2241
2242         # Wait until created volumes are with status available
2243 1         elapsed_time = self._wait_for_created_volumes_availability(
2244             elapsed_time, created_items
2245         )
2246         # Wait until existing volumes in vim are with status available
2247 1         elapsed_time = self._wait_for_existing_volumes_availability(
2248             elapsed_time, existing_vim_volumes
2249         )
2250         # If we exceeded the timeout rollback
2251 1         if elapsed_time >= volume_timeout:
2252 1             raise vimconn.VimConnException(
2253                 "Timeout creating volumes for instance " + name,
2254                 http_code=vimconn.HTTP_Request_Timeout,
2255             )
2256 1         if boot_volume_id:
2257 1             self.cinder.volumes.set_bootable(boot_volume_id, True)
2258
2259 1     def _find_the_external_network_for_floating_ip(self):
2260         """Get the external network ip in order to create floating IP.
2261
2262         Returns:
2263             pool_id (str):      External network pool ID
2264
2265         """
2266
2267         # Find the external network
2268 1         external_nets = list()
2269
2270 1         for net in self.neutron.list_networks()["networks"]:
2271 1             if net["router:external"]:
2272 1                 external_nets.append(net)
2273
2274 1         if len(external_nets) == 0:
2275 1             raise vimconn.VimConnException(
2276                 "Cannot create floating_ip automatically since "
2277                 "no external network is present",
2278                 http_code=vimconn.HTTP_Conflict,
2279             )
2280
2281 1         if len(external_nets) > 1:
2282 1             raise vimconn.VimConnException(
2283                 "Cannot create floating_ip automatically since "
2284                 "multiple external networks are present",
2285                 http_code=vimconn.HTTP_Conflict,
2286             )
2287
2288         # Pool ID
2289 1         return external_nets[0].get("id")
2290
2291 1     def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2292         """Trigger neutron to create a new floating IP using external network ID.
2293
2294         Args:
2295             param   (dict):             Input parameters to create a floating IP
2296             created_items   (dict):     All created items belongs to new VM instance
2297
2298         Raises:
2299
2300             VimConnException
2301         """
2302 1         try:
2303 1             self.logger.debug("Creating floating IP")
2304 1             new_floating_ip = self.neutron.create_floatingip(param)
2305 1             free_floating_ip = new_floating_ip["floatingip"]["id"]
2306 1             created_items["floating_ip:" + str(free_floating_ip)] = True
2307
2308 1         except Exception as e:
2309 1             raise vimconn.VimConnException(
2310                 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2311                 http_code=vimconn.HTTP_Conflict,
2312             )
2313
2314 1     def _create_floating_ip(
2315         self, floating_network: dict, server: object, created_items: dict
2316     ) -> None:
2317         """Get the available Pool ID and create a new floating IP.
2318
2319         Args:
2320             floating_network    (dict):         Dict including external network ID
2321             server   (object):                  Server object
2322             created_items   (dict):             All created items belongs to new VM instance
2323
2324         """
2325
2326         # Pool_id is available
2327 1         if (
2328             isinstance(floating_network["floating_ip"], str)
2329             and floating_network["floating_ip"].lower() != "true"
2330         ):
2331 1             pool_id = floating_network["floating_ip"]
2332
2333         # Find the Pool_id
2334         else:
2335 1             pool_id = self._find_the_external_network_for_floating_ip()
2336
2337 1         param = {
2338             "floatingip": {
2339                 "floating_network_id": pool_id,
2340                 "tenant_id": server.tenant_id,
2341             }
2342         }
2343
2344 1         self._neutron_create_float_ip(param, created_items)
2345
2346 1     def _find_floating_ip(
2347         self,
2348         server: object,
2349         floating_ips: list,
2350         floating_network: dict,
2351     ) -> Optional[str]:
2352         """Find the available free floating IPs if there are.
2353
2354         Args:
2355             server  (object):                   Server object
2356             floating_ips    (list):             List of floating IPs
2357             floating_network    (dict):         Details of floating network such as ID
2358
2359         Returns:
2360             free_floating_ip    (str):          Free floating ip address
2361
2362         """
2363 1         for fip in floating_ips:
2364 1             if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2365 1                 continue
2366
2367 1             if isinstance(floating_network["floating_ip"], str):
2368 1                 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2369 0                     continue
2370
2371 1             return fip["id"]
2372
2373 1     def _assign_floating_ip(
2374         self, free_floating_ip: str, floating_network: dict
2375     ) -> Dict:
2376         """Assign the free floating ip address to port.
2377
2378         Args:
2379             free_floating_ip    (str):          Floating IP to be assigned
2380             floating_network    (dict):         ID of floating network
2381
2382         Returns:
2383             fip (dict)          (dict):         Floating ip details
2384
2385         """
2386         # The vim_id key contains the neutron.port_id
2387 1         self.neutron.update_floatingip(
2388             free_floating_ip,
2389             {"floatingip": {"port_id": floating_network["vim_id"]}},
2390         )
2391         # For race condition ensure not re-assigned to other VM after 5 seconds
2392 1         time.sleep(5)
2393
2394 1         return self.neutron.show_floatingip(free_floating_ip)
2395
2396 1     def _get_free_floating_ip(
2397         self, server: object, floating_network: dict
2398     ) -> Optional[str]:
2399         """Get the free floating IP address.
2400
2401         Args:
2402             server  (object):               Server Object
2403             floating_network    (dict):     Floating network details
2404
2405         Returns:
2406             free_floating_ip    (str):      Free floating ip addr
2407
2408         """
2409
2410 1         floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2411
2412         # Randomize
2413 1         random.shuffle(floating_ips)
2414
2415 1         return self._find_floating_ip(server, floating_ips, floating_network)
2416
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        Args:
            external_network    (list):         ID of External network
            server  (object):                   Server Object
            created_items   (dict):             All created items belongs to new VM instance
            vm_start_time   (float):            Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException

        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network
                    )

                    if not free_floating_ip:
                        # No reusable IP: create a fresh one (recorded in created_items).
                        # NOTE(review): free_floating_ip is still None at this point;
                        # the show_floatingip(None) below presumably fails and the
                        # retry path in the except clause re-enters the while-loop,
                        # which then picks up the newly created IP — confirm.
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        # Another RO may have grabbed the same IP in the meantime;
                        # if the port differs from ours, loop and try another IP.
                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            # VM still building: keep retrying until server_timeout
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            # VM settled: allow a bounded number of extra attempts
                            floating_ip_retries -= 1
                            continue

                        # Retries exhausted (or VM timed out while building)
                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # Per-network error policy: optionally tolerate the failure and
                # continue with the remaining external networks
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2505
2506 1     def _update_port_security_for_vminstance(
2507         self,
2508         no_secured_ports: list,
2509         server: object,
2510     ) -> None:
2511         """Updates the port security according to no_secured_ports list.
2512
2513         Args:
2514             no_secured_ports    (list):     List of ports that security will be disabled
2515             server  (object):               Server Object
2516
2517         Raises:
2518             VimConnException
2519
2520         """
2521         # Wait until the VM is active and then disable the port-security
2522 1         if no_secured_ports:
2523 1             self.__wait_for_vm(server.id, "ACTIVE")
2524
2525 1         for port in no_secured_ports:
2526 1             port_update = {
2527                 "port": {"port_security_enabled": False, "security_groups": None}
2528             }
2529
2530 1             if port[1] == "allow-address-pairs":
2531 1                 port_update = {
2532                     "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2533                 }
2534
2535 1             try:
2536 1                 self.neutron.update_port(port[0], port_update)
2537
2538 1             except Exception:
2539 1                 raise vimconn.VimConnException(
2540                     "It was not possible to disable port security for port {}".format(
2541                         port[0]
2542                     )
2543                 )
2544
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name    (str):          name of VM
            description (str):      description
            start   (bool):         indicates if VM must start or boot in pause mode. Ignored
            image_id    (str)       image uuid
            flavor_id   (str)       flavor uuid
            affinity_group_list (list):     list of affinity groups, each one is a dictionary.Ignore if empty.
            net_list    (list):         list of interfaces, each one is a dictionary with:
                name:   name of network
                net_id:     network uuid to connect
                vpci:   virtual vcpi to assign, ignored because openstack lack #TODO
                model:  interface model, ignored #TODO
                mac_address:    used for  SR-IOV ifaces #TODO for other types
                use:    'data', 'bridge',  'mgmt'
                type:   'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id:     filled/added by this function
                floating_ip:    True/False (or it can be None)
                port_security:  True/False
            cloud_config    (dict): (optional) dictionary with:
                key-pairs:      (optional) list of strings with the public key to be inserted to the default user
                users:      (optional) list of users to be inserted, each item is a dict with:
                    name:   (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data:  (optional) string is a text script to be passed directly to cloud-init
                config-files:   (optional). List of files to be transferred. Each item is a dict with:
                    dest:   (mandatory) string with the destination absolute path
                    encoding:   (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content :    (mandatory) string with the content of the file
                    permissions:    (optional) string with file permissions, typically octal notation '0644'
                    owner:  (optional) file owner, string with the format 'owner:group'
                boot-data-drive:    boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list:  (optional) list with additional disks to the VM. Each item is a dict with:
                image_id:   (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size:   (mandatory) string with the size of the disk in GB
                vim_id:  (optional) should use this existing volume id
            availability_zone_index:    Index of availability_zone_list to use for this this VM. None if not AV required
            availability_zone_list:     list of availability zones given by user in the VNFD descriptor.  Ignore if
                availability_zone_index is None
                #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )

        try:
            # server is kept outside the helpers so the rollback below can tell
            # whether nova.servers.create() already succeeded
            server = None
            created_items = {}
            net_list_vim = []
            # list of external networks to be connected to instance, later on used to create floating_ip
            external_network = []
            # List of ports with port-security disabled
            no_secured_ports = []
            block_device_mapping = {}
            existing_vim_volumes = []
            server_group_id = None
            # NOTE: historical misspelling of "scheduler"; local name only
            scheduller_hints = {}

            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list; fills net_list_vim / external_network /
            # no_secured_ports as side effects
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            if disk_list:
                # Prepare disks; fills block_device_mapping / existing_vim_volumes
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    vm_av_zone=vm_av_zone,
                    block_device_mapping=block_device_mapping,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )

            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            vm_start_time = time.time()

            self._update_port_security_for_vminstance(no_secured_ports, server)

            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items

        except Exception as e:
            # Rollback: best-effort removal of the half-created VM and of every
            # port/volume/floating-ip recorded in created_items. A rollback
            # failure is only logged; the original error is re-raised below.
            server_id = None
            if server:
                server_id = server.id

            try:
                self.delete_vminstance(server_id, created_items)

            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            # Translates the exception to a VimConnException and raises it
            self._format_exception(e)
2725
2726 1     def get_vminstance(self, vm_id):
2727         """Returns the VM instance information from VIM"""
2728         # self.logger.debug("Getting VM from VIM")
2729 0         try:
2730 0             self._reload_connection()
2731 0             server = self.nova.servers.find(id=vm_id)
2732             # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2733
2734 0             return server.to_dict()
2735 0         except (
2736             ksExceptions.ClientException,
2737             nvExceptions.ClientException,
2738             nvExceptions.NotFound,
2739             ConnectionError,
2740         ) as e:
2741 0             self._format_exception(e)
2742
2743 1     def get_vminstance_console(self, vm_id, console_type="vnc"):
2744         """
2745         Get a console for the virtual machine
2746         Params:
2747             vm_id: uuid of the VM
2748             console_type, can be:
2749                 "novnc" (by default), "xvpvnc" for VNC types,
2750                 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2751         Returns dict with the console parameters:
2752                 protocol: ssh, ftp, http, https, ...
2753                 server:   usually ip address
2754                 port:     the http, ssh, ... port
2755                 suffix:   extra text, e.g. the http path and query string
2756         """
2757 0         self.logger.debug("Getting VM CONSOLE from VIM")
2758
2759 0         try:
2760 0             self._reload_connection()
2761 0             server = self.nova.servers.find(id=vm_id)
2762
2763 0             if console_type is None or console_type == "novnc":
2764 0                 console_dict = server.get_vnc_console("novnc")
2765 0             elif console_type == "xvpvnc":
2766 0                 console_dict = server.get_vnc_console(console_type)
2767 0             elif console_type == "rdp-html5":
2768 0                 console_dict = server.get_rdp_console(console_type)
2769 0             elif console_type == "spice-html5":
2770 0                 console_dict = server.get_spice_console(console_type)
2771             else:
2772 0                 raise vimconn.VimConnException(
2773                     "console type '{}' not allowed".format(console_type),
2774                     http_code=vimconn.HTTP_Bad_Request,
2775                 )
2776
2777 0             console_dict1 = console_dict.get("console")
2778
2779 0             if console_dict1:
2780 0                 console_url = console_dict1.get("url")
2781
2782 0                 if console_url:
2783                     # parse console_url
2784 0                     protocol_index = console_url.find("//")
2785 0                     suffix_index = (
2786                         console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2787                     )
2788 0                     port_index = (
2789                         console_url[protocol_index + 2 : suffix_index].find(":")
2790                         + protocol_index
2791                         + 2
2792                     )
2793
2794 0                     if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2795 0                         return (
2796                             -vimconn.HTTP_Internal_Server_Error,
2797                             "Unexpected response from VIM",
2798                         )
2799
2800 0                     console_dict = {
2801                         "protocol": console_url[0:protocol_index],
2802                         "server": console_url[protocol_index + 2 : port_index],
2803                         "port": console_url[port_index:suffix_index],
2804                         "suffix": console_url[suffix_index + 1 :],
2805                     }
2806 0                     protocol_index += 2
2807
2808 0                     return console_dict
2809 0             raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2810 0         except (
2811             nvExceptions.NotFound,
2812             ksExceptions.ClientException,
2813             nvExceptions.ClientException,
2814             nvExceptions.BadRequest,
2815             ConnectionError,
2816         ) as e:
2817 0             self._format_exception(e)
2818
2819 1     def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2820         """Neutron delete ports by id.
2821         Args:
2822             k_id    (str):      Port id in the VIM
2823         """
2824 1         try:
2825 1             self.neutron.delete_port(k_id)
2826
2827 1         except Exception as e:
2828 1             self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2829
2830 1     def _delete_volumes_by_id_wth_cinder(
2831         self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2832     ) -> bool:
2833         """Cinder delete volume by id.
2834         Args:
2835             k   (str):                      Full item name in created_items
2836             k_id    (str):                  ID of floating ip in VIM
2837             volumes_to_hold (list):          Volumes not to delete
2838             created_items   (dict):         All created items belongs to VM
2839         """
2840 1         try:
2841 1             if k_id in volumes_to_hold:
2842 1                 return
2843
2844 1             if self.cinder.volumes.get(k_id).status != "available":
2845 1                 return True
2846
2847             else:
2848 1                 self.cinder.volumes.delete(k_id)
2849 1                 created_items[k] = None
2850
2851 1         except Exception as e:
2852 1             self.logger.error(
2853                 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2854             )
2855
2856 1     def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2857         """Neutron delete floating ip by id.
2858         Args:
2859             k   (str):                      Full item name in created_items
2860             k_id    (str):                  ID of floating ip in VIM
2861             created_items   (dict):         All created items belongs to VM
2862         """
2863 1         try:
2864 1             self.neutron.delete_floatingip(k_id)
2865 1             created_items[k] = None
2866
2867 1         except Exception as e:
2868 1             self.logger.error(
2869                 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2870             )
2871
2872 1     @staticmethod
2873 1     def _get_item_name_id(k: str) -> Tuple[str, str]:
2874 1         k_item, _, k_id = k.partition(":")
2875 1         return k_item, k_id
2876
2877 1     def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2878         """Delete VM ports attached to the networks before deleting virtual machine.
2879         Args:
2880             created_items   (dict):     All created items belongs to VM
2881         """
2882
2883 1         for k, v in created_items.items():
2884 1             if not v:  # skip already deleted
2885 1                 continue
2886
2887 1             try:
2888 1                 k_item, k_id = self._get_item_name_id(k)
2889 1                 if k_item == "port":
2890 1                     self._delete_ports_by_id_wth_neutron(k_id)
2891
2892 1             except Exception as e:
2893 1                 self.logger.error(
2894                     "Error deleting port: {}: {}".format(type(e).__name__, e)
2895                 )
2896
2897 1     def _delete_created_items(
2898         self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2899     ) -> bool:
2900         """Delete Volumes and floating ip if they exist in created_items."""
2901 1         for k, v in created_items.items():
2902 1             if not v:  # skip already deleted
2903 1                 continue
2904
2905 1             try:
2906 1                 k_item, k_id = self._get_item_name_id(k)
2907
2908 1                 if k_item == "volume":
2909 1                     unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2910                         k, k_id, volumes_to_hold, created_items
2911                     )
2912
2913 1                     if unavailable_vol:
2914 1                         keep_waiting = True
2915
2916 1                 elif k_item == "floating_ip":
2917 1                     self._delete_floating_ip_by_id(k, k_id, created_items)
2918
2919 1             except Exception as e:
2920 1                 self.logger.error("Error deleting {}: {}".format(k, e))
2921
2922 1         return keep_waiting
2923
2924 1     def delete_vminstance(
2925         self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
2926     ) -> None:
2927         """Removes a VM instance from VIM. Returns the old identifier.
2928         Args:
2929             vm_id   (str):              Identifier of VM instance
2930             created_items   (dict):     All created items belongs to VM
2931             volumes_to_hold (list):     Volumes_to_hold
2932         """
2933 1         if created_items is None:
2934 1             created_items = {}
2935 1         if volumes_to_hold is None:
2936 1             volumes_to_hold = []
2937
2938 1         try:
2939 1             self._reload_connection()
2940
2941             # Delete VM ports attached to the networks before the virtual machine
2942 1             if created_items:
2943 1                 self._delete_vm_ports_attached_to_network(created_items)
2944
2945 1             if vm_id:
2946 1                 self.nova.servers.delete(vm_id)
2947
2948             # Although having detached, volumes should have in active status before deleting.
2949             # We ensure in this loop
2950 1             keep_waiting = True
2951 1             elapsed_time = 0
2952
2953 1             while keep_waiting and elapsed_time < volume_timeout:
2954 1                 keep_waiting = False
2955
2956                 # Delete volumes and floating IP.
2957 1                 keep_waiting = self._delete_created_items(
2958                     created_items, volumes_to_hold, keep_waiting
2959                 )
2960
2961 1                 if keep_waiting:
2962 1                     time.sleep(1)
2963 1                     elapsed_time += 1
2964
2965 1         except (
2966             nvExceptions.NotFound,
2967             ksExceptions.ClientException,
2968             nvExceptions.ClientException,
2969             ConnectionError,
2970         ) as e:
2971 0             self._format_exception(e)
2972
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this Virtual Machine
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                            #  CREATING (on building process), ERROR
                            #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
                    vim_net_id:       #network id where this interface is connected
                    vim_interface_id: #interface/port VIM id
                    ip_address:       #null, or text with IPv4, IPv6 address
                    compute_node:     #identification of compute node where PF,VF interface is allocated
                    pci:              #PCI address of the NIC that hosts the PF,VF
                    vlan:             #physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        # Each VM is handled independently: a failure on one VM only marks
        # that VM's entry (DELETED / VIM_ERROR) and does not abort the rest.
        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # Remove user_data blobs before dumping vim_info.
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    # Neutron ports whose device_id is this server.
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                #  the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        # best effort: errors looking up floating IPs are ignored
                        except Exception:
                            pass

                        # fixed IPs come after any floating IP in the ";"-joined list
                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
3115
3116 1     def action_vminstance(self, vm_id, action_dict, created_items={}):
3117         """Send and action over a VM instance from VIM
3118         Returns None or the console dict if the action was successfully sent to the VIM
3119         """
3120 0         self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3121
3122 0         try:
3123 0             self._reload_connection()
3124 0             server = self.nova.servers.find(id=vm_id)
3125
3126 0             if "start" in action_dict:
3127 0                 if action_dict["start"] == "rebuild":
3128 0                     server.rebuild()
3129                 else:
3130 0                     if server.status == "PAUSED":
3131 0                         server.unpause()
3132 0                     elif server.status == "SUSPENDED":
3133 0                         server.resume()
3134 0                     elif server.status == "SHUTOFF":
3135 0                         server.start()
3136                     else:
3137 0                         self.logger.debug(
3138                             "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3139                         )
3140 0                         raise vimconn.VimConnException(
3141                             "Cannot 'start' instance while it is in active state",
3142                             http_code=vimconn.HTTP_Bad_Request,
3143                         )
3144
3145 0             elif "pause" in action_dict:
3146 0                 server.pause()
3147 0             elif "resume" in action_dict:
3148 0                 server.resume()
3149 0             elif "shutoff" in action_dict or "shutdown" in action_dict:
3150 0                 self.logger.debug("server status %s", server.status)
3151 0                 if server.status == "ACTIVE":
3152 0                     server.stop()
3153                 else:
3154 0                     self.logger.debug("ERROR: VM is not in Active state")
3155 0                     raise vimconn.VimConnException(
3156                         "VM is not in active state, stop operation is not allowed",
3157                         http_code=vimconn.HTTP_Bad_Request,
3158                     )
3159 0             elif "forceOff" in action_dict:
3160 0                 server.stop()  # TODO
3161 0             elif "terminate" in action_dict:
3162 0                 server.delete()
3163 0             elif "createImage" in action_dict:
3164 0                 server.create_image()
3165                 # "path":path_schema,
3166                 # "description":description_schema,
3167                 # "name":name_schema,
3168                 # "metadata":metadata_schema,
3169                 # "imageRef": id_schema,
3170                 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3171 0             elif "rebuild" in action_dict:
3172 0                 server.rebuild(server.image["id"])
3173 0             elif "reboot" in action_dict:
3174 0                 server.reboot()  # reboot_type="SOFT"
3175 0             elif "console" in action_dict:
3176 0                 console_type = action_dict["console"]
3177
3178 0                 if console_type is None or console_type == "novnc":
3179 0                     console_dict = server.get_vnc_console("novnc")
3180 0                 elif console_type == "xvpvnc":
3181 0                     console_dict = server.get_vnc_console(console_type)
3182 0                 elif console_type == "rdp-html5":
3183 0                     console_dict = server.get_rdp_console(console_type)
3184 0                 elif console_type == "spice-html5":
3185 0                     console_dict = server.get_spice_console(console_type)
3186                 else:
3187 0                     raise vimconn.VimConnException(
3188                         "console type '{}' not allowed".format(console_type),
3189                         http_code=vimconn.HTTP_Bad_Request,
3190                     )
3191
3192 0                 try:
3193 0                     console_url = console_dict["console"]["url"]
3194                     # parse console_url
3195 0                     protocol_index = console_url.find("//")
3196 0                     suffix_index = (
3197                         console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3198                     )
3199 0                     port_index = (
3200                         console_url[protocol_index + 2 : suffix_index].find(":")
3201                         + protocol_index
3202                         + 2
3203                     )
3204
3205 0                     if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3206 0                         raise vimconn.VimConnException(
3207                             "Unexpected response from VIM " + str(console_dict)
3208                         )
3209
3210 0                     console_dict2 = {
3211                         "protocol": console_url[0:protocol_index],
3212                         "server": console_url[protocol_index + 2 : port_index],
3213                         "port": int(console_url[port_index + 1 : suffix_index]),
3214                         "suffix": console_url[suffix_index + 1 :],
3215                     }
3216
3217 0                     return console_dict2
3218 0                 except Exception:
3219 0                     raise vimconn.VimConnException(
3220                         "Unexpected response from VIM " + str(console_dict)
3221                     )
3222
3223 0             return None
3224 0         except (
3225             ksExceptions.ClientException,
3226             nvExceptions.ClientException,
3227             nvExceptions.NotFound,
3228             ConnectionError,
3229         ) as e:
3230 0             self._format_exception(e)
3231         # TODO insert exception vimconn.HTTP_Unauthorized
3232
3233     # ###### VIO Specific Changes #########
3234 1     def _generate_vlanID(self):
3235         """
3236         Method to get unused vlanID
3237             Args:
3238                 None
3239             Returns:
3240                 vlanID
3241         """
3242         # Get used VLAN IDs
3243 0         usedVlanIDs = []
3244 0         networks = self.get_network_list()
3245
3246 0         for net in networks:
3247 0             if net.get("provider:segmentation_id"):
3248 0                 usedVlanIDs.append(net.get("provider:segmentation_id"))
3249
3250 0         used_vlanIDs = set(usedVlanIDs)
3251
3252         # find unused VLAN ID
3253 0         for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3254 0             try:
3255 0                 start_vlanid, end_vlanid = map(
3256                     int, vlanID_range.replace(" ", "").split("-")
3257                 )
3258
3259 0                 for vlanID in range(start_vlanid, end_vlanid + 1):
3260 0                     if vlanID not in used_vlanIDs:
3261 0                         return vlanID
3262 0             except Exception as exp:
3263 0                 raise vimconn.VimConnException(
3264                     "Exception {} occurred while generating VLAN ID.".format(exp)
3265                 )
3266         else:
3267 0             raise vimconn.VimConnConflictException(
3268                 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3269                     self.config.get("dataplane_net_vlan_range")
3270                 )
3271             )
3272
3273 1     def _generate_multisegment_vlanID(self):
3274         """
3275         Method to get unused vlanID
3276         Args:
3277             None
3278         Returns:
3279             vlanID
3280         """
3281         # Get used VLAN IDs
3282 0         usedVlanIDs = []
3283 0         networks = self.get_network_list()
3284 0         for net in networks:
3285 0             if net.get("provider:network_type") == "vlan" and net.get(
3286                 "provider:segmentation_id"
3287             ):
3288 0                 usedVlanIDs.append(net.get("provider:segmentation_id"))
3289 0             elif net.get("segments"):
3290 0                 for segment in net.get("segments"):
3291 0                     if segment.get("provider:network_type") == "vlan" and segment.get(
3292                         "provider:segmentation_id"
3293                     ):
3294 0                         usedVlanIDs.append(segment.get("provider:segmentation_id"))
3295
3296 0         used_vlanIDs = set(usedVlanIDs)
3297
3298         # find unused VLAN ID
3299 0         for vlanID_range in self.config.get("multisegment_vlan_range"):
3300 0             try:
3301 0                 start_vlanid, end_vlanid = map(
3302                     int, vlanID_range.replace(" ", "").split("-")
3303                 )
3304
3305 0                 for vlanID in range(start_vlanid, end_vlanid + 1):
3306 0                     if vlanID not in used_vlanIDs:
3307 0                         return vlanID
3308 0             except Exception as exp:
3309 0                 raise vimconn.VimConnException(
3310                     "Exception {} occurred while generating VLAN ID.".format(exp)
3311                 )
3312         else:
3313 0             raise vimconn.VimConnConflictException(
3314                 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3315                     self.config.get("multisegment_vlan_range")
3316                 )
3317             )
3318
3319 1     def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3320         """
3321         Method to validate user given vlanID ranges
3322             Args:  None
3323             Returns: None
3324         """
3325 0         for vlanID_range in input_vlan_range:
3326 0             vlan_range = vlanID_range.replace(" ", "")
3327             # validate format
3328 0             vlanID_pattern = r"(\d)*-(\d)*$"
3329 0             match_obj = re.match(vlanID_pattern, vlan_range)
3330 0             if not match_obj:
3331 0                 raise vimconn.VimConnConflictException(
3332                     "Invalid VLAN range for {}: {}.You must provide "
3333                     "'{}' in format [start_ID - end_ID].".format(
3334                         text_vlan_range, vlanID_range, text_vlan_range
3335                     )
3336                 )
3337
3338 0             start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3339 0             if start_vlanid <= 0:
3340 0                 raise vimconn.VimConnConflictException(
3341                     "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3342                     "networks valid IDs are 1 to 4094 ".format(
3343                         text_vlan_range, vlanID_range
3344                     )
3345                 )
3346
3347 0             if end_vlanid > 4094:
3348 0                 raise vimconn.VimConnConflictException(
3349                     "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3350                     "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3351                         text_vlan_range, vlanID_range
3352                     )
3353                 )
3354
3355 0             if start_vlanid > end_vlanid:
3356 0                 raise vimconn.VimConnConflictException(
3357                     "Invalid VLAN range for {}: {}. You must provide '{}'"
3358                     " in format start_ID - end_ID and start_ID < end_ID ".format(
3359                         text_vlan_range, vlanID_range, text_vlan_range
3360                     )
3361                 )
3362
3363     # NOT USED FUNCTIONS
3364
3365 1     def new_external_port(self, port_data):
3366         """Adds a external port to VIM
3367         Returns the port identifier"""
3368         # TODO openstack if needed
3369 0         return (
3370             -vimconn.HTTP_Internal_Server_Error,
3371             "osconnector.new_external_port() not implemented",
3372         )
3373
3374 1     def connect_port_network(self, port_id, network_id, admin=False):
3375         """Connects a external port to a network
3376         Returns status code of the VIM response"""
3377         # TODO openstack if needed
3378 0         return (
3379             -vimconn.HTTP_Internal_Server_Error,
3380             "osconnector.connect_port_network() not implemented",
3381         )
3382
3383 1     def new_user(self, user_name, user_passwd, tenant_id=None):
3384         """Adds a new user to openstack VIM
3385         Returns the user identifier"""
3386 0         self.logger.debug("osconnector: Adding a new user to VIM")
3387
3388 0         try:
3389 0             self._reload_connection()
3390 0             user = self.keystone.users.create(
3391                 user_name, password=user_passwd, default_project=tenant_id
3392             )
3393             # self.keystone.tenants.add_user(self.k_creds["username"], #role)
3394
3395 0             return user.id
3396 0         except ksExceptions.ConnectionError as e:
3397 0             error_value = -vimconn.HTTP_Bad_Request
3398 0             error_text = (
3399                 type(e).__name__
3400                 + ": "
3401                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3402             )
3403 0         except ksExceptions.ClientException as e:  # TODO remove
3404 0             error_value = -vimconn.HTTP_Bad_Request
3405 0             error_text = (
3406                 type(e).__name__
3407                 + ": "
3408                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3409             )
3410
3411         # TODO insert exception vimconn.HTTP_Unauthorized
3412         # if reaching here is because an exception
3413 0         self.logger.debug("new_user " + error_text)
3414
3415 0         return error_value, error_text
3416
3417 1     def delete_user(self, user_id):
3418         """Delete a user from openstack VIM
3419         Returns the user identifier"""
3420 0         if self.debug:
3421 0             print("osconnector: Deleting  a  user from VIM")
3422
3423 0         try:
3424 0             self._reload_connection()
3425 0             self.keystone.users.delete(user_id)
3426
3427 0             return 1, user_id
3428 0         except ksExceptions.ConnectionError as e:
3429 0             error_value = -vimconn.HTTP_Bad_Request
3430 0             error_text = (
3431                 type(e).__name__
3432                 + ": "
3433                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3434             )
3435 0         except ksExceptions.NotFound as e:
3436 0             error_value = -vimconn.HTTP_Not_Found
3437 0             error_text = (
3438                 type(e).__name__
3439                 + ": "
3440                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3441             )
3442 0         except ksExceptions.ClientException as e:  # TODO remove
3443 0             error_value = -vimconn.HTTP_Bad_Request
3444 0             error_text = (
3445                 type(e).__name__
3446                 + ": "
3447                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3448             )
3449
3450         # TODO insert exception vimconn.HTTP_Unauthorized
3451         # if reaching here is because an exception
3452 0         self.logger.debug("delete_tenant " + error_text)
3453
3454 0         return error_value, error_text
3455
3456 1     def get_hosts_info(self):
3457         """Get the information of deployed hosts
3458         Returns the hosts content"""
3459 0         if self.debug:
3460 0             print("osconnector: Getting Host info from VIM")
3461
3462 0         try:
3463 0             h_list = []
3464 0             self._reload_connection()
3465 0             hypervisors = self.nova.hypervisors.list()
3466
3467 0             for hype in hypervisors:
3468 0                 h_list.append(hype.to_dict())
3469
3470 0             return 1, {"hosts": h_list}
3471 0         except nvExceptions.NotFound as e:
3472 0             error_value = -vimconn.HTTP_Not_Found
3473 0             error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3474 0         except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3475 0             error_value = -vimconn.HTTP_Bad_Request
3476 0             error_text = (
3477                 type(e).__name__
3478                 + ": "
3479                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3480             )
3481
3482         # TODO insert exception vimconn.HTTP_Unauthorized
3483         # if reaching here is because an exception
3484 0         self.logger.debug("get_hosts_info " + error_text)
3485
3486 0         return error_value, error_text
3487
3488 1     def get_hosts(self, vim_tenant):
3489         """Get the hosts and deployed instances
3490         Returns the hosts content"""
3491 0         r, hype_dict = self.get_hosts_info()
3492
3493 0         if r < 0:
3494 0             return r, hype_dict
3495
3496 0         hypervisors = hype_dict["hosts"]
3497
3498 0         try:
3499 0             servers = self.nova.servers.list()
3500 0             for hype in hypervisors:
3501 0                 for server in servers:
3502 0                     if (
3503                         server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3504                         == hype["hypervisor_hostname"]
3505                     ):
3506 0                         if "vm" in hype:
3507 0                             hype["vm"].append(server.id)
3508                         else:
3509 0                             hype["vm"] = [server.id]
3510
3511 0             return 1, hype_dict
3512 0         except nvExceptions.NotFound as e:
3513 0             error_value = -vimconn.HTTP_Not_Found
3514 0             error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3515 0         except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3516 0             error_value = -vimconn.HTTP_Bad_Request
3517 0             error_text = (
3518                 type(e).__name__
3519                 + ": "
3520                 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3521             )
3522
3523         # TODO insert exception vimconn.HTTP_Unauthorized
3524         # if reaching here is because an exception
3525 0         self.logger.debug("get_hosts " + error_text)
3526
3527 0         return error_value, error_text
3528
3529 1     def new_classification(self, name, ctype, definition):
3530 1         self.logger.debug(
3531             "Adding a new (Traffic) Classification to VIM, named %s", name
3532         )
3533
3534 1         try:
3535 1             new_class = None
3536 1             self._reload_connection()
3537
3538 1             if ctype not in supportedClassificationTypes:
3539 1                 raise vimconn.VimConnNotSupportedException(
3540                     "OpenStack VIM connector does not support provided "
3541                     "Classification Type {}, supported ones are: {}".format(
3542                         ctype, supportedClassificationTypes
3543                     )
3544                 )
3545
3546 1             if not self._validate_classification(ctype, definition):
3547 0                 raise vimconn.VimConnException(
3548                     "Incorrect Classification definition for the type specified."
3549                 )
3550
3551 1             classification_dict = definition
3552 1             classification_dict["name"] = name
3553 1             new_class = self.neutron.create_sfc_flow_classifier(
3554                 {"flow_classifier": classification_dict}
3555             )
3556
3557 1             return new_class["flow_classifier"]["id"]
3558 1         except (
3559             neExceptions.ConnectionFailed,
3560             ksExceptions.ClientException,
3561             neExceptions.NeutronException,
3562             ConnectionError,
3563         ) as e:
3564 0             self.logger.error("Creation of Classification failed.")
3565 0             self._format_exception(e)
3566
3567 1     def get_classification(self, class_id):
3568 1         self.logger.debug(" Getting Classification %s from VIM", class_id)
3569 1         filter_dict = {"id": class_id}
3570 1         class_list = self.get_classification_list(filter_dict)
3571
3572 1         if len(class_list) == 0:
3573 1             raise vimconn.VimConnNotFoundException(
3574                 "Classification '{}' not found".format(class_id)
3575             )
3576 1         elif len(class_list) > 1:
3577 1             raise vimconn.VimConnConflictException(
3578                 "Found more than one Classification with this criteria"
3579             )
3580
3581 1         classification = class_list[0]
3582
3583 1         return classification
3584
3585 1     def get_classification_list(self, filter_dict={}):
3586 1         self.logger.debug(
3587             "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3588         )
3589
3590 1         try:
3591 1             filter_dict_os = filter_dict.copy()
3592 1             self._reload_connection()
3593
3594 1             if self.api_version3 and "tenant_id" in filter_dict_os:
3595 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3596
3597 1             classification_dict = self.neutron.list_sfc_flow_classifiers(
3598                 **filter_dict_os
3599             )
3600 1             classification_list = classification_dict["flow_classifiers"]
3601 1             self.__classification_os2mano(classification_list)
3602
3603 1             return classification_list
3604 0         except (
3605             neExceptions.ConnectionFailed,
3606             ksExceptions.ClientException,
3607             neExceptions.NeutronException,
3608             ConnectionError,
3609         ) as e:
3610 0             self._format_exception(e)
3611
3612 1     def delete_classification(self, class_id):
3613 1         self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3614
3615 1         try:
3616 1             self._reload_connection()
3617 1             self.neutron.delete_sfc_flow_classifier(class_id)
3618
3619 1             return class_id
3620 0         except (
3621             neExceptions.ConnectionFailed,
3622             neExceptions.NeutronException,
3623             ksExceptions.ClientException,
3624             neExceptions.NeutronException,
3625             ConnectionError,
3626         ) as e:
3627 0             self._format_exception(e)
3628
3629 1     def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3630 1         self.logger.debug(
3631             "Adding a new Service Function Instance to VIM, named '%s'", name
3632         )
3633
3634 1         try:
3635 1             new_sfi = None
3636 1             self._reload_connection()
3637 1             correlation = None
3638
3639 1             if sfc_encap:
3640 1                 correlation = "nsh"
3641
3642 1             if len(ingress_ports) != 1:
3643 1                 raise vimconn.VimConnNotSupportedException(
3644                     "OpenStack VIM connector can only have 1 ingress port per SFI"
3645                 )
3646
3647 1             if len(egress_ports) != 1:
3648 1                 raise vimconn.VimConnNotSupportedException(
3649                     "OpenStack VIM connector can only have 1 egress port per SFI"
3650                 )
3651
3652 1             sfi_dict = {
3653                 "name": name,
3654                 "ingress": ingress_ports[0],
3655                 "egress": egress_ports[0],
3656                 "service_function_parameters": {"correlation": correlation},
3657             }
3658 1             new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3659
3660 1             return new_sfi["port_pair"]["id"]
3661 1         except (
3662             neExceptions.ConnectionFailed,
3663             ksExceptions.ClientException,
3664             neExceptions.NeutronException,
3665             ConnectionError,
3666         ) as e:
3667 0             if new_sfi:
3668 0                 try:
3669 0                     self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3670 0                 except Exception:
3671 0                     self.logger.error(
3672                         "Creation of Service Function Instance failed, with "
3673                         "subsequent deletion failure as well."
3674                     )
3675
3676 0             self._format_exception(e)
3677
3678 1     def get_sfi(self, sfi_id):
3679 1         self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3680 1         filter_dict = {"id": sfi_id}
3681 1         sfi_list = self.get_sfi_list(filter_dict)
3682
3683 1         if len(sfi_list) == 0:
3684 1             raise vimconn.VimConnNotFoundException(
3685                 "Service Function Instance '{}' not found".format(sfi_id)
3686             )
3687 1         elif len(sfi_list) > 1:
3688 1             raise vimconn.VimConnConflictException(
3689                 "Found more than one Service Function Instance with this criteria"
3690             )
3691
3692 1         sfi = sfi_list[0]
3693
3694 1         return sfi
3695
3696 1     def get_sfi_list(self, filter_dict={}):
3697 1         self.logger.debug(
3698             "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3699         )
3700
3701 1         try:
3702 1             self._reload_connection()
3703 1             filter_dict_os = filter_dict.copy()
3704
3705 1             if self.api_version3 and "tenant_id" in filter_dict_os:
3706 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3707
3708 1             sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3709 1             sfi_list = sfi_dict["port_pairs"]
3710 1             self.__sfi_os2mano(sfi_list)
3711
3712 1             return sfi_list
3713 0         except (
3714             neExceptions.ConnectionFailed,
3715             ksExceptions.ClientException,
3716             neExceptions.NeutronException,
3717             ConnectionError,
3718         ) as e:
3719 0             self._format_exception(e)
3720
3721 1     def delete_sfi(self, sfi_id):
3722 1         self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3723
3724 1         try:
3725 1             self._reload_connection()
3726 1             self.neutron.delete_sfc_port_pair(sfi_id)
3727
3728 1             return sfi_id
3729 0         except (
3730             neExceptions.ConnectionFailed,
3731             neExceptions.NeutronException,
3732             ksExceptions.ClientException,
3733             neExceptions.NeutronException,
3734             ConnectionError,
3735         ) as e:
3736 0             self._format_exception(e)
3737
3738 1     def new_sf(self, name, sfis, sfc_encap=True):
3739 1         self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3740
3741 1         try:
3742 1             new_sf = None
3743 1             self._reload_connection()
3744             # correlation = None
3745             # if sfc_encap:
3746             #     correlation = "nsh"
3747
3748 1             for instance in sfis:
3749 1                 sfi = self.get_sfi(instance)
3750
3751 1                 if sfi.get("sfc_encap") != sfc_encap:
3752 1                     raise vimconn.VimConnNotSupportedException(
3753                         "OpenStack VIM connector requires all SFIs of the "
3754                         "same SF to share the same SFC Encapsulation"
3755                     )
3756
3757 1             sf_dict = {"name": name, "port_pairs": sfis}
3758 1             new_sf = self.neutron.create_sfc_port_pair_group(
3759                 {"port_pair_group": sf_dict}
3760             )
3761
3762 1             return new_sf["port_pair_group"]["id"]
3763 1         except (
3764             neExceptions.ConnectionFailed,
3765             ksExceptions.ClientException,
3766             neExceptions.NeutronException,
3767             ConnectionError,
3768         ) as e:
3769 0             if new_sf:
3770 0                 try:
3771 0                     self.neutron.delete_sfc_port_pair_group(
3772                         new_sf["port_pair_group"]["id"]
3773                     )
3774 0                 except Exception:
3775 0                     self.logger.error(
3776                         "Creation of Service Function failed, with "
3777                         "subsequent deletion failure as well."
3778                     )
3779
3780 0             self._format_exception(e)
3781
3782 1     def get_sf(self, sf_id):
3783 1         self.logger.debug("Getting Service Function %s from VIM", sf_id)
3784 1         filter_dict = {"id": sf_id}
3785 1         sf_list = self.get_sf_list(filter_dict)
3786
3787 1         if len(sf_list) == 0:
3788 1             raise vimconn.VimConnNotFoundException(
3789                 "Service Function '{}' not found".format(sf_id)
3790             )
3791 1         elif len(sf_list) > 1:
3792 1             raise vimconn.VimConnConflictException(
3793                 "Found more than one Service Function with this criteria"
3794             )
3795
3796 1         sf = sf_list[0]
3797
3798 1         return sf
3799
3800 1     def get_sf_list(self, filter_dict={}):
3801 1         self.logger.debug(
3802             "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3803         )
3804
3805 1         try:
3806 1             self._reload_connection()
3807 1             filter_dict_os = filter_dict.copy()
3808
3809 1             if self.api_version3 and "tenant_id" in filter_dict_os:
3810 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3811
3812 1             sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3813 1             sf_list = sf_dict["port_pair_groups"]
3814 1             self.__sf_os2mano(sf_list)
3815
3816 1             return sf_list
3817 0         except (
3818             neExceptions.ConnectionFailed,
3819             ksExceptions.ClientException,
3820             neExceptions.NeutronException,
3821             ConnectionError,
3822         ) as e:
3823 0             self._format_exception(e)
3824
3825 1     def delete_sf(self, sf_id):
3826 1         self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3827
3828 1         try:
3829 1             self._reload_connection()
3830 1             self.neutron.delete_sfc_port_pair_group(sf_id)
3831
3832 1             return sf_id
3833 0         except (
3834             neExceptions.ConnectionFailed,
3835             neExceptions.NeutronException,
3836             ksExceptions.ClientException,
3837             neExceptions.NeutronException,
3838             ConnectionError,
3839         ) as e:
3840 0             self._format_exception(e)
3841
3842 1     def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3843 1         self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3844
3845 1         try:
3846 1             new_sfp = None
3847 1             self._reload_connection()
3848             # In networking-sfc the MPLS encapsulation is legacy
3849             # should be used when no full SFC Encapsulation is intended
3850 1             correlation = "mpls"
3851
3852 1             if sfc_encap:
3853 1                 correlation = "nsh"
3854
3855 1             sfp_dict = {
3856                 "name": name,
3857                 "flow_classifiers": classifications,
3858                 "port_pair_groups": sfs,
3859                 "chain_parameters": {"correlation": correlation},
3860             }
3861
3862 1             if spi:
3863 1                 sfp_dict["chain_id"] = spi
3864
3865 1             new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3866
3867 1             return new_sfp["port_chain"]["id"]
3868 0         except (
3869             neExceptions.ConnectionFailed,
3870             ksExceptions.ClientException,
3871             neExceptions.NeutronException,
3872             ConnectionError,
3873         ) as e:
3874 0             if new_sfp:
3875 0                 try:
3876 0                     self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3877 0                 except Exception:
3878 0                     self.logger.error(
3879                         "Creation of Service Function Path failed, with "
3880                         "subsequent deletion failure as well."
3881                     )
3882
3883 0             self._format_exception(e)
3884
3885 1     def get_sfp(self, sfp_id):
3886 1         self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
3887
3888 1         filter_dict = {"id": sfp_id}
3889 1         sfp_list = self.get_sfp_list(filter_dict)
3890
3891 1         if len(sfp_list) == 0:
3892 1             raise vimconn.VimConnNotFoundException(
3893                 "Service Function Path '{}' not found".format(sfp_id)
3894             )
3895 1         elif len(sfp_list) > 1:
3896 1             raise vimconn.VimConnConflictException(
3897                 "Found more than one Service Function Path with this criteria"
3898             )
3899
3900 1         sfp = sfp_list[0]
3901
3902 1         return sfp
3903
3904 1     def get_sfp_list(self, filter_dict={}):
3905 1         self.logger.debug(
3906             "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3907         )
3908
3909 1         try:
3910 1             self._reload_connection()
3911 1             filter_dict_os = filter_dict.copy()
3912
3913 1             if self.api_version3 and "tenant_id" in filter_dict_os:
3914 0                 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3915
3916 1             sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
3917 1             sfp_list = sfp_dict["port_chains"]
3918 1             self.__sfp_os2mano(sfp_list)
3919
3920 1             return sfp_list
3921 0         except (
3922             neExceptions.ConnectionFailed,
3923             ksExceptions.ClientException,
3924             neExceptions.NeutronException,
3925             ConnectionError,
3926         ) as e:
3927 0             self._format_exception(e)
3928
3929 1     def delete_sfp(self, sfp_id):
3930 1         self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
3931
3932 1         try:
3933 1             self._reload_connection()
3934 1             self.neutron.delete_sfc_port_chain(sfp_id)
3935
3936 1             return sfp_id
3937 0         except (
3938             neExceptions.ConnectionFailed,
3939             neExceptions.NeutronException,
3940             ksExceptions.ClientException,
3941             neExceptions.NeutronException,
3942             ConnectionError,
3943         ) as e:
3944 0             self._format_exception(e)
3945
3946 1     def refresh_sfps_status(self, sfp_list):
3947         """Get the status of the service function path
3948         Params: the list of sfp identifiers
3949         Returns a dictionary with:
3950             vm_id:          #VIM id of this service function path
3951                 status:     #Mandatory. Text with one of:
3952                             #  DELETED (not found at vim)
3953                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3954                             #  OTHER (Vim reported other status not understood)
3955                             #  ERROR (VIM indicates an ERROR status)
3956                             #  ACTIVE,
3957                             #  CREATING (on building process)
3958                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
3959                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)F
3960         """
3961 0         sfp_dict = {}
3962 0         self.logger.debug(
3963             "refresh_sfps status: Getting tenant SFP information from VIM"
3964         )
3965
3966 0         for sfp_id in sfp_list:
3967 0             sfp = {}
3968
3969 0             try:
3970 0                 sfp_vim = self.get_sfp(sfp_id)
3971
3972 0                 if sfp_vim["spi"]:
3973 0                     sfp["status"] = vmStatus2manoFormat["ACTIVE"]
3974                 else:
3975 0                     sfp["status"] = "OTHER"
3976 0                     sfp["error_msg"] = "VIM status reported " + sfp["status"]
3977
3978 0                 sfp["vim_info"] = self.serialize(sfp_vim)
3979
3980 0                 if sfp_vim.get("fault"):
3981 0                     sfp["error_msg"] = str(sfp_vim["fault"])
3982 0             except vimconn.VimConnNotFoundException as e:
3983 0                 self.logger.error("Exception getting sfp status: %s", str(e))
3984 0                 sfp["status"] = "DELETED"
3985 0                 sfp["error_msg"] = str(e)
3986 0             except vimconn.VimConnException as e:
3987 0                 self.logger.error("Exception getting sfp status: %s", str(e))
3988 0                 sfp["status"] = "VIM_ERROR"
3989 0                 sfp["error_msg"] = str(e)
3990
3991 0             sfp_dict[sfp_id] = sfp
3992
3993 0         return sfp_dict
3994
3995 1     def refresh_sfis_status(self, sfi_list):
3996         """Get the status of the service function instances
3997         Params: the list of sfi identifiers
3998         Returns a dictionary with:
3999             vm_id:          #VIM id of this service function instance
4000                 status:     #Mandatory. Text with one of:
4001                             #  DELETED (not found at vim)
4002                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4003                             #  OTHER (Vim reported other status not understood)
4004                             #  ERROR (VIM indicates an ERROR status)
4005                             #  ACTIVE,
4006                             #  CREATING (on building process)
4007                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
4008                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
4009         """
4010 0         sfi_dict = {}
4011 0         self.logger.debug(
4012             "refresh_sfis status: Getting tenant sfi information from VIM"
4013         )
4014
4015 0         for sfi_id in sfi_list:
4016 0             sfi = {}
4017
4018 0             try:
4019 0                 sfi_vim = self.get_sfi(sfi_id)
4020
4021 0                 if sfi_vim:
4022 0                     sfi["status"] = vmStatus2manoFormat["ACTIVE"]
4023                 else:
4024 0                     sfi["status"] = "OTHER"
4025 0                     sfi["error_msg"] = "VIM status reported " + sfi["status"]
4026
4027 0                 sfi["vim_info"] = self.serialize(sfi_vim)
4028
4029 0                 if sfi_vim.get("fault"):
4030 0                     sfi["error_msg"] = str(sfi_vim["fault"])
4031 0             except vimconn.VimConnNotFoundException as e:
4032 0                 self.logger.error("Exception getting sfi status: %s", str(e))
4033 0                 sfi["status"] = "DELETED"
4034 0                 sfi["error_msg"] = str(e)
4035 0             except vimconn.VimConnException as e:
4036 0                 self.logger.error("Exception getting sfi status: %s", str(e))
4037 0                 sfi["status"] = "VIM_ERROR"
4038 0                 sfi["error_msg"] = str(e)
4039
4040 0             sfi_dict[sfi_id] = sfi
4041
4042 0         return sfi_dict
4043
4044 1     def refresh_sfs_status(self, sf_list):
4045         """Get the status of the service functions
4046         Params: the list of sf identifiers
4047         Returns a dictionary with:
4048             vm_id:          #VIM id of this service function
4049                 status:     #Mandatory. Text with one of:
4050                             #  DELETED (not found at vim)
4051                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4052                             #  OTHER (Vim reported other status not understood)
4053                             #  ERROR (VIM indicates an ERROR status)
4054                             #  ACTIVE,
4055                             #  CREATING (on building process)
4056                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
4057                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
4058         """
4059 0         sf_dict = {}
4060 0         self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
4061
4062 0         for sf_id in sf_list:
4063 0             sf = {}
4064
4065 0             try:
4066 0                 sf_vim = self.get_sf(sf_id)
4067
4068 0                 if sf_vim:
4069 0                     sf["status"] = vmStatus2manoFormat["ACTIVE"]
4070                 else:
4071 0                     sf["status"] = "OTHER"
4072 0                     sf["error_msg"] = "VIM status reported " + sf_vim["status"]
4073
4074 0                 sf["vim_info"] = self.serialize(sf_vim)
4075
4076 0                 if sf_vim.get("fault"):
4077 0                     sf["error_msg"] = str(sf_vim["fault"])
4078 0             except vimconn.VimConnNotFoundException as e:
4079 0                 self.logger.error("Exception getting sf status: %s", str(e))
4080 0                 sf["status"] = "DELETED"
4081 0                 sf["error_msg"] = str(e)
4082 0             except vimconn.VimConnException as e:
4083 0                 self.logger.error("Exception getting sf status: %s", str(e))
4084 0                 sf["status"] = "VIM_ERROR"
4085 0                 sf["error_msg"] = str(e)
4086
4087 0             sf_dict[sf_id] = sf
4088
4089 0         return sf_dict
4090
4091 1     def refresh_classifications_status(self, classification_list):
4092         """Get the status of the classifications
4093         Params: the list of classification identifiers
4094         Returns a dictionary with:
4095             vm_id:          #VIM id of this classifier
4096                 status:     #Mandatory. Text with one of:
4097                             #  DELETED (not found at vim)
4098                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4099                             #  OTHER (Vim reported other status not understood)
4100                             #  ERROR (VIM indicates an ERROR status)
4101                             #  ACTIVE,
4102                             #  CREATING (on building process)
4103                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
4104                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
4105         """
4106 0         classification_dict = {}
4107 0         self.logger.debug(
4108             "refresh_classifications status: Getting tenant classification information from VIM"
4109         )
4110
4111 0         for classification_id in classification_list:
4112 0             classification = {}
4113
4114 0             try:
4115 0                 classification_vim = self.get_classification(classification_id)
4116
4117 0                 if classification_vim:
4118 0                     classification["status"] = vmStatus2manoFormat["ACTIVE"]
4119                 else:
4120 0                     classification["status"] = "OTHER"
4121 0                     classification["error_msg"] = (
4122                         "VIM status reported " + classification["status"]
4123                     )
4124
4125 0                 classification["vim_info"] = self.serialize(classification_vim)
4126
4127 0                 if classification_vim.get("fault"):
4128 0                     classification["error_msg"] = str(classification_vim["fault"])
4129 0             except vimconn.VimConnNotFoundException as e:
4130 0                 self.logger.error("Exception getting classification status: %s", str(e))
4131 0                 classification["status"] = "DELETED"
4132 0                 classification["error_msg"] = str(e)
4133 0             except vimconn.VimConnException as e:
4134 0                 self.logger.error("Exception getting classification status: %s", str(e))
4135 0                 classification["status"] = "VIM_ERROR"
4136 0                 classification["error_msg"] = str(e)
4137
4138 0             classification_dict[classification_id] = classification
4139
4140 0         return classification_dict
4141
4142 1     def new_affinity_group(self, affinity_group_data):
4143         """Adds a server group to VIM
4144             affinity_group_data contains a dictionary with information, keys:
4145                 name: name in VIM for the server group
4146                 type: affinity or anti-affinity
4147                 scope: Only nfvi-node allowed
4148         Returns the server group identifier"""
4149 0         self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
4150
4151 0         try:
4152 0             name = affinity_group_data["name"]
4153 0             policy = affinity_group_data["type"]
4154
4155 0             self._reload_connection()
4156 0             new_server_group = self.nova.server_groups.create(name, policy)
4157
4158 0             return new_server_group.id
4159 0         except (
4160             ksExceptions.ClientException,
4161             nvExceptions.ClientException,
4162             ConnectionError,
4163             KeyError,
4164         ) as e:
4165 0             self._format_exception(e)
4166
4167 1     def get_affinity_group(self, affinity_group_id):
4168         """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4169 0         self.logger.debug("Getting flavor '%s'", affinity_group_id)
4170 0         try:
4171 0             self._reload_connection()
4172 0             server_group = self.nova.server_groups.find(id=affinity_group_id)
4173
4174 0             return server_group.to_dict()
4175 0         except (
4176             nvExceptions.NotFound,
4177             nvExceptions.ClientException,
4178             ksExceptions.ClientException,
4179             ConnectionError,
4180         ) as e:
4181 0             self._format_exception(e)
4182
4183 1     def delete_affinity_group(self, affinity_group_id):
4184         """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4185 0         self.logger.debug("Getting server group '%s'", affinity_group_id)
4186 0         try:
4187 0             self._reload_connection()
4188 0             self.nova.server_groups.delete(affinity_group_id)
4189
4190 0             return affinity_group_id
4191 0         except (
4192             nvExceptions.NotFound,
4193             ksExceptions.ClientException,
4194             nvExceptions.ClientException,
4195             ConnectionError,
4196         ) as e:
4197 0             self._format_exception(e)
4198
4199 1     def get_vdu_state(self, vm_id):
4200         """
4201         Getting the state of a vdu
4202         param:
4203             vm_id: ID of an instance
4204         """
4205 0         self.logger.debug("Getting the status of VM")
4206 0         self.logger.debug("VIM VM ID %s", vm_id)
4207 0         self._reload_connection()
4208 0         server = self.nova.servers.find(id=vm_id)
4209 0         server_dict = server.to_dict()
4210 0         vdu_data = [
4211             server_dict["status"],
4212             server_dict["flavor"]["id"],
4213             server_dict["OS-EXT-SRV-ATTR:host"],
4214             server_dict["OS-EXT-AZ:availability_zone"],
4215         ]
4216 0         self.logger.debug("vdu_data %s", vdu_data)
4217 0         return vdu_data
4218
4219 1     def check_compute_availability(self, host, server_flavor_details):
4220 0         self._reload_connection()
4221 0         hypervisor_search = self.nova.hypervisors.search(
4222             hypervisor_match=host, servers=True
4223         )
4224 0         for hypervisor in hypervisor_search:
4225 0             hypervisor_id = hypervisor.to_dict()["id"]
4226 0             hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4227 0             hypervisor_dict = hypervisor_details.to_dict()
4228 0             hypervisor_temp = json.dumps(hypervisor_dict)
4229 0             hypervisor_json = json.loads(hypervisor_temp)
4230 0             resources_available = [
4231                 hypervisor_json["free_ram_mb"],
4232                 hypervisor_json["disk_available_least"],
4233                 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
4234             ]
4235 0             compute_available = all(
4236                 x > y for x, y in zip(resources_available, server_flavor_details)
4237             )
4238 0             if compute_available:
4239 0                 return host
4240
4241 1     def check_availability_zone(
4242         self, old_az, server_flavor_details, old_host, host=None
4243     ):
4244 0         self._reload_connection()
4245 0         az_check = {"zone_check": False, "compute_availability": None}
4246 0         aggregates_list = self.nova.aggregates.list()
4247 0         for aggregate in aggregates_list:
4248 0             aggregate_details = aggregate.to_dict()
4249 0             aggregate_temp = json.dumps(aggregate_details)
4250 0             aggregate_json = json.loads(aggregate_temp)
4251 0             if aggregate_json["availability_zone"] == old_az:
4252 0                 hosts_list = aggregate_json["hosts"]
4253 0                 if host is not None:
4254 0                     if host in hosts_list:
4255 0                         az_check["zone_check"] = True
4256 0                         available_compute_id = self.check_compute_availability(
4257                             host, server_flavor_details
4258                         )
4259 0                         if available_compute_id is not None:
4260 0                             az_check["compute_availability"] = available_compute_id
4261                 else:
4262 0                     for check_host in hosts_list:
4263 0                         if check_host != old_host:
4264 0                             available_compute_id = self.check_compute_availability(
4265                                 check_host, server_flavor_details
4266                             )
4267 0                             if available_compute_id is not None:
4268 0                                 az_check["zone_check"] = True
4269 0                                 az_check["compute_availability"] = available_compute_id
4270 0                                 break
4271                     else:
4272 0                         az_check["zone_check"] = True
4273 0         return az_check
4274
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu
        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to

        Returns ("MIGRATING", <target compute host>) on success.
        Raises VimConnException (HTTP_Bad_Request) when the target host is
        the current one, the availability-zone check fails, no compute with
        enough capacity is found, or the VM did not land on the target host.
        """
        self._reload_connection()
        vm_state = False
        # get_vdu_state returns [status, flavor_id, host, availability_zone]
        instance_state = self.get_vdu_state(vm_id)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]
        try:
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            # Resources the target compute must be able to provide
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]
            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            # Verify that the requested host (or any other host in the same
            # availability zone) has enough free resources for the flavor
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            if available_compute_id is not None:
                # Block live migration to the selected compute host
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )
                state = "MIGRATING"
                changed_compute_host = ""
                if state == "MIGRATING":
                    # Wait until the VM is ACTIVE again, then re-read its host
                    # to confirm where it actually landed
                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                    changed_compute_host = self.get_vdu_state(vm_id)[2]
                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )
                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                        available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
4357
4358 1     def resize_instance(self, vm_id, new_flavor_id):
4359         """
4360         For resizing the vm based on the given
4361         flavor details
4362         param:
4363             vm_id : ID of an instance
4364             new_flavor_id : Flavor id to be resized
4365         Return the status of a resized instance
4366         """
4367 0         self._reload_connection()
4368 0         self.logger.debug("resize the flavor of an instance")
4369 0         instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
4370 0         old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
4371 0         new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
4372 0         try:
4373 0             if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
4374 0                 if old_flavor_disk > new_flavor_disk:
4375 0                     raise nvExceptions.BadRequest(
4376                         400,
4377                         message="Server disk resize failed. Resize to lower disk flavor is not allowed",
4378                     )
4379                 else:
4380 0                     self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
4381 0                     vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
4382 0                     if vm_state:
4383 0                         instance_resized_status = self.confirm_resize(vm_id)
4384 0                         return instance_resized_status
4385                     else:
4386 0                         raise nvExceptions.BadRequest(
4387                             409,
4388                             message="Cannot 'resize' vm_state is in ERROR",
4389                         )
4390
4391             else:
4392 0                 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
4393 0                 raise nvExceptions.BadRequest(
4394                     409,
4395                     message="Cannot 'resize' instance while it is in vm_state resized",
4396                 )
4397 0         except (
4398             nvExceptions.BadRequest,
4399             nvExceptions.ClientException,
4400             nvExceptions.NotFound,
4401         ) as e:
4402 0             self._format_exception(e)
4403
4404 1     def confirm_resize(self, vm_id):
4405         """
4406         Confirm the resize of an instance
4407         param:
4408             vm_id: ID of an instance
4409         """
4410 0         self._reload_connection()
4411 0         self.nova.servers.confirm_resize(server=vm_id)
4412 0         if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4413 0             self.__wait_for_vm(vm_id, "ACTIVE")
4414 0         instance_status = self.get_vdu_state(vm_id)[0]
4415 0         return instance_status
4416
4417 1     def get_monitoring_data(self):
4418 1         try:
4419 1             self.logger.debug("Getting servers and ports data from Openstack VIMs.")
4420 1             self._reload_connection()
4421 1             all_servers = self.nova.servers.list(detailed=True)
4422 1             all_ports = self.neutron.list_ports()
4423 1             return all_servers, all_ports
4424 1         except (
4425             vimconn.VimConnException,
4426             vimconn.VimConnNotFoundException,
4427             vimconn.VimConnConnectionException,
4428         ) as e:
4429 1             raise vimconn.VimConnException(
4430                 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
4431             )