Coverage for RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py: 36%
1950 statements
coverage.py v7.6.12, created at 2025-04-14 12:04 +0000
# -*- coding: utf-8 -*-

##
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##

"""
osconnector implements all the methods to interact with openstack using the python-neutronclient.

For the VNF forwarding graph, the OpenStack VIM connector calls the
networking-sfc Neutron extension methods, whose resources are mapped
to the VIM connector's SFC resources as follows:
- Classification (OSM) -> Flow Classifier (Neutron)
- Service Function Instance (OSM) -> Port Pair (Neutron)
- Service Function (OSM) -> Port Pair Group (Neutron)
- Service Function Path (OSM) -> Port Chain (Neutron)
"""

import copy
from http.client import HTTPException
import json
import logging
from pprint import pformat
import random
import re
import time
from typing import Dict, List, Optional, Tuple

from cinderclient import client as cClient
import cinderclient.exceptions as cExceptions
from glanceclient import client as glClient
import glanceclient.exc as gl1Exceptions
from keystoneauth1 import session
from keystoneauth1.identity import v2, v3
import keystoneclient.exceptions as ksExceptions
import keystoneclient.v2_0.client as ksClient_v2
import keystoneclient.v3.client as ksClient_v3
import netaddr
from neutronclient.common import exceptions as neExceptions
from neutronclient.neutron import client as neClient
from novaclient import client as nClient, exceptions as nvExceptions
from osm_ro_plugin import vimconn
from requests.exceptions import ConnectionError
import yaml
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"
63"""contain the openstack virtual machine status to openmano status"""
64vmStatus2manoFormat = {
65 "ACTIVE": "ACTIVE",
66 "PAUSED": "PAUSED",
67 "SUSPENDED": "SUSPENDED",
68 "SHUTOFF": "INACTIVE",
69 "BUILD": "BUILD",
70 "ERROR": "ERROR",
71 "DELETED": "DELETED",
72}
73netStatus2manoFormat = {
74 "ACTIVE": "ACTIVE",
75 "PAUSED": "PAUSED",
76 "INACTIVE": "INACTIVE",
77 "BUILD": "BUILD",
78 "ERROR": "ERROR",
79 "DELETED": "DELETED",
80}
82supportedClassificationTypes = ["legacy_flow_classifier"]
84# global var to have a timeout creating and deleting volumes
85volume_timeout = 1800
86server_timeout = 1800
def catch_any_exception(func):
    def format_exception(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            vimconnector._format_exception(e)

    return format_exception
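# Illustrative sketch (not part of the connector): catch_any_exception is meant
# to decorate vimconnector methods so that any raised exception is translated
# into the corresponding vimconn.* exception by _format_exception. Assuming a
# hypothetical method "my_operation":
#
#     @catch_any_exception
#     def my_operation(self, some_id):
#         self._reload_connection()
#         return self.nova.servers.get(some_id)
#
# Note that _format_exception always re-raises, so the wrapper only returns
# normally when the wrapped call succeeds.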
class SafeDumper(yaml.SafeDumper):
    def represent_data(self, data):
        # Openstack APIs use custom subclasses of dict and YAML safe dumper
        # is designed to not handle that (reference issue 142 of pyyaml)
        if isinstance(data, dict) and data.__class__ != dict:
            # A simple solution is to convert those items back to dicts
            data = dict(data.items())

        return super(SafeDumper, self).represent_data(data)
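# Minimal sketch of why the dict normalization above matters. A plain
# yaml.SafeDumper refuses subclasses of dict, while this SafeDumper accepts
# them (the class "AttrDict" below is hypothetical, standing in for the
# dict subclasses returned by the OpenStack clients):
#
#     class AttrDict(dict):
#         pass
#
#     yaml.dump(AttrDict(a=1), Dumper=SafeDumper)       # -> "{a: 1}\n"
#     yaml.dump(AttrDict(a=1), Dumper=yaml.SafeDumper)  # -> RepresenterError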
class vimconnector(vimconn.VimConnector):
    def __init__(
        self,
        uuid,
        name,
        tenant_id,
        tenant_name,
        url,
        url_admin=None,
        user=None,
        passwd=None,
        log_level=None,
        config={},
        persistent_info={},
    ):
        """using common constructor parameters. In this case
        'url' is the keystone authorization url,
        'url_admin' is not used
        """
        api_version = config.get("APIversion")

        if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:APIversion. "
                "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
            )

        vim_type = config.get("vim_type")

        if vim_type and vim_type not in ("vio", "VIO"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:vim_type. "
                "Allowed values are 'vio' or 'VIO'".format(vim_type)
            )

        if config.get("dataplane_net_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
            )

        if config.get("multisegment_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("multisegment_vlan_range"), "multisegment_vlan_range"
            )

        vimconn.VimConnector.__init__(
            self,
            uuid,
            name,
            tenant_id,
            tenant_name,
            url,
            url_admin,
            user,
            passwd,
            log_level,
            config,
        )

        if self.config.get("insecure") and self.config.get("ca_cert"):
            raise vimconn.VimConnException(
                "options insecure and ca_cert are mutually exclusive"
            )

        self.verify = True

        if self.config.get("insecure"):
            self.verify = False

        if self.config.get("ca_cert"):
            self.verify = self.config.get("ca_cert")

        if not url:
            raise TypeError("url param can not be NoneType")

        self.persistent_info = persistent_info
        self.availability_zone = persistent_info.get("availability_zone", None)
        self.storage_availability_zone = None
        self.vm_av_zone = None
        self.session = persistent_info.get("session", {"reload_client": True})
        self.my_tenant_id = self.session.get("my_tenant_id")
        self.nova = self.session.get("nova")
        self.neutron = self.session.get("neutron")
        self.cinder = self.session.get("cinder")
        self.glance = self.session.get("glance")
        # self.glancev1 = self.session.get("glancev1")
        self.keystone = self.session.get("keystone")
        self.api_version3 = self.session.get("api_version3")
        self.vim_type = self.config.get("vim_type")

        if self.vim_type:
            self.vim_type = self.vim_type.upper()

        if self.config.get("use_internal_endpoint"):
            self.endpoint_type = "internalURL"
        else:
            self.endpoint_type = None

        logging.getLogger("urllib3").setLevel(logging.WARNING)
        logging.getLogger("keystoneauth").setLevel(logging.WARNING)
        logging.getLogger("novaclient").setLevel(logging.WARNING)
        self.logger = logging.getLogger("ro.vim.openstack")

        # allow security_groups to be a list or a single string
        if isinstance(self.config.get("security_groups"), str):
            self.config["security_groups"] = [self.config["security_groups"]]

        self.security_groups_id = None

        # ###### VIO Specific Changes #########
        if self.vim_type == "VIO":
            self.logger = logging.getLogger("ro.vim.vio")

        if log_level:
            self.logger.setLevel(getattr(logging, log_level))
    def __getitem__(self, index):
        """Get individual parameters.
        Throws KeyError"""
        if index == "project_domain_id":
            return self.config.get("project_domain_id")
        elif index == "user_domain_id":
            return self.config.get("user_domain_id")
        else:
            return vimconn.VimConnector.__getitem__(self, index)

    def __setitem__(self, index, value):
        """Set individual parameters and mark the session as dirty, forcing a connection reload.
        Throws KeyError"""
        if index == "project_domain_id":
            self.config["project_domain_id"] = value
        elif index == "user_domain_id":
            self.config["user_domain_id"] = value
        else:
            vimconn.VimConnector.__setitem__(self, index, value)

        self.session["reload_client"] = True
    def serialize(self, value):
        """Serialization of python basic types.

        In the case value is not serializable a message will be logged and a
        simple representation of the data that cannot be converted back to
        python is returned.
        """
        if isinstance(value, str):
            return value

        try:
            return yaml.dump(
                value, Dumper=SafeDumper, default_flow_style=True, width=256
            )
        except yaml.representer.RepresenterError:
            self.logger.debug(
                "The following entity cannot be serialized in YAML:\n\n%s\n\n",
                pformat(value),
                exc_info=True,
            )

            return str(value)
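    # Illustrative sketch (not part of the connector; "conn" stands for a
    # vimconnector instance): serialize() keeps strings as-is and YAML-dumps
    # everything else in flow style:
    #
    #     conn.serialize("already-a-string")     # -> "already-a-string"
    #     conn.serialize({"id": 1, "nets": []})  # -> "{id: 1, nets: []}\n"
    #
    # Anything SafeDumper cannot represent falls back to str(value).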
    def _reload_connection(self):
        """Called before any operation, it checks if credentials have changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seems that python client does this task for us :-)
        if self.session["reload_client"]:
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"

                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.logger.debug(f"Using Keystone client v3 for VIM {self.id}")
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.logger.debug(f"Using Keystone client v2 for VIM {self.id}")
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require a specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.60"

            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            if sess.get_all_version_data(service_type="volumev3"):
                self.logger.debug(f"Using Cinder client v3 for VIM {self.id}")
                self.cinder = self.session["cinder"] = cClient.Client(
                    3,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            elif sess.get_all_version_data(service_type="volumev2"):
                self.logger.debug(
                    f"Service type volumev3 not found. Using Cinder client v2 for VIM {self.id}"
                )
                self.cinder = self.session["cinder"] = cClient.Client(
                    2,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.logger.debug(
                    f"Service type not found. Using Cinder client v3 for VIM {self.id}"
                )
                self.cinder = self.session["cinder"] = cClient.Client(
                    3,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availability zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
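    # Minimal standalone sketch of the authentication flow above, using only
    # keystoneauth1 (all values are placeholders):
    #
    #     from keystoneauth1 import session
    #     from keystoneauth1.identity import v3
    #
    #     auth = v3.Password(
    #         auth_url="https://keystone.example.com:5000/v3",
    #         username="admin",
    #         password="secret",
    #         project_name="demo",
    #         project_domain_id="default",
    #         user_domain_id="default",
    #     )
    #     sess = session.Session(auth=auth, verify=True)
    #     print(sess.get_project_id())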
    def __net_os2mano(self, net_list_dict):
        """Transform the net openstack format to mano format
        net_list_dict can be a list of dict or a single dict"""
        if type(net_list_dict) is dict:
            net_list_ = (net_list_dict,)
        elif type(net_list_dict) is list:
            net_list_ = net_list_dict
        else:
            raise TypeError("param net_list_dict must be a list or a dictionary")

        for net in net_list_:
            if net.get("provider:network_type") == "vlan":
                net["type"] = "data"
            else:
                net["type"] = "bridge"
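    # Illustrative sketch (not part of the connector): the transformation is
    # done in place, tagging VLAN provider networks as dataplane networks;
    # any other network type (vxlan, flat, ...) becomes "bridge":
    #
    #     net = {"id": "n1", "provider:network_type": "vlan"}
    #     self.__net_os2mano(net)
    #     # net == {"id": "n1", "provider:network_type": "vlan", "type": "data"}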
    def __classification_os2mano(self, class_list_dict):
        """Transform the openstack format (Flow Classifier) to mano format
        (Classification) class_list_dict can be a list of dict or a single dict
        """
        if isinstance(class_list_dict, dict):
            class_list_ = [class_list_dict]
        elif isinstance(class_list_dict, list):
            class_list_ = class_list_dict
        else:
            raise TypeError("param class_list_dict must be a list or a dictionary")

        for classification in class_list_:
            id = classification.pop("id")
            name = classification.pop("name")
            description = classification.pop("description")
            project_id = classification.pop("project_id")
            tenant_id = classification.pop("tenant_id")
            original_classification = copy.deepcopy(classification)
            classification.clear()
            classification["ctype"] = "legacy_flow_classifier"
            classification["definition"] = original_classification
            classification["id"] = id
            classification["name"] = name
            classification["description"] = description
            classification["project_id"] = project_id
            classification["tenant_id"] = tenant_id

    def __sfi_os2mano(self, sfi_list_dict):
        """Transform the openstack format (Port Pair) to mano format (SFI)
        sfi_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfi_list_dict, dict):
            sfi_list_ = [sfi_list_dict]
        elif isinstance(sfi_list_dict, list):
            sfi_list_ = sfi_list_dict
        else:
            raise TypeError("param sfi_list_dict must be a list or a dictionary")

        for sfi in sfi_list_:
            sfi["ingress_ports"] = []
            sfi["egress_ports"] = []

            if sfi.get("ingress"):
                sfi["ingress_ports"].append(sfi["ingress"])

            if sfi.get("egress"):
                sfi["egress_ports"].append(sfi["egress"])

            del sfi["ingress"]
            del sfi["egress"]
            params = sfi.get("service_function_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfi["sfc_encap"] = sfc_encap
            del sfi["service_function_parameters"]
    def __sf_os2mano(self, sf_list_dict):
        """Transform the openstack format (Port Pair Group) to mano format (SF)
        sf_list_dict can be a list of dict or a single dict
        """
        if isinstance(sf_list_dict, dict):
            sf_list_ = [sf_list_dict]
        elif isinstance(sf_list_dict, list):
            sf_list_ = sf_list_dict
        else:
            raise TypeError("param sf_list_dict must be a list or a dictionary")

        for sf in sf_list_:
            del sf["port_pair_group_parameters"]
            sf["sfis"] = sf["port_pairs"]
            del sf["port_pairs"]

    def __sfp_os2mano(self, sfp_list_dict):
        """Transform the openstack format (Port Chain) to mano format (SFP)
        sfp_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfp_list_dict, dict):
            sfp_list_ = [sfp_list_dict]
        elif isinstance(sfp_list_dict, list):
            sfp_list_ = sfp_list_dict
        else:
            raise TypeError("param sfp_list_dict must be a list or a dictionary")

        for sfp in sfp_list_:
            params = sfp.pop("chain_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfp["sfc_encap"] = sfc_encap
            sfp["spi"] = sfp.pop("chain_id")
            sfp["classifications"] = sfp.pop("flow_classifiers")
            sfp["service_functions"] = sfp.pop("port_pair_groups")
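    # Illustrative sketch (not part of the connector) of the Port Chain -> SFP
    # mapping performed above, with placeholder ids:
    #
    #     sfp = {
    #         "chain_parameters": {"correlation": "nsh"},
    #         "chain_id": 7,
    #         "flow_classifiers": ["fc-1"],
    #         "port_pair_groups": ["ppg-1", "ppg-2"],
    #     }
    #     self.__sfp_os2mano(sfp)
    #     # sfp == {"sfc_encap": True, "spi": 7, "classifications": ["fc-1"],
    #     #         "service_functions": ["ppg-1", "ppg-2"]}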
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.
    @staticmethod
    def _format_exception(exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
        message_error = str(exception)
        tip = ""

        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                nvExceptions.ResourceNotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
                cExceptions.NotFound,
            ),
        ):
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
                cExceptions.ConnectionError,
            ),
        ):
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
                gl1Exceptions.BadRequest,
                cExceptions.BadRequest,
            ),
        ):
            if message_error == "OS-EXT-SRV-ATTR:host":
                tip = " (If the user does not have non-admin credentials, this attribute will be missing)"
                raise vimconn.VimConnInsufficientCredentials(
                    type(exception).__name__ + ": " + message_error + tip
                )

            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
                cExceptions.ClientException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, vimconn.VimConnException):
            raise exception
        else:  # ()
            logger = logging.getLogger("ro.vim.openstack")
            logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
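    # Illustrative sketch (not part of the connector) of the translation table
    # implemented above: not-found errors become VimConnNotFoundException,
    # connection/SSL problems become VimConnConnectionException, bad requests
    # become VimConnException (or VimConnInsufficientCredentials for the
    # OS-EXT-SRV-ATTR:host case), generic client errors become
    # VimConnUnexpectedResponse, and conflicts become VimConnConflictException:
    #
    #     try:
    #         vimconnector._format_exception(nvExceptions.NotFound(404))
    #     except vimconn.VimConnNotFoundException as exc:
    #         print(exc)  # "NotFound: ..."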
    def _get_ids_from_name(self, security_group_name=None):
        """
        Obtain ids from name of tenant and security_groups. Store at self.security_groups_id
        :return: None
        """
        # get tenant_id if only tenant_name is supplied
        self._reload_connection()

        if not self.my_tenant_id:
            raise vimconn.VimConnConnectionException(
                "Error getting tenant information from name={} id={}".format(
                    self.tenant_name, self.tenant_id
                )
            )

        neutron_sg_list = self.neutron.list_security_groups(
            tenant_id=self.my_tenant_id
        )["security_groups"]

        if self.config.get("security_groups") and not self.security_groups_id:
            # convert from name to id
            self.security_groups_id = []

            for sg in self.config.get("security_groups"):
                for neutron_sg in neutron_sg_list:
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )

        if security_group_name is not None:
            self.security_groups_id = []

            for neutron_sg in neutron_sg_list:
                if security_group_name in (neutron_sg["id"], neutron_sg["name"]):
                    self.security_groups_id.append(neutron_sg["id"])
                    break
            else:
                self.security_groups_id = None
                raise vimconn.VimConnConnectionException(
                    "Not found security group {} for this tenant".format(
                        security_group_name
                    )
                )
    def _find_nova_server(self, vm_id):
        """
        Returns the VM instance from Openstack and completes it with flavor ID
        Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
        """
        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)
            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
            server_dict = server.to_dict()

            try:
                if server_dict["flavor"].get("original_name"):
                    server_dict["flavor"]["id"] = self.nova.flavors.find(
                        name=server_dict["flavor"]["original_name"]
                    ).id
            except nClient.exceptions.NotFound as e:
                self.logger.warning(str(e.message))

            return server_dict
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def check_vim_connectivity(self):
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})
    def get_tenant_list(self, filter_dict={}):
        """Obtain tenants of VIM
        filter_dict can contain the following keys:
            name: filter by tenant name
            id: filter by tenant uuid/id
            <other VIM specific>
        Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
        """
        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()

            if self.api_version3:
                project_class_list = self.keystone.projects.list(
                    name=filter_dict.get("name")
                )
            else:
                project_class_list = self.keystone.tenants.findall(**filter_dict)

            project_list = []

            for project in project_class_list:
                if filter_dict.get("id") and filter_dict["id"] != project.id:
                    continue

                project_list.append(project.to_dict())

            return project_list
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
    def new_tenant(self, tenant_name, tenant_description):
        """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
        self.logger.debug("Adding a new tenant name: %s", tenant_name)

        try:
            self._reload_connection()

            if self.api_version3:
                project = self.keystone.projects.create(
                    tenant_name,
                    self.config.get("project_domain_id", "default"),
                    description=tenant_description,
                    is_domain=False,
                )
            else:
                project = self.keystone.tenants.create(tenant_name, tenant_description)

            return project.id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_tenant(self, tenant_id):
        """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
        self.logger.debug("Deleting tenant %s from VIM", tenant_id)

        try:
            self._reload_connection()

            if self.api_version3:
                self.keystone.projects.delete(tenant_id)
            else:
                self.keystone.tenants.delete(tenant_id)

            return tenant_id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)
    def new_network(
        self,
        net_name,
        net_type,
        ip_profile=None,
        shared=False,
        provider_network_profile=None,
    ):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data': underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/used by other tenants/organizations
            'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
                physical-network: physnet-label}
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
        )
        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))

        try:
            vlan = None

            if provider_network_profile:
                vlan = provider_network_profile.get("segmentation-id")

            new_net = None
            created_items = {}
            self._reload_connection()
            network_dict = {"name": net_name, "admin_state_up": True}

            if net_type in ("data", "ptp") or provider_network_profile:
                provider_physical_network = None

                if provider_network_profile and provider_network_profile.get(
                    "physical-network"
                ):
                    provider_physical_network = provider_network_profile.get(
                        "physical-network"
                    )

                    # provider-network must be one of the dataplane_physical_net if this is a list. If it is string
                    # or not declared, just ignore the checking
                    if (
                        isinstance(
                            self.config.get("dataplane_physical_net"), (tuple, list)
                        )
                        and provider_physical_network
                        not in self.config["dataplane_physical_net"]
                    ):
                        raise vimconn.VimConnConflictException(
                            "Invalid parameter 'provider-network:physical-network' "
                            "for network creation. '{}' is not one of the declared "
                            "list at VIM_config:dataplane_physical_net".format(
                                provider_physical_network
                            )
                        )

                # use the default dataplane_physical_net
                if not provider_physical_network:
                    provider_physical_network = self.config.get(
                        "dataplane_physical_net"
                    )

                    # if it is non-empty list, use the first value. If it is a string use the value directly
                    if (
                        isinstance(provider_physical_network, (tuple, list))
                        and provider_physical_network
                    ):
                        provider_physical_network = provider_physical_network[0]

                if not provider_physical_network:
                    raise vimconn.VimConnConflictException(
                        "missing information needed for underlay networks. Provide "
                        "'dataplane_physical_net' configuration at VIM or use the NS "
                        "instantiation parameter 'provider-network.physical-network'"
                        " for the VLD"
                    )

                if not self.config.get("multisegment_support"):
                    network_dict["provider:physical_network"] = (
                        provider_physical_network
                    )

                    if (
                        provider_network_profile
                        and "network-type" in provider_network_profile
                    ):
                        network_dict["provider:network_type"] = (
                            provider_network_profile["network-type"]
                        )
                    else:
                        network_dict["provider:network_type"] = self.config.get(
                            "dataplane_network_type", "vlan"
                        )

                    if vlan:
                        network_dict["provider:segmentation_id"] = vlan
                else:
                    # Multi-segment case
                    segment_list = []
                    segment1_dict = {
                        "provider:physical_network": "",
                        "provider:network_type": "vxlan",
                    }
                    segment_list.append(segment1_dict)
                    segment2_dict = {
                        "provider:physical_network": provider_physical_network,
                        "provider:network_type": "vlan",
                    }

                    if vlan:
                        segment2_dict["provider:segmentation_id"] = vlan
                    elif self.config.get("multisegment_vlan_range"):
                        vlanID = self._generate_multisegment_vlanID()
                        segment2_dict["provider:segmentation_id"] = vlanID

                    # else
                    #     raise vimconn.VimConnConflictException(
                    #         "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment
                    #         network")
                    segment_list.append(segment2_dict)
                    network_dict["segments"] = segment_list

                # VIO Specific Changes. It needs a concrete VLAN
                if self.vim_type == "VIO" and vlan is None:
                    if self.config.get("dataplane_net_vlan_range") is None:
                        raise vimconn.VimConnConflictException(
                            "You must provide 'dataplane_net_vlan_range' in format "
                            "[start_ID - end_ID] at VIM_config for creating underlay "
                            "networks"
                        )

                    network_dict["provider:segmentation_id"] = self._generate_vlanID()

            network_dict["shared"] = shared

            if self.config.get("disable_network_port_security"):
                network_dict["port_security_enabled"] = False

            if self.config.get("neutron_availability_zone_hints"):
                hints = self.config.get("neutron_availability_zone_hints")

                if isinstance(hints, str):
                    hints = [hints]

                network_dict["availability_zone_hints"] = hints

            new_net = self.neutron.create_network({"network": network_dict})
            # print new_net
            # create subnetwork, even if there is no profile

            if not ip_profile:
                ip_profile = {}

            if not ip_profile.get("subnet_address"):
                # Fake subnet is required
                subnet_rand = random.SystemRandom().randint(0, 255)
                ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)

            if "ip_version" not in ip_profile:
                ip_profile["ip_version"] = "IPv4"

            subnet = {
                "name": net_name + "-subnet",
                "network_id": new_net["network"]["id"],
                "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
                "cidr": ip_profile["subnet_address"],
            }

            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
            if ip_profile.get("gateway_address"):
                subnet["gateway_ip"] = ip_profile["gateway_address"]
            else:
                subnet["gateway_ip"] = None

            if ip_profile.get("dns_address"):
                subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")

            if "dhcp_enabled" in ip_profile:
                subnet["enable_dhcp"] = (
                    False
                    if ip_profile["dhcp_enabled"] == "false"
                    or ip_profile["dhcp_enabled"] is False
                    else True
                )

            if ip_profile.get("dhcp_start_address"):
                subnet["allocation_pools"] = []
                subnet["allocation_pools"].append(dict())
                subnet["allocation_pools"][0]["start"] = ip_profile[
                    "dhcp_start_address"
                ]

            if ip_profile.get("dhcp_count"):
                # parts = ip_profile["dhcp_start_address"].split(".")
                # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
                ip_int += ip_profile["dhcp_count"] - 1
                ip_str = str(netaddr.IPAddress(ip_int))
                subnet["allocation_pools"][0]["end"] = ip_str

            if (
                ip_profile.get("ipv6_address_mode")
                and ip_profile["ip_version"] != "IPv4"
            ):
                subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
                # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
                # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
                subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]

            # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
            self.neutron.create_subnet({"subnet": subnet})

            if net_type == "data" and self.config.get("multisegment_support"):
                if self.config.get("l2gw_support"):
                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())

                    for l2gw in l2gw_list:
                        l2gw_conn = {
                            "l2_gateway_id": l2gw["id"],
                            "network_id": new_net["network"]["id"],
                            "segmentation_id": str(vlanID),
                        }
                        new_l2gw_conn = self.neutron.create_l2_gateway_connection(
                            {"l2_gateway_connection": l2gw_conn}
                        )
                        created_items[
                            "l2gwconn:"
                            + str(new_l2gw_conn["l2_gateway_connection"]["id"])
                        ] = True

            return new_net["network"]["id"], created_items
        except Exception as e:
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except (neExceptions.ConnectionFailed, ConnectionError) as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )
                    self._format_exception(e2)
                except Exception as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )

            if new_net:
                self.neutron.delete_network(new_net["network"]["id"])

            self._format_exception(e)
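    # Illustrative sketch (not part of the connector; "conn" stands for a
    # vimconnector instance): a typical call, with placeholder values, that
    # creates an underlay network with a DHCP subnet:
    #
    #     net_id, created_items = conn.new_network(
    #         net_name="vld-data",
    #         net_type="data",
    #         ip_profile={
    #             "ip_version": "IPv4",
    #             "subnet_address": "10.0.100.0/24",
    #             "gateway_address": "10.0.100.1",
    #             "dhcp_enabled": True,
    #             "dhcp_start_address": "10.0.100.10",
    #             "dhcp_count": 50,
    #         },
    #     )
    #
    # created_items must be kept and later passed to delete_network() so that
    # auxiliary resources (e.g. l2gw connections) are cleaned up.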
    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM
        Filter_dict can be:
            name: network name
            id: network uuid
            shared: boolean
            tenant_id: tenant
            admin_state_up: boolean
            status: 'ACTIVE'
        Returns the network list of dictionaries
        """
        self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()
            filter_dict_os = filter_dict.copy()

            if self.api_version3 and "tenant_id" in filter_dict_os:
                # TODO check
                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

            net_dict = self.neutron.list_networks(**filter_dict_os)
            net_list = net_dict["networks"]
            self.__net_os2mano(net_list)

            return net_list
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
    def get_network(self, net_id):
        """Obtain details of network from VIM
        Returns the network information from a network id"""
        self.logger.debug(" Getting tenant network %s from VIM", net_id)
        filter_dict = {"id": net_id}
        net_list = self.get_network_list(filter_dict)

        if len(net_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Network '{}' not found".format(net_id)
            )
        elif len(net_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one network with this criteria"
            )

        net = net_list[0]
        subnets = []

        for subnet_id in net.get("subnets", ()):
            try:
                subnet = self.neutron.show_subnet(subnet_id)
            except Exception as e:
                self.logger.error(
                    "osconnector.get_network(): Error getting subnet %s %s"
                    % (net_id, str(e))
                )
                subnet = {"id": subnet_id, "fault": str(e)}

            subnets.append(subnet)

        net["subnets"] = subnets
        net["encapsulation"] = net.get("provider:network_type")
        net["encapsulation_type"] = net.get("provider:network_type")
        net["segmentation_id"] = net.get("provider:segmentation_id")
        net["encapsulation_id"] = net.get("provider:segmentation_id")

        return net
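    # Illustrative sketch (not part of the connector) of the enriched dict that
    # get_network() returns for a VLAN network (values are placeholders):
    #
    #     {
    #         "id": "5b8f...",
    #         "name": "vld-data",
    #         "type": "data",                  # added by __net_os2mano
    #         "provider:network_type": "vlan",
    #         "encapsulation": "vlan",
    #         "encapsulation_type": "vlan",
    #         "segmentation_id": 2017,
    #         "encapsulation_id": 2017,
    #         "subnets": [...],                # full show_subnet() results
    #     }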
    @catch_any_exception
    def delete_network(self, net_id, created_items=None):
        """
        Removes a tenant network from VIM and its associated elements
        :param net_id: VIM identifier of the network, provided by method new_network
        :param created_items: dictionary with extra items to be deleted. provided by method new_network
        Returns the network identifier or raises an exception upon error or when network is not found
        """
        self.logger.debug("Deleting network '%s' from VIM", net_id)

        if created_items is None:
            created_items = {}

        try:
            self._reload_connection()
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except (neExceptions.ConnectionFailed, ConnectionError) as e:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e).__name__, e
                        )
                    )
                    self._format_exception(e)
                except Exception as e:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e).__name__, e
                        )
                    )

            # delete VM ports attached to this network before the network
            ports = self.neutron.list_ports(network_id=net_id)

            for p in ports["ports"]:
                try:
                    self.neutron.delete_port(p["id"])
                except (neExceptions.ConnectionFailed, ConnectionError) as e:
                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))
                    # If there is a connection error, it raises.
                    self._format_exception(e)
                except Exception as e:
                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))

            self.neutron.delete_network(net_id)

            return net_id
        except (neExceptions.NetworkNotFoundClient, neExceptions.NotFound) as e:
            # If network to be deleted is not found, it does not raise.
            self.logger.warning(
                f"Error deleting network: {net_id} is not found, {str(e)}"
            )
    def refresh_nets_status(self, net_list):
        """Get the status of the networks
        Params: the list of network identifiers
        Returns a dictionary with:
            net_id:         #VIM id of this network
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, INACTIVE, DOWN (admin down),
                            #  BUILD (on building process)
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
        """
        net_dict = {}

        for net_id in net_list:
            net = {}

            try:
                net_vim = self.get_network(net_id)

                if net_vim["status"] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[net_vim["status"]]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim["status"]

                if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                    net["status"] = "DOWN"

                net["vim_info"] = self.serialize(net_vim)

                if net_vim.get("fault"):  # TODO
                    net["error_msg"] = str(net_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "DELETED"
                net["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "VIM_ERROR"
                net["error_msg"] = str(e)

            net_dict[net_id] = net

        return net_dict
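    # Illustrative sketch (not part of the connector; ids are placeholders):
    #
    #     conn.refresh_nets_status(["5b8f...", "gone-net"])
    #     # -> {"5b8f...": {"status": "ACTIVE", "vim_info": "{...}"},
    #     #     "gone-net": {"status": "DELETED", "error_msg": "Network ... not found"}}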
    def get_flavor(self, flavor_id=None, flavor_name=None):
        """Obtain flavor details from the VIM. Returns the flavor dict details"""
        self.logger.debug("Getting flavor '%s'", flavor_id)

        try:
            self._reload_connection()

            if flavor_id:
                flavor = self.nova.flavors.find(id=flavor_id)
            else:
                flavor = self.nova.flavors.find(name=flavor_name)

            return flavor.to_dict()
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that matches the flavor description
        Returns the flavor_id or raises a vimconnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closest flavor that provides same or more ram, vcpus
            and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a
            vimconnNotFoundException is raised
        """
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})

            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
                # if len(numas) > 1:
                #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
                # numa=numas[0]
                # numas = extended.get("numas")

            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                if epa:
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )

                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
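    # Illustrative sketch (not part of the connector): with exact matching (the
    # default), only a flavor with exactly these values is accepted; with
    # config "use_existing_flavors": true, a slightly larger flavor may be
    # picked instead:
    #
    #     flavor_id = conn.get_flavor_id_from_data(
    #         {"ram": 2048, "vcpus": 2, "disk": 10}
    #     )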
    @staticmethod
    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
        """Process resource quota and fill up extra_specs.
        Args:
            quota       (dict):     Keeping the quota of resources
            prefix      (str):      Prefix
            extra_specs (dict):     Dict to be filled to be used during flavor creation

        """
        if "limit" in quota:
            extra_specs["quota:" + prefix + "_limit"] = quota["limit"]

        if "reserve" in quota:
            extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]

        if "shares" in quota:
            extra_specs["quota:" + prefix + "_shares_level"] = "custom"
            extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
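    # Illustrative sketch (not part of the connector), with a placeholder quota:
    #
    #     extra_specs = {}
    #     vimconnector.process_resource_quota(
    #         {"limit": 1000, "reserve": 500, "shares": 2000}, "cpu", extra_specs
    #     )
    #     # extra_specs == {"quota:cpu_limit": 1000,
    #     #                 "quota:cpu_reservation": 500,
    #     #                 "quota:cpu_shares_level": "custom",
    #     #                 "quota:cpu_shares_share": 2000}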
    @staticmethod
    def process_numa_memory(
        numa: dict, node_id: Optional[int], extra_specs: dict
    ) -> None:
        """Set the memory in extra_specs.
        Args:
            numa        (dict):     A dictionary which includes numa information
            node_id     (int):      ID of numa node
            extra_specs (dict):     To be filled.

        """
        if not numa.get("memory"):
            return

        memory_mb = numa["memory"] * 1024
        memory = "hw:numa_mem.{}".format(node_id)
        extra_specs[memory] = int(memory_mb)

    @staticmethod
    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
        """Set the cpu in extra_specs.
        Args:
            numa        (dict):     A dictionary which includes numa information
            node_id     (int):      ID of numa node
            extra_specs (dict):     To be filled.

        """
        if not numa.get("vcpu"):
            return

        vcpu = numa["vcpu"]
        cpu = "hw:numa_cpus.{}".format(node_id)
        vcpu = ",".join(map(str, vcpu))
        extra_specs[cpu] = vcpu

    @staticmethod
    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has paired-threads.
        Args:
            numa        (dict):     A dictionary which includes numa information
            extra_specs (dict):     To be filled.

        Returns:
            threads     (int):      Number of virtual cpus

        """
        if not numa.get("paired-threads"):
            return

        # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
        threads = numa["paired-threads"] * 2
        extra_specs["hw:cpu_thread_policy"] = "require"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return threads

    @staticmethod
    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has cores.
        Args:
            numa        (dict):     A dictionary which includes numa information
            extra_specs (dict):     To be filled.

        Returns:
            cores       (int):      Number of virtual cpus

        """
        # cpu_thread_policy "isolate" implies that the host must not have an SMT
        # architecture, or a non-SMT architecture will be emulated
        if not numa.get("cores"):
            return

        cores = numa["cores"]
        extra_specs["hw:cpu_thread_policy"] = "isolate"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return cores

    @staticmethod
    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has threads.
        Args:
            numa        (dict):     A dictionary which includes numa information
            extra_specs (dict):     To be filled.

        Returns:
            threads     (int):      Number of virtual cpus

        """
        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
        if not numa.get("threads"):
            return

        threads = numa["threads"]
        extra_specs["hw:cpu_thread_policy"] = "prefer"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return threads
    def _process_numa_parameters_of_flavor(
        self, numas: List, extra_specs: Dict
    ) -> None:
        """Process numa parameters and fill up extra_specs.

        Args:
            numas       (list):     List of dictionaries which include numa information
            extra_specs (dict):     To be filled.

        """
        numa_nodes = len(numas)
        extra_specs["hw:numa_nodes"] = str(numa_nodes)
        cpu_cores, cpu_threads = 0, 0

        if self.vim_type == "VIO":
            self.process_vio_numa_nodes(numa_nodes, extra_specs)

        for numa in numas:
            if "id" in numa:
                node_id = numa["id"]
                # overwrite ram and vcpus
                # check if key "memory" is present in numa else use ram value at flavor
                self.process_numa_memory(numa, node_id, extra_specs)
                self.process_numa_vcpu(numa, node_id, extra_specs)

            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
            extra_specs["hw:cpu_sockets"] = str(numa_nodes)

            if "paired-threads" in numa:
                threads = self.process_numa_paired_threads(numa, extra_specs)
                cpu_threads += threads

            elif "cores" in numa:
                cores = self.process_numa_cores(numa, extra_specs)
                cpu_cores += cores

            elif "threads" in numa:
                threads = self.process_numa_threads(numa, extra_specs)
                cpu_threads += threads

        if cpu_cores:
            extra_specs["hw:cpu_cores"] = str(cpu_cores)
        if cpu_threads:
            extra_specs["hw:cpu_threads"] = str(cpu_threads)

    @staticmethod
    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
        """According to the number of numa nodes, update the extra_specs for VIO.

        Args:
            numa_nodes  (int):      Number of numa nodes
            extra_specs (dict):     Extra specs dict to be updated

        """
        # If there are several numas, we do not define specific affinity.
        extra_specs["vmware:latency_sensitivity_level"] = "high"
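    # Illustrative sketch (not part of the connector) of the numa ->
    # extra_specs expansion above, for a single-node descriptor with paired
    # threads:
    #
    #     extra_specs = {}
    #     conn._process_numa_parameters_of_flavor(
    #         [{"id": 0, "memory": 4, "vcpu": [0, 1], "paired-threads": 2}],
    #         extra_specs,
    #     )
    #     # extra_specs now contains, among others:
    #     #   "hw:numa_nodes": "1", "hw:numa_mem.0": 4096,
    #     #   "hw:numa_cpus.0": "0,1", "hw:cpu_thread_policy": "require",
    #     #   "hw:cpu_policy": "dedicated", "hw:cpu_threads": "4"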
    def _change_flavor_name(
        self, name: str, name_suffix: int, flavor_data: dict
    ) -> str:
        """Change the flavor name if the name already exists.

        Args:
            name        (str):      Flavor name to be checked
            name_suffix (int):      Suffix to be appended to name
            flavor_data (dict):     Flavor dict

        Returns:
            name        (str):      New flavor name to be used

        """
        # Get used names
        fl = self.nova.flavors.list()
        fl_names = [f.name for f in fl]

        while name in fl_names:
            name_suffix += 1
            name = flavor_data["name"] + "-" + str(name_suffix)

        return name
    def _process_extended_config_of_flavor(
        self, extended: dict, extra_specs: dict
    ) -> None:
        """Process the extended dict to fill up extra_specs.

        Args:
            extended    (dict):     Keeping the extra specification of flavor
            extra_specs (dict):     Dict to be filled to be used during flavor creation

        """
        quotas = {
            "cpu-quota": "cpu",
            "mem-quota": "memory",
            "vif-quota": "vif",
            "disk-io-quota": "disk_io",
        }

        page_sizes = {
            "LARGE": "large",
            "SMALL": "small",
            "SIZE_2MB": "2MB",
            "SIZE_1GB": "1GB",
            "PREFER_LARGE": "any",
        }

        policies = {
            "cpu-pinning-policy": "hw:cpu_policy",
            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
            "mem-policy": "hw:numa_mempolicy",
        }

        numas = extended.get("numas")
        if numas:
            self._process_numa_parameters_of_flavor(numas, extra_specs)

        for quota, item in quotas.items():
            if quota in extended.keys():
                self.process_resource_quota(extended.get(quota), item, extra_specs)

        # Set the mempage size as specified in the descriptor
        if extended.get("mempage-size"):
            if extended["mempage-size"] in page_sizes.keys():
                extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
            else:
                # Normally, validations in NBI should not allow this condition.
                self.logger.debug(
                    "Invalid mempage-size %s. Will be ignored",
                    extended.get("mempage-size"),
                )

        for policy, hw_policy in policies.items():
            if extended.get(policy):
                extra_specs[hw_policy] = extended[policy].lower()

    @staticmethod
    def _get_flavor_details(flavor_data: dict) -> Tuple:
        """Returns the details of flavor
        Args:
            flavor_data (dict):     Dictionary that includes required flavor details

        Returns:
            ram, vcpus, extra_specs, extended (tuple): Main items of required flavor

        """
        return (
            flavor_data.get("ram", 64),
            flavor_data.get("vcpus", 1),
            {},
            flavor_data.get("extended"),
        )
    @catch_any_exception
    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        If change_name_if_used is True, it will change the name in case of conflict,
        because name repetition is not supported.

        Args:
            flavor_data         (dict):     Flavor details to be processed
            change_name_if_used (bool):     Change name in case of conflict

        Returns:
            flavor_id           (str):      flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0
        name = flavor_data["name"]

        while retry < max_retries:
            retry += 1
            try:
                self._reload_connection()

                if change_name_if_used:
                    name = self._change_flavor_name(name, name_suffix, flavor_data)

                ram, vcpus, extra_specs, extended = self._get_flavor_details(
                    flavor_data
                )
                if extended:
                    self._process_extended_config_of_flavor(extended, extra_specs)

                # Create flavor
                new_flavor = self.nova.flavors.create(
                    name=name,
                    ram=ram,
                    vcpus=vcpus,
                    disk=flavor_data.get("disk", 0),
                    ephemeral=flavor_data.get("ephemeral", 0),
                    swap=flavor_data.get("swap", 0),
                    is_public=flavor_data.get("is_public", True),
                )

                # Add metadata
                if extra_specs:
                    new_flavor.set_keys(extra_specs)

                return new_flavor.id
            except nvExceptions.Conflict as e:
                if change_name_if_used and retry < max_retries:
                    continue

                self._format_exception(e)
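    # Illustrative sketch (not part of the connector), with placeholder values:
    #
    #     flavor_id = conn.new_flavor(
    #         {
    #             "name": "vnf-small",
    #             "ram": 2048,
    #             "vcpus": 2,
    #             "disk": 10,
    #             "extended": {"mempage-size": "LARGE",
    #                          "cpu-quota": {"limit": 1000}},
    #         }
    #     )
    #
    # On a name conflict, the creation is retried (up to 3 times) with "-1",
    # "-2", ... appended to the requested name.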
    @catch_any_exception
    def delete_flavor(self, flavor_id):
        """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
        try:
            self._reload_connection()
            self.nova.flavors.delete(flavor_id)

            return flavor_id
        except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
            # If flavor is not found, it does not raise.
            self.logger.warning(
                f"Error deleting flavor: {flavor_id} is not found, {str(e.message)}"
            )
    def new_image(self, image_dict):
        """
        Adds a tenant image to VIM. image_dict is a dictionary with:
            name: name
            disk_format: qcow2, vhd, vmdk, raw (by default), ...
            location: path or URI
            public: "yes" or "no"
            metadata: metadata of the image
        Returns the image_id
        """
        retry = 0
        max_retries = 3

        while retry < max_retries:
            retry += 1

            try:
                self._reload_connection()

                # determine format http://docs.openstack.org/developer/glance/formats.html
                if "disk_format" in image_dict:
                    disk_format = image_dict["disk_format"]
                else:  # autodiscover based on extension
                    if image_dict["location"].endswith(".qcow2"):
                        disk_format = "qcow2"
                    elif image_dict["location"].endswith(".vhd"):
                        disk_format = "vhd"
                    elif image_dict["location"].endswith(".vmdk"):
                        disk_format = "vmdk"
                    elif image_dict["location"].endswith(".vdi"):
                        disk_format = "vdi"
                    elif image_dict["location"].endswith(".iso"):
                        disk_format = "iso"
                    elif image_dict["location"].endswith(".aki"):
                        disk_format = "aki"
                    elif image_dict["location"].endswith(".ari"):
                        disk_format = "ari"
                    elif image_dict["location"].endswith(".ami"):
                        disk_format = "ami"
                    else:
                        disk_format = "raw"

                self.logger.debug(
                    "new_image: '%s' loading from '%s'",
                    image_dict["name"],
                    image_dict["location"],
                )
                if self.vim_type == "VIO":
                    container_format = "bare"
                    if "container_format" in image_dict:
                        container_format = image_dict["container_format"]

                    new_image = self.glance.images.create(
                        name=image_dict["name"],
                        container_format=container_format,
                        disk_format=disk_format,
                    )
                else:
                    new_image = self.glance.images.create(name=image_dict["name"])

                if image_dict["location"].startswith("http"):
                    # TODO there is not a method to direct download. It must be downloaded locally with requests
                    raise vimconn.VimConnNotImplemented("Cannot create image from URL")
                else:  # local path
                    with open(image_dict["location"]) as fimage:
                        self.glance.images.upload(new_image.id, fimage)
                        # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                        #    image_dict.get("public","yes")=="yes",
                        #    container_format="bare", data=fimage, disk_format=disk_format)

                metadata_to_load = image_dict.get("metadata")

                # TODO location is a reserved word for current openstack versions. fixed for VIO please check
                #  for openstack
                if self.vim_type == "VIO":
                    metadata_to_load["upload_location"] = image_dict["location"]
                else:
                    metadata_to_load["location"] = image_dict["location"]

                self.glance.images.update(new_image.id, **metadata_to_load)

                return new_image.id
            except (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
            ) as e:
1772 if retry < max_retries:  # retry transient connection errors; raise once exhausted
1773 continue
1775 self._format_exception(e)
1776 except IOError as e: # can not open the file
1777 raise vimconn.VimConnConnectionException(
1778 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1779 http_code=vimconn.HTTP_Bad_Request,
1780 )
1781 except Exception as e:
1782 self._format_exception(e)
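# Illustrative usage sketch, not part of the module: "conn" stands for an
# authenticated vimconnector instance and all values are hypothetical.
#
#   image_id = conn.new_image(
#       {
#           "name": "cirros-0.5",                    # hypothetical image name
#           "disk_format": "qcow2",                  # else inferred from the extension
#           "location": "/tmp/cirros-0.5.qcow2",     # must be a local path, not a URL
#           "metadata": {"os_family": "linux"},      # pushed to Glance via images.update
#       }
#   )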
1784 @catch_any_exception
1785 def delete_image(self, image_id):
1786 """Deletes a tenant image from openstack VIM. Returns the old id"""
1787 try:
1788 self._reload_connection()
1789 self.glance.images.delete(image_id)
1791 return image_id
1792 except gl1Exceptions.NotFound as e:
1793 # If image is not found, it does not raise.
1794 self.logger.warning(
1795 f"Error deleting image: {image_id} is not found, {str(e)}"
1796 )
1798 @catch_any_exception
1799 def get_image_id_from_path(self, path):
1800 """Get the image id from image path in the VIM database. Returns the image_id"""
1801 self._reload_connection()
1802 images = self.glance.images.list()
1804 for image in images:
1805 if image.metadata.get("location") == path:
1806 return image.id
1808 raise vimconn.VimConnNotFoundException(
1809 "image with location '{}' not found".format(path)
1810 )
1812 def get_image_list(self, filter_dict={}):
1813 """Obtain tenant images from VIM
1814 Filter_dict can be:
1815 id: image id
1816 name: image name
1817 checksum: image checksum
1818 Returns the image list of dictionaries:
1819 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1820 List can be empty
1821 """
1822 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1823 try:
1824 self._reload_connection()
1825 # filter_dict_os = filter_dict.copy()
1826 # First we filter by the available filter fields: name, id. The others are removed.
1827 image_list = self.glance.images.list()
1828 filtered_list = []
1830 for image in image_list:
1831 try:
1832 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1833 continue
1835 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1836 continue
1838 if (
1839 filter_dict.get("checksum")
1840 and image["checksum"] != filter_dict["checksum"]
1841 ):
1842 continue
1844 filtered_list.append(image.copy())
1845 except gl1Exceptions.HTTPNotFound:
1846 pass
1848 return filtered_list
1850 except (
1851 ksExceptions.ClientException,
1852 nvExceptions.ClientException,
1853 gl1Exceptions.CommunicationError,
1854 ConnectionError,
1855 ) as e:
1856 self._format_exception(e)
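# Illustrative sketch, not part of the module: filtering happens client-side over
# the full Glance listing, so the supported keys can be freely combined. "conn"
# is an authenticated vimconnector instance and the values are hypothetical.
#
#   images = conn.get_image_list({"name": "cirros-0.5"})
#   images = conn.get_image_list({"id": "<glance image uuid>", "checksum": "<md5>"})
#   # -> [] when nothing matches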
1858 def __wait_for_vm(self, vm_id, status):
1859 """wait until vm is in the desired status and return True.
1860 If the VM gets in ERROR status, return false.
1861 If the timeout is reached generate an exception"""
1862 elapsed_time = 0
1863 while elapsed_time < server_timeout:
1864 vm_status = self.nova.servers.get(vm_id).status
1866 if vm_status == status:
1867 return True
1869 if vm_status == "ERROR":
1870 return False
1872 time.sleep(5)
1873 elapsed_time += 5
1875 # if we exceeded the timeout rollback
1876 if elapsed_time >= server_timeout:
1877 raise vimconn.VimConnException(
1878 "Timeout waiting for instance " + vm_id + " to get " + status,
1879 http_code=vimconn.HTTP_Request_Timeout,
1880 )
1882 def _get_openstack_availablity_zones(self):
1883 """
1884 Get from openstack availability zones available
1885 :return:
1886 """
1887 try:
1888 openstack_availability_zone = self.nova.availability_zones.list()
1889 openstack_availability_zone = [
1890 str(zone.zoneName)
1891 for zone in openstack_availability_zone
1892 if zone.zoneName != "internal"
1893 ]
1895 return openstack_availability_zone
1896 except Exception:
1897 return None
1899 def _set_availablity_zones(self):
1900 """
1901 Set the VIM availability zone.
1902 :return:
1903 """
1904 if "availability_zone" in self.config:
1905 vim_availability_zones = self.config.get("availability_zone")
1907 if isinstance(vim_availability_zones, str):
1908 self.availability_zone = [vim_availability_zones]
1909 elif isinstance(vim_availability_zones, list):
1910 self.availability_zone = vim_availability_zones
1911 else:
1912 self.availability_zone = self._get_openstack_availablity_zones()
1913 if "storage_availability_zone" in self.config:
1914 self.storage_availability_zone = self.config.get(
1915 "storage_availability_zone"
1916 )
1918 def _get_vm_availability_zone(
1919 self, availability_zone_index, availability_zone_list
1920 ):
1921 """
1922 Return the availability zone to be used by the created VM.
1923 :return: The VIM availability zone to be used or None
1924 """
1925 if availability_zone_index is None:
1926 if not self.config.get("availability_zone"):
1927 return None
1928 elif isinstance(self.config.get("availability_zone"), str):
1929 return self.config["availability_zone"]
1930 else:
1931 # TODO consider using a different parameter at config for default AV and AV list match
1932 return self.config["availability_zone"][0]
1934 vim_availability_zones = self.availability_zone
1935 # check if the VIM offers enough availability zones for those described in the VNFD
1936 if vim_availability_zones and len(availability_zone_list) <= len(
1937 vim_availability_zones
1938 ):
1939 # check whether all NFV AZ names match VIM AZ names
1940 match_by_index = False
1941 for av in availability_zone_list:
1942 if av not in vim_availability_zones:
1943 match_by_index = True
1944 break
1946 if match_by_index:
1947 return vim_availability_zones[availability_zone_index]
1948 else:
1949 return availability_zone_list[availability_zone_index]
1950 else:
1951 raise vimconn.VimConnConflictException(
1952 "No enough availability zones at VIM for this deployment"
1953 )
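# Worked example of the selection logic above (zone names are hypothetical):
# with VIM zones ["nova-a", "nova-b"] and VNFD zones ["zone1", "zone2"], "zone1"
# is not a VIM zone name, so match_by_index applies and index 0 yields "nova-a".
# With VNFD zones ["nova-b", "nova-a"] every name matches, so index 0 yields
# "nova-b" directly.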
1955 def _prepare_port_dict_security_groups(
1956 self, net: dict, port_dict: dict, security_group_name=None
1957 ) -> None:
1958 """Fill up the security_groups in the port_dict.
1960 Args:
1961 net (dict): Network details
1962 port_dict (dict): Port details
1964 """
1965 if (
1966 self.config.get("security_groups")
1967 and net.get("port_security") is not False
1968 and not self.config.get("no_port_security_extension")
1969 ):
1970 if not self.security_groups_id:
1971 self._get_ids_from_name()
1973 port_dict["security_groups"] = self.security_groups_id
1975 if security_group_name is not None:
1976 self._get_ids_from_name(security_group_name)
1977 port_dict["security_groups"] = self.security_groups_id
1979 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1980 """Fill up the network binding depending on network type in the port_dict.
1982 Args:
1983 net (dict): Network details
1984 port_dict (dict): Port details
1986 """
1987 if not net.get("type"):
1988 raise vimconn.VimConnException("Type is missing in the network details.")
1990 if net["type"] == "virtual":
1991 pass
1993 # For VF
1994 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1995 port_dict["binding:vnic_type"] = "direct"
1997 # VIO specific Changes
1998 if self.vim_type == "VIO":
1999 # Need to create port with port_security_enabled = False and no-security-groups
2000 port_dict["port_security_enabled"] = False
2001 port_dict["provider_security_groups"] = []
2002 port_dict["security_groups"] = []
2004 else:
2005 # For PT PCI-PASSTHROUGH
2006 port_dict["binding:vnic_type"] = "direct-physical"
2008 @staticmethod
2009 def _set_fixed_ip(new_port: dict, net: dict) -> None:
2010 """Set the "ip" parameter in net dictionary.
2012 Args:
2013 new_port (dict): New created port
2014 net (dict): Network details
2016 """
2017 fixed_ips = new_port["port"].get("fixed_ips")
2019 if fixed_ips:
2020 net["ip"] = fixed_ips[0].get("ip_address")
2021 else:
2022 net["ip"] = None
2024 @staticmethod
2025 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
2026 """Fill up the mac_address and fixed_ips in port_dict.
2028 Args:
2029 net (dict): Network details
2030 port_dict (dict): Port details
2032 """
2033 if net.get("mac_address"):
2034 port_dict["mac_address"] = net["mac_address"]
2036 ip_dual_list = []
2037 if ip_list := net.get("ip_address"):
2038 if not isinstance(ip_list, list):
2039 ip_list = [ip_list]
2040 for ip in ip_list:
2041 ip_dict = {"ip_address": ip}
2042 ip_dual_list.append(ip_dict)
2043 port_dict["fixed_ips"] = ip_dual_list
2044 # TODO add "subnet_id": <subnet_id>
2046 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
2047 """Create new port using neutron.
2049 Args:
2050 port_dict (dict): Port details
2051 created_items (dict): All created items
2052 net (dict): Network details
2054 Returns:
2055 new_port (dict): New created port
2057 """
2058 new_port = self.neutron.create_port({"port": port_dict})
2059 created_items["port:" + str(new_port["port"]["id"])] = True
2060 net["mac_address"] = new_port["port"]["mac_address"]
2061 net["vim_id"] = new_port["port"]["id"]
2063 return new_port
2065 def _create_port(
2066 self, net: dict, name: str, created_items: dict, security_group_name=None
2067 ) -> Tuple[dict, dict]:
2068 """Create port using net details.
2070 Args:
2071 net (dict): Network details
2072 name (str): Name to be used as network name if net dict does not include name
2073 created_items (dict): All created items
2074 security_group_name (str): Optional security group name for the port
2075 Returns:
2076 new_port, port: Newly created port and its summary dictionary
2078 """
2080 port_dict = {
2081 "network_id": net["net_id"],
2082 "name": net.get("name"),
2083 "admin_state_up": True,
2084 }
2086 if not port_dict["name"]:
2087 port_dict["name"] = name
2089 self._prepare_port_dict_security_groups(net, port_dict, security_group_name)
2091 self._prepare_port_dict_binding(net, port_dict)
2093 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
2095 new_port = self._create_new_port(port_dict, created_items, net)
2097 vimconnector._set_fixed_ip(new_port, net)
2099 port = {"port-id": new_port["port"]["id"]}
2101 if float(self.nova.api_version.get_string()) >= 2.32:
2102 port["tag"] = new_port["port"]["name"]
2104 return new_port, port
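# Sketch of the net dict this helper expects (hypothetical values). The "tag"
# key is only added when the Nova API microversion is 2.32 or later.
#
#   created_items = {}
#   net = {"net_id": "<neutron network uuid>", "name": "mgmt-iface",
#          "type": "virtual", "use": "mgmt"}
#   new_port, port = self._create_port(net, "vm0-iface0", created_items)
#   # port -> {"port-id": "<neutron port uuid>", "tag": "mgmt-iface"}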
2106 def _prepare_network_for_vminstance(
2107 self,
2108 name: str,
2109 net_list: list,
2110 created_items: dict,
2111 net_list_vim: list,
2112 external_network: list,
2113 no_secured_ports: list,
2114 security_group_name=None,
2115 ) -> None:
2116 """Create port and fill up net dictionary for new VM instance creation.
2118 Args:
2119 name (str): Name of network
2120 net_list (list): List of networks
2121 created_items (dict): All created items belonging to a VM
2122 net_list_vim (list): List of ports
2123 external_network (list): List of external-networks
2124 no_secured_ports (list): Port security disabled ports
2125 """
2127 self._reload_connection()
2129 for net in net_list:
2130 # Skip non-connected iface
2131 if not net.get("net_id"):
2132 continue
2134 new_port, port = self._create_port(
2135 net, name, created_items, security_group_name
2136 )
2138 net_list_vim.append(port)
2140 if net.get("floating_ip", False):
2141 net["exit_on_floating_ip_error"] = True
2142 external_network.append(net)
2144 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2145 net["exit_on_floating_ip_error"] = False
2146 external_network.append(net)
2147 net["floating_ip"] = self.config.get("use_floating_ip")
2149 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2150 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2151 if net.get("port_security") is False and not self.config.get(
2152 "no_port_security_extension"
2153 ):
2154 no_secured_ports.append(
2155 (
2156 new_port["port"]["id"],
2157 net.get("port_security_disable_strategy"),
2158 )
2159 )
2161 def _prepare_persistent_root_volumes(
2162 self,
2163 name: str,
2164 storage_av_zone: list,
2165 disk: dict,
2166 base_disk_index: int,
2167 block_device_mapping: dict,
2168 existing_vim_volumes: list,
2169 created_items: dict,
2170 ) -> Optional[str]:
2171 """Prepare persistent root volumes for new VM instance.
2173 Args:
2174 name (str): Name of VM instance
2175 storage_av_zone (list): Storage availability zone
2176 disk (dict): Disk details
2177 base_disk_index (int): Disk index
2178 block_device_mapping (dict): Block device details
2179 existing_vim_volumes (list): Existing disk details
2180 created_items (dict): All created items belonging to the VM
2182 Returns:
2183 boot_volume_id (str): ID of boot volume
2185 """
2186 self.logger.debug("Preparing root persistent volumes")
2187 # Disk may include only vim_volume_id or only vim_id.
2188 # Use existing persistent root volume finding with volume_id or vim_id
2189 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2190 if disk.get(key_id):
2191 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2192 existing_vim_volumes.append({"id": disk[key_id]})
2193 else:
2194 # Create persistent root volume
2195 volume = self.cinder.volumes.create(
2196 size=disk["size"],
2197 name=name + "vd" + chr(base_disk_index),
2198 imageRef=disk["image_id"],
2199 # Make sure volume is in the same AZ as the VM to be attached to
2200 availability_zone=storage_av_zone,
2201 )
2202 boot_volume_id = volume.id
2203 self.update_block_device_mapping(
2204 volume=volume,
2205 block_device_mapping=block_device_mapping,
2206 base_disk_index=base_disk_index,
2207 disk=disk,
2208 created_items=created_items,
2209 )
2211 return boot_volume_id
2213 @staticmethod
2214 def update_block_device_mapping(
2215 volume: object,
2216 block_device_mapping: dict,
2217 base_disk_index: int,
2218 disk: dict,
2219 created_items: dict,
2220 ) -> None:
2221 """Add volume information to block device mapping dict.
2222 Args:
2223 volume (object): Created volume object
2224 block_device_mapping (dict): Block device details
2225 base_disk_index (int): Disk index
2226 disk (dict): Disk details
2227 created_items (dict): All created items belonging to the VM
2228 """
2229 if not volume:
2230 raise vimconn.VimConnException("Volume is empty.")
2232 if not hasattr(volume, "id"):
2233 raise vimconn.VimConnException(
2234 "Created volume is not valid, does not have id attribute."
2235 )
2237 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2238 if disk.get("multiattach"): # multiattach volumes do not belong to VDUs
2239 return
2240 volume_txt = "volume:" + str(volume.id)
2241 if disk.get("keep"):
2242 volume_txt += ":keep"
2243 created_items[volume_txt] = True
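# Example of the bookkeeping above (hypothetical id): a disk with
# {"size": 10, "keep": True} mapped as the second device produces
#   block_device_mapping["vdb"] = "f81d4fae-7dec-11d0-a765-00a0c91e6bf6"
#   created_items["volume:f81d4fae-7dec-11d0-a765-00a0c91e6bf6:keep"] = True
# whereas a multiattach disk only updates block_device_mapping.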
2245 @catch_any_exception
2246 def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:
2247 self.logger.debug("Creating new shared volume")
2248 availability_zone = (
2249 self.storage_availability_zone
2250 if self.storage_availability_zone
2251 else self.vm_av_zone
2252 )
2253 volume = self.cinder.volumes.create(
2254 size=shared_volume_data["size"],
2255 name=shared_volume_data["name"],
2256 volume_type="multiattach",
2257 availability_zone=availability_zone,
2258 )
2259 return volume.name, volume.id
2261 def _prepare_shared_volumes(
2262 self,
2263 name: str,
2264 disk: dict,
2265 base_disk_index: int,
2266 block_device_mapping: dict,
2267 existing_vim_volumes: list,
2268 created_items: dict,
2269 ):
2270 self.logger.debug("Preparing shared volumes")
2271 volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
2272 if volumes.get(disk["name"]):
2273 sv_id = volumes[disk["name"]]
2274 max_retries = 3
2275 vol_status = ""
2276 # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
2277 while max_retries:
2278 max_retries -= 1
2279 volume = self.cinder.volumes.get(sv_id)
2280 vol_status = volume.status
2281 if volume.status not in ("in-use", "available"):
2282 time.sleep(5)
2283 continue
2284 self.update_block_device_mapping(
2285 volume=volume,
2286 block_device_mapping=block_device_mapping,
2287 base_disk_index=base_disk_index,
2288 disk=disk,
2289 created_items=created_items,
2290 )
2291 return
2292 raise vimconn.VimConnException(
2293 "Shared volume is not prepared, status is: {}".format(vol_status),
2294 http_code=vimconn.HTTP_Internal_Server_Error,
2295 )
2297 def _prepare_non_root_persistent_volumes(
2298 self,
2299 name: str,
2300 disk: dict,
2301 storage_av_zone: list,
2302 block_device_mapping: dict,
2303 base_disk_index: int,
2304 existing_vim_volumes: list,
2305 created_items: dict,
2306 ) -> None:
2307 """Prepare persistent volumes for new VM instance.
2309 Args:
2310 name (str): Name of VM instance
2311 disk (dict): Disk details
2312 storage_av_zone (list): Storage availability zone
2313 block_device_mapping (dict): Block device details
2314 base_disk_index (int): Disk index
2315 existing_vim_volumes (list): Existing disk details
2316 created_items (dict): All created items belonging to the VM
2317 """
2318 # Non-root persistent volumes
2319 # Disk may include only vim_volume_id or only vim_id.
2320 self.logger.debug("Preparing non-root persistent volumes")
2321 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2322 if disk.get(key_id):
2323 # Use existing persistent volume
2324 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2325 existing_vim_volumes.append({"id": disk[key_id]})
2326 else:
2327 volume_name = f"{name}vd{chr(base_disk_index)}"
2328 volume = self.cinder.volumes.create(
2329 size=disk["size"],
2330 name=volume_name,
2331 # Make sure volume is in the same AZ as the VM to be attached to
2332 availability_zone=storage_av_zone,
2333 )
2334 self.update_block_device_mapping(
2335 volume=volume,
2336 block_device_mapping=block_device_mapping,
2337 base_disk_index=base_disk_index,
2338 disk=disk,
2339 created_items=created_items,
2340 )
2342 def _wait_for_created_volumes_availability(
2343 self, elapsed_time: int, created_items: dict
2344 ) -> Optional[int]:
2345 """Wait till created volumes become available.
2347 Args:
2348 elapsed_time (int): Passed time while waiting
2349 created_items (dict): All created items belonging to the VM
2351 Returns:
2352 elapsed_time (int): Time spent while waiting
2354 """
2355 self.logger.debug("Waiting for all created volumes to become available")
2356 while elapsed_time < volume_timeout:
2357 self.logger.debug("Checking disk availability for created volumes")
2358 for created_item in created_items:
2359 v, volume_id = (
2360 created_item.split(":")[0],
2361 created_item.split(":")[1],
2362 )
2363 if v == "volume":
2364 self.logger.debug(f"Checking volume: {volume_id}")
2365 volume = self.cinder.volumes.get(volume_id)
2366 if (
2367 volume.volume_type == "multiattach"
2368 and volume.status == "in-use"
2369 ):
2370 return elapsed_time
2371 elif volume.status != "available":
2372 break
2373 else:
2374 # All ready: break from while
2375 break
2377 time.sleep(5)
2378 elapsed_time += 5
2380 return elapsed_time
2382 def _wait_for_existing_volumes_availability(
2383 self, elapsed_time: int, existing_vim_volumes: list
2384 ) -> Optional[int]:
2385 """Wait till existing volumes become available.
2387 Args:
2388 elapsed_time (int): Passed time while waiting
2389 existing_vim_volumes (list): Existing volume details
2391 Returns:
2392 elapsed_time (int): Time spent while waiting
2394 """
2396 self.logger.debug("Waiting for all existing volumes to become available")
2397 while elapsed_time < volume_timeout:
2398 self.logger.debug("Checking disk availability for existing volumes")
2399 for volume in existing_vim_volumes:
2400 self.logger.debug(f"Checking existing volume: {volume}")
2401 v = self.cinder.volumes.get(volume["id"])
2402 if v.volume_type == "multiattach" and v.status == "in-use":
2403 return elapsed_time
2404 elif v.status != "available":
2405 break
2406 else: # all ready: break from while
2407 break
2409 time.sleep(5)
2410 elapsed_time += 5
2412 return elapsed_time
2414 def _prepare_disk_for_vminstance(
2415 self,
2416 name: str,
2417 existing_vim_volumes: list,
2418 created_items: dict,
2419 storage_av_zone: list,
2420 block_device_mapping: dict,
2421 disk_list: list = None,
2422 ) -> None:
2423 """Prepare all volumes for new VM instance.
2425 Args:
2426 name (str): Name of Instance
2427 existing_vim_volumes (list): List of existing volumes
2428 created_items (dict): All created items belonging to the VM
2429 storage_av_zone (list): Storage availability zone
2430 block_device_mapping (dict): Block devices to be attached to VM
2431 disk_list (list): List of disks
2433 """
2434 # Create additional volumes in case these are present in disk_list
2435 self.logger.debug("Preparing disks for VM instances")
2436 base_disk_index = ord("b")
2437 boot_volume_id = None
2438 elapsed_time = 0
2439 for disk in disk_list:
2440 self.logger.debug(f"Disk: {disk}")
2441 if "image_id" in disk:
2442 # Root persistent volume
2443 base_disk_index = ord("a")
2444 boot_volume_id = self._prepare_persistent_root_volumes(
2445 name=name,
2446 storage_av_zone=storage_av_zone,
2447 disk=disk,
2448 base_disk_index=base_disk_index,
2449 block_device_mapping=block_device_mapping,
2450 existing_vim_volumes=existing_vim_volumes,
2451 created_items=created_items,
2452 )
2453 elif disk.get("multiattach"):
2454 self._prepare_shared_volumes(
2455 name=name,
2456 disk=disk,
2457 base_disk_index=base_disk_index,
2458 block_device_mapping=block_device_mapping,
2459 existing_vim_volumes=existing_vim_volumes,
2460 created_items=created_items,
2461 )
2462 else:
2463 # Non-root persistent volume
2464 self._prepare_non_root_persistent_volumes(
2465 name=name,
2466 disk=disk,
2467 storage_av_zone=storage_av_zone,
2468 block_device_mapping=block_device_mapping,
2469 base_disk_index=base_disk_index,
2470 existing_vim_volumes=existing_vim_volumes,
2471 created_items=created_items,
2472 )
2473 base_disk_index += 1
2475 # Wait until created volumes are with status available
2476 elapsed_time = self._wait_for_created_volumes_availability(
2477 elapsed_time, created_items
2478 )
2479 # Wait until existing volumes in vim are with status available
2480 elapsed_time = self._wait_for_existing_volumes_availability(
2481 elapsed_time, existing_vim_volumes
2482 )
2483 # If we exceeded the timeout rollback
2484 if elapsed_time >= volume_timeout:
2485 raise vimconn.VimConnException(
2486 "Timeout creating volumes for instance " + name,
2487 http_code=vimconn.HTTP_Request_Timeout,
2488 )
2489 if boot_volume_id:
2490 self.cinder.volumes.set_bootable(boot_volume_id, True)
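# Illustrative disk_list sketch (hypothetical ids): the entry carrying
# "image_id" becomes the bootable root volume "vda"; the rest follow as
# "vdb", "vdc", and so on.
#
#   disk_list = [
#       {"image_id": "<glance image uuid>", "size": 10},  # root persistent volume
#       {"size": 20},                                     # new empty volume
#       {"vim_volume_id": "<existing cinder uuid>"},      # reuse an existing volume
#   ]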
2492 def _find_the_external_network_for_floating_ip(self):
2493 """Get the external network ip in order to create floating IP.
2495 Returns:
2496 pool_id (str): External network pool ID
2498 """
2500 # Find the external network
2501 external_nets = list()
2503 for net in self.neutron.list_networks()["networks"]:
2504 if net["router:external"]:
2505 external_nets.append(net)
2507 if len(external_nets) == 0:
2508 raise vimconn.VimConnException(
2509 "Cannot create floating_ip automatically since "
2510 "no external network is present",
2511 http_code=vimconn.HTTP_Conflict,
2512 )
2514 if len(external_nets) > 1:
2515 raise vimconn.VimConnException(
2516 "Cannot create floating_ip automatically since "
2517 "multiple external networks are present",
2518 http_code=vimconn.HTTP_Conflict,
2519 )
2521 # Pool ID
2522 return external_nets[0].get("id")
2524 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2525 """Trigger neutron to create a new floating IP using external network ID.
2527 Args:
2528 param (dict): Input parameters to create a floating IP
2529 created_items (dict): All created items belonging to the new VM instance
2531 Raises:
2533 VimConnException
2534 """
2535 try:
2536 self.logger.debug("Creating floating IP")
2537 new_floating_ip = self.neutron.create_floatingip(param)
2538 free_floating_ip = new_floating_ip["floatingip"]["id"]
2539 created_items["floating_ip:" + str(free_floating_ip)] = True
2541 except Exception as e:
2542 raise vimconn.VimConnException(
2543 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2544 http_code=vimconn.HTTP_Conflict,
2545 )
2547 def _create_floating_ip(
2548 self, floating_network: dict, server: object, created_items: dict
2549 ) -> None:
2550 """Get the available Pool ID and create a new floating IP.
2552 Args:
2553 floating_network (dict): Dict including external network ID
2554 server (object): Server object
2555 created_items (dict): All created items belonging to the new VM instance
2557 """
2559 # Pool_id is available
2560 if (
2561 isinstance(floating_network["floating_ip"], str)
2562 and floating_network["floating_ip"].lower() != "true"
2563 ):
2564 pool_id = floating_network["floating_ip"]
2566 # Find the Pool_id
2567 else:
2568 pool_id = self._find_the_external_network_for_floating_ip()
2570 param = {
2571 "floatingip": {
2572 "floating_network_id": pool_id,
2573 "tenant_id": server.tenant_id,
2574 }
2575 }
2577 self._neutron_create_float_ip(param, created_items)
2579 def _find_floating_ip(
2580 self,
2581 server: object,
2582 floating_ips: list,
2583 floating_network: dict,
2584 ) -> Optional[str]:
2585 """Find the available free floating IPs if there are.
2587 Args:
2588 server (object): Server object
2589 floating_ips (list): List of floating IPs
2590 floating_network (dict): Details of floating network such as ID
2592 Returns:
2593 free_floating_ip (str): ID of a free floating IP
2595 """
2596 for fip in floating_ips:
2597 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2598 continue
2600 if isinstance(floating_network["floating_ip"], str):
2601 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2602 continue
2604 return fip["id"]
2606 def _assign_floating_ip(
2607 self, free_floating_ip: str, floating_network: dict
2608 ) -> Dict:
2609 """Assign the free floating ip address to port.
2611 Args:
2612 free_floating_ip (str): ID of the floating IP to be assigned
2613 floating_network (dict): Floating network details, including the port vim_id
2615 Returns:
2616 fip (dict): Floating IP details
2618 """
2619 # The vim_id key contains the neutron.port_id
2620 self.neutron.update_floatingip(
2621 free_floating_ip,
2622 {"floatingip": {"port_id": floating_network["vim_id"]}},
2623 )
2624 # To handle race conditions, wait 5 seconds and then verify the IP was not re-assigned to another VM
2625 time.sleep(5)
2627 return self.neutron.show_floatingip(free_floating_ip)
2629 def _get_free_floating_ip(
2630 self, server: object, floating_network: dict
2631 ) -> Optional[str]:
2632 """Get the free floating IP address.
2634 Args:
2635 server (object): Server Object
2636 floating_network (dict): Floating network details
2638 Returns:
2639 free_floating_ip (str): ID of a free floating IP
2641 """
2643 floating_ips = self.neutron.list_floatingips().get("floatingips", [])  # list, so it can be shuffled
2645 # Randomize
2646 random.shuffle(floating_ips)
2648 return self._find_floating_ip(server, floating_ips, floating_network)
2650 def _prepare_external_network_for_vminstance(
2651 self,
2652 external_network: list,
2653 server: object,
2654 created_items: dict,
2655 vm_start_time: float,
2656 ) -> None:
2657 """Assign floating IP address for VM instance.
2659 Args:
2660 external_network (list): List of external networks requiring a floating IP
2661 server (object): Server Object
2662 created_items (dict): All created items belonging to the new VM instance
2663 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2665 Raises:
2666 VimConnException
2668 """
2669 for floating_network in external_network:
2670 try:
2671 assigned = False
2672 floating_ip_retries = 3
2673 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2674 # several times
2675 while not assigned:
2676 free_floating_ip = self._get_free_floating_ip(
2677 server, floating_network
2678 )
2680 if not free_floating_ip:
2681 self._create_floating_ip(
2682 floating_network, server, created_items
2683 )
2685 try:
2686 # For race condition ensure not already assigned
2687 fip = self.neutron.show_floatingip(free_floating_ip)
2689 if fip["floatingip"].get("port_id"):
2690 continue
2692 # Assign floating ip
2693 fip = self._assign_floating_ip(
2694 free_floating_ip, floating_network
2695 )
2697 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2698 self.logger.warning(
2699 "floating_ip {} re-assigned to other port".format(
2700 free_floating_ip
2701 )
2702 )
2703 continue
2705 self.logger.debug(
2706 "Assigned floating_ip {} to VM {}".format(
2707 free_floating_ip, server.id
2708 )
2709 )
2711 assigned = True
2713 except Exception as e:
2714 # OpenStack needs some time after VM creation to assign an IP, so retry if it fails
2715 vm_status = self.nova.servers.get(server.id).status
2717 if vm_status not in ("ACTIVE", "ERROR"):
2718 if time.time() - vm_start_time < server_timeout:
2719 time.sleep(5)
2720 continue
2721 elif floating_ip_retries > 0:
2722 floating_ip_retries -= 1
2723 continue
2725 raise vimconn.VimConnException(
2726 "Cannot create floating_ip: {} {}".format(
2727 type(e).__name__, e
2728 ),
2729 http_code=vimconn.HTTP_Conflict,
2730 )
2732 except Exception as e:
2733 if not floating_network["exit_on_floating_ip_error"]:
2734 self.logger.error("Cannot create floating_ip. %s", str(e))
2735 continue
2737 raise
2739 def _update_port_security_for_vminstance(
2740 self,
2741 no_secured_ports: list,
2742 server: object,
2743 ) -> None:
2744 """Updates the port security according to no_secured_ports list.
2746 Args:
2747 no_secured_ports (list): List of ports whose security will be disabled
2748 server (object): Server Object
2750 Raises:
2751 VimConnException
2753 """
2754 # Wait until the VM is active and then disable the port-security
2755 if no_secured_ports:
2756 self.__wait_for_vm(server.id, "ACTIVE")
2758 for port in no_secured_ports:
2759 port_update = {
2760 "port": {"port_security_enabled": False, "security_groups": None}
2761 }
2763 if port[1] == "allow-address-pairs":
2764 port_update = {
2765 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2766 }
2768 try:
2769 self.neutron.update_port(port[0], port_update)
2771 except Exception:
2772 raise vimconn.VimConnException(
2773 "It was not possible to disable port security for port {}".format(
2774 port[0]
2775 )
2776 )
2778 def new_vminstance(
2779 self,
2780 name: str,
2781 description: str,
2782 start: bool,
2783 image_id: str,
2784 flavor_id: str,
2785 affinity_group_list: list,
2786 net_list: list,
2787 cloud_config=None,
2788 disk_list=None,
2789 availability_zone_index=None,
2790 availability_zone_list=None,
2791 security_group_name=None,
2792 ) -> tuple:
2793 """Adds a VM instance to VIM.
2795 Args:
2796 name (str): name of VM
2797 description (str): description
2798 start (bool): indicates if VM must start or boot in pause mode. Ignored
2799 image_id (str) image uuid
2800 flavor_id (str) flavor uuid
2801 affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignored if empty.
2802 net_list (list): list of interfaces, each one is a dictionary with:
2803 name: name of network
2804 net_id: network uuid to connect
2805 vpci: virtual PCI address to assign; ignored because OpenStack lacks support #TODO
2806 model: interface model, ignored #TODO
2807 mac_address: used for SR-IOV ifaces #TODO for other types
2808 use: 'data', 'bridge', 'mgmt'
2809 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2810 vim_id: filled/added by this function
2811 floating_ip: True/False (or it can be None)
2812 port_security: True/False
2813 cloud_config (dict): (optional) dictionary with:
2814 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2815 users: (optional) list of users to be inserted, each item is a dict with:
2816 name: (mandatory) user name,
2817 key-pairs: (optional) list of strings with the public key to be inserted to the user
2818 user-data: (optional) string is a text script to be passed directly to cloud-init
2819 config-files: (optional). List of files to be transferred. Each item is a dict with:
2820 dest: (mandatory) string with the destination absolute path
2821 encoding: (optional, by default text). Can be one of:
2822 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2823 content : (mandatory) string with the content of the file
2824 permissions: (optional) string with file permissions, typically octal notation '0644'
2825 owner: (optional) file owner, string with the format 'owner:group'
2826 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2827 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2828 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2829 size: (mandatory) string with the size of the disk in GB
2830 vim_id: (optional) should use this existing volume id
2831 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
2832 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2833 availability_zone_index is None
2834 #TODO ip, security groups
2836 Returns:
2837 A tuple with the instance identifier and created_items or raises an exception on error
2838 created_items can be None or a dictionary where this method can include key-values that will be passed to
2839 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2840 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2841 as not present.
2843 """
2844 self.logger.debug(
2845 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2846 image_id,
2847 flavor_id,
2848 str(net_list),
2849 )
2850 server = None
2851 created_items = {}
2852 net_list_vim = []
2853 # list of external networks to be connected to instance, later on used to create floating_ip
2854 external_network = []
2855 # List of ports with port-security disabled
2856 no_secured_ports = []
2857 block_device_mapping = {}
2858 existing_vim_volumes = []
2859 server_group_id = None
2860 scheduler_hints = {}
2862 try:
2863 # Check the Openstack Connection
2864 self._reload_connection()
2866 # Prepare network list
2867 self._prepare_network_for_vminstance(
2868 name=name,
2869 net_list=net_list,
2870 created_items=created_items,
2871 net_list_vim=net_list_vim,
2872 external_network=external_network,
2873 no_secured_ports=no_secured_ports,
2874 security_group_name=security_group_name,
2875 )
2877 # Cloud config
2878 config_drive, userdata = self._create_user_data(cloud_config)
2880 # Get availability Zone
2881 self.vm_av_zone = self._get_vm_availability_zone(
2882 availability_zone_index, availability_zone_list
2883 )
2885 storage_av_zone = (
2886 self.storage_availability_zone
2887 if self.storage_availability_zone
2888 else self.vm_av_zone
2889 )
2891 if disk_list:
2892 # Prepare disks
2893 self._prepare_disk_for_vminstance(
2894 name=name,
2895 existing_vim_volumes=existing_vim_volumes,
2896 created_items=created_items,
2897 storage_av_zone=storage_av_zone,
2898 block_device_mapping=block_device_mapping,
2899 disk_list=disk_list,
2900 )
2902 if affinity_group_list:
2903 # Only first id on the list will be used. Openstack restriction
2904 server_group_id = affinity_group_list[0]["affinity_group_id"]
2904 scheduler_hints["group"] = server_group_id
2907 self.logger.debug(
2908 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2909 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2910 "block_device_mapping={}, server_group={})".format(
2911 name,
2912 image_id,
2913 flavor_id,
2914 net_list_vim,
2915 self.config.get("security_groups"),
2916 self.vm_av_zone,
2917 self.config.get("keypair"),
2918 userdata,
2919 config_drive,
2920 block_device_mapping,
2921 server_group_id,
2922 )
2923 )
2924 # Create VM
2925 server = self.nova.servers.create(
2926 name=name,
2927 image=image_id,
2928 flavor=flavor_id,
2929 nics=net_list_vim,
2930 security_groups=self.config.get("security_groups"),
2931 # TODO remove security_groups in future versions. Already at neutron port
2932 availability_zone=self.vm_av_zone,
2933 key_name=self.config.get("keypair"),
2934 userdata=userdata,
2935 config_drive=config_drive,
2936 block_device_mapping=block_device_mapping,
2937 scheduler_hints=scheduler_hints,
2938 )
2940 vm_start_time = time.time()
2942 self._update_port_security_for_vminstance(no_secured_ports, server)
2944 self._prepare_external_network_for_vminstance(
2945 external_network=external_network,
2946 server=server,
2947 created_items=created_items,
2948 vm_start_time=vm_start_time,
2949 )
2951 return server.id, created_items
2953 except Exception as e:
2954 server_id = None
2955 if server:
2956 server_id = server.id
2958 try:
2959 created_items = self.remove_keep_tag_from_persistent_volumes(
2960 created_items
2961 )
2963 self.delete_vminstance(server_id, created_items)
2965 except Exception as e2:
2966 self.logger.error("new_vminstance rollback fail {}".format(e2))
2968 self._format_exception(e)
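# Minimal call sketch, not part of the module; ids and names are hypothetical.
#
#   vm_id, created = conn.new_vminstance(
#       name="vnf0-vdu0",
#       description="test VDU",
#       start=True,
#       image_id="<glance image uuid>",
#       flavor_id="<nova flavor uuid>",
#       affinity_group_list=[],
#       net_list=[{"name": "eth0", "net_id": "<neutron network uuid>",
#                  "type": "virtual", "use": "mgmt"}],
#   )
#   # "created" maps keys such as "port:<uuid>" or "volume:<uuid>[:keep]" to True
#   # and is later consumed by delete_vminstance() and action_vminstance().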
2970 @staticmethod
2971 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2972 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2974 Args:
2975 created_items (dict): All created items belonging to the VM
2977 Returns:
2978 updated_created_items (dict): Dict which does not include keep flag for volumes.
2980 """
2981 return {
2982 key.replace(":keep", ""): value for (key, value) in created_items.items()
2983 }
2985 def get_vminstance(self, vm_id):
2986 """Returns the VM instance information from VIM"""
2987 return self._find_nova_server(vm_id)
2989 @catch_any_exception
2990 def get_vminstance_console(self, vm_id, console_type="vnc"):
2991 """
2992 Get a console for the virtual machine
2993 Params:
2994 vm_id: uuid of the VM
2995 console_type, can be:
2996 "novnc" (by default), "xvpvnc" for VNC types,
2997 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2998 Returns dict with the console parameters:
2999 protocol: ssh, ftp, http, https, ...
3000 server: usually ip address
3001 port: the http, ssh, ... port
3002 suffix: extra text, e.g. the http path and query string
3003 """
3004 self.logger.debug("Getting VM CONSOLE from VIM")
3005 self._reload_connection()
3006 server = self.nova.servers.find(id=vm_id)
3008 if console_type is None or console_type == "novnc":
3009 console_dict = server.get_vnc_console("novnc")
3010 elif console_type == "xvpvnc":
3011 console_dict = server.get_vnc_console(console_type)
3012 elif console_type == "rdp-html5":
3013 console_dict = server.get_rdp_console(console_type)
3014 elif console_type == "spice-html5":
3015 console_dict = server.get_spice_console(console_type)
3016 else:
3017 raise vimconn.VimConnException(
3018 "console type '{}' not allowed".format(console_type),
3019 http_code=vimconn.HTTP_Bad_Request,
3020 )
3022 console_dict1 = console_dict.get("console")
3024 if console_dict1:
3025 console_url = console_dict1.get("url")
3027 if console_url:
3028 # parse console_url
3029 protocol_index = console_url.find("//")
3030 suffix_index = (
3031 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3032 )
3033 port_index = (
3034 console_url[protocol_index + 2 : suffix_index].find(":")
3035 + protocol_index
3036 + 2
3037 )
3039 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3040 return (
3041 -vimconn.HTTP_Internal_Server_Error,
3042 "Unexpected response from VIM",
3043 )
3045 console_dict = {
3046 "protocol": console_url[0:protocol_index],
3047 "server": console_url[protocol_index + 2 : port_index],
3048 "port": console_url[port_index + 1 : suffix_index],
3049 "suffix": console_url[suffix_index + 1 :],
3050 }
3053 return console_dict
3054 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
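# Parsing sketch (hypothetical URL): given
#   console_url = "http://vim.example:6080/vnc_auto.html?token=abc"
# the returned dict is
#   {"protocol": "http:",   # the slice runs up to the "//" marker, keeping the colon
#    "server": "vim.example", "port": "6080",
#    "suffix": "vnc_auto.html?token=abc"}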
3056 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
3057 """Neutron delete ports by id.
3058 Args:
3059 k_id (str): Port id in the VIM
3060 """
3061 try:
3062 self.neutron.delete_port(k_id)
3064 except (neExceptions.ConnectionFailed, ConnectionError) as e:
3065 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
3066 # If there is connection error, raise.
3067 self._format_exception(e)
3068 except Exception as e:
3069 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
3071 def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
3072 """Cinder delete volume by id.
3073 Args:
3074 shared_volume_vim_id (str): ID of shared volume in VIM
3075 """
3076 elapsed_time = 0
3077 try:
3078 while elapsed_time < server_timeout:
3079 vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
3080 if vol_status == "available":
3081 self.cinder.volumes.delete(shared_volume_vim_id)
3082 return True
3084 time.sleep(5)
3085 elapsed_time += 5
3087 if elapsed_time >= server_timeout:
3088 raise vimconn.VimConnException(
3089 "Timeout waiting for volume "
3090 + shared_volume_vim_id
3091 + " to be available",
3092 http_code=vimconn.HTTP_Request_Timeout,
3093 )
3095 except Exception as e:
3096 self.logger.error(
3097 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3098 )
3099 self._format_exception(e)
3101 def _delete_volumes_by_id_wth_cinder(
3102 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
3103 ) -> bool:
3104 """Cinder delete volume by id.
3105 Args:
3106 k (str): Full item name in created_items
3107 k_id (str): ID of the volume in VIM
3108 volumes_to_hold (list): Volumes not to delete
3109 created_items (dict): All created items belonging to the VM
3110 """
3111 try:
3112 if k_id in volumes_to_hold:
3113 return False
3115 if self.cinder.volumes.get(k_id).status != "available":
3116 return True
3118 else:
3119 self.cinder.volumes.delete(k_id)
3120 created_items[k] = None
3122 except (cExceptions.ConnectionError, ConnectionError) as e:
3123 self.logger.error(
3124 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3125 )
3126 self._format_exception(e)
3127 except Exception as e:
3128 self.logger.error(
3129 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3130 )
3132 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
3133 """Neutron delete floating ip by id.
3134 Args:
3135 k (str): Full item name in created_items
3136 k_id (str): ID of floating ip in VIM
3137 created_items (dict): All created items belonging to the VM
3138 """
3139 try:
3140 self.neutron.delete_floatingip(k_id)
3141 created_items[k] = None
3143 except (neExceptions.ConnectionFailed, ConnectionError) as e:
3144 self.logger.error(
3145 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3146 )
3147 self._format_exception(e)
3148 except Exception as e:
3149 self.logger.error(
3150 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3151 )
3153 @staticmethod
3154 def _get_item_name_id(k: str) -> Tuple[str, str]:
3155 k_item, _, k_id = k.partition(":")
3156 return k_item, k_id
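# Example of the created_items key convention (hypothetical uuid): partition(":")
# splits on the first colon only, so trailing flags stay attached to the id part.
#
#   vimconnector._get_item_name_id("volume:1234-abcd:keep")
#   # -> ("volume", "1234-abcd:keep")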
3158 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
3159 """Delete VM ports attached to the networks before deleting virtual machine.
3160 Args:
3161 created_items (dict): All created items belonging to the VM
3162 """
3164 for k, v in created_items.items():
3165 if not v: # skip already deleted
3166 continue
3168 try:
3169 k_item, k_id = self._get_item_name_id(k)
3170 if k_item == "port":
3171 self._delete_ports_by_id_wth_neutron(k_id)
3173 except (neExceptions.ConnectionFailed, ConnectionError) as e:
3174 self.logger.error(
3175 "Error deleting port: {}: {}".format(type(e).__name__, e)
3176 )
3177 self._format_exception(e)
3178 except Exception as e:
3179 self.logger.error(
3180 "Error deleting port: {}: {}".format(type(e).__name__, e)
3181 )
3183 def _delete_created_items(
3184 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
3185 ) -> bool:
3186 """Delete Volumes and floating ip if they exist in created_items."""
3187 for k, v in created_items.items():
3188 if not v: # skip already deleted
3189 continue
3191 try:
3192 k_item, k_id = self._get_item_name_id(k)
3193 if k_item == "volume":
3194 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
3195 k, k_id, volumes_to_hold, created_items
3196 )
3198 if unavailable_vol:
3199 keep_waiting = True
3201 elif k_item == "floating_ip":
3202 self._delete_floating_ip_by_id(k, k_id, created_items)
3204 except (
3205 cExceptions.ConnectionError,
3206 neExceptions.ConnectionFailed,
3207 ConnectionError,
3208 AttributeError,
3209 TypeError,
3210 ) as e:
3211 self.logger.error("Error deleting {}: {}".format(k, e))
3212 self._format_exception(e)
3214 except Exception as e:
3215 self.logger.error("Error deleting {}: {}".format(k, e))
3217 return keep_waiting
3219 @staticmethod
3220 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3221 """Remove the volumes which has key flag from created_items
3223 Args:
3224 created_items (dict): All created items belonging to the VM
3226 Returns:
3227 created_items (dict): created_items without the keep-flagged persistent volumes
3228 """
3229 return {
3230 key: value
3231 for (key, value) in created_items.items()
3232 if len(key.split(":")) == 2
3233 }
3235 @catch_any_exception
3236 def delete_vminstance(
3237 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3238 ) -> None:
3239 """Removes a VM instance from VIM. Returns the old identifier.
3240 Args:
3241 vm_id (str): Identifier of VM instance
3242 created_items (dict): All created items belonging to the VM
3243 volumes_to_hold (list): Volumes not to be deleted
3244 """
3245 if created_items is None:
3246 created_items = {}
3247 if volumes_to_hold is None:
3248 volumes_to_hold = []
3250 try:
3251 created_items = self._extract_items_wth_keep_flag_from_created_items(
3252 created_items
3253 )
3255 self._reload_connection()
3257 # Delete VM ports attached to the networks before the virtual machine
3258 if created_items:
3259 self._delete_vm_ports_attached_to_network(created_items)
3261 if vm_id:
3262 self.nova.servers.delete(vm_id)
3264 # Although detached, volumes should be in 'available' status before deleting.
3265 # We ensure that in this loop
3266 keep_waiting = True
3267 elapsed_time = 0
3269 while keep_waiting and elapsed_time < volume_timeout:
3270 keep_waiting = False
3272 # Delete volumes and floating IP.
3273 keep_waiting = self._delete_created_items(
3274 created_items, volumes_to_hold, keep_waiting
3275 )
3277 if keep_waiting:
3278 time.sleep(1)
3279 elapsed_time += 1
3280 except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
3281 # If VM does not exist, it does not raise
3282 self.logger.warning(f"Error deleting VM: {vm_id} is not found, {str(e)}")
3284 def refresh_vms_status(self, vm_list):
3285 """Get the status of the virtual machines and their interfaces/ports
3286 Params: the list of VM identifiers
3287 Returns a dictionary with:
3288 vm_id: #VIM id of this Virtual Machine
3289 status: #Mandatory. Text with one of:
3290 # DELETED (not found at vim)
3291 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3292 # OTHER (Vim reported other status not understood)
3293 # ERROR (VIM indicates an ERROR status)
3294 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3295 # CREATING (on building process), ERROR
3296 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
3297 #
3298 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3299 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3300 interfaces:
3301 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3302 mac_address: #Text format XX:XX:XX:XX:XX:XX
3303 vim_net_id: #network id where this interface is connected
3304 vim_interface_id: #interface/port VIM id
3305 ip_address: #null, or text with IPv4, IPv6 address
3306 compute_node: #identification of compute node where PF,VF interface is allocated
3307 pci: #PCI address of the NIC that hosts the PF,VF
3308 vlan: #physical VLAN used for VF
3309 """
3310 vm_dict = {}
3311 self.logger.debug(
3312 "refresh_vms status: Getting tenant VM instance information from VIM"
3313 )
3314 for vm_id in vm_list:
3315 vm = {}
3317 try:
3318 vm_vim = self.get_vminstance(vm_id)
3320 if vm_vim["status"] in vmStatus2manoFormat:
3321 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3322 else:
3323 vm["status"] = "OTHER"
3324 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3326 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3327 vm_vim.pop("user_data", None)
3328 vm["vim_info"] = self.serialize(vm_vim)
3330 vm["interfaces"] = []
3331 if vm_vim.get("fault"):
3332 vm["error_msg"] = str(vm_vim["fault"])
3334 # get interfaces
3335 try:
3336 self._reload_connection()
3337 port_dict = self.neutron.list_ports(device_id=vm_id)
3339 for port in port_dict["ports"]:
3340 interface = {}
3341 interface["vim_info"] = self.serialize(port)
3342 interface["mac_address"] = port.get("mac_address")
3343 interface["vim_net_id"] = port["network_id"]
3344 interface["vim_interface_id"] = port["id"]
3345 # check if OS-EXT-SRV-ATTR:host is there,
3346 # in case of non-admin credentials, it will be missing
3348 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3349 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3351 interface["pci"] = None
3353 # check if binding:profile is there,
3354 # in case of non-admin credentials, it will be missing
3355 if port.get("binding:profile"):
3356 if port["binding:profile"].get("pci_slot"):
3357 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3358 # the slot to 0x00
3359 # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
3360 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3361 pci = port["binding:profile"]["pci_slot"]
3362 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3363 interface["pci"] = pci
3365 interface["vlan"] = None
3367 if port.get("binding:vif_details"):
3368 interface["vlan"] = port["binding:vif_details"].get("vlan")
3370 # Get the VLAN from the network in case it is not present in the port, for old OpenStack
3371 # versions and for cases where the VLAN is needed at PT
3372 if not interface["vlan"]:
3373 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3374 network = self.neutron.show_network(port["network_id"])
3376 if (
3377 network["network"].get("provider:network_type")
3378 == "vlan"
3379 ):
3380 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3381 interface["vlan"] = network["network"].get(
3382 "provider:segmentation_id"
3383 )
3385 ips = []
3386 # look for floating ip address
3387 try:
3388 floating_ip_dict = self.neutron.list_floatingips(
3389 port_id=port["id"]
3390 )
3392 if floating_ip_dict.get("floatingips"):
3393 ips.append(
3394 floating_ip_dict["floatingips"][0].get(
3395 "floating_ip_address"
3396 )
3397 )
3398 except Exception:
3399 pass
3401 for subnet in port["fixed_ips"]:
3402 ips.append(subnet["ip_address"])
3404 interface["ip_address"] = ";".join(ips)
3405 vm["interfaces"].append(interface)
3406 except Exception as e:
3407 self.logger.error(
3408 "Error getting vm interface information {}: {}".format(
3409 type(e).__name__, e
3410 ),
3411 exc_info=True,
3412 )
3413 except vimconn.VimConnNotFoundException as e:
3414 self.logger.error("Exception getting vm status: %s", str(e))
3415 vm["status"] = "DELETED"
3416 vm["error_msg"] = str(e)
3417 except vimconn.VimConnException as e:
3418 self.logger.error("Exception getting vm status: %s", str(e))
3419 vm["status"] = "VIM_ERROR"
3420 vm["error_msg"] = str(e)
3422 vm_dict[vm_id] = vm
3424 return vm_dict
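# Shape sketch of the returned structure (hypothetical values):
#
#   {
#       "<vm uuid>": {
#           "status": "ACTIVE",
#           "vim_info": "<yaml dump of the Nova server>",
#           "interfaces": [
#               {"vim_interface_id": "<port uuid>", "vim_net_id": "<net uuid>",
#                "mac_address": "fa:16:3e:00:00:01", "ip_address": "10.0.0.5",
#                "pci": None, "vlan": None}
#           ],
#       }
#   }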
3426 @catch_any_exception
3427 def action_vminstance(self, vm_id, action_dict, created_items={}):
3428 """Send and action over a VM instance from VIM
3429 Returns None or the console dict if the action was successfully sent to the VIM
3430 """
3431 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3432 self._reload_connection()
3433 server = self.nova.servers.find(id=vm_id)
3434 if "start" in action_dict:
3435 if action_dict["start"] == "rebuild":
3436 server.rebuild()
3437 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
3438 if not vm_state:
3439 raise nvExceptions.BadRequest(
3440 409,
3441 message="Cannot 'REBUILD' vm_state is in ERROR",
3442 )
3443 else:
3444 if server.status == "PAUSED":
3445 server.unpause()
3446 elif server.status == "SUSPENDED":
3447 server.resume()
3448 elif server.status == "SHUTOFF":
3449 server.start()
3450 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
3451 if not vm_state:
3452 raise nvExceptions.BadRequest(
3453 409,
3454 message="Cannot 'START' vm_state is in ERROR",
3455 )
3456 else:
3457 self.logger.debug(
3458 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3459 )
3460 raise vimconn.VimConnException(
3461 "Cannot 'start' instance while it is in active state",
3462 http_code=vimconn.HTTP_Bad_Request,
3463 )
3464 elif "pause" in action_dict:
3465 server.pause()
3466 elif "resume" in action_dict:
3467 server.resume()
3468 elif "shutoff" in action_dict or "shutdown" in action_dict:
3469 self.logger.debug("server status %s", server.status)
3470 if server.status == "ACTIVE":
3471 server.stop()
3472 vm_state = self.__wait_for_vm(vm_id, "SHUTOFF")
3473 if not vm_state:
3474 raise nvExceptions.BadRequest(
3475 409,
3476 message="Cannot 'STOP' vm_state is in ERROR",
3477 )
3478 else:
3479 self.logger.debug("ERROR: VM is not in Active state")
3480 raise vimconn.VimConnException(
3481 "VM is not in active state, stop operation is not allowed",
3482 http_code=vimconn.HTTP_Bad_Request,
3483 )
3484 elif "forceOff" in action_dict:
3485 server.stop() # TODO
3486 elif "terminate" in action_dict:
3487 server.delete()
3488 elif "createImage" in action_dict:
3489 server.create_image()
3490 # "path":path_schema,
3491 # "description":description_schema,
3492 # "name":name_schema,
3493 # "metadata":metadata_schema,
3494 # "imageRef": id_schema,
3495 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3496 elif "rebuild" in action_dict:
3497 server.rebuild(server.image["id"])
3498 elif "reboot" in action_dict:
3499 server.reboot() # reboot_type="SOFT"
3500 elif "console" in action_dict:
3501 console_type = action_dict["console"]
3503 if console_type is None or console_type == "novnc":
3504 console_dict = server.get_vnc_console("novnc")
3505 elif console_type == "xvpvnc":
3506 console_dict = server.get_vnc_console(console_type)
3507 elif console_type == "rdp-html5":
3508 console_dict = server.get_rdp_console(console_type)
3509 elif console_type == "spice-html5":
3510 console_dict = server.get_spice_console(console_type)
3511 else:
3512 raise vimconn.VimConnException(
3513 "console type '{}' not allowed".format(console_type),
3514 http_code=vimconn.HTTP_Bad_Request,
3515 )
3517 try:
3518 console_url = console_dict["console"]["url"]
3519 # parse console_url
3520 protocol_index = console_url.find("//")
3521 suffix_index = (
3522 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3523 )
3524 port_index = (
3525 console_url[protocol_index + 2 : suffix_index].find(":")
3526 + protocol_index
3527 + 2
3528 )
3530 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3531 raise vimconn.VimConnException(
3532 "Unexpected response from VIM " + str(console_dict)
3533 )
3535 console_dict2 = {
3536 "protocol": console_url[0:protocol_index],
3537 "server": console_url[protocol_index + 2 : port_index],
3538 "port": int(console_url[port_index + 1 : suffix_index]),
3539 "suffix": console_url[suffix_index + 1 :],
3540 }
3542 return console_dict2
3543 except Exception:
3544 raise vimconn.VimConnException(
3545 "Unexpected response from VIM " + str(console_dict)
3546 )
3548 return None
3550 # ###### VIO Specific Changes #########
3551 def _generate_vlanID(self):
3552 """
3553 Method to get unused vlanID
3554 Args:
3555 None
3556 Returns:
3557 vlanID
3558 """
3559 # Get used VLAN IDs
3560 usedVlanIDs = []
3561 networks = self.get_network_list()
3563 for net in networks:
3564 if net.get("provider:segmentation_id"):
3565 usedVlanIDs.append(net.get("provider:segmentation_id"))
3567 used_vlanIDs = set(usedVlanIDs)
3569 # find unused VLAN ID
3570 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3571 try:
3572 start_vlanid, end_vlanid = map(
3573 int, vlanID_range.replace(" ", "").split("-")
3574 )
3576 for vlanID in range(start_vlanid, end_vlanid + 1):
3577 if vlanID not in used_vlanIDs:
3578 return vlanID
3579 except Exception as exp:
3580 raise vimconn.VimConnException(
3581 "Exception {} occurred while generating VLAN ID.".format(exp)
3582 )
3583 else:
3584 raise vimconn.VimConnConflictException(
3585 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3586 self.config.get("dataplane_net_vlan_range")
3587 )
3588 )
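# Config sketch (hypothetical ranges): with
#   config["dataplane_net_vlan_range"] = ["3000-3100", "3200-3300"]
# the first ID in 3000..3100 that is not already used as a
# provider:segmentation_id is returned; the for/else only raises once every
# configured range has been exhausted.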
3590 def _generate_multisegment_vlanID(self):
3591 """
3592 Method to get unused vlanID
3593 Args:
3594 None
3595 Returns:
3596 vlanID
3597 """
3598 # Get used VLAN IDs
3599 usedVlanIDs = []
3600 networks = self.get_network_list()
3601 for net in networks:
3602 if net.get("provider:network_type") == "vlan" and net.get(
3603 "provider:segmentation_id"
3604 ):
3605 usedVlanIDs.append(net.get("provider:segmentation_id"))
3606 elif net.get("segments"):
3607 for segment in net.get("segments"):
3608 if segment.get("provider:network_type") == "vlan" and segment.get(
3609 "provider:segmentation_id"
3610 ):
3611 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3613 used_vlanIDs = set(usedVlanIDs)
3615 # find unused VLAN ID
3616 for vlanID_range in self.config.get("multisegment_vlan_range"):
3617 try:
3618 start_vlanid, end_vlanid = map(
3619 int, vlanID_range.replace(" ", "").split("-")
3620 )
3622 for vlanID in range(start_vlanid, end_vlanid + 1):
3623 if vlanID not in used_vlanIDs:
3624 return vlanID
3625 except Exception as exp:
3626 raise vimconn.VimConnException(
3627 "Exception {} occurred while generating VLAN ID.".format(exp)
3628 )
3629 else:
3630 raise vimconn.VimConnConflictException(
3631 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3632 self.config.get("multisegment_vlan_range")
3633 )
3634 )
3636 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3637 """
3638 Method to validate user given vlanID ranges
3639 Args: None
3640 Returns: None
3641 """
3642 for vlanID_range in input_vlan_range:
3643 vlan_range = vlanID_range.replace(" ", "")
3644 # validate format
3645 vlanID_pattern = r"(\d+)-(\d+)$"  # require at least one digit on each side, otherwise int() below would fail unguarded
3646 match_obj = re.match(vlanID_pattern, vlan_range)
3647 if not match_obj:
3648 raise vimconn.VimConnConflictException(
3649 "Invalid VLAN range for {}: {}.You must provide "
3650 "'{}' in format [start_ID - end_ID].".format(
3651 text_vlan_range, vlanID_range, text_vlan_range
3652 )
3653 )
3655 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3656 if start_vlanid <= 0:
3657 raise vimconn.VimConnConflictException(
3658 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3659 "networks valid IDs are 1 to 4094 ".format(
3660 text_vlan_range, vlanID_range
3661 )
3662 )
3664 if end_vlanid > 4094:
3665 raise vimconn.VimConnConflictException(
3666 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3667 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3668 text_vlan_range, vlanID_range
3669 )
3670 )
3672 if start_vlanid > end_vlanid:
3673 raise vimconn.VimConnConflictException(
3674 "Invalid VLAN range for {}: {}. You must provide '{}'"
3675 " in format start_ID - end_ID and start_ID < end_ID ".format(
3676 text_vlan_range, vlanID_range, text_vlan_range
3677 )
3678 )
3680 def get_hosts_info(self):
3681 """Get the information of deployed hosts
3682 Returns the hosts content"""
3683 self.logger.debug("osconnector: Getting Host info from VIM")
3685 try:
3686 h_list = []
3687 self._reload_connection()
3688 hypervisors = self.nova.hypervisors.list()
3690 for hype in hypervisors:
3691 h_list.append(hype.to_dict())
3693 return 1, {"hosts": h_list}
3694 except nvExceptions.NotFound as e:
3695 error_value = -vimconn.HTTP_Not_Found
3696 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3697 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3698 error_value = -vimconn.HTTP_Bad_Request
3699 error_text = (
3700 type(e).__name__
3701 + ": "
3702 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3703 )
3705 # TODO insert exception vimconn.HTTP_Unauthorized
3706 # reaching this point means an exception was raised
3707 self.logger.debug("get_hosts_info " + error_text)
3709 return error_value, error_text
3711 def get_hosts(self, vim_tenant):
3712 """Get the hosts and deployed instances
3713 Returns the hosts content"""
3714 r, hype_dict = self.get_hosts_info()
3716 if r < 0:
3717 return r, hype_dict
3719 hypervisors = hype_dict["hosts"]
3721 try:
3722 servers = self.nova.servers.list()
3723 for hype in hypervisors:
3724 for server in servers:
3725 if (
3726 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3727 == hype["hypervisor_hostname"]
3728 ):
3729 if "vm" in hype:
3730 hype["vm"].append(server.id)
3731 else:
3732 hype["vm"] = [server.id]
3734 return 1, hype_dict
3735 except nvExceptions.NotFound as e:
3736 error_value = -vimconn.HTTP_Not_Found
3737 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3738 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3739 error_value = -vimconn.HTTP_Bad_Request
3740 error_text = (
3741 type(e).__name__
3742 + ": "
3743 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3744 )
3746 # TODO insert exception vimconn.HTTP_Unauthorized
3747 # reaching this point means an exception was raised
3748 self.logger.debug("get_hosts " + error_text)
3750 return error_value, error_text
3752 def new_classification(self, name, ctype, definition):
3753 self.logger.debug(
3754 "Adding a new (Traffic) Classification to VIM, named %s", name
3755 )
3757 try:
3758 new_class = None
3759 self._reload_connection()
3761 if ctype not in supportedClassificationTypes:
3762 raise vimconn.VimConnNotSupportedException(
3763 "OpenStack VIM connector does not support provided "
3764 "Classification Type {}, supported ones are: {}".format(
3765 ctype, supportedClassificationTypes
3766 )
3767 )
3769 if not self._validate_classification(ctype, definition):
3770 raise vimconn.VimConnException(
3771 "Incorrect Classification definition for the type specified."
3772 )
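# Illustrative definition (hypothetical values) for the
# "legacy_flow_classifier" type, using networking-sfc field names:
# {"protocol": "tcp", "source_ip_prefix": "192.168.2.0/24",
#  "destination_port_range_min": 80, "destination_port_range_max": 80,
#  "logical_source_port": "<neutron-port-uuid>"}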
3774 classification_dict = definition
3775 classification_dict["name"] = name
3777 self.logger.info(
3778 "Adding a new (Traffic) Classification to VIM, named {} and {}.".format(
3779 name, classification_dict
3780 )
3781 )
3782 new_class = self.neutron.create_sfc_flow_classifier(
3783 {"flow_classifier": classification_dict}
3784 )
3786 return new_class["flow_classifier"]["id"]
3787 except (
3788 neExceptions.ConnectionFailed,
3789 ksExceptions.ClientException,
3790 neExceptions.NeutronException,
3791 ConnectionError,
3792 ) as e:
3793 self.logger.error("Creation of Classification failed.")
3794 self._format_exception(e)
3796 def get_classification(self, class_id):
3797 self.logger.debug("Getting Classification %s from VIM", class_id)
3798 filter_dict = {"id": class_id}
3799 class_list = self.get_classification_list(filter_dict)
3801 if len(class_list) == 0:
3802 raise vimconn.VimConnNotFoundException(
3803 "Classification '{}' not found".format(class_id)
3804 )
3805 elif len(class_list) > 1:
3806 raise vimconn.VimConnConflictException(
3807 "Found more than one Classification with this criteria"
3808 )
3810 classification = class_list[0]
3812 return classification
3814 def get_classification_list(self, filter_dict={}):
3815 self.logger.debug(
3816 "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3817 )
3819 try:
3820 filter_dict_os = filter_dict.copy()
3821 self._reload_connection()
3823 if self.api_version3 and "tenant_id" in filter_dict_os:
3824 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3826 classification_dict = self.neutron.list_sfc_flow_classifiers(
3827 **filter_dict_os
3828 )
3829 classification_list = classification_dict["flow_classifiers"]
3830 self.__classification_os2mano(classification_list)
3832 return classification_list
3833 except (
3834 neExceptions.ConnectionFailed,
3835 ksExceptions.ClientException,
3836 neExceptions.NeutronException,
3837 ConnectionError,
3838 ) as e:
3839 self._format_exception(e)
3841 def delete_classification(self, class_id):
3842 self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3844 try:
3845 self._reload_connection()
3846 self.neutron.delete_sfc_flow_classifier(class_id)
3848 return class_id
3849 except (
3850 neExceptions.ConnectionFailed,
3851 neExceptions.NeutronException,
3852 ksExceptions.ClientException,
3854 ConnectionError,
3855 ) as e:
3856 self._format_exception(e)
3858 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3859 self.logger.debug(
3860 "Adding a new Service Function Instance to VIM, named '%s'", name
3861 )
3863 try:
3864 new_sfi = None
3865 self._reload_connection()
3866 correlation = None
3868 if sfc_encap:
3869 correlation = "nsh"
3871 if len(ingress_ports) != 1:
3872 raise vimconn.VimConnNotSupportedException(
3873 "OpenStack VIM connector can only have 1 ingress port per SFI"
3874 )
3876 if len(egress_ports) != 1:
3877 raise vimconn.VimConnNotSupportedException(
3878 "OpenStack VIM connector can only have 1 egress port per SFI"
3879 )
3881 sfi_dict = {
3882 "name": name,
3883 "ingress": ingress_ports[0],
3884 "egress": egress_ports[0],
3885 "service_function_parameters": {"correlation": correlation},
3886 }
3887 self.logger.info("Adding a new SFI to VIM, {}.".format(sfi_dict))
3888 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3890 return new_sfi["port_pair"]["id"]
3891 except (
3892 neExceptions.ConnectionFailed,
3893 ksExceptions.ClientException,
3894 neExceptions.NeutronException,
3895 ConnectionError,
3896 ) as e:
3897 if new_sfi:
3898 try:
3899 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3900 except Exception:
3901 self.logger.error(
3902 "Creation of Service Function Instance failed, with "
3903 "subsequent deletion failure as well."
3904 )
3906 self._format_exception(e)
3908 def get_sfi(self, sfi_id):
3909 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3910 filter_dict = {"id": sfi_id}
3911 sfi_list = self.get_sfi_list(filter_dict)
3913 if len(sfi_list) == 0:
3914 raise vimconn.VimConnNotFoundException(
3915 "Service Function Instance '{}' not found".format(sfi_id)
3916 )
3917 elif len(sfi_list) > 1:
3918 raise vimconn.VimConnConflictException(
3919 "Found more than one Service Function Instance with this criteria"
3920 )
3922 sfi = sfi_list[0]
3924 return sfi
3926 def get_sfi_list(self, filter_dict={}):
3927 self.logger.debug(
3928 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3929 )
3931 try:
3932 self._reload_connection()
3933 filter_dict_os = filter_dict.copy()
3935 if self.api_version3 and "tenant_id" in filter_dict_os:
3936 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3938 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3939 sfi_list = sfi_dict["port_pairs"]
3940 self.__sfi_os2mano(sfi_list)
3942 return sfi_list
3943 except (
3944 neExceptions.ConnectionFailed,
3945 ksExceptions.ClientException,
3946 neExceptions.NeutronException,
3947 ConnectionError,
3948 ) as e:
3949 self._format_exception(e)
3951 def delete_sfi(self, sfi_id):
3952 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3954 try:
3955 self._reload_connection()
3956 self.neutron.delete_sfc_port_pair(sfi_id)
3958 return sfi_id
3959 except (
3960 neExceptions.ConnectionFailed,
3961 neExceptions.NeutronException,
3962 ksExceptions.ClientException,
3964 ConnectionError,
3965 ) as e:
3966 self._format_exception(e)
3968 def new_sf(self, name, sfis, sfc_encap=True):
3969 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3971 new_sf = None
3973 try:
3974 self._reload_connection()
3976 for instance in sfis:
3977 sfi = self.get_sfi(instance)
3979 if sfi.get("sfc_encap") != sfc_encap:
3980 raise vimconn.VimConnNotSupportedException(
3981 "OpenStack VIM connector requires all SFIs of the "
3982 "same SF to share the same SFC Encapsulation"
3983 )
3985 sf_dict = {"name": name, "port_pairs": sfis}
3987 self.logger.info("Adding a new SF to VIM, {}.".format(sf_dict))
3988 new_sf = self.neutron.create_sfc_port_pair_group(
3989 {"port_pair_group": sf_dict}
3990 )
3992 return new_sf["port_pair_group"]["id"]
3993 except (
3994 neExceptions.ConnectionFailed,
3995 ksExceptions.ClientException,
3996 neExceptions.NeutronException,
3997 ConnectionError,
3998 ) as e:
3999 if new_sf:
4000 try:
4001 new_sf_id = new_sf.get("port_pair_group").get("id")
4002 self.neutron.delete_sfc_port_pair_group(new_sf_id)
4003 except Exception:
4004 self.logger.error(
4005 "Creation of Service Function failed, with "
4006 "subsequent deletion failure as well."
4007 )
4009 self._format_exception(e)
4011 def get_sf(self, sf_id):
4012 self.logger.debug("Getting Service Function %s from VIM", sf_id)
4013 filter_dict = {"id": sf_id}
4014 sf_list = self.get_sf_list(filter_dict)
4016 if len(sf_list) == 0:
4017 raise vimconn.VimConnNotFoundException(
4018 "Service Function '{}' not found".format(sf_id)
4019 )
4020 elif len(sf_list) > 1:
4021 raise vimconn.VimConnConflictException(
4022 "Found more than one Service Function with this criteria"
4023 )
4025 sf = sf_list[0]
4027 return sf
4029 def get_sf_list(self, filter_dict={}):
4030 self.logger.debug(
4031 "Getting Service Function from VIM filter: '%s'", str(filter_dict)
4032 )
4034 try:
4035 self._reload_connection()
4036 filter_dict_os = filter_dict.copy()
4038 if self.api_version3 and "tenant_id" in filter_dict_os:
4039 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
4041 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
4042 sf_list = sf_dict["port_pair_groups"]
4043 self.__sf_os2mano(sf_list)
4045 return sf_list
4046 except (
4047 neExceptions.ConnectionFailed,
4048 ksExceptions.ClientException,
4049 neExceptions.NeutronException,
4050 ConnectionError,
4051 ) as e:
4052 self._format_exception(e)
4054 def delete_sf(self, sf_id):
4055 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
4057 try:
4058 self._reload_connection()
4059 self.neutron.delete_sfc_port_pair_group(sf_id)
4061 return sf_id
4062 except (
4063 neExceptions.ConnectionFailed,
4064 neExceptions.NeutronException,
4065 ksExceptions.ClientException,
4067 ConnectionError,
4068 ) as e:
4069 self._format_exception(e)
4071 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
4072 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
4074 new_sfp = None
4076 try:
4077 self._reload_connection()
4078 # In networking-sfc the MPLS correlation is the legacy mechanism,
4079 # used when no full SFC encapsulation (NSH) is intended
4080 correlation = "mpls"
4082 if sfc_encap:
4083 correlation = "nsh"
4085 sfp_dict = {
4086 "name": name,
4087 "flow_classifiers": classifications,
4088 "port_pair_groups": sfs,
4089 "chain_parameters": {"correlation": correlation},
4090 }
4092 if spi:
4093 sfp_dict["chain_id"] = spi
4095 self.logger.info("Adding a new SFP to VIM, {}.".format(sfp_dict))
4096 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
4098 return new_sfp["port_chain"]["id"]
4099 except (
4100 neExceptions.ConnectionFailed,
4101 ksExceptions.ClientException,
4102 neExceptions.NeutronException,
4103 ConnectionError,
4104 ) as e:
4105 if new_sfp:
4106 try:
4107 new_sfp_id = new_sfp.get("port_chain").get("id")
4108 self.neutron.delete_sfc_port_chain(new_sfp_id)
4109 except Exception:
4110 self.logger.error(
4111 "Creation of Service Function Path failed, with "
4112 "subsequent deletion failure as well."
4113 )
4115 self._format_exception(e)
4117 def get_sfp(self, sfp_id):
4118 self.logger.debug("Getting Service Function Path %s from VIM", sfp_id)
4120 filter_dict = {"id": sfp_id}
4121 sfp_list = self.get_sfp_list(filter_dict)
4123 if len(sfp_list) == 0:
4124 raise vimconn.VimConnNotFoundException(
4125 "Service Function Path '{}' not found".format(sfp_id)
4126 )
4127 elif len(sfp_list) > 1:
4128 raise vimconn.VimConnConflictException(
4129 "Found more than one Service Function Path with this criteria"
4130 )
4132 sfp = sfp_list[0]
4134 return sfp
4136 def get_sfp_list(self, filter_dict={}):
4137 self.logger.debug(
4138 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
4139 )
4141 try:
4142 self._reload_connection()
4143 filter_dict_os = filter_dict.copy()
4145 if self.api_version3 and "tenant_id" in filter_dict_os:
4146 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
4148 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
4149 sfp_list = sfp_dict["port_chains"]
4150 self.__sfp_os2mano(sfp_list)
4152 return sfp_list
4153 except (
4154 neExceptions.ConnectionFailed,
4155 ksExceptions.ClientException,
4156 neExceptions.NeutronException,
4157 ConnectionError,
4158 ) as e:
4159 self._format_exception(e)
4161 def delete_sfp(self, sfp_id):
4162 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
4164 try:
4165 self._reload_connection()
4166 self.neutron.delete_sfc_port_chain(sfp_id)
4168 return sfp_id
4169 except (
4170 neExceptions.ConnectionFailed,
4171 neExceptions.NeutronException,
4172 ksExceptions.ClientException,
4174 ConnectionError,
4175 ) as e:
4176 self._format_exception(e)
4178 def refresh_sfps_status(self, sfp_list):
4179 """Get the status of the service function path
4180 Params: the list of sfp identifiers
4181 Returns a dictionary with:
4182 sfp_id: #VIM id of this service function path
4183 status: #Mandatory. Text with one of:
4184 # DELETED (not found at vim)
4185 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4186 # OTHER (Vim reported other status not understood)
4187 # ERROR (VIM indicates an ERROR status)
4188 # ACTIVE,
4189 # CREATING (on building process)
4190 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4191 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4192 """
4193 sfp_dict = {}
4194 self.logger.debug(
4195 "refresh_sfps status: Getting tenant SFP information from VIM"
4196 )
4198 for sfp_id in sfp_list:
4199 sfp = {}
4201 try:
4202 sfp_vim = self.get_sfp(sfp_id)
4204 if sfp_vim["spi"]:
4205 sfp["status"] = vmStatus2manoFormat["ACTIVE"]
4206 else:
4207 sfp["status"] = "OTHER"
4208 sfp["error_msg"] = "VIM status reported " + sfp["status"]
4210 sfp["vim_info"] = self.serialize(sfp_vim)
4212 if sfp_vim.get("fault"):
4213 sfp["error_msg"] = str(sfp_vim["fault"])
4214 except vimconn.VimConnNotFoundException as e:
4215 self.logger.error("Exception getting sfp status: %s", str(e))
4216 sfp["status"] = "DELETED"
4217 sfp["error_msg"] = str(e)
4218 except vimconn.VimConnException as e:
4219 self.logger.error("Exception getting sfp status: %s", str(e))
4220 sfp["status"] = "VIM_ERROR"
4221 sfp["error_msg"] = str(e)
4223 sfp_dict[sfp_id] = sfp
4225 return sfp_dict
4227 def refresh_sfis_status(self, sfi_list):
4228 """Get the status of the service function instances
4229 Params: the list of sfi identifiers
4230 Returns a dictionary with:
4231 sfi_id: #VIM id of this service function instance
4232 status: #Mandatory. Text with one of:
4233 # DELETED (not found at vim)
4234 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4235 # OTHER (Vim reported other status not understood)
4236 # ERROR (VIM indicates an ERROR status)
4237 # ACTIVE,
4238 # CREATING (on building process)
4239 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4240 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4241 """
4242 sfi_dict = {}
4243 self.logger.debug(
4244 "refresh_sfis status: Getting tenant sfi information from VIM"
4245 )
4247 for sfi_id in sfi_list:
4248 sfi = {}
4250 try:
4251 sfi_vim = self.get_sfi(sfi_id)
4253 if sfi_vim:
4254 sfi["status"] = vmStatus2manoFormat["ACTIVE"]
4255 else:
4256 sfi["status"] = "OTHER"
4257 sfi["error_msg"] = "VIM status reported " + sfi["status"]
4259 sfi["vim_info"] = self.serialize(sfi_vim)
4261 if sfi_vim.get("fault"):
4262 sfi["error_msg"] = str(sfi_vim["fault"])
4263 except vimconn.VimConnNotFoundException as e:
4264 self.logger.error("Exception getting sfi status: %s", str(e))
4265 sfi["status"] = "DELETED"
4266 sfi["error_msg"] = str(e)
4267 except vimconn.VimConnException as e:
4268 self.logger.error("Exception getting sfi status: %s", str(e))
4269 sfi["status"] = "VIM_ERROR"
4270 sfi["error_msg"] = str(e)
4272 sfi_dict[sfi_id] = sfi
4274 return sfi_dict
4276 def refresh_sfs_status(self, sf_list):
4277 """Get the status of the service functions
4278 Params: the list of sf identifiers
4279 Returns a dictionary with:
4280 sf_id: #VIM id of this service function
4281 status: #Mandatory. Text with one of:
4282 # DELETED (not found at vim)
4283 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4284 # OTHER (Vim reported other status not understood)
4285 # ERROR (VIM indicates an ERROR status)
4286 # ACTIVE,
4287 # CREATING (on building process)
4288 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4289 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4290 """
4291 sf_dict = {}
4292 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
4294 for sf_id in sf_list:
4295 sf = {}
4297 try:
4298 sf_vim = self.get_sf(sf_id)
4300 if sf_vim:
4301 sf["status"] = vmStatus2manoFormat["ACTIVE"]
4302 else:
4303 sf["status"] = "OTHER"
4304 sf["error_msg"] = "VIM status reported " + sf_vim["status"]
4306 sf["vim_info"] = self.serialize(sf_vim)
4308 if sf_vim.get("fault"):
4309 sf["error_msg"] = str(sf_vim["fault"])
4310 except vimconn.VimConnNotFoundException as e:
4311 self.logger.error("Exception getting sf status: %s", str(e))
4312 sf["status"] = "DELETED"
4313 sf["error_msg"] = str(e)
4314 except vimconn.VimConnException as e:
4315 self.logger.error("Exception getting sf status: %s", str(e))
4316 sf["status"] = "VIM_ERROR"
4317 sf["error_msg"] = str(e)
4319 sf_dict[sf_id] = sf
4321 return sf_dict
4323 def refresh_classifications_status(self, classification_list):
4324 """Get the status of the classifications
4325 Params: the list of classification identifiers
4326 Returns a dictionary with:
4327 classification_id: #VIM id of this classification
4328 status: #Mandatory. Text with one of:
4329 # DELETED (not found at vim)
4330 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4331 # OTHER (Vim reported other status not understood)
4332 # ERROR (VIM indicates an ERROR status)
4333 # ACTIVE,
4334 # CREATING (on building process)
4335 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4336 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4337 """
4338 classification_dict = {}
4339 self.logger.debug(
4340 "refresh_classifications status: Getting tenant classification information from VIM"
4341 )
4343 for classification_id in classification_list:
4344 classification = {}
4346 try:
4347 classification_vim = self.get_classification(classification_id)
4349 if classification_vim:
4350 classification["status"] = vmStatus2manoFormat["ACTIVE"]
4351 else:
4352 classification["status"] = "OTHER"
4353 classification["error_msg"] = (
4354 "VIM status reported " + classification["status"]
4355 )
4357 classification["vim_info"] = self.serialize(classification_vim)
4359 if classification_vim.get("fault"):
4360 classification["error_msg"] = str(classification_vim["fault"])
4361 except vimconn.VimConnNotFoundException as e:
4362 self.logger.error("Exception getting classification status: %s", str(e))
4363 classification["status"] = "DELETED"
4364 classification["error_msg"] = str(e)
4365 except vimconn.VimConnException as e:
4366 self.logger.error("Exception getting classification status: %s", str(e))
4367 classification["status"] = "VIM_ERROR"
4368 classification["error_msg"] = str(e)
4370 classification_dict[classification_id] = classification
4372 return classification_dict
4374 @catch_any_exception
4375 def new_affinity_group(self, affinity_group_data):
4376 """Adds a server group to VIM
4377 affinity_group_data contains a dictionary with information, keys:
4378 name: name in VIM for the server group
4379 type: affinity or anti-affinity
4380 scope: Only nfvi-node allowed
4381 Returns the server group identifier"""
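# Illustrative input (hypothetical values):
# {"name": "vdu-anti-aff", "type": "anti-affinity", "scope": "nfvi-node"}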
4382 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
4383 name = affinity_group_data["name"]
4384 policy = affinity_group_data["type"]
4385 self._reload_connection()
4386 new_server_group = self.nova.server_groups.create(name, policy)
4387 return new_server_group.id
4389 @catch_any_exception
4390 def get_affinity_group(self, affinity_group_id):
4391 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4392 self.logger.debug("Getting flavor '%s'", affinity_group_id)
4393 self._reload_connection()
4394 server_group = self.nova.server_groups.find(id=affinity_group_id)
4395 return server_group.to_dict()
4397 @catch_any_exception
4398 def delete_affinity_group(self, affinity_group_id):
4399 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4400 self.logger.debug("Deleting server group '%s'", affinity_group_id)
4401 self._reload_connection()
4402 self.nova.server_groups.delete(affinity_group_id)
4403 return affinity_group_id
4405 @catch_any_exception
4406 def get_vdu_state(self, vm_id, host_is_required=False) -> list:
4407 """Getting the state of a VDU.
4408 Args:
4409 vm_id (str): ID of an instance
4410 host_is_required (Boolean): If True, raise KeyError when host info is missing
4411 from server_dict (as happens with non-admin VIM accounts).
4412 Returns:
4413 vdu_data (list): VDU details including state, flavor, host_info, AZ
4414 """
4415 self.logger.debug("Getting the status of VM")
4416 self.logger.debug("VIM VM ID %s", vm_id)
4417 self._reload_connection()
4418 server_dict = self._find_nova_server(vm_id)
4419 srv_attr = "OS-EXT-SRV-ATTR:host"
4420 host_info = (
4421 server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
4422 )
4423 vdu_data = [
4424 server_dict["status"],
4425 server_dict["flavor"]["id"],
4426 host_info,
4427 server_dict["OS-EXT-AZ:availability_zone"],
4428 ]
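# Illustrative vdu_data (hypothetical values):
# ["ACTIVE", "<flavor-uuid>", "compute-0", "nova"]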
4429 self.logger.debug("vdu_data %s", vdu_data)
4430 return vdu_data
4432 def check_compute_availability(self, host, server_flavor_details):
4433 self._reload_connection()
4434 hypervisor_search = self.nova.hypervisors.search(
4435 hypervisor_match=host, servers=True
4436 )
4437 for hypervisor in hypervisor_search:
4438 hypervisor_id = hypervisor.to_dict()["id"]
4439 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4440 hypervisor_dict = hypervisor_details.to_dict()
4442 hypervisor_json = hypervisor_dict  # to_dict() already returns a plain dict; no JSON round-trip needed
4443 resources_available = [
4444 hypervisor_json["free_ram_mb"],
4445 hypervisor_json["disk_available_least"],
4446 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
4447 ]
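# Elementwise check: free RAM, free disk and free vCPUs must each
# exceed the flavor's [ram, disk, vcpus] requirements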
4448 compute_available = all(
4449 x > y for x, y in zip(resources_available, server_flavor_details)
4450 )
4451 if compute_available:
4452 return host
4454 def check_availability_zone(
4455 self, old_az, server_flavor_details, old_host, host=None
4456 ):
4457 self._reload_connection()
4458 az_check = {"zone_check": False, "compute_availability": None}
4459 aggregates_list = self.nova.aggregates.list()
4460 for aggregate in aggregates_list:
4461 aggregate_details = aggregate.to_dict()
4463 aggregate_json = aggregate_details  # to_dict() already returns a plain dict; no JSON round-trip needed
4464 if aggregate_json["availability_zone"] == old_az:
4465 hosts_list = aggregate_json["hosts"]
4466 if host is not None:
4467 if host in hosts_list:
4468 az_check["zone_check"] = True
4469 available_compute_id = self.check_compute_availability(
4470 host, server_flavor_details
4471 )
4472 if available_compute_id is not None:
4473 az_check["compute_availability"] = available_compute_id
4474 else:
4475 for check_host in hosts_list:
4476 if check_host != old_host:
4477 available_compute_id = self.check_compute_availability(
4478 check_host, server_flavor_details
4479 )
4480 if available_compute_id is not None:
4481 az_check["zone_check"] = True
4482 az_check["compute_availability"] = available_compute_id
4483 break
4484 else:
4485 az_check["zone_check"] = True
4486 return az_check
4488 @catch_any_exception
4489 def migrate_instance(self, vm_id, compute_host=None):
4490 """
4491 Migrate a vdu
4492 param:
4493 vm_id: ID of an instance
4494 compute_host: Host to migrate the vdu to
4495 """
4496 self._reload_connection()
4497 vm_state = False
4498 instance_state = self.get_vdu_state(vm_id, host_is_required=True)
4499 server_flavor_id = instance_state[1]
4500 server_hypervisor_name = instance_state[2]
4501 server_availability_zone = instance_state[3]
4502 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
4503 server_flavor_details = [
4504 server_flavor["ram"],
4505 server_flavor["disk"],
4506 server_flavor["vcpus"],
4507 ]
4508 if compute_host == server_hypervisor_name:
4509 raise vimconn.VimConnException(
4510 "Unable to migrate instance '{}' to the same host '{}'".format(
4511 vm_id, compute_host
4512 ),
4513 http_code=vimconn.HTTP_Bad_Request,
4514 )
4515 az_status = self.check_availability_zone(
4516 server_availability_zone,
4517 server_flavor_details,
4518 server_hypervisor_name,
4519 compute_host,
4520 )
4521 availability_zone_check = az_status["zone_check"]
4522 available_compute_id = az_status.get("compute_availability")
4524 if availability_zone_check is False:
4525 raise vimconn.VimConnException(
4526 "Unable to migrate instance '{}' to a different availability zone".format(
4527 vm_id
4528 ),
4529 http_code=vimconn.HTTP_Bad_Request,
4530 )
4531 if available_compute_id is not None:
4532 # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
4533 self.nova.servers.live_migrate(
4534 server=vm_id,
4535 host=available_compute_id,
4536 block_migration=True,
4537 )
4538 state = "MIGRATING"
4539 changed_compute_host = ""
4540 if state == "MIGRATING":
4541 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
4542 changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[
4543 2
4544 ]
4545 if vm_state and changed_compute_host == available_compute_id:
4546 self.logger.debug(
4547 "Instance '{}' migrated to the new compute host '{}'".format(
4548 vm_id, changed_compute_host
4549 )
4550 )
4551 return state, available_compute_id
4552 else:
4553 raise vimconn.VimConnException(
4554 "Migration Failed. Instance '{}' not moved to the new host {}".format(
4555 vm_id, available_compute_id
4556 ),
4557 http_code=vimconn.HTTP_Bad_Request,
4558 )
4559 else:
4560 raise vimconn.VimConnException(
4561 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
4562 available_compute_id
4563 ),
4564 http_code=vimconn.HTTP_Bad_Request,
4565 )
4567 @catch_any_exception
4568 def resize_instance(self, vm_id, new_flavor_id):
4569 """
4570 Resize an instance to the given flavor
4572 param:
4573 vm_id : ID of an instance
4574 new_flavor_id : ID of the flavor to resize the instance to
4575 Return the status of the resized instance
4576 """
4577 self._reload_connection()
4578 self.logger.debug("resize the flavor of an instance")
4579 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
4580 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
4581 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
4582 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
4583 if old_flavor_disk > new_flavor_disk:
4584 raise nvExceptions.BadRequest(
4585 400,
4586 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
4587 )
4588 else:
4589 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
4590 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
4591 if vm_state:
4592 instance_resized_status = self.confirm_resize(
4593 vm_id, instance_status
4594 )
4595 return instance_resized_status
4596 else:
4597 raise nvExceptions.BadRequest(
4598 409,
4599 message="Cannot 'resize' vm_state is in ERROR",
4600 )
4602 else:
4603 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
4604 raise nvExceptions.BadRequest(
4605 409,
4606 message="Cannot 'resize' instance while it is in vm_state resized",
4607 )
4609 def confirm_resize(self, vm_id, instance_state):
4610 """
4611 Confirm the resize of an instance
4612 param:
4613 vm_id: ID of an instance; instance_state: state to wait for after confirming the resize (ACTIVE or SHUTOFF)
4614 """
4615 self._reload_connection()
4616 self.nova.servers.confirm_resize(server=vm_id)
4617 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4618 self.__wait_for_vm(vm_id, instance_state)
4619 instance_status = self.get_vdu_state(vm_id)[0]
4620 return instance_status
4622 def get_monitoring_data(self):
4623 try:
4624 self.logger.debug("Getting servers and ports data from Openstack VIMs.")
4625 self._reload_connection()
4626 all_servers = self.nova.servers.list(detailed=True)
4627 try:
4628 for server in all_servers:
4629 if server.flavor.get("original_name"):
4630 server.flavor["id"] = self.nova.flavors.find(
4631 name=server.flavor["original_name"]
4632 ).id
4633 except nvExceptions.NotFound as e:
4634 self.logger.warning(str(e.message))
4635 all_ports = self.neutron.list_ports()
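# all_servers: list of novaclient Server objects (detailed view);
# all_ports: raw Neutron response dict, i.e. {"ports": [...]}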
4636 return all_servers, all_ports
4637 except Exception as e:
4638 raise vimconn.VimConnException(
4639 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
4640 )