Fix Bug 2158 and Bug 2254 by arranging exception handling
RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
# -*- coding: utf-8 -*-

##
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##

"""
osconnector implements all the methods to interact with openstack using the python-neutronclient.

For the VNF forwarding graph, the OpenStack VIM connector calls the
networking-sfc Neutron extension methods, whose resources are mapped
to the VIM connector's SFC resources as follows:
- Classification (OSM) -> Flow Classifier (Neutron)
- Service Function Instance (OSM) -> Port Pair (Neutron)
- Service Function (OSM) -> Port Pair Group (Neutron)
- Service Function Path (OSM) -> Port Chain (Neutron)
"""

import copy
from http.client import HTTPException
import json
import logging
from pprint import pformat
import random
import re
import time
from typing import Dict, List, Optional, Tuple

from cinderclient import client as cClient
import cinderclient.exceptions as cExceptions
from glanceclient import client as glClient
import glanceclient.exc as gl1Exceptions
from keystoneauth1 import session
from keystoneauth1.identity import v2, v3
import keystoneclient.exceptions as ksExceptions
import keystoneclient.v2_0.client as ksClient_v2
import keystoneclient.v3.client as ksClient_v3
import netaddr
from neutronclient.common import exceptions as neExceptions
from neutronclient.neutron import client as neClient
from novaclient import client as nClient, exceptions as nvExceptions
from osm_ro_plugin import vimconn
from requests.exceptions import ConnectionError
import yaml

__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""Maps the openstack virtual machine status to openmano status"""
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

supportedClassificationTypes = ["legacy_flow_classifier"]

# global variables holding the timeout (in seconds) for creating and deleting volumes and servers
volume_timeout = 1800
server_timeout = 1800


def catch_any_exception(func):
    """Decorator that translates any exception raised by the wrapped method
    into a vimconn exception through vimconnector._format_exception."""

    def format_exception(*args, **kwargs):
        try:
            # kwargs must be forwarded with "**"; the previous "*kwargs" unpacked
            # only the keyword names as positional arguments
            return func(*args, **kwargs)
        except Exception as e:
            vimconnector._format_exception(e)

    return format_exception
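
# Illustrative sketch (not part of the original module): how the decorator is meant to
# be used on connector methods, so callers only ever see vimconn.* exceptions. The
# method name "some_vim_operation" is hypothetical.
#
#     class vimconnector(vimconn.VimConnector):
#         @catch_any_exception
#         def some_vim_operation(self, server_id):
#             # any novaclient/keystone/neutron error raised here is re-raised
#             # as the matching vimconn exception by _format_exception
#             return self.nova.servers.get(server_id)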


class SafeDumper(yaml.SafeDumper):
    def represent_data(self, data):
        # Openstack APIs use custom subclasses of dict and YAML safe dumper
        # is designed to not handle that (reference issue 142 of pyyaml)
        if isinstance(data, dict) and data.__class__ != dict:
            # A simple solution is to convert those items back to dicts
            data = dict(data.items())

        return super(SafeDumper, self).represent_data(data)


class vimconnector(vimconn.VimConnector):
    def __init__(
        self,
        uuid,
        name,
        tenant_id,
        tenant_name,
        url,
        url_admin=None,
        user=None,
        passwd=None,
        log_level=None,
        config={},
        persistent_info={},
    ):
        """using common constructor parameters. In this case
        'url' is the keystone authorization url,
        'url_admin' is not used
        """
        api_version = config.get("APIversion")

        if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:APIversion. "
                "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
            )

        vim_type = config.get("vim_type")

        if vim_type and vim_type not in ("vio", "VIO"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:vim_type. "
                "Allowed values are 'vio' or 'VIO'".format(vim_type)
            )

        if config.get("dataplane_net_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
            )

        if config.get("multisegment_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("multisegment_vlan_range"), "multisegment_vlan_range"
            )

        vimconn.VimConnector.__init__(
            self,
            uuid,
            name,
            tenant_id,
            tenant_name,
            url,
            url_admin,
            user,
            passwd,
            log_level,
            config,
        )

        if self.config.get("insecure") and self.config.get("ca_cert"):
            raise vimconn.VimConnException(
                "options insecure and ca_cert are mutually exclusive"
            )

        self.verify = True

        if self.config.get("insecure"):
            self.verify = False

        if self.config.get("ca_cert"):
            self.verify = self.config.get("ca_cert")

        if not url:
            raise TypeError("url param can not be NoneType")

        self.persistent_info = persistent_info
        self.availability_zone = persistent_info.get("availability_zone", None)
        self.session = persistent_info.get("session", {"reload_client": True})
        self.my_tenant_id = self.session.get("my_tenant_id")
        self.nova = self.session.get("nova")
        self.neutron = self.session.get("neutron")
        self.cinder = self.session.get("cinder")
        self.glance = self.session.get("glance")
        # self.glancev1 = self.session.get("glancev1")
        self.keystone = self.session.get("keystone")
        self.api_version3 = self.session.get("api_version3")
        self.vim_type = self.config.get("vim_type")

        if self.vim_type:
            self.vim_type = self.vim_type.upper()

        if self.config.get("use_internal_endpoint"):
            self.endpoint_type = "internalURL"
        else:
            self.endpoint_type = None

        logging.getLogger("urllib3").setLevel(logging.WARNING)
        logging.getLogger("keystoneauth").setLevel(logging.WARNING)
        logging.getLogger("novaclient").setLevel(logging.WARNING)
        self.logger = logging.getLogger("ro.vim.openstack")

        # allow security_groups to be a list or a single string
        if isinstance(self.config.get("security_groups"), str):
            self.config["security_groups"] = [self.config["security_groups"]]

        self.security_groups_id = None

        # ###### VIO Specific Changes #########
        if self.vim_type == "VIO":
            self.logger = logging.getLogger("ro.vim.vio")

        if log_level:
            self.logger.setLevel(getattr(logging, log_level))

    def __getitem__(self, index):
        """Get individual parameters.
        Throws KeyError"""
        if index == "project_domain_id":
            return self.config.get("project_domain_id")
        elif index == "user_domain_id":
            return self.config.get("user_domain_id")
        else:
            return vimconn.VimConnector.__getitem__(self, index)

    def __setitem__(self, index, value):
        """Set individual parameters and mark the session as dirty to force a connection reload.
        Throws KeyError"""
        if index == "project_domain_id":
            self.config["project_domain_id"] = value
        elif index == "user_domain_id":
            self.config["user_domain_id"] = value
        else:
            vimconn.VimConnector.__setitem__(self, index, value)

        self.session["reload_client"] = True
    def serialize(self, value):
        """Serialization of python basic types.

        In the case value is not serializable a message will be logged and a
        simple representation of the data that cannot be converted back to
        python is returned.
        """
        if isinstance(value, str):
            return value

        try:
            return yaml.dump(
                value, Dumper=SafeDumper, default_flow_style=True, width=256
            )
        except yaml.representer.RepresenterError:
            self.logger.debug(
                "The following entity cannot be serialized in YAML:\n\n%s\n\n",
                pformat(value),
                exc_info=True,
            )

            return str(value)

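    # Illustrative sketch (the class below is an assumption standing in for the dict
    # subclasses returned by the OpenStack clients): serialize() with SafeDumper
    # handles such objects, which plain yaml.safe_dump rejects.
    #
    #     class FakeOpenStackDict(dict):  # stands in for e.g. a novaclient response type
    #         pass
    #
    #     connector.serialize(FakeOpenStackDict(status="ACTIVE"))
    #     # -> '{status: ACTIVE}\n' instead of a RepresenterError
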
    def _reload_connection(self):
        """Called before any operation, it checks if credentials have changed.
        Throws keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-)
        if self.session["reload_client"]:
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from the auth_url ending, which ends with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require a specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.60"

            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            if sess.get_all_version_data(service_type="volumev2"):
                self.cinder = self.session["cinder"] = cClient.Client(
                    2,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.cinder = self.session["cinder"] = cClient.Client(
                    3,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availability zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
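
    # Illustrative sketch (values are deployment-specific assumptions, not defaults of
    # this module): the VIM "config" keys read by _reload_connection.
    #
    #     config = {
    #         "APIversion": "v3.3",        # force keystone v3 instead of guessing from the URL
    #         "microversion": "2.32",      # nova microversion, e.g. for device role tagging
    #         "region_name": "RegionOne",  # for distributed clouds (Titanium/StarlingX)
    #         "use_internal_endpoint": True,
    #         "insecure": False,
    #         "ca_cert": "/etc/ssl/certs/vim-ca.pem",  # mutually exclusive with insecure
    #     }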

    def __net_os2mano(self, net_list_dict):
        """Transform the net openstack format to mano format
        net_list_dict can be a list of dict or a single dict"""
        if type(net_list_dict) is dict:
            net_list_ = (net_list_dict,)
        elif type(net_list_dict) is list:
            net_list_ = net_list_dict
        else:
            raise TypeError("param net_list_dict must be a list or a dictionary")

        for net in net_list_:
            if net.get("provider:network_type") == "vlan":
                net["type"] = "data"
            else:
                net["type"] = "bridge"

    def __classification_os2mano(self, class_list_dict):
        """Transform the openstack format (Flow Classifier) to mano format
        (Classification) class_list_dict can be a list of dict or a single dict
        """
        if isinstance(class_list_dict, dict):
            class_list_ = [class_list_dict]
        elif isinstance(class_list_dict, list):
            class_list_ = class_list_dict
        else:
            raise TypeError("param class_list_dict must be a list or a dictionary")

        for classification in class_list_:
            id = classification.pop("id")
            name = classification.pop("name")
            description = classification.pop("description")
            project_id = classification.pop("project_id")
            tenant_id = classification.pop("tenant_id")
            original_classification = copy.deepcopy(classification)
            classification.clear()
            classification["ctype"] = "legacy_flow_classifier"
            classification["definition"] = original_classification
            classification["id"] = id
            classification["name"] = name
            classification["description"] = description
            classification["project_id"] = project_id
            classification["tenant_id"] = tenant_id

    def __sfi_os2mano(self, sfi_list_dict):
        """Transform the openstack format (Port Pair) to mano format (SFI)
        sfi_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfi_list_dict, dict):
            sfi_list_ = [sfi_list_dict]
        elif isinstance(sfi_list_dict, list):
            sfi_list_ = sfi_list_dict
        else:
            raise TypeError("param sfi_list_dict must be a list or a dictionary")

        for sfi in sfi_list_:
            sfi["ingress_ports"] = []
            sfi["egress_ports"] = []

            if sfi.get("ingress"):
                sfi["ingress_ports"].append(sfi["ingress"])

            if sfi.get("egress"):
                sfi["egress_ports"].append(sfi["egress"])

            del sfi["ingress"]
            del sfi["egress"]
            params = sfi.get("service_function_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfi["sfc_encap"] = sfc_encap
            del sfi["service_function_parameters"]

    def __sf_os2mano(self, sf_list_dict):
        """Transform the openstack format (Port Pair Group) to mano format (SF)
        sf_list_dict can be a list of dict or a single dict
        """
        if isinstance(sf_list_dict, dict):
            sf_list_ = [sf_list_dict]
        elif isinstance(sf_list_dict, list):
            sf_list_ = sf_list_dict
        else:
            raise TypeError("param sf_list_dict must be a list or a dictionary")

        for sf in sf_list_:
            del sf["port_pair_group_parameters"]
            sf["sfis"] = sf["port_pairs"]
            del sf["port_pairs"]

    def __sfp_os2mano(self, sfp_list_dict):
        """Transform the openstack format (Port Chain) to mano format (SFP)
        sfp_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfp_list_dict, dict):
            sfp_list_ = [sfp_list_dict]
        elif isinstance(sfp_list_dict, list):
            sfp_list_ = sfp_list_dict
        else:
            raise TypeError("param sfp_list_dict must be a list or a dictionary")

        for sfp in sfp_list_:
            params = sfp.pop("chain_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfp["sfc_encap"] = sfc_encap
            sfp["spi"] = sfp.pop("chain_id")
            sfp["classifications"] = sfp.pop("flow_classifiers")
            sfp["service_functions"] = sfp.pop("port_pair_groups")

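    # Illustrative sketch (field values are made up): what __sfp_os2mano does to a
    # networking-sfc Port Chain dict.
    #
    #     before: {"chain_id": 7, "chain_parameters": {"correlation": "nsh"},
    #              "flow_classifiers": ["fc-1"], "port_pair_groups": ["ppg-1"]}
    #     after:  {"spi": 7, "sfc_encap": True,
    #              "classifications": ["fc-1"], "service_functions": ["ppg-1"]}
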
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.

    @staticmethod
    def _format_exception(exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
        message_error = str(exception)
        tip = ""

        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                nvExceptions.ResourceNotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
                cExceptions.NotFound,
            ),
        ):
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
                cExceptions.ConnectionError,
            ),
        ):
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
                gl1Exceptions.BadRequest,
                cExceptions.BadRequest,
            ),
        ):
            if message_error == "OS-EXT-SRV-ATTR:host":
                tip = " (If the user does not have admin credentials, this attribute will be missing)"
                raise vimconn.VimConnInsufficientCredentials(
                    type(exception).__name__ + ": " + message_error + tip
                )

            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
                cExceptions.ClientException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, vimconn.VimConnException):
            raise exception
        else:
            logger = logging.getLogger("ro.vim.openstack")
            logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )

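    # Illustrative sketch (an assumption about typical usage, matching the pattern used
    # throughout this module): client exceptions are funneled through _format_exception
    # inside except blocks, e.g.
    #
    #     try:
    #         self._reload_connection()
    #         self.nova.servers.get(server_id)
    #     except (nvExceptions.NotFound, ConnectionError) as e:
    #         self._format_exception(e)  # raises VimConnNotFoundException / VimConnConnectionException
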
    def _get_ids_from_name(self):
        """
        Obtain ids from the names of tenant and security_groups. Store them at self.security_groups_id.
        :return: None
        """
        # get tenant_id if only tenant_name is supplied
        self._reload_connection()

        if not self.my_tenant_id:
            raise vimconn.VimConnConnectionException(
                "Error getting tenant information from name={} id={}".format(
                    self.tenant_name, self.tenant_id
                )
            )

        if self.config.get("security_groups") and not self.security_groups_id:
            # convert from name to id
            neutron_sg_list = self.neutron.list_security_groups(
                tenant_id=self.my_tenant_id
            )["security_groups"]

            self.security_groups_id = []
            for sg in self.config.get("security_groups"):
                for neutron_sg in neutron_sg_list:
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )

    def _find_nova_server(self, vm_id):
        """
        Returns the VM instance from Openstack and completes it with flavor ID
        Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
        """
        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)
            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
            server_dict = server.to_dict()
            try:
                if server_dict["flavor"].get("original_name"):
                    server_dict["flavor"]["id"] = self.nova.flavors.find(
                        name=server_dict["flavor"]["original_name"]
                    ).id
            except nClient.exceptions.NotFound as e:
                self.logger.warning(str(e.message))
            return server_dict
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def check_vim_connectivity(self):
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})

    def get_tenant_list(self, filter_dict={}):
        """Obtain tenants of VIM
        filter_dict can contain the following keys:
            name: filter by tenant name
            id: filter by tenant uuid/id
            <other VIM specific>
        Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
        """
        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
        try:
            self._reload_connection()

            if self.api_version3:
                project_class_list = self.keystone.projects.list(
                    name=filter_dict.get("name")
                )
            else:
                project_class_list = self.keystone.tenants.findall(**filter_dict)

            project_list = []

            for project in project_class_list:
                if filter_dict.get("id") and filter_dict["id"] != project.id:
                    continue

                project_list.append(project.to_dict())

            return project_list
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_tenant(self, tenant_name, tenant_description):
        """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
        self.logger.debug("Adding a new tenant name: %s", tenant_name)
        try:
            self._reload_connection()

            if self.api_version3:
                project = self.keystone.projects.create(
                    tenant_name,
                    self.config.get("project_domain_id", "default"),
                    description=tenant_description,
                    is_domain=False,
                )
            else:
                project = self.keystone.tenants.create(tenant_name, tenant_description)

            return project.id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_tenant(self, tenant_id):
        """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
        self.logger.debug("Deleting tenant %s from VIM", tenant_id)
        try:
            self._reload_connection()

            if self.api_version3:
                self.keystone.projects.delete(tenant_id)
            else:
                self.keystone.tenants.delete(tenant_id)

            return tenant_id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_network(
        self,
        net_name,
        net_type,
        ip_profile=None,
        shared=False,
        provider_network_profile=None,
    ):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data': underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/used by other tenants/organizations
            'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
                                                             physical-network: physnet-label}
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
        )
        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))

        try:
            vlan = None

            if provider_network_profile:
                vlan = provider_network_profile.get("segmentation-id")

            new_net = None
            created_items = {}
            self._reload_connection()
            network_dict = {"name": net_name, "admin_state_up": True}

            if net_type in ("data", "ptp") or provider_network_profile:
                provider_physical_network = None

                if provider_network_profile and provider_network_profile.get(
                    "physical-network"
                ):
                    provider_physical_network = provider_network_profile.get(
                        "physical-network"
                    )

                # provider-network must be one of the dataplane_physical_net entries if this is a list.
                # If it is a string or not declared, just skip the check
                if (
                    isinstance(
                        self.config.get("dataplane_physical_net"), (tuple, list)
                    )
                    and provider_physical_network
                    not in self.config["dataplane_physical_net"]
                ):
                    raise vimconn.VimConnConflictException(
                        "Invalid parameter 'provider-network:physical-network' "
                        "for network creation. '{}' is not one of the declared "
                        "list at VIM_config:dataplane_physical_net".format(
                            provider_physical_network
                        )
                    )

                # use the default dataplane_physical_net
                if not provider_physical_network:
                    provider_physical_network = self.config.get(
                        "dataplane_physical_net"
                    )

                # if it is a non-empty list, use the first value. If it is a string, use the value directly
                if (
                    isinstance(provider_physical_network, (tuple, list))
                    and provider_physical_network
                ):
                    provider_physical_network = provider_physical_network[0]

                if not provider_physical_network:
                    raise vimconn.VimConnConflictException(
                        "missing information needed for underlay networks. Provide "
                        "'dataplane_physical_net' configuration at VIM or use the NS "
                        "instantiation parameter 'provider-network.physical-network'"
                        " for the VLD"
                    )

                if not self.config.get("multisegment_support"):
                    network_dict[
                        "provider:physical_network"
                    ] = provider_physical_network

                    if (
                        provider_network_profile
                        and "network-type" in provider_network_profile
                    ):
                        network_dict[
                            "provider:network_type"
                        ] = provider_network_profile["network-type"]
                    else:
                        network_dict["provider:network_type"] = self.config.get(
                            "dataplane_network_type", "vlan"
                        )

                    if vlan:
                        network_dict["provider:segmentation_id"] = vlan
                else:
                    # Multi-segment case
                    segment_list = []
                    segment1_dict = {
                        "provider:physical_network": "",
                        "provider:network_type": "vxlan",
                    }
                    segment_list.append(segment1_dict)
                    segment2_dict = {
                        "provider:physical_network": provider_physical_network,
                        "provider:network_type": "vlan",
                    }

                    if vlan:
                        segment2_dict["provider:segmentation_id"] = vlan
                        # keep the VLAN id at hand for the l2gw connections created below;
                        # previously vlanID was left undefined on this branch
                        vlanID = vlan
                    elif self.config.get("multisegment_vlan_range"):
                        vlanID = self._generate_multisegment_vlanID()
                        segment2_dict["provider:segmentation_id"] = vlanID

                    # else
                    #     raise vimconn.VimConnConflictException(
                    #         "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
                    #         network")
                    segment_list.append(segment2_dict)
                    network_dict["segments"] = segment_list

                # VIO Specific Changes. It needs a concrete VLAN
                if self.vim_type == "VIO" and vlan is None:
                    if self.config.get("dataplane_net_vlan_range") is None:
                        raise vimconn.VimConnConflictException(
                            "You must provide 'dataplane_net_vlan_range' in format "
                            "[start_ID - end_ID] at VIM_config for creating underlay "
                            "networks"
                        )

                    network_dict["provider:segmentation_id"] = self._generate_vlanID()

            network_dict["shared"] = shared

            if self.config.get("disable_network_port_security"):
                network_dict["port_security_enabled"] = False

            if self.config.get("neutron_availability_zone_hints"):
                hints = self.config.get("neutron_availability_zone_hints")

                if isinstance(hints, str):
                    hints = [hints]

                network_dict["availability_zone_hints"] = hints

            new_net = self.neutron.create_network({"network": network_dict})
            # print new_net
            # create subnetwork, even if there is no profile

            if not ip_profile:
                ip_profile = {}

            if not ip_profile.get("subnet_address"):
                # Fake subnet is required
                subnet_rand = random.SystemRandom().randint(0, 255)
                ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)

            if "ip_version" not in ip_profile:
                ip_profile["ip_version"] = "IPv4"

            subnet = {
                "name": net_name + "-subnet",
                "network_id": new_net["network"]["id"],
                "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
                "cidr": ip_profile["subnet_address"],
            }

            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
            if ip_profile.get("gateway_address"):
                subnet["gateway_ip"] = ip_profile["gateway_address"]
            else:
                subnet["gateway_ip"] = None

            if ip_profile.get("dns_address"):
                subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")

            if "dhcp_enabled" in ip_profile:
                subnet["enable_dhcp"] = (
                    False
                    if ip_profile["dhcp_enabled"] == "false"
                    or ip_profile["dhcp_enabled"] is False
                    else True
                )

            if ip_profile.get("dhcp_start_address"):
                subnet["allocation_pools"] = []
                subnet["allocation_pools"].append(dict())
                subnet["allocation_pools"][0]["start"] = ip_profile[
                    "dhcp_start_address"
                ]

            if ip_profile.get("dhcp_count"):
                # parts = ip_profile["dhcp_start_address"].split(".")
                # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
                ip_int += ip_profile["dhcp_count"] - 1
                ip_str = str(netaddr.IPAddress(ip_int))
                subnet["allocation_pools"][0]["end"] = ip_str

            if (
                ip_profile.get("ipv6_address_mode")
                and ip_profile["ip_version"] != "IPv4"
            ):
                subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
                # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
                # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
                subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]

            # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
            self.neutron.create_subnet({"subnet": subnet})

            if net_type == "data" and self.config.get("multisegment_support"):
                if self.config.get("l2gw_support"):
                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
                    for l2gw in l2gw_list:
                        l2gw_conn = {
                            "l2_gateway_id": l2gw["id"],
                            "network_id": new_net["network"]["id"],
                            "segmentation_id": str(vlanID),
                        }
                        new_l2gw_conn = self.neutron.create_l2_gateway_connection(
                            {"l2_gateway_connection": l2gw_conn}
                        )
                        created_items[
                            "l2gwconn:"
                            + str(new_l2gw_conn["l2_gateway_connection"]["id"])
                        ] = True

            return new_net["network"]["id"], created_items
        except Exception as e:
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except (neExceptions.ConnectionFailed, ConnectionError) as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )
                    self._format_exception(e2)
                except Exception as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )

            if new_net:
                self.neutron.delete_network(new_net["network"]["id"])

            self._format_exception(e)

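    # Illustrative sketch (all values are made-up assumptions): a minimal ip_profile
    # accepted by new_network.
    #
    #     net_id, created_items = connector.new_network(
    #         net_name="mgmt-net",
    #         net_type="bridge",
    #         ip_profile={
    #             "ip_version": "IPv4",
    #             "subnet_address": "10.10.0.0/24",
    #             "gateway_address": "10.10.0.1",
    #             "dhcp_enabled": True,
    #             "dhcp_start_address": "10.10.0.10",
    #             "dhcp_count": 100,
    #         },
    #     )
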
    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM
        Filter_dict can be:
            name: network name
            id: network uuid
            shared: boolean
            tenant_id: tenant
            admin_state_up: boolean
            status: 'ACTIVE'
        Returns the network list of dictionaries
        """
        self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
        try:
            self._reload_connection()
            filter_dict_os = filter_dict.copy()

            if self.api_version3 and "tenant_id" in filter_dict_os:
                # TODO check
                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

            net_dict = self.neutron.list_networks(**filter_dict_os)
            net_list = net_dict["networks"]
            self.__net_os2mano(net_list)

            return net_list
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_network(self, net_id):
        """Obtain details of network from VIM
        Returns the network information from a network id"""
        self.logger.debug("Getting tenant network %s from VIM", net_id)
        filter_dict = {"id": net_id}
        net_list = self.get_network_list(filter_dict)

        if len(net_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Network '{}' not found".format(net_id)
            )
        elif len(net_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one network with this criteria"
            )

        net = net_list[0]
        subnets = []
        for subnet_id in net.get("subnets", ()):
            try:
                subnet = self.neutron.show_subnet(subnet_id)
            except Exception as e:
                self.logger.error(
                    "osconnector.get_network(): Error getting subnet %s %s"
                    % (net_id, str(e))
                )
                subnet = {"id": subnet_id, "fault": str(e)}

            subnets.append(subnet)

        net["subnets"] = subnets
        net["encapsulation"] = net.get("provider:network_type")
        net["encapsulation_type"] = net.get("provider:network_type")
        net["segmentation_id"] = net.get("provider:segmentation_id")
        net["encapsulation_id"] = net.get("provider:segmentation_id")

        return net

    @catch_any_exception
    def delete_network(self, net_id, created_items=None):
        """
        Removes a tenant network from VIM and its associated elements
        :param net_id: VIM identifier of the network, provided by method new_network
        :param created_items: dictionary with extra items to be deleted. provided by method new_network
        Returns the network identifier or raises an exception upon error or when network is not found
        """
        self.logger.debug("Deleting network '%s' from VIM", net_id)

        if created_items is None:
            created_items = {}

        try:
            self._reload_connection()
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")
                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except (neExceptions.ConnectionFailed, ConnectionError) as e:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e).__name__, e
                        )
                    )
                    self._format_exception(e)
                except Exception as e:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e).__name__, e
                        )
                    )

            # delete VM ports attached to this network before the network
            ports = self.neutron.list_ports(network_id=net_id)
            for p in ports["ports"]:
                try:
                    self.neutron.delete_port(p["id"])
                except (neExceptions.ConnectionFailed, ConnectionError) as e:
                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))
                    # If there is a connection error, it raises.
                    self._format_exception(e)
                except Exception as e:
                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))

            self.neutron.delete_network(net_id)

            return net_id
        except (neExceptions.NetworkNotFoundClient, neExceptions.NotFound) as e:
            # If the network to be deleted is not found, it does not raise.
            self.logger.warning(
                f"Error deleting network: {net_id} is not found, {str(e)}"
            )
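
    # Illustrative sketch (the id is made up): the created_items dict produced by
    # new_network and consumed here uses "<kind>:<vim-id>" keys, e.g.
    #
    #     created_items = {"l2gwconn:8f2a51de-0000-4c1f-9e1b-000000000001": True}
    #     connector.delete_network(net_id, created_items=created_items)
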
    def refresh_nets_status(self, net_list):
        """Get the status of the networks
        Params: the list of network identifiers
        Returns a dictionary with:
            net_id:        #VIM id of this network
            status:        #Mandatory. Text with one of:
                           #  DELETED (not found at vim)
                           #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                           #  OTHER (Vim reported other status not understood)
                           #  ERROR (VIM indicates an ERROR status)
                           #  ACTIVE, INACTIVE, DOWN (admin down),
                           #  BUILD (on building process)
                           #
            error_msg:     #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:      #Text with plain information obtained from vim (yaml.safe_dump)
        """
        net_dict = {}

        for net_id in net_list:
            net = {}

            try:
                net_vim = self.get_network(net_id)

                if net_vim["status"] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[net_vim["status"]]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim["status"]

                if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                    net["status"] = "DOWN"

                net["vim_info"] = self.serialize(net_vim)

                if net_vim.get("fault"):  # TODO
                    net["error_msg"] = str(net_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "DELETED"
                net["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "VIM_ERROR"
                net["error_msg"] = str(e)

            net_dict[net_id] = net

        return net_dict

    def get_flavor(self, flavor_id):
        """Obtain flavor details from the VIM. Returns the flavor dict details"""
        self.logger.debug("Getting flavor '%s'", flavor_id)
        try:
            self._reload_connection()
            flavor = self.nova.flavors.find(id=flavor_id)

            return flavor.to_dict()
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain the flavor id that matches the flavor description
        Returns the flavor_id or raises a VimConnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closest flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a
        VimConnNotFoundException is raised
        """
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
                # if len(numas) > 1:
                #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
                # numa=numas[0]
                # numas = extended.get("numas")

            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                if epa:
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    @staticmethod
    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
        """Process resource quota and fill up extra_specs.
        Args:
            quota       (dict):  Keeps the quota of resources
            prefix      (str):   Prefix
            extra_specs (dict):  Dict to be filled to be used during flavor creation

        """
        if "limit" in quota:
            extra_specs["quota:" + prefix + "_limit"] = quota["limit"]

        if "reserve" in quota:
            extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]

        if "shares" in quota:
            extra_specs["quota:" + prefix + "_shares_level"] = "custom"
            extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]

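    # Illustrative sketch (quota values are made up): the mapping performed by
    # process_resource_quota for a cpu quota.
    #
    #     extra_specs = {}
    #     vimconnector.process_resource_quota(
    #         {"limit": 10000, "reserve": 1000, "shares": 2}, "cpu", extra_specs
    #     )
    #     # extra_specs == {"quota:cpu_limit": 10000,
    #     #                 "quota:cpu_reservation": 1000,
    #     #                 "quota:cpu_shares_level": "custom",
    #     #                 "quota:cpu_shares_share": 2}
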
    @staticmethod
    def process_numa_memory(
        numa: dict, node_id: Optional[int], extra_specs: dict
    ) -> None:
        """Set the memory in extra_specs.
        Args:
            numa        (dict):  A dictionary which includes numa information
            node_id     (int):   ID of numa node
            extra_specs (dict):  To be filled.

        """
        if not numa.get("memory"):
            return
        memory_mb = numa["memory"] * 1024
        memory = "hw:numa_mem.{}".format(node_id)
        extra_specs[memory] = int(memory_mb)

    @staticmethod
    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
        """Set the cpu in extra_specs.
        Args:
            numa        (dict):  A dictionary which includes numa information
            node_id     (int):   ID of numa node
            extra_specs (dict):  To be filled.

        """
        if not numa.get("vcpu"):
            return
        vcpu = numa["vcpu"]
        cpu = "hw:numa_cpus.{}".format(node_id)
        vcpu = ",".join(map(str, vcpu))
        extra_specs[cpu] = vcpu

    @staticmethod
    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has paired-threads.
        Args:
            numa        (dict):  A dictionary which includes numa information
            extra_specs (dict):  To be filled.

        Returns:
            threads     (int):   Number of virtual cpus

        """
        if not numa.get("paired-threads"):
            return

        # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
        threads = numa["paired-threads"] * 2
        extra_specs["hw:cpu_thread_policy"] = "require"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return threads

    @staticmethod
    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has cores.
        Args:
            numa        (dict):  A dictionary which includes numa information
            extra_specs (dict):  To be filled.

        Returns:
            cores       (int):   Number of virtual cpus

        """
        # cpu_thread_policy "isolate" implies that the host must not have an SMT
        # architecture, or a non-SMT architecture will be emulated
        if not numa.get("cores"):
            return

        cores = numa["cores"]
        extra_specs["hw:cpu_thread_policy"] = "isolate"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return cores

    @staticmethod
    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has threads.
        Args:
            numa        (dict):  A dictionary which includes numa information
            extra_specs (dict):  To be filled.

        Returns:
            threads     (int):   Number of virtual cpus

        """
        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
        if not numa.get("threads"):
            return

        threads = numa["threads"]
        extra_specs["hw:cpu_thread_policy"] = "prefer"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return threads

    def _process_numa_parameters_of_flavor(
        self, numas: List, extra_specs: Dict
    ) -> None:
        """Process numa parameters and fill up extra_specs.

        Args:
            numas       (list):  List of dictionaries which include numa information
            extra_specs (dict):  To be filled.

        """
        numa_nodes = len(numas)
        extra_specs["hw:numa_nodes"] = str(numa_nodes)
        cpu_cores, cpu_threads = 0, 0

        if self.vim_type == "VIO":
            self.process_vio_numa_nodes(numa_nodes, extra_specs)

        for numa in numas:
            if "id" in numa:
                node_id = numa["id"]
                # overwrite ram and vcpus
                # check if key "memory" is present in numa else use ram value at flavor
                self.process_numa_memory(numa, node_id, extra_specs)
                self.process_numa_vcpu(numa, node_id, extra_specs)

            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
            extra_specs["hw:cpu_sockets"] = str(numa_nodes)

            if "paired-threads" in numa:
                threads = self.process_numa_paired_threads(numa, extra_specs)
                cpu_threads += threads
            elif "cores" in numa:
                cores = self.process_numa_cores(numa, extra_specs)
                cpu_cores += cores
            elif "threads" in numa:
                threads = self.process_numa_threads(numa, extra_specs)
                cpu_threads += threads

        if cpu_cores:
            extra_specs["hw:cpu_cores"] = str(cpu_cores)
        if cpu_threads:
            extra_specs["hw:cpu_threads"] = str(cpu_threads)

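    # Illustrative sketch (numa values are made up): a "numas" list as consumed by
    # _process_numa_parameters_of_flavor and the resulting extra_specs keys.
    #
    #     numas = [{"id": 0, "memory": 4, "vcpu": [0, 1], "paired-threads": 2}]
    #     # yields, among others:
    #     #   hw:numa_nodes = "1"          hw:numa_mem.0 = 4096
    #     #   hw:numa_cpus.0 = "0,1"       hw:cpu_thread_policy = "require"
    #     #   hw:cpu_policy = "dedicated"  hw:cpu_threads = "4"
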
    @staticmethod
    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
        """According to the number of numa nodes, updates the extra_specs for VIO.

        Args:
            numa_nodes  (int):   Number of numa nodes
            extra_specs (dict):  Extra specs dict to be updated

        """
        # If there are several numas, we do not define specific affinity.
        extra_specs["vmware:latency_sensitivity_level"] = "high"

    def _change_flavor_name(
        self, name: str, name_suffix: int, flavor_data: dict
    ) -> str:
        """Change the flavor name if the name already exists.

        Args:
            name        (str):   Flavor name to be checked
            name_suffix (int):   Suffix to be appended to name
            flavor_data (dict):  Flavor dict

        Returns:
            name        (str):   New flavor name to be used

        """
        # Get used names
        fl = self.nova.flavors.list()
        fl_names = [f.name for f in fl]

        while name in fl_names:
            name_suffix += 1
            name = flavor_data["name"] + "-" + str(name_suffix)

        return name

    def _process_extended_config_of_flavor(
        self, extended: dict, extra_specs: dict
    ) -> None:
        """Process the extended dict to fill up extra_specs.

        Args:
            extended    (dict):  Keeps the extra specification of flavor
            extra_specs (dict):  Dict to be filled to be used during flavor creation

        """
        quotas = {
            "cpu-quota": "cpu",
            "mem-quota": "memory",
            "vif-quota": "vif",
            "disk-io-quota": "disk_io",
        }

        page_sizes = {
            "LARGE": "large",
            "SMALL": "small",
            "SIZE_2MB": "2MB",
            "SIZE_1GB": "1GB",
            "PREFER_LARGE": "any",
        }

        policies = {
            "cpu-pinning-policy": "hw:cpu_policy",
            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
            "mem-policy": "hw:numa_mempolicy",
        }

        numas = extended.get("numas")
        if numas:
            self._process_numa_parameters_of_flavor(numas, extra_specs)

        for quota, item in quotas.items():
            if quota in extended.keys():
                self.process_resource_quota(extended.get(quota), item, extra_specs)

        # Set the mempage size as specified in the descriptor
        if extended.get("mempage-size"):
            if extended["mempage-size"] in page_sizes.keys():
                extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
            else:
                # Normally, validations in NBI should not allow this condition.
                self.logger.debug(
                    "Invalid mempage-size %s. Will be ignored",
                    extended.get("mempage-size"),
                )

        for policy, hw_policy in policies.items():
            if extended.get(policy):
                extra_specs[hw_policy] = extended[policy].lower()

    @staticmethod
    def _get_flavor_details(flavor_data: dict) -> Tuple:
        """Returns the details of a flavor
        Args:
            flavor_data (dict):  Dictionary that includes required flavor details

        Returns:
            ram, vcpus, extra_specs, extended (tuple):  Main items of required flavor

        """
        return (
            flavor_data.get("ram", 64),
            flavor_data.get("vcpus", 1),
            {},
            flavor_data.get("extended"),
        )

    @catch_any_exception
    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        If change_name_if_used is True, it will change the name in case of conflict,
        because name repetition is not supported.

        Args:
            flavor_data         (dict):  Flavor details to be processed
            change_name_if_used (bool):  Change name in case of conflict

        Returns:
            flavor_id           (str):   flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0
        name = flavor_data["name"]
        while retry < max_retries:
            retry += 1
            try:
                self._reload_connection()

                if change_name_if_used:
                    name = self._change_flavor_name(name, name_suffix, flavor_data)

                ram, vcpus, extra_specs, extended = self._get_flavor_details(
                    flavor_data
                )
                if extended:
                    self._process_extended_config_of_flavor(extended, extra_specs)

                # Create flavor
                new_flavor = self.nova.flavors.create(
                    name=name,
                    ram=ram,
                    vcpus=vcpus,
                    disk=flavor_data.get("disk", 0),
                    ephemeral=flavor_data.get("ephemeral", 0),
                    swap=flavor_data.get("swap", 0),
                    is_public=flavor_data.get("is_public", True),
                )

                # Add metadata
                if extra_specs:
                    new_flavor.set_keys(extra_specs)

                return new_flavor.id
            except nvExceptions.Conflict as e:
                if change_name_if_used and retry < max_retries:
                    continue

                self._format_exception(e)

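    # Illustrative sketch (all values are made up): a flavor_data dict as accepted by
    # new_flavor, including an "extended" EPA section.
    #
    #     flavor_id = connector.new_flavor({
    #         "name": "vnf-small",
    #         "ram": 4096,
    #         "vcpus": 2,
    #         "disk": 10,
    #         "extended": {
    #             "mempage-size": "LARGE",
    #             "cpu-quota": {"limit": 10000, "shares": 2},
    #             "numas": [{"id": 0, "memory": 4, "vcpu": [0, 1]}],
    #         },
    #     })
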
    @catch_any_exception
    def delete_flavor(self, flavor_id):
        """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
        try:
            self._reload_connection()
            self.nova.flavors.delete(flavor_id)

            return flavor_id
        except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
            # If the flavor is not found, it does not raise.
            self.logger.warning(
                f"Error deleting flavor: {flavor_id} is not found, {str(e.message)}"
            )

1648 def new_image(self, image_dict):
1649 """
1650 Adds a tenant image to VIM. imge_dict is a dictionary with:
1651 name: name
1652 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1653 location: path or URI
1654 public: "yes" or "no"
1655 metadata: metadata of the image
1656 Returns the image_id
1657 """
1658 retry = 0
1659 max_retries = 3
1660
1661 while retry < max_retries:
1662 retry += 1
1663 try:
1664 self._reload_connection()
1665
1666 # determine format http://docs.openstack.org/developer/glance/formats.html
1667 if "disk_format" in image_dict:
1668 disk_format = image_dict["disk_format"]
1669 else: # autodiscover based on extension
1670 if image_dict["location"].endswith(".qcow2"):
1671 disk_format = "qcow2"
1672 elif image_dict["location"].endswith(".vhd"):
1673 disk_format = "vhd"
1674 elif image_dict["location"].endswith(".vmdk"):
1675 disk_format = "vmdk"
1676 elif image_dict["location"].endswith(".vdi"):
1677 disk_format = "vdi"
1678 elif image_dict["location"].endswith(".iso"):
1679 disk_format = "iso"
1680 elif image_dict["location"].endswith(".aki"):
1681 disk_format = "aki"
1682 elif image_dict["location"].endswith(".ari"):
1683 disk_format = "ari"
1684 elif image_dict["location"].endswith(".ami"):
1685 disk_format = "ami"
1686 else:
1687 disk_format = "raw"
1688
1689 self.logger.debug(
1690 "new_image: '%s' loading from '%s'",
1691 image_dict["name"],
1692 image_dict["location"],
1693 )
1694 if self.vim_type == "VIO":
1695 container_format = "bare"
1696 if "container_format" in image_dict:
1697 container_format = image_dict["container_format"]
1698
1699 new_image = self.glance.images.create(
1700 name=image_dict["name"],
1701 container_format=container_format,
1702 disk_format=disk_format,
1703 )
1704 else:
1705 new_image = self.glance.images.create(name=image_dict["name"])
1706
1707 if image_dict["location"].startswith("http"):
1708 # TODO there is not a method to direct download. It must be downloaded locally with requests
1709 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1710 else: # local path
1711 with open(image_dict["location"]) as fimage:
1712 self.glance.images.upload(new_image.id, fimage)
1713 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1714 # image_dict.get("public","yes")=="yes",
1715 # container_format="bare", data=fimage, disk_format=disk_format)
1716
1717 metadata_to_load = image_dict.get("metadata")
1718
1719 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1720 # for openstack
1721 if self.vim_type == "VIO":
1722 metadata_to_load["upload_location"] = image_dict["location"]
1723 else:
1724 metadata_to_load["location"] = image_dict["location"]
1725
1726 self.glance.images.update(new_image.id, **metadata_to_load)
1727
1728 return new_image.id
1729 except (
1730 HTTPException,
1731 gl1Exceptions.HTTPException,
1732 gl1Exceptions.CommunicationError,
1733 ConnectionError,
1734 ) as e:
1735 if retry == max_retries:
1736 continue
1737
1738 self._format_exception(e)
1739 except IOError as e: # can not open the file
1740 raise vimconn.VimConnConnectionException(
1741 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1742 http_code=vimconn.HTTP_Bad_Request,
1743 )
1744 except Exception as e:
1745 self._format_exception(e)
1746
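# Illustrative sketch (not executed; names and paths are made up): a minimal
# image_dict accepted by new_image() above. disk_format may be omitted so it is
# autodiscovered from the file extension, and only local paths are supported
# (HTTP locations raise VimConnNotImplemented).
#
#   image_dict = {
#       "name": "ubuntu-22.04",
#       "location": "/var/images/ubuntu-22.04.qcow2",
#       "public": "yes",
#       "metadata": {"os_distro": "ubuntu"},
#   }
#   image_id = vim.new_image(image_dict)  # "vim" is an assumed vimconnector instance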
1747 @catch_any_exception
1748 def delete_image(self, image_id):
1749 """Deletes a tenant image from openstack VIM. Returns the old id"""
1750 try:
1751 self._reload_connection()
1752 self.glance.images.delete(image_id)
1753
1754 return image_id
1755 except gl1Exceptions.NotFound as e:
1756 # If image is not found, it does not raise.
1757 self.logger.warning(
1758 f"Error deleting image: {image_id} is not found, {str(e)}"
1759 )
1760
1761 @catch_any_exception
1762 def get_image_id_from_path(self, path):
1763 """Get the image id from image path in the VIM database. Returns the image_id"""
1764 self._reload_connection()
1765 images = self.glance.images.list()
1766
1767 for image in images:
1768 if image.metadata.get("location") == path:
1769 return image.id
1770
1771 raise vimconn.VimConnNotFoundException(
1772 "image with location '{}' not found".format(path)
1773 )
1774
1775 def get_image_list(self, filter_dict={}):
1776 """Obtain tenant images from VIM
1777 Filter_dict can be:
1778 id: image id
1779 name: image name
1780 checksum: image checksum
1781 Returns the image list of dictionaries:
1782 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1783 List can be empty
1784 """
1785 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1786 try:
1787 self._reload_connection()
1788 # The listing is not filtered server-side; filter locally by the supported fields: name, id and checksum
1790 image_list = self.glance.images.list()
1791 filtered_list = []
1792
1793 for image in image_list:
1794 try:
1795 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1796 continue
1797
1798 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1799 continue
1800
1801 if (
1802 filter_dict.get("checksum")
1803 and image["checksum"] != filter_dict["checksum"]
1804 ):
1805 continue
1806
1807 filtered_list.append(image.copy())
1808 except gl1Exceptions.HTTPNotFound:
1809 pass
1810
1811 return filtered_list
1812
1813 except (
1814 ksExceptions.ClientException,
1815 nvExceptions.ClientException,
1816 gl1Exceptions.CommunicationError,
1817 ConnectionError,
1818 ) as e:
1819 self._format_exception(e)
1820
1821 def __wait_for_vm(self, vm_id, status):
1822 """wait until vm is in the desired status and return True.
1823 If the VM gets in ERROR status, return false.
1824 If the timeout is reached generate an exception"""
1825 elapsed_time = 0
1826 while elapsed_time < server_timeout:
1827 vm_status = self.nova.servers.get(vm_id).status
1828
1829 if vm_status == status:
1830 return True
1831
1832 if vm_status == "ERROR":
1833 return False
1834
1835 time.sleep(5)
1836 elapsed_time += 5
1837
1838 # if we exceeded the timeout rollback
1839 if elapsed_time >= server_timeout:
1840 raise vimconn.VimConnException(
1841 "Timeout waiting for instance " + vm_id + " to get " + status,
1842 http_code=vimconn.HTTP_Request_Timeout,
1843 )
1844
1845 def _get_openstack_availablity_zones(self):
1846 """
1847 Get the availability zones available from OpenStack.
1848 :return:
1849 """
1850 try:
1851 openstack_availability_zone = self.nova.availability_zones.list()
1852 openstack_availability_zone = [
1853 str(zone.zoneName)
1854 for zone in openstack_availability_zone
1855 if zone.zoneName != "internal"
1856 ]
1857
1858 return openstack_availability_zone
1859 except Exception:
1860 return None
1861
1862 def _set_availablity_zones(self):
1863 """
1864 Set the VIM availability zone.
1865 :return:
1866 """
1867 if "availability_zone" in self.config:
1868 vim_availability_zones = self.config.get("availability_zone")
1869
1870 if isinstance(vim_availability_zones, str):
1871 self.availability_zone = [vim_availability_zones]
1872 elif isinstance(vim_availability_zones, list):
1873 self.availability_zone = vim_availability_zones
1874 else:
1875 self.availability_zone = self._get_openstack_availablity_zones()
1876
1877 def _get_vm_availability_zone(
1878 self, availability_zone_index, availability_zone_list
1879 ):
1880 """
1881 Return the availability zone to be used by the created VM.
1882 :return: The VIM availability zone to be used or None
1883 """
1884 if availability_zone_index is None:
1885 if not self.config.get("availability_zone"):
1886 return None
1887 elif isinstance(self.config.get("availability_zone"), str):
1888 return self.config["availability_zone"]
1889 else:
1890 # TODO consider using a different parameter at config for default AV and AV list match
1891 return self.config["availability_zone"][0]
1892
1893 vim_availability_zones = self.availability_zone
1894 # check if the VIM offers enough availability zones for those described in the VNFD
1895 if vim_availability_zones and len(availability_zone_list) <= len(
1896 vim_availability_zones
1897 ):
1898 # check whether all NFV AZ names match VIM AZ names
1899 match_by_index = False
1900 for av in availability_zone_list:
1901 if av not in vim_availability_zones:
1902 match_by_index = True
1903 break
1904
1905 if match_by_index:
1906 return vim_availability_zones[availability_zone_index]
1907 else:
1908 return availability_zone_list[availability_zone_index]
1909 else:
1910 raise vimconn.VimConnConflictException(
1911 "No enough availability zones at VIM for this deployment"
1912 )
1913
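# Illustrative walk-through of the selection above (sketch; zone names are made
# up). Assume the VIM exposes self.availability_zone == ["nova", "zone-a"]:
#   - availability_zone_list == ["nova", "zone-a"]: every requested name exists
#     at the VIM, so index 1 returns the requested name "zone-a".
#   - availability_zone_list == ["az-edge", "az-core"]: "az-edge" is unknown to
#     the VIM, so matching falls back to position and index 1 returns the
#     VIM's second zone, "zone-a".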
1914 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1915 """Fill up the security_groups in the port_dict.
1916
1917 Args:
1918 net (dict): Network details
1919 port_dict (dict): Port details
1920
1921 """
1922 if (
1923 self.config.get("security_groups")
1924 and net.get("port_security") is not False
1925 and not self.config.get("no_port_security_extension")
1926 ):
1927 if not self.security_groups_id:
1928 self._get_ids_from_name()
1929
1930 port_dict["security_groups"] = self.security_groups_id
1931
1932 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1933 """Fill up the network binding depending on network type in the port_dict.
1934
1935 Args:
1936 net (dict): Network details
1937 port_dict (dict): Port details
1938
1939 """
1940 if not net.get("type"):
1941 raise vimconn.VimConnException("Type is missing in the network details.")
1942
1943 if net["type"] == "virtual":
1944 pass
1945
1946 # For VF
1947 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1948 port_dict["binding:vnic_type"] = "direct"
1949
1950 # VIO-specific changes
1951 if self.vim_type == "VIO":
1952 # Need to create port with port_security_enabled = False and no-security-groups
1953 port_dict["port_security_enabled"] = False
1954 port_dict["provider_security_groups"] = []
1955 port_dict["security_groups"] = []
1956
1957 else:
1958 # For PT PCI-PASSTHROUGH
1959 port_dict["binding:vnic_type"] = "direct-physical"
1960
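# Illustrative summary of the binding applied above (sketch, not executed):
#   net["type"] == "virtual"              -> plain virtio/OVS port (no binding key)
#   net["type"] in ("VF", "SR-IOV")       -> port_dict["binding:vnic_type"] = "direct"
#   anything else (PT / PCI-PASSTHROUGH)  -> port_dict["binding:vnic_type"] = "direct-physical"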
1961 @staticmethod
1962 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1963 """Set the "ip" parameter in net dictionary.
1964
1965 Args:
1966 new_port (dict): New created port
1967 net (dict): Network details
1968
1969 """
1970 fixed_ips = new_port["port"].get("fixed_ips")
1971
1972 if fixed_ips:
1973 net["ip"] = fixed_ips[0].get("ip_address")
1974 else:
1975 net["ip"] = None
1976
1977 @staticmethod
1978 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1979 """Fill up the mac_address and fixed_ips in port_dict.
1980
1981 Args:
1982 net (dict): Network details
1983 port_dict (dict): Port details
1984
1985 """
1986 if net.get("mac_address"):
1987 port_dict["mac_address"] = net["mac_address"]
1988
1989 ip_dual_list = []
1990 if ip_list := net.get("ip_address"):
1991 if not isinstance(ip_list, list):
1992 ip_list = [ip_list]
1993 for ip in ip_list:
1994 ip_dict = {"ip_address": ip}
1995 ip_dual_list.append(ip_dict)
1996 port_dict["fixed_ips"] = ip_dual_list
1997 # TODO add "subnet_id": <subnet_id>
1998
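# Illustrative input/output for the helper above (sketch; addresses are made up):
#   net = {"mac_address": "fa:16:3e:00:00:01", "ip_address": ["10.0.0.5", "2001:db8::5"]}
# yields
#   port_dict["mac_address"] = "fa:16:3e:00:00:01"
#   port_dict["fixed_ips"] = [{"ip_address": "10.0.0.5"}, {"ip_address": "2001:db8::5"}]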
1999 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
2000 """Create new port using neutron.
2001
2002 Args:
2003 port_dict (dict): Port details
2004 created_items (dict): All created items
2005 net (dict): Network details
2006
2007 Returns:
2008 new_port (dict): New created port
2009
2010 """
2011 new_port = self.neutron.create_port({"port": port_dict})
2012 created_items["port:" + str(new_port["port"]["id"])] = True
2013 net["mac_address"] = new_port["port"]["mac_address"]
2014 net["vim_id"] = new_port["port"]["id"]
2015
2016 return new_port
2017
2018 def _create_port(
2019 self, net: dict, name: str, created_items: dict
2020 ) -> Tuple[dict, dict]:
2021 """Create port using net details.
2022
2023 Args:
2024 net (dict): Network details
2025 name (str): Name to be used as network name if net dict does not include name
2026 created_items (dict): All created items
2027
2028 Returns:
2029 new_port, port New created port, port dictionary
2030
2031 """
2032
2033 port_dict = {
2034 "network_id": net["net_id"],
2035 "name": net.get("name"),
2036 "admin_state_up": True,
2037 }
2038
2039 if not port_dict["name"]:
2040 port_dict["name"] = name
2041
2042 self._prepare_port_dict_security_groups(net, port_dict)
2043
2044 self._prepare_port_dict_binding(net, port_dict)
2045
2046 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
2047
2048 new_port = self._create_new_port(port_dict, created_items, net)
2049
2050 vimconnector._set_fixed_ip(new_port, net)
2051
2052 port = {"port-id": new_port["port"]["id"]}
2053
# Compare the microversion component-wise; a plain float comparison would rank e.g. "2.4" above "2.32"
2054 if tuple(map(int, self.nova.api_version.get_string().split("."))) >= (2, 32):
2055 port["tag"] = new_port["port"]["name"]
2056
2057 return new_port, port
2058
2059 def _prepare_network_for_vminstance(
2060 self,
2061 name: str,
2062 net_list: list,
2063 created_items: dict,
2064 net_list_vim: list,
2065 external_network: list,
2066 no_secured_ports: list,
2067 ) -> None:
2068 """Create port and fill up net dictionary for new VM instance creation.
2069
2070 Args:
2071 name (str): Name of network
2072 net_list (list): List of networks
2073 created_items (dict): All created items belongs to a VM
2074 net_list_vim (list): List of ports
2075 external_network (list): List of external-networks
2076 no_secured_ports (list): Port security disabled ports
2077 """
2078
2079 self._reload_connection()
2080
2081 for net in net_list:
2082 # Skip non-connected iface
2083 if not net.get("net_id"):
2084 continue
2085
2086 new_port, port = self._create_port(net, name, created_items)
2087
2088 net_list_vim.append(port)
2089
2090 if net.get("floating_ip", False):
2091 net["exit_on_floating_ip_error"] = True
2092 external_network.append(net)
2093
2094 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2095 net["exit_on_floating_ip_error"] = False
2096 external_network.append(net)
2097 net["floating_ip"] = self.config.get("use_floating_ip")
2098
2099 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2100 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2101 if net.get("port_security") is False and not self.config.get(
2102 "no_port_security_extension"
2103 ):
2104 no_secured_ports.append(
2105 (
2106 new_port["port"]["id"],
2107 net.get("port_security_disable_strategy"),
2108 )
2109 )
2110
2111 def _prepare_persistent_root_volumes(
2112 self,
2113 name: str,
2114 vm_av_zone: list,
2115 disk: dict,
2116 base_disk_index: int,
2117 block_device_mapping: dict,
2118 existing_vim_volumes: list,
2119 created_items: dict,
2120 ) -> Optional[str]:
2121 """Prepare persistent root volumes for new VM instance.
2122
2123 Args:
2124 name (str): Name of VM instance
2125 vm_av_zone (list): List of availability zones
2126 disk (dict): Disk details
2127 base_disk_index (int): Disk index
2128 block_device_mapping (dict): Block device details
2129 existing_vim_volumes (list): Existing disk details
2130 created_items (dict): All created items belongs to VM
2131
2132 Returns:
2133 boot_volume_id (str): ID of boot volume
2134
2135 """
2136 # Disk may include only vim_volume_id or only vim_id.
2137 # Reuse an existing persistent root volume, located via vim_volume_id or vim_id
2138 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2139
2140 if disk.get(key_id):
2141 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2142 existing_vim_volumes.append({"id": disk[key_id]})
2143
2144 else:
2145 # Create persistent root volume
2146 volume = self.cinder.volumes.create(
2147 size=disk["size"],
2148 name=name + "vd" + chr(base_disk_index),
2149 imageRef=disk["image_id"],
2150 # Make sure volume is in the same AZ as the VM to be attached to
2151 availability_zone=vm_av_zone,
2152 )
2153 boot_volume_id = volume.id
2154 self.update_block_device_mapping(
2155 volume=volume,
2156 block_device_mapping=block_device_mapping,
2157 base_disk_index=base_disk_index,
2158 disk=disk,
2159 created_items=created_items,
2160 )
2161
2162 return boot_volume_id
2163
2164 @staticmethod
2165 def update_block_device_mapping(
2166 volume: object,
2167 block_device_mapping: dict,
2168 base_disk_index: int,
2169 disk: dict,
2170 created_items: dict,
2171 ) -> None:
2172 """Add volume information to block device mapping dict.
2173 Args:
2174 volume (object): Created volume object
2175 block_device_mapping (dict): Block device details
2176 base_disk_index (int): Disk index
2177 disk (dict): Disk details
2178 created_items (dict): All created items belongs to VM
2179 """
2180 if not volume:
2181 raise vimconn.VimConnException("Volume is empty.")
2182
2183 if not hasattr(volume, "id"):
2184 raise vimconn.VimConnException(
2185 "Created volume is not valid, does not have id attribute."
2186 )
2187
2188 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2189 if disk.get("multiattach"): # multiattach volumes do not belong to VDUs
2190 return
2191 volume_txt = "volume:" + str(volume.id)
2192 if disk.get("keep"):
2193 volume_txt += ":keep"
2194 created_items[volume_txt] = True
2195
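# Illustrative bookkeeping done above (sketch; the volume id is made up):
# a non-multiattach volume with id "vol-uuid-1" created for disk {"keep": True}
# is recorded as
#   created_items["volume:vol-uuid-1:keep"] = True
# whereas multiattach volumes are deliberately not recorded, because they do
# not belong to a single VDU.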
2196 @catch_any_exception
2197 def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:
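"""Create a multiattach volume so that several VMs can share it.

Args:
    shared_volume_data (dict): must include "size" (GB) and "name"

Returns:
    Tuple[str, str]: (volume name, volume id)
"""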
2198 volume = self.cinder.volumes.create(
2199 size=shared_volume_data["size"],
2200 name=shared_volume_data["name"],
2201 volume_type="multiattach",
2202 )
2203 return volume.name, volume.id
2204
2205 def _prepare_shared_volumes(
2206 self,
2207 name: str,
2208 disk: dict,
2209 base_disk_index: int,
2210 block_device_mapping: dict,
2211 existing_vim_volumes: list,
2212 created_items: dict,
2213 ):
2214 volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
2215 if volumes.get(disk["name"]):
2216 sv_id = volumes[disk["name"]]
2217 max_retries = 3
2218 vol_status = ""
2219 # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
2220 while max_retries:
2221 max_retries -= 1
2222 volume = self.cinder.volumes.get(sv_id)
2223 vol_status = volume.status
2224 if volume.status not in ("in-use", "available"):
2225 time.sleep(5)
2226 continue
2227 self.update_block_device_mapping(
2228 volume=volume,
2229 block_device_mapping=block_device_mapping,
2230 base_disk_index=base_disk_index,
2231 disk=disk,
2232 created_items=created_items,
2233 )
2234 return
2235 raise vimconn.VimConnException(
2236 "Shared volume is not prepared, status is: {}".format(vol_status),
2237 http_code=vimconn.HTTP_Internal_Server_Error,
2238 )
2239
2240 def _prepare_non_root_persistent_volumes(
2241 self,
2242 name: str,
2243 disk: dict,
2244 vm_av_zone: list,
2245 block_device_mapping: dict,
2246 base_disk_index: int,
2247 existing_vim_volumes: list,
2248 created_items: dict,
2249 ) -> None:
2250 """Prepare persistent volumes for new VM instance.
2251
2252 Args:
2253 name (str): Name of VM instance
2254 disk (dict): Disk details
2255 vm_av_zone (list): List of availability zones
2256 block_device_mapping (dict): Block device details
2257 base_disk_index (int): Disk index
2258 existing_vim_volumes (list): Existing disk details
2259 created_items (dict): All created items belongs to VM
2260 """
2261 # Non-root persistent volumes
2262 # Disk may include only vim_volume_id or only vim_id.
2263 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2264 if disk.get(key_id):
2265 # Use existing persistent volume
2266 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2267 existing_vim_volumes.append({"id": disk[key_id]})
2268 else:
2269 volume_name = f"{name}vd{chr(base_disk_index)}"
2270 volume = self.cinder.volumes.create(
2271 size=disk["size"],
2272 name=volume_name,
2273 # Make sure volume is in the same AZ as the VM to be attached to
2274 availability_zone=vm_av_zone,
2275 )
2276 self.update_block_device_mapping(
2277 volume=volume,
2278 block_device_mapping=block_device_mapping,
2279 base_disk_index=base_disk_index,
2280 disk=disk,
2281 created_items=created_items,
2282 )
2283
2284 def _wait_for_created_volumes_availability(
2285 self, elapsed_time: int, created_items: dict
2286 ) -> Optional[int]:
2287 """Wait till created volumes become available.
2288
2289 Args:
2290 elapsed_time (int): Passed time while waiting
2291 created_items (dict): All created items belongs to VM
2292
2293 Returns:
2294 elapsed_time (int): Time spent while waiting
2295
2296 """
2297 while elapsed_time < volume_timeout:
2298 for created_item in created_items:
2299 v, volume_id = (
2300 created_item.split(":")[0],
2301 created_item.split(":")[1],
2302 )
2303 if v == "volume":
2304 volume = self.cinder.volumes.get(volume_id)
2305 if (
2306 volume.volume_type == "multiattach"
2307 and volume.status == "in-use"
2308 ):
2309 return elapsed_time
2310 elif volume.status != "available":
2311 break
2312 else:
2313 # All ready: break from while
2314 break
2315
2316 time.sleep(5)
2317 elapsed_time += 5
2318
2319 return elapsed_time
2320
2321 def _wait_for_existing_volumes_availability(
2322 self, elapsed_time: int, existing_vim_volumes: list
2323 ) -> Optional[int]:
2324 """Wait till existing volumes become available.
2325
2326 Args:
2327 elapsed_time (int): Passed time while waiting
2328 existing_vim_volumes (list): Existing volume details
2329
2330 Returns:
2331 elapsed_time (int): Time spent while waiting
2332
2333 """
2334
2335 while elapsed_time < volume_timeout:
2336 for volume in existing_vim_volumes:
2337 v = self.cinder.volumes.get(volume["id"])
2338 if v.volume_type == "multiattach" and v.status == "in-use":
2339 return elapsed_time
2340 elif v.status != "available":
2341 break
2342 else: # all ready: break from while
2343 break
2344
2345 time.sleep(5)
2346 elapsed_time += 5
2347
2348 return elapsed_time
2349
2350 def _prepare_disk_for_vminstance(
2351 self,
2352 name: str,
2353 existing_vim_volumes: list,
2354 created_items: dict,
2355 vm_av_zone: list,
2356 block_device_mapping: dict,
2357 disk_list: list = None,
2358 ) -> None:
2359 """Prepare all volumes for new VM instance.
2360
2361 Args:
2362 name (str): Name of Instance
2363 existing_vim_volumes (list): List of existing volumes
2364 created_items (dict): All created items belongs to VM
2365 vm_av_zone (list): VM availability zone
2366 block_device_mapping (dict): Block devices to be attached to VM
2367 disk_list (list): List of disks
2368
2369 """
2370 # Create additional volumes in case these are present in disk_list
2371 base_disk_index = ord("b")
2372 boot_volume_id = None
2373 elapsed_time = 0
2374 for disk in disk_list:
2375 if "image_id" in disk:
2376 # Root persistent volume
2377 base_disk_index = ord("a")
2378 boot_volume_id = self._prepare_persistent_root_volumes(
2379 name=name,
2380 vm_av_zone=vm_av_zone,
2381 disk=disk,
2382 base_disk_index=base_disk_index,
2383 block_device_mapping=block_device_mapping,
2384 existing_vim_volumes=existing_vim_volumes,
2385 created_items=created_items,
2386 )
2387 elif disk.get("multiattach"):
2388 self._prepare_shared_volumes(
2389 name=name,
2390 disk=disk,
2391 base_disk_index=base_disk_index,
2392 block_device_mapping=block_device_mapping,
2393 existing_vim_volumes=existing_vim_volumes,
2394 created_items=created_items,
2395 )
2396 else:
2397 # Non-root persistent volume
2398 self._prepare_non_root_persistent_volumes(
2399 name=name,
2400 disk=disk,
2401 vm_av_zone=vm_av_zone,
2402 block_device_mapping=block_device_mapping,
2403 base_disk_index=base_disk_index,
2404 existing_vim_volumes=existing_vim_volumes,
2405 created_items=created_items,
2406 )
2407 base_disk_index += 1
2408
2409 # Wait until created volumes reach 'available' status
2410 elapsed_time = self._wait_for_created_volumes_availability(
2411 elapsed_time, created_items
2412 )
2413 # Wait until existing VIM volumes reach 'available' status
2414 elapsed_time = self._wait_for_existing_volumes_availability(
2415 elapsed_time, existing_vim_volumes
2416 )
2417 # If we exceeded the timeout rollback
2418 if elapsed_time >= volume_timeout:
2419 raise vimconn.VimConnException(
2420 "Timeout creating volumes for instance " + name,
2421 http_code=vimconn.HTTP_Request_Timeout,
2422 )
2423 if boot_volume_id:
2424 self.cinder.volumes.set_bootable(boot_volume_id, True)
2425
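# Illustrative disk_list for the method above (sketch; ids are made-up
# placeholders, and the vda..vdd mapping assumes the root disk comes first):
#   [
#       {"image_id": "glance-image-uuid", "size": 10},   # persistent root volume -> vda
#       {"size": 20},                                    # new non-root volume    -> vdb
#       {"vim_volume_id": "cinder-vol-uuid"},            # pre-existing volume    -> vdc
#       {"name": "shared01", "multiattach": True},       # shared volume          -> vdd
#   ]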
2426 def _find_the_external_network_for_floating_ip(self):
2427 """Get the external network ip in order to create floating IP.
2428
2429 Returns:
2430 pool_id (str): External network pool ID
2431
2432 """
2433
2434 # Find the external network
2435 external_nets = list()
2436
2437 for net in self.neutron.list_networks()["networks"]:
2438 if net["router:external"]:
2439 external_nets.append(net)
2440
2441 if len(external_nets) == 0:
2442 raise vimconn.VimConnException(
2443 "Cannot create floating_ip automatically since "
2444 "no external network is present",
2445 http_code=vimconn.HTTP_Conflict,
2446 )
2447
2448 if len(external_nets) > 1:
2449 raise vimconn.VimConnException(
2450 "Cannot create floating_ip automatically since "
2451 "multiple external networks are present",
2452 http_code=vimconn.HTTP_Conflict,
2453 )
2454
2455 # Pool ID
2456 return external_nets[0].get("id")
2457
2458 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2459 """Trigger neutron to create a new floating IP using external network ID.
2460
2461 Args:
2462 param (dict): Input parameters to create a floating IP
2463 created_items (dict): All created items belongs to new VM instance
2464
2465 Raises:
2466
2467 VimConnException
2468 """
2469 try:
2470 self.logger.debug("Creating floating IP")
2471 new_floating_ip = self.neutron.create_floatingip(param)
2472 free_floating_ip = new_floating_ip["floatingip"]["id"]
2473 created_items["floating_ip:" + str(free_floating_ip)] = True
2474
2475 except Exception as e:
2476 raise vimconn.VimConnException(
2477 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2478 http_code=vimconn.HTTP_Conflict,
2479 )
2480
2481 def _create_floating_ip(
2482 self, floating_network: dict, server: object, created_items: dict
2483 ) -> None:
2484 """Get the available Pool ID and create a new floating IP.
2485
2486 Args:
2487 floating_network (dict): Dict including external network ID
2488 server (object): Server object
2489 created_items (dict): All created items belongs to new VM instance
2490
2491 """
2492
2493 # Pool_id is available
2494 if (
2495 isinstance(floating_network["floating_ip"], str)
2496 and floating_network["floating_ip"].lower() != "true"
2497 ):
2498 pool_id = floating_network["floating_ip"]
2499
2500 # Find the Pool_id
2501 else:
2502 pool_id = self._find_the_external_network_for_floating_ip()
2503
2504 param = {
2505 "floatingip": {
2506 "floating_network_id": pool_id,
2507 "tenant_id": server.tenant_id,
2508 }
2509 }
2510
2511 self._neutron_create_float_ip(param, created_items)
2512
2513 def _find_floating_ip(
2514 self,
2515 server: object,
2516 floating_ips: list,
2517 floating_network: dict,
2518 ) -> Optional[str]:
2519 """Find the available free floating IPs if there are.
2520
2521 Args:
2522 server (object): Server object
2523 floating_ips (list): List of floating IPs
2524 floating_network (dict): Details of floating network such as ID
2525
2526 Returns:
2527 free_floating_ip (str): Free floating ip address
2528
2529 """
2530 for fip in floating_ips:
2531 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2532 continue
2533
2534 if isinstance(floating_network["floating_ip"], str):
2535 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2536 continue
2537
2538 return fip["id"]
2539
2540 def _assign_floating_ip(
2541 self, free_floating_ip: str, floating_network: dict
2542 ) -> Dict:
2543 """Assign the free floating ip address to port.
2544
2545 Args:
2546 free_floating_ip (str): Floating IP to be assigned
2547 floating_network (dict): ID of floating network
2548
2549 Returns:
2550 fip (dict): Floating IP details
2551
2552 """
2553 # The vim_id key contains the neutron.port_id
2554 self.neutron.update_floatingip(
2555 free_floating_ip,
2556 {"floatingip": {"port_id": floating_network["vim_id"]}},
2557 )
2558 # Guard against races: wait 5 seconds so a competing re-assignment becomes visible to the caller
2559 time.sleep(5)
2560
2561 return self.neutron.show_floatingip(free_floating_ip)
2562
2563 def _get_free_floating_ip(
2564 self, server: object, floating_network: dict
2565 ) -> Optional[str]:
2566 """Get the free floating IP address.
2567
2568 Args:
2569 server (object): Server Object
2570 floating_network (dict): Floating network details
2571
2572 Returns:
2573 free_floating_ip (str): Free floating ip addr
2574
2575 """
2576
2577 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2578
2579 # Randomize
2580 random.shuffle(floating_ips)
2581
2582 return self._find_floating_ip(server, floating_ips, floating_network)
2583
2584 def _prepare_external_network_for_vminstance(
2585 self,
2586 external_network: list,
2587 server: object,
2588 created_items: dict,
2589 vm_start_time: float,
2590 ) -> None:
2591 """Assign floating IP address for VM instance.
2592
2593 Args:
2594 external_network (list): ID of External network
2595 server (object): Server Object
2596 created_items (dict): All created items belongs to new VM instance
2597 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2598
2599 Raises:
2600 VimConnException
2601
2602 """
2603 for floating_network in external_network:
2604 try:
2605 assigned = False
2606 floating_ip_retries = 3
2607 # In HA deployments two RO instances may try to assign the same floating IP,
2608 # so retry several times
2609 while not assigned:
2610 free_floating_ip = self._get_free_floating_ip(
2611 server, floating_network
2612 )
2613
2614 if not free_floating_ip:
2615 self._create_floating_ip(
2616 floating_network, server, created_items
2617 )
continue  # loop again so the newly created floating IP is picked up
2618
2619 try:
2620 # For race condition ensure not already assigned
2621 fip = self.neutron.show_floatingip(free_floating_ip)
2622
2623 if fip["floatingip"].get("port_id"):
2624 continue
2625
2626 # Assign floating ip
2627 fip = self._assign_floating_ip(
2628 free_floating_ip, floating_network
2629 )
2630
2631 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2632 self.logger.warning(
2633 "floating_ip {} re-assigned to other port".format(
2634 free_floating_ip
2635 )
2636 )
2637 continue
2638
2639 self.logger.debug(
2640 "Assigned floating_ip {} to VM {}".format(
2641 free_floating_ip, server.id
2642 )
2643 )
2644
2645 assigned = True
2646
2647 except Exception as e:
2648 # OpenStack needs some time after VM creation to assign an IP, so retry if it fails
2649 vm_status = self.nova.servers.get(server.id).status
2650
2651 if vm_status not in ("ACTIVE", "ERROR"):
2652 if time.time() - vm_start_time < server_timeout:
2653 time.sleep(5)
2654 continue
2655 elif floating_ip_retries > 0:
2656 floating_ip_retries -= 1
2657 continue
2658
2659 raise vimconn.VimConnException(
2660 "Cannot create floating_ip: {} {}".format(
2661 type(e).__name__, e
2662 ),
2663 http_code=vimconn.HTTP_Conflict,
2664 )
2665
2666 except Exception as e:
2667 if not floating_network["exit_on_floating_ip_error"]:
2668 self.logger.error("Cannot create floating_ip. %s", str(e))
2669 continue
2670
2671 raise
2672
2673 def _update_port_security_for_vminstance(
2674 self,
2675 no_secured_ports: list,
2676 server: object,
2677 ) -> None:
2678 """Updates the port security according to no_secured_ports list.
2679
2680 Args:
2681 no_secured_ports (list): List of ports that security will be disabled
2682 server (object): Server Object
2683
2684 Raises:
2685 VimConnException
2686
2687 """
2688 # Wait until the VM is active and then disable the port-security
2689 if no_secured_ports:
2690 self.__wait_for_vm(server.id, "ACTIVE")
2691
2692 for port in no_secured_ports:
2693 port_update = {
2694 "port": {"port_security_enabled": False, "security_groups": None}
2695 }
2696
2697 if port[1] == "allow-address-pairs":
2698 port_update = {
2699 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2700 }
2701
2702 try:
2703 self.neutron.update_port(port[0], port_update)
2704
2705 except Exception:
2706 raise vimconn.VimConnException(
2707 "It was not possible to disable port security for port {}".format(
2708 port[0]
2709 )
2710 )
2711
2712 def new_vminstance(
2713 self,
2714 name: str,
2715 description: str,
2716 start: bool,
2717 image_id: str,
2718 flavor_id: str,
2719 affinity_group_list: list,
2720 net_list: list,
2721 cloud_config=None,
2722 disk_list=None,
2723 availability_zone_index=None,
2724 availability_zone_list=None,
2725 ) -> tuple:
2726 """Adds a VM instance to VIM.
2727
2728 Args:
2729 name (str): name of VM
2730 description (str): description
2731 start (bool): indicates if VM must start or boot in pause mode. Ignored
2732 image_id (str) image uuid
2733 flavor_id (str) flavor uuid
2734 affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignored if empty.
2735 net_list (list): list of interfaces, each one is a dictionary with:
2736 name: name of network
2737 net_id: network uuid to connect
2738 vpci: virtual PCI address to assign; ignored because OpenStack lacks support #TODO
2739 model: interface model, ignored #TODO
2740 mac_address: used for SR-IOV ifaces #TODO for other types
2741 use: 'data', 'bridge', 'mgmt'
2742 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2743 vim_id: filled/added by this function
2744 floating_ip: True/False (or it can be None)
2745 port_security: True/False
2746 cloud_config (dict): (optional) dictionary with:
2747 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2748 users: (optional) list of users to be inserted, each item is a dict with:
2749 name: (mandatory) user name,
2750 key-pairs: (optional) list of strings with the public key to be inserted to the user
2751 user-data: (optional) string is a text script to be passed directly to cloud-init
2752 config-files: (optional). List of files to be transferred. Each item is a dict with:
2753 dest: (mandatory) string with the destination absolute path
2754 encoding: (optional, by default text). Can be one of:
2755 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2756 content : (mandatory) string with the content of the file
2757 permissions: (optional) string with file permissions, typically octal notation '0644'
2758 owner: (optional) file owner, string with the format 'owner:group'
2759 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2760 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2761 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2762 size: (mandatory) string with the size of the disk in GB
2763 vim_id: (optional) should use this existing volume id
2764 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
2765 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2766 availability_zone_index is None
2767 #TODO ip, security groups
2768
2769 Returns:
2770 A tuple with the instance identifier and created_items or raises an exception on error
2771 created_items can be None or a dictionary where this method can include key-values that will be passed to
2772 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2773 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2774 as not present.
2775
2776 """
2777 self.logger.debug(
2778 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2779 image_id,
2780 flavor_id,
2781 str(net_list),
2782 )
2783 server = None
2784 created_items = {}
2785 net_list_vim = []
2786 # list of external networks to be connected to instance, later on used to create floating_ip
2787 external_network = []
2788 # List of ports with port-security disabled
2789 no_secured_ports = []
2790 block_device_mapping = {}
2791 existing_vim_volumes = []
2792 server_group_id = None
2793 scheduler_hints = {}
2794
2795 try:
2796 # Check the Openstack Connection
2797 self._reload_connection()
2798
2799 # Prepare network list
2800 self._prepare_network_for_vminstance(
2801 name=name,
2802 net_list=net_list,
2803 created_items=created_items,
2804 net_list_vim=net_list_vim,
2805 external_network=external_network,
2806 no_secured_ports=no_secured_ports,
2807 )
2808
2809 # Cloud config
2810 config_drive, userdata = self._create_user_data(cloud_config)
2811
2812 # Get availability Zone
2813 vm_av_zone = self._get_vm_availability_zone(
2814 availability_zone_index, availability_zone_list
2815 )
2816
2817 if disk_list:
2818 # Prepare disks
2819 self._prepare_disk_for_vminstance(
2820 name=name,
2821 existing_vim_volumes=existing_vim_volumes,
2822 created_items=created_items,
2823 vm_av_zone=vm_av_zone,
2824 block_device_mapping=block_device_mapping,
2825 disk_list=disk_list,
2826 )
2827
2828 if affinity_group_list:
2829 # Only first id on the list will be used. Openstack restriction
2830 server_group_id = affinity_group_list[0]["affinity_group_id"]
2831 scheduler_hints["group"] = server_group_id
2832
2833 self.logger.debug(
2834 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2835 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2836 "block_device_mapping={}, server_group={})".format(
2837 name,
2838 image_id,
2839 flavor_id,
2840 net_list_vim,
2841 self.config.get("security_groups"),
2842 vm_av_zone,
2843 self.config.get("keypair"),
2844 userdata,
2845 config_drive,
2846 block_device_mapping,
2847 server_group_id,
2848 )
2849 )
2850 # Create VM
2851 server = self.nova.servers.create(
2852 name=name,
2853 image=image_id,
2854 flavor=flavor_id,
2855 nics=net_list_vim,
2856 security_groups=self.config.get("security_groups"),
2857 # TODO remove security_groups in future versions. Already at neutron port
2858 availability_zone=vm_av_zone,
2859 key_name=self.config.get("keypair"),
2860 userdata=userdata,
2861 config_drive=config_drive,
2862 block_device_mapping=block_device_mapping,
2863 scheduler_hints=scheduler_hints,
2864 )
2865
2866 vm_start_time = time.time()
2867
2868 self._update_port_security_for_vminstance(no_secured_ports, server)
2869
2870 self._prepare_external_network_for_vminstance(
2871 external_network=external_network,
2872 server=server,
2873 created_items=created_items,
2874 vm_start_time=vm_start_time,
2875 )
2876
2877 return server.id, created_items
2878
2879 except Exception as e:
2880 server_id = None
2881 if server:
2882 server_id = server.id
2883
2884 try:
2885 created_items = self.remove_keep_tag_from_persistent_volumes(
2886 created_items
2887 )
2888
2889 self.delete_vminstance(server_id, created_items)
2890
2891 except Exception as e2:
2892 self.logger.error("new_vminstance rollback fail {}".format(e2))
2893
2894 self._format_exception(e)
2895
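# Illustrative call of new_vminstance() (sketch; every id and name is made up):
#   vm_id, created_items = vim.new_vminstance(
#       name="vnf1-vdu1", description="test VDU", start=True,
#       image_id="glance-image-uuid", flavor_id="nova-flavor-uuid",
#       affinity_group_list=[],
#       net_list=[{"name": "mgmt0", "net_id": "neutron-net-uuid",
#                  "use": "mgmt", "type": "virtual"}],
#   )
# created_items then holds keys such as "port:<id>", "volume:<id>" or
# "floating_ip:<id>" that delete_vminstance() uses for cleanup/rollback.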
2896 @staticmethod
2897 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2898 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2899
2900 Args:
2901 created_items (dict): All created items belongs to VM
2902
2903 Returns:
2904 updated_created_items (dict): Dict which does not include keep flag for volumes.
2905
2906 """
2907 return {
2908 key.replace(":keep", ""): value for (key, value) in created_items.items()
2909 }
2910
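# Illustrative transformation (sketch; ids are made up):
#   {"volume:vol-uuid-1:keep": True, "port:port-uuid-1": True}
#     -> {"volume:vol-uuid-1": True, "port:port-uuid-1": True}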
2911 def get_vminstance(self, vm_id):
2912 """Returns the VM instance information from VIM"""
2913 return self._find_nova_server(vm_id)
2914
2915 @catch_any_exception
2916 def get_vminstance_console(self, vm_id, console_type="vnc"):
2917 """
2918 Get a console for the virtual machine
2919 Params:
2920 vm_id: uuid of the VM
2921 console_type, can be:
2922 "novnc" (by default), "xvpvnc" for VNC types,
2923 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2924 Returns dict with the console parameters:
2925 protocol: ssh, ftp, http, https, ...
2926 server: usually ip address
2927 port: the http, ssh, ... port
2928 suffix: extra text, e.g. the http path and query string
2929 """
2930 self.logger.debug("Getting VM CONSOLE from VIM")
2931 self._reload_connection()
2932 server = self.nova.servers.find(id=vm_id)
2933
2934 if console_type is None or console_type == "novnc":
2935 console_dict = server.get_vnc_console("novnc")
2936 elif console_type == "xvpvnc":
2937 console_dict = server.get_vnc_console(console_type)
2938 elif console_type == "rdp-html5":
2939 console_dict = server.get_rdp_console(console_type)
2940 elif console_type == "spice-html5":
2941 console_dict = server.get_spice_console(console_type)
2942 else:
2943 raise vimconn.VimConnException(
2944 "console type '{}' not allowed".format(console_type),
2945 http_code=vimconn.HTTP_Bad_Request,
2946 )
2947
2948 console_dict1 = console_dict.get("console")
2949
2950 if console_dict1:
2951 console_url = console_dict1.get("url")
2952
2953 if console_url:
2954 # parse console_url
2955 protocol_index = console_url.find("//")
2956 suffix_index = (
2957 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2958 )
2959 port_index = (
2960 console_url[protocol_index + 2 : suffix_index].find(":")
2961 + protocol_index
2962 + 2
2963 )
2964
2965 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2966 return (
2967 -vimconn.HTTP_Internal_Server_Error,
2968 "Unexpected response from VIM",
2969 )
2970
2971 console_dict = {
2972 "protocol": console_url[0:protocol_index],
2973 "server": console_url[protocol_index + 2 : port_index],
2974 "port": console_url[port_index + 1 : suffix_index],  # skip the leading ":"
2975 "suffix": console_url[suffix_index + 1 :],
2976 }
2978
2979 return console_dict
2980 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2981
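# Illustrative parse of the slicing above (sketch; the URL is made up). For
#   console_url = "http://10.0.0.10:6080/vnc_auto.html?token=abc"
# the result is server "10.0.0.10", port "6080" and suffix
# "vnc_auto.html?token=abc"; note that the protocol slice keeps the trailing
# colon ("http:") as the code stands.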
2982 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2983 """Neutron delete ports by id.
2984 Args:
2985 k_id (str): Port id in the VIM
2986 """
2987 try:
2988 self.neutron.delete_port(k_id)
2989
2990 except (neExceptions.ConnectionFailed, ConnectionError) as e:
2991 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2992 # If there is connection error, raise.
2993 self._format_exception(e)
2994 except Exception as e:
2995 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2996
2997 def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
2998 """Cinder delete volume by id.
2999 Args:
3000 shared_volume_vim_id (str): ID of shared volume in VIM
3001 """
3002 elapsed_time = 0
3003 try:
3004 while elapsed_time < server_timeout:
3005 vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
3006 if vol_status == "available":
3007 self.cinder.volumes.delete(shared_volume_vim_id)
3008 return True
3009
3010 time.sleep(5)
3011 elapsed_time += 5
3012
3013 if elapsed_time >= server_timeout:
3014 raise vimconn.VimConnException(
3015 "Timeout waiting for volume "
3016 + shared_volume_vim_id
3017 + " to be available",
3018 http_code=vimconn.HTTP_Request_Timeout,
3019 )
3020
3021 except Exception as e:
3022 self.logger.error(
3023 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3024 )
3025 self._format_exception(e)
3026
3027 def _delete_volumes_by_id_wth_cinder(
3028 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
3029 ) -> bool:
3030 """Cinder delete volume by id.
3031 Args:
3032 k (str): Full item name in created_items
3033 k_id (str): ID of the volume in VIM
3034 volumes_to_hold (list): Volumes not to delete
3035 created_items (dict): All created items belongs to VM
3036 """
3037 try:
3038 if k_id in volumes_to_hold:
3039 return False
3040
3041 if self.cinder.volumes.get(k_id).status != "available":
3042 return True
3043
3044 else:
3045 self.cinder.volumes.delete(k_id)
3046 created_items[k] = None
3047
3048 except (cExceptions.ConnectionError, ConnectionError) as e:
3049 self.logger.error(
3050 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3051 )
3052 self._format_exception(e)
3053 except Exception as e:
3054 self.logger.error(
3055 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3056 )
3057
3058 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
3059 """Neutron delete floating ip by id.
3060 Args:
3061 k (str): Full item name in created_items
3062 k_id (str): ID of floating ip in VIM
3063 created_items (dict): All created items belongs to VM
3064 """
3065 try:
3066 self.neutron.delete_floatingip(k_id)
3067 created_items[k] = None
3068
3069 except (neExceptions.ConnectionFailed, ConnectionError) as e:
3070 self.logger.error(
3071 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3072 )
3073 self._format_exception(e)
3074 except Exception as e:
3075 self.logger.error(
3076 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3077 )
3078
3079 @staticmethod
3080 def _get_item_name_id(k: str) -> Tuple[str, str]:
3081 k_item, _, k_id = k.partition(":")
3082 return k_item, k_id
3083
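# Illustrative split (sketch; ids are made up):
#   "floating_ip:fip-uuid-1" -> ("floating_ip", "fip-uuid-1")
# Keep-flagged volume keys such as "volume:vol-uuid-1:keep" would yield
# ("volume", "vol-uuid-1:keep"), but delete_vminstance() filters those out
# beforehand via _extract_items_wth_keep_flag_from_created_items().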
3084 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
3085 """Delete VM ports attached to the networks before deleting virtual machine.
3086 Args:
3087 created_items (dict): All created items belongs to VM
3088 """
3089
3090 for k, v in created_items.items():
3091 if not v: # skip already deleted
3092 continue
3093
3094 try:
3095 k_item, k_id = self._get_item_name_id(k)
3096 if k_item == "port":
3097 self._delete_ports_by_id_wth_neutron(k_id)
3098
3099 except (neExceptions.ConnectionFailed, ConnectionError) as e:
3100 self.logger.error(
3101 "Error deleting port: {}: {}".format(type(e).__name__, e)
3102 )
3103 self._format_exception(e)
3104 except Exception as e:
3105 self.logger.error(
3106 "Error deleting port: {}: {}".format(type(e).__name__, e)
3107 )
3108
3109 def _delete_created_items(
3110 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
3111 ) -> bool:
3112 """Delete Volumes and floating ip if they exist in created_items."""
3113 for k, v in created_items.items():
3114 if not v: # skip already deleted
3115 continue
3116
3117 try:
3118 k_item, k_id = self._get_item_name_id(k)
3119 if k_item == "volume":
3120 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
3121 k, k_id, volumes_to_hold, created_items
3122 )
3123
3124 if unavailable_vol:
3125 keep_waiting = True
3126
3127 elif k_item == "floating_ip":
3128 self._delete_floating_ip_by_id(k, k_id, created_items)
3129
3130 except (
3131 cExceptions.ConnectionError,
3132 neExceptions.ConnectionFailed,
3133 ConnectionError,
3134 AttributeError,
3135 TypeError,
3136 ) as e:
3137 self.logger.error("Error deleting {}: {}".format(k, e))
3138 self._format_exception(e)
3139
3140 except Exception as e:
3141 self.logger.error("Error deleting {}: {}".format(k, e))
3142
3143 return keep_waiting
3144
3145 @staticmethod
3146 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3147 """Remove the volumes which has key flag from created_items
3148
3149 Args:
3150 created_items (dict): All created items belongs to VM
3151
3152 Returns:
3153 created_items (dict): created_items without the keep-flagged persistent volumes
3154 """
3155 return {
3156 key: value
3157 for (key, value) in created_items.items()
3158 if len(key.split(":")) == 2
3159 }
3160
3161 @catch_any_exception
3162 def delete_vminstance(
3163 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3164 ) -> None:
3165 """Removes a VM instance from VIM. Returns the old identifier.
3166 Args:
3167 vm_id (str): Identifier of VM instance
3168 created_items (dict): All created items belongs to VM
3169 volumes_to_hold (list): Volumes_to_hold
3170 """
3171 if created_items is None:
3172 created_items = {}
3173 if volumes_to_hold is None:
3174 volumes_to_hold = []
3175
3176 try:
3177 created_items = self._extract_items_wth_keep_flag_from_created_items(
3178 created_items
3179 )
3180
3181 self._reload_connection()
3182
3183 # Delete VM ports attached to the networks before the virtual machine
3184 if created_items:
3185 self._delete_vm_ports_attached_to_network(created_items)
3186
3187 if vm_id:
3188 self.nova.servers.delete(vm_id)
3189
3190 # Although detached, volumes must reach 'available' status before being deleted.
3191 # This loop ensures that.
3192 keep_waiting = True
3193 elapsed_time = 0
3194
3195 while keep_waiting and elapsed_time < volume_timeout:
3196 keep_waiting = False
3197
3198 # Delete volumes and floating IP.
3199 keep_waiting = self._delete_created_items(
3200 created_items, volumes_to_hold, keep_waiting
3201 )
3202
3203 if keep_waiting:
3204 time.sleep(1)
3205 elapsed_time += 1
3206 except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
3207 # If VM does not exist, it does not raise
3208 self.logger.warning(f"Error deleting VM: {vm_id} is not found, {str(e)}")
3209
3210 def refresh_vms_status(self, vm_list):
3211 """Get the status of the virtual machines and their interfaces/ports
3212 Params: the list of VM identifiers
3213 Returns a dictionary with:
3214 vm_id: #VIM id of this Virtual Machine
3215 status: #Mandatory. Text with one of:
3216 # DELETED (not found at vim)
3217 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3218 # OTHER (Vim reported other status not understood)
3219 # ERROR (VIM indicates an ERROR status)
3220 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3221 # CREATING (on building process), ERROR
3222 # ACTIVE:NoMgmtIP (active but no interface has an IP address)
3223 #
3224 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3225 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3226 interfaces:
3227 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3228 mac_address: #Text format XX:XX:XX:XX:XX:XX
3229 vim_net_id: #network id where this interface is connected
3230 vim_interface_id: #interface/port VIM id
3231 ip_address: #null, or text with IPv4, IPv6 address
3232 compute_node: #identification of compute node where PF,VF interface is allocated
3233 pci: #PCI address of the NIC that hosts the PF,VF
3234 vlan: #physical VLAN used for VF
3235 """
3236 vm_dict = {}
3237 self.logger.debug(
3238 "refresh_vms status: Getting tenant VM instance information from VIM"
3239 )
3240 for vm_id in vm_list:
3241 vm = {}
3242
3243 try:
3244 vm_vim = self.get_vminstance(vm_id)
3245
3246 if vm_vim["status"] in vmStatus2manoFormat:
3247 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3248 else:
3249 vm["status"] = "OTHER"
3250 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3251
3252 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3253 vm_vim.pop("user_data", None)
3254 vm["vim_info"] = self.serialize(vm_vim)
3255
3256 vm["interfaces"] = []
3257 if vm_vim.get("fault"):
3258 vm["error_msg"] = str(vm_vim["fault"])
3259
3260 # get interfaces
3261 try:
3262 self._reload_connection()
3263 port_dict = self.neutron.list_ports(device_id=vm_id)
3264
3265 for port in port_dict["ports"]:
3266 interface = {}
3267 interface["vim_info"] = self.serialize(port)
3268 interface["mac_address"] = port.get("mac_address")
3269 interface["vim_net_id"] = port["network_id"]
3270 interface["vim_interface_id"] = port["id"]
3271 # check if OS-EXT-SRV-ATTR:host is there,
3272 # in case of non-admin credentials, it will be missing
3273
3274 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3275 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3276
3277 interface["pci"] = None
3278
3279 # check if binding:profile is there,
3280 # in case of non-admin credentials, it will be missing
3281 if port.get("binding:profile"):
3282 if port["binding:profile"].get("pci_slot"):
3283 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3284 # the slot to 0x00
3285 # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
3286 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3287 pci = port["binding:profile"]["pci_slot"]
3288 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3289 interface["pci"] = pci
3290
3291 interface["vlan"] = None
3292
3293 if port.get("binding:vif_details"):
3294 interface["vlan"] = port["binding:vif_details"].get("vlan")
3295
3296 # Get the VLAN from the network when it is not present in the port, for old
3297 # OpenStack versions and for PT cases where the VLAN is needed
3298 if not interface["vlan"]:
3299 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3300 network = self.neutron.show_network(port["network_id"])
3301
3302 if (
3303 network["network"].get("provider:network_type")
3304 == "vlan"
3305 ):
3306 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3307 interface["vlan"] = network["network"].get(
3308 "provider:segmentation_id"
3309 )
3310
3311 ips = []
3312 # look for floating ip address
3313 try:
3314 floating_ip_dict = self.neutron.list_floatingips(
3315 port_id=port["id"]
3316 )
3317
3318 if floating_ip_dict.get("floatingips"):
3319 ips.append(
3320 floating_ip_dict["floatingips"][0].get(
3321 "floating_ip_address"
3322 )
3323 )
3324 except Exception:
3325 pass
3326
3327 for subnet in port["fixed_ips"]:
3328 ips.append(subnet["ip_address"])
3329
3330 interface["ip_address"] = ";".join(ips)
3331 vm["interfaces"].append(interface)
3332 except Exception as e:
3333 self.logger.error(
3334 "Error getting vm interface information {}: {}".format(
3335 type(e).__name__, e
3336 ),
3337 exc_info=True,
3338 )
3339 except vimconn.VimConnNotFoundException as e:
3340 self.logger.error("Exception getting vm status: %s", str(e))
3341 vm["status"] = "DELETED"
3342 vm["error_msg"] = str(e)
3343 except vimconn.VimConnException as e:
3344 self.logger.error("Exception getting vm status: %s", str(e))
3345 vm["status"] = "VIM_ERROR"
3346 vm["error_msg"] = str(e)
3347
3348 vm_dict[vm_id] = vm
3349
3350 return vm_dict
3351
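# Illustrative return value of refresh_vms_status() (sketch; ids and addresses
# are made up, and the real dict carries more VIM-specific fields):
#   {
#       "vm-uuid-1": {
#           "status": "ACTIVE",
#           "vim_info": "<yaml dump of the nova server>",
#           "interfaces": [
#               {
#                   "mac_address": "fa:16:3e:00:00:01",
#                   "vim_net_id": "net-uuid-1",
#                   "vim_interface_id": "port-uuid-1",
#                   "ip_address": "10.0.0.5",
#               }
#           ],
#       }
#   }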
3352 @catch_any_exception
3353 def action_vminstance(self, vm_id, action_dict, created_items={}):
3354 """Send and action over a VM instance from VIM
3355 Returns None or the console dict if the action was successfully sent to the VIM
3356 """
3357 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3358 self._reload_connection()
3359 server = self.nova.servers.find(id=vm_id)
3360 if "start" in action_dict:
3361 if action_dict["start"] == "rebuild":
3362 server.rebuild()
3363 else:
3364 if server.status == "PAUSED":
3365 server.unpause()
3366 elif server.status == "SUSPENDED":
3367 server.resume()
3368 elif server.status == "SHUTOFF":
3369 server.start()
3370 else:
3371 self.logger.debug(
3372 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3373 )
3374 raise vimconn.VimConnException(
3375 "Cannot 'start' instance while it is in active state",
3376 http_code=vimconn.HTTP_Bad_Request,
3377 )
3378 elif "pause" in action_dict:
3379 server.pause()
3380 elif "resume" in action_dict:
3381 server.resume()
3382 elif "shutoff" in action_dict or "shutdown" in action_dict:
3383 self.logger.debug("server status %s", server.status)
3384 if server.status == "ACTIVE":
3385 server.stop()
3386 else:
3387 self.logger.debug("ERROR: VM is not in Active state")
3388 raise vimconn.VimConnException(
3389 "VM is not in active state, stop operation is not allowed",
3390 http_code=vimconn.HTTP_Bad_Request,
3391 )
3392 elif "forceOff" in action_dict:
3393 server.stop() # TODO
3394 elif "terminate" in action_dict:
3395 server.delete()
3396 elif "createImage" in action_dict:
3397 server.create_image()
3398 # "path":path_schema,
3399 # "description":description_schema,
3400 # "name":name_schema,
3401 # "metadata":metadata_schema,
3402 # "imageRef": id_schema,
3403 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3404 elif "rebuild" in action_dict:
3405 server.rebuild(server.image["id"])
3406 elif "reboot" in action_dict:
3407 server.reboot() # reboot_type="SOFT"
3408 elif "console" in action_dict:
3409 console_type = action_dict["console"]
3410
3411 if console_type is None or console_type == "novnc":
3412 console_dict = server.get_vnc_console("novnc")
3413 elif console_type == "xvpvnc":
3414 console_dict = server.get_vnc_console(console_type)
3415 elif console_type == "rdp-html5":
3416 console_dict = server.get_rdp_console(console_type)
3417 elif console_type == "spice-html5":
3418 console_dict = server.get_spice_console(console_type)
3419 else:
3420 raise vimconn.VimConnException(
3421 "console type '{}' not allowed".format(console_type),
3422 http_code=vimconn.HTTP_Bad_Request,
3423 )
3424
3425 try:
3426 console_url = console_dict["console"]["url"]
3427 # parse console_url
3428 protocol_index = console_url.find("//")
3429 suffix_index = (
3430 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3431 )
3432 port_index = (
3433 console_url[protocol_index + 2 : suffix_index].find(":")
3434 + protocol_index
3435 + 2
3436 )
3437
3438 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3439 raise vimconn.VimConnException(
3440 "Unexpected response from VIM " + str(console_dict)
3441 )
3442
3443 console_dict2 = {
3444 "protocol": console_url[0:protocol_index],
3445 "server": console_url[protocol_index + 2 : port_index],
3446 "port": int(console_url[port_index + 1 : suffix_index]),
3447 "suffix": console_url[suffix_index + 1 :],
3448 }
3449
3450 return console_dict2
3451 except Exception:
3452 raise vimconn.VimConnException(
3453 "Unexpected response from VIM " + str(console_dict)
3454 )
3455
3456 return None
3457
3458 # ###### VIO Specific Changes #########
3459 def _generate_vlanID(self):
3460 """
3461 Method to get unused vlanID
3462 Args:
3463 None
3464 Returns:
3465 vlanID
3466 """
3467 # Get used VLAN IDs
3468 usedVlanIDs = []
3469 networks = self.get_network_list()
3470
3471 for net in networks:
3472 if net.get("provider:segmentation_id"):
3473 usedVlanIDs.append(net.get("provider:segmentation_id"))
3474
3475 used_vlanIDs = set(usedVlanIDs)
3476
3477 # find unused VLAN ID
3478 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3479 try:
3480 start_vlanid, end_vlanid = map(
3481 int, vlanID_range.replace(" ", "").split("-")
3482 )
3483
3484 for vlanID in range(start_vlanid, end_vlanid + 1):
3485 if vlanID not in used_vlanIDs:
3486 return vlanID
3487 except Exception as exp:
3488 raise vimconn.VimConnException(
3489 "Exception {} occurred while generating VLAN ID.".format(exp)
3490 )
3491 else:
3492 raise vimconn.VimConnConflictException(
3493 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3494 self.config.get("dataplane_net_vlan_range")
3495 )
3496 )
3497
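# Illustrative configuration for the method above (sketch; ranges are made up):
#   config["dataplane_net_vlan_range"] = ["3000-3100", "3300-3400"]
# The first ID in 3000..3100 (then 3300..3400) that is not already in use as a
# provider:segmentation_id is returned.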
3498 def _generate_multisegment_vlanID(self):
3499 """
3500 Method to get unused vlanID
3501 Args:
3502 None
3503 Returns:
3504 vlanID
3505 """
3506 # Get used VLAN IDs
3507 usedVlanIDs = []
3508 networks = self.get_network_list()
3509 for net in networks:
3510 if net.get("provider:network_type") == "vlan" and net.get(
3511 "provider:segmentation_id"
3512 ):
3513 usedVlanIDs.append(net.get("provider:segmentation_id"))
3514 elif net.get("segments"):
3515 for segment in net.get("segments"):
3516 if segment.get("provider:network_type") == "vlan" and segment.get(
3517 "provider:segmentation_id"
3518 ):
3519 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3520
3521 used_vlanIDs = set(usedVlanIDs)
3522
3523 # find unused VLAN ID
3524 for vlanID_range in self.config.get("multisegment_vlan_range"):
3525 try:
3526 start_vlanid, end_vlanid = map(
3527 int, vlanID_range.replace(" ", "").split("-")
3528 )
3529
3530 for vlanID in range(start_vlanid, end_vlanid + 1):
3531 if vlanID not in used_vlanIDs:
3532 return vlanID
3533 except Exception as exp:
3534 raise vimconn.VimConnException(
3535 "Exception {} occurred while generating VLAN ID.".format(exp)
3536 )
3537 else:
3538 raise vimconn.VimConnConflictException(
3539 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3540 self.config.get("multisegment_vlan_range")
3541 )
3542 )
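
    # Hedged example (hypothetical values): with
    #   config["multisegment_vlan_range"] = ["2000-2099"]
    # and segments already using 2000 and 2001, _generate_multisegment_vlanID()
    # returns 2002.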
3543
3544 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3545 """
3546 Method to validate user given vlanID ranges
3547 Args: None
3548 Returns: None
3549 """
3550 for vlanID_range in input_vlan_range:
3551 vlan_range = vlanID_range.replace(" ", "")
            # validate format; require at least one digit on each side of "-"
            vlanID_pattern = r"(\d+)-(\d+)$"
            match_obj = re.match(vlanID_pattern, vlan_range)
3555 if not match_obj:
                raise vimconn.VimConnConflictException(
                    "Invalid VLAN range for {}: {}. You must provide "
                    "'{}' in format [start_ID - end_ID].".format(
                        text_vlan_range, vlanID_range, text_vlan_range
                    )
                )
3562
3563 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
            if start_vlanid <= 0:
                raise vimconn.VimConnConflictException(
                    "Invalid VLAN range for {}: {}. Start ID cannot be zero. For VLAN "
                    "networks valid IDs are 1 to 4094.".format(
                        text_vlan_range, vlanID_range
                    )
                )

            if end_vlanid > 4094:
                raise vimconn.VimConnConflictException(
                    "Invalid VLAN range for {}: {}. End VLAN ID cannot be "
                    "greater than 4094. For VLAN networks valid IDs are 1 to 4094.".format(
                        text_vlan_range, vlanID_range
                    )
                )
3579
            if start_vlanid > end_vlanid:
                raise vimconn.VimConnConflictException(
                    "Invalid VLAN range for {}: {}. You must provide '{}'"
                    " in format start_ID - end_ID with start_ID <= end_ID.".format(
                        text_vlan_range, vlanID_range, text_vlan_range
                    )
                )
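
    # Illustrative outcomes of the validation above (hypothetical calls):
    #   self._validate_vlan_ranges(["100-200"], "dataplane_net_vlan_range")   # passes
    #   self._validate_vlan_ranges(["0-200"], "dataplane_net_vlan_range")    # raises: start ID is zero
    #   self._validate_vlan_ranges(["100-5000"], "dataplane_net_vlan_range") # raises: end ID > 4094
    #   self._validate_vlan_ranges(["200-100"], "dataplane_net_vlan_range")  # raises: start > end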
3587
3588 def get_hosts_info(self):
3589 """Get the information of deployed hosts
3590 Returns the hosts content"""
        if self.debug:
            self.logger.debug("osconnector: Getting host info from VIM")
3593
3594 try:
3595 h_list = []
3596 self._reload_connection()
3597 hypervisors = self.nova.hypervisors.list()
3598
3599 for hype in hypervisors:
3600 h_list.append(hype.to_dict())
3601
3602 return 1, {"hosts": h_list}
3603 except nvExceptions.NotFound as e:
3604 error_value = -vimconn.HTTP_Not_Found
3605 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3606 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3607 error_value = -vimconn.HTTP_Bad_Request
3608 error_text = (
3609 type(e).__name__
3610 + ": "
3611 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3612 )
3613
        # TODO insert exception vimconn.HTTP_Unauthorized
        # reaching this point means one of the exceptions above was raised
        self.logger.debug("get_hosts_info " + error_text)
3617
3618 return error_value, error_text
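
    # Hedged note on the legacy return convention used above: on success the
    # method returns (1, {"hosts": [...]}), and on failure a negative HTTP
    # code with an error string, e.g. (-404, "NotFound: <error text>").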
3619
3620 def get_hosts(self, vim_tenant):
3621 """Get the hosts and deployed instances
3622 Returns the hosts content"""
3623 r, hype_dict = self.get_hosts_info()
3624
3625 if r < 0:
3626 return r, hype_dict
3627
3628 hypervisors = hype_dict["hosts"]
3629
3630 try:
3631 servers = self.nova.servers.list()
3632 for hype in hypervisors:
3633 for server in servers:
3634 if (
3635 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3636 == hype["hypervisor_hostname"]
3637 ):
3638 if "vm" in hype:
3639 hype["vm"].append(server.id)
3640 else:
3641 hype["vm"] = [server.id]
3642
3643 return 1, hype_dict
3644 except nvExceptions.NotFound as e:
3645 error_value = -vimconn.HTTP_Not_Found
3646 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3647 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3648 error_value = -vimconn.HTTP_Bad_Request
3649 error_text = (
3650 type(e).__name__
3651 + ": "
3652 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3653 )
3654
        # TODO insert exception vimconn.HTTP_Unauthorized
        # reaching this point means one of the exceptions above was raised
        self.logger.debug("get_hosts " + error_text)
3658
3659 return error_value, error_text
3660
3661 @catch_any_exception
3662 def new_affinity_group(self, affinity_group_data):
3663 """Adds a server group to VIM
3664 affinity_group_data contains a dictionary with information, keys:
3665 name: name in VIM for the server group
3666 type: affinity or anti-affinity
3667 scope: Only nfvi-node allowed
3668 Returns the server group identifier"""
3669 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3670 name = affinity_group_data["name"]
3671 policy = affinity_group_data["type"]
3672 self._reload_connection()
3673 new_server_group = self.nova.server_groups.create(name, policy)
3674 return new_server_group.id
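
    # Hedged example input (values are hypothetical):
    #   affinity_group_data = {
    #       "name": "vdu-anti-affinity-1",
    #       "type": "anti-affinity",
    #       "scope": "nfvi-node",
    #   }
    # new_affinity_group() creates the server group with the "anti-affinity"
    # policy and returns its UUID.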
3675
3676 @catch_any_exception
3677 def get_affinity_group(self, affinity_group_id):
3678 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3679 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3680 self._reload_connection()
3681 server_group = self.nova.server_groups.find(id=affinity_group_id)
3682 return server_group.to_dict()
3683
3684 @catch_any_exception
3685 def delete_affinity_group(self, affinity_group_id):
3686 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
        self.logger.debug("Deleting server group '%s'", affinity_group_id)
3688 self._reload_connection()
3689 self.nova.server_groups.delete(affinity_group_id)
3690 return affinity_group_id
3691
3692 @catch_any_exception
3693 def get_vdu_state(self, vm_id, host_is_required=False) -> list:
3694 """Getting the state of a VDU.
3695 Args:
3696 vm_id (str): ID of an instance
3697 host_is_required (Boolean): If the VIM account is non-admin, host info does not appear in server_dict
3698 and if this is set to True, it raises KeyError.
3699 Returns:
3700 vdu_data (list): VDU details including state, flavor, host_info, AZ
3701 """
3702 self.logger.debug("Getting the status of VM")
3703 self.logger.debug("VIM VM ID %s", vm_id)
3704 self._reload_connection()
3705 server_dict = self._find_nova_server(vm_id)
3706 srv_attr = "OS-EXT-SRV-ATTR:host"
3707 host_info = (
3708 server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
3709 )
3710 vdu_data = [
3711 server_dict["status"],
3712 server_dict["flavor"]["id"],
3713 host_info,
3714 server_dict["OS-EXT-AZ:availability_zone"],
3715 ]
3716 self.logger.debug("vdu_data %s", vdu_data)
3717 return vdu_data
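
    # Hedged example of the returned vdu_data (values are hypothetical):
    #   ["ACTIVE", "<flavor-uuid>", "compute-0", "nova"]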
3718
3719 def check_compute_availability(self, host, server_flavor_details):
3720 self._reload_connection()
3721 hypervisor_search = self.nova.hypervisors.search(
3722 hypervisor_match=host, servers=True
3723 )
        for hypervisor in hypervisor_search:
            hypervisor_id = hypervisor.to_dict()["id"]
            hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
            # to_dict() already yields a plain dict; no need for a
            # json.dumps()/json.loads() round trip
            hypervisor_dict = hypervisor_details.to_dict()
            resources_available = [
                hypervisor_dict["free_ram_mb"],
                hypervisor_dict["disk_available_least"],
                hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
            ]
3735 compute_available = all(
3736 x > y for x, y in zip(resources_available, server_flavor_details)
3737 )
3738 if compute_available:
3739 return host
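
    # Hedged example: with server_flavor_details = [ram_mb, disk_gb, vcpus],
    # e.g. [4096, 40, 2], the host is returned only when the hypervisor
    # reports strictly more free RAM, available disk, and free vCPUs than
    # requested; otherwise the loop falls through and the method returns None.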
3740
3741 def check_availability_zone(
3742 self, old_az, server_flavor_details, old_host, host=None
3743 ):
3744 self._reload_connection()
3745 az_check = {"zone_check": False, "compute_availability": None}
3746 aggregates_list = self.nova.aggregates.list()
        for aggregate in aggregates_list:
            # to_dict() already yields a plain dict; no json round trip needed
            aggregate_details = aggregate.to_dict()
            if aggregate_details["availability_zone"] == old_az:
                hosts_list = aggregate_details["hosts"]
3753 if host is not None:
3754 if host in hosts_list:
3755 az_check["zone_check"] = True
3756 available_compute_id = self.check_compute_availability(
3757 host, server_flavor_details
3758 )
3759 if available_compute_id is not None:
3760 az_check["compute_availability"] = available_compute_id
3761 else:
3762 for check_host in hosts_list:
3763 if check_host != old_host:
3764 available_compute_id = self.check_compute_availability(
3765 check_host, server_flavor_details
3766 )
3767 if available_compute_id is not None:
3768 az_check["zone_check"] = True
3769 az_check["compute_availability"] = available_compute_id
3770 break
3771 else:
3772 az_check["zone_check"] = True
3773 return az_check
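
    # Hedged examples of the returned az_check dict (hypothetical values):
    #   {"zone_check": True, "compute_availability": "compute-1"}  # host found
    #   {"zone_check": True, "compute_availability": None}   # zone ok, no capacity
    #   {"zone_check": False, "compute_availability": None}  # host not in the zone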
3774
3775 @catch_any_exception
3776 def migrate_instance(self, vm_id, compute_host=None):
3777 """
3778 Migrate a vdu
3779 param:
3780 vm_id: ID of an instance
3781 compute_host: Host to migrate the vdu to
3782 """
3783 self._reload_connection()
3784 vm_state = False
3785 instance_state = self.get_vdu_state(vm_id, host_is_required=True)
3786 server_flavor_id = instance_state[1]
3787 server_hypervisor_name = instance_state[2]
3788 server_availability_zone = instance_state[3]
3789 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3790 server_flavor_details = [
3791 server_flavor["ram"],
3792 server_flavor["disk"],
3793 server_flavor["vcpus"],
3794 ]
3795 if compute_host == server_hypervisor_name:
3796 raise vimconn.VimConnException(
3797 "Unable to migrate instance '{}' to the same host '{}'".format(
3798 vm_id, compute_host
3799 ),
3800 http_code=vimconn.HTTP_Bad_Request,
3801 )
3802 az_status = self.check_availability_zone(
3803 server_availability_zone,
3804 server_flavor_details,
3805 server_hypervisor_name,
3806 compute_host,
3807 )
3808 availability_zone_check = az_status["zone_check"]
3809 available_compute_id = az_status.get("compute_availability")
3810
3811 if availability_zone_check is False:
3812 raise vimconn.VimConnException(
3813 "Unable to migrate instance '{}' to a different availability zone".format(
3814 vm_id
3815 ),
3816 http_code=vimconn.HTTP_Bad_Request,
3817 )
3818 if available_compute_id is not None:
3819 # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
3820 self.nova.servers.live_migrate(
3821 server=vm_id,
3822 host=available_compute_id,
3823 block_migration=True,
3824 )
            state = "MIGRATING"
            vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
            changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[2]

            if vm_state and changed_compute_host == available_compute_id:
                self.logger.debug(
                    "Instance '{}' migrated to the new compute host '{}'".format(
                        vm_id, changed_compute_host
                    )
                )
                return state, available_compute_id
            else:
                raise vimconn.VimConnException(
                    "Migration failed. Instance '{}' was not moved to the new host {}".format(
                        vm_id, available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        else:
            raise vimconn.VimConnException(
                "No compute host in the availability zone is available or has "
                "enough resources to migrate instance '{}'".format(vm_id),
                http_code=vimconn.HTTP_Bad_Request,
            )
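
    # Hedged usage sketch (IDs and host names are hypothetical):
    #   state, host = vim.migrate_instance("vm-uuid", compute_host="compute-1")
    #   # or let the connector pick a host in the same availability zone:
    #   state, host = vim.migrate_instance("vm-uuid")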
3853
3854 @catch_any_exception
3855 def resize_instance(self, vm_id, new_flavor_id):
3856 """
3857 For resizing the vm based on the given
3858 flavor details
3859 param:
3860 vm_id : ID of an instance
3861 new_flavor_id : Flavor id to be resized
3862 Return the status of a resized instance
3863 """
3864 self._reload_connection()
3865 self.logger.debug("resize the flavor of an instance")
3866 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
3867 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
3868 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
        if instance_status in ("ACTIVE", "SHUTOFF"):
            if old_flavor_disk > new_flavor_disk:
                raise nvExceptions.BadRequest(
                    400,
                    message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                )
            else:
                self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
                if vm_state:
                    instance_resized_status = self.confirm_resize(vm_id)
                    return instance_resized_status
                else:
                    raise nvExceptions.Conflict(
                        409,
                        message="Cannot 'resize': vm_state is in ERROR",
                    )

        else:
            self.logger.debug("ERROR: Instance is not in ACTIVE or SHUTOFF state")
            raise nvExceptions.Conflict(
                409,
                message="Cannot 'resize' instance while it is not in ACTIVE or SHUTOFF state",
            )
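
    # Hedged usage sketch (IDs are hypothetical):
    #   status = vim.resize_instance("vm-uuid", "new-flavor-uuid")
    # The target flavor must not have a smaller disk, and the instance must be
    # ACTIVE or SHUTOFF; the resize is then confirmed via confirm_resize().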
3893
3894 def confirm_resize(self, vm_id):
3895 """
3896 Confirm the resize of an instance
3897 param:
3898 vm_id: ID of an instance
3899 """
3900 self._reload_connection()
3901 self.nova.servers.confirm_resize(server=vm_id)
3902 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3903 self.__wait_for_vm(vm_id, "ACTIVE")
3904 instance_status = self.get_vdu_state(vm_id)[0]
3905 return instance_status
3906
3907 def get_monitoring_data(self):
3908 try:
3909 self.logger.debug("Getting servers and ports data from Openstack VIMs.")
3910 self._reload_connection()
3911 all_servers = self.nova.servers.list(detailed=True)
3912 try:
3913 for server in all_servers:
3914 if server.flavor.get("original_name"):
3915 server.flavor["id"] = self.nova.flavors.find(
3916 name=server.flavor["original_name"]
3917 ).id
            except nvExceptions.NotFound as e:
                self.logger.warning(str(e))
3920 all_ports = self.neutron.list_ports()
3921 return all_servers, all_ports
3922 except Exception as e:
3923 raise vimconn.VimConnException(
3924 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
3925 )
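
    # Hedged sketch of the return value (shapes are indicative, not exact):
    #   all_servers: list of novaclient Server objects (detailed view)
    #   all_ports: dict as returned by neutron.list_ports(), i.e.
    #              {"ports": [<port dict>, ...]}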