Feature 11003: AZ for Cinder
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 import cinderclient.exceptions as cExceptions
45 from glanceclient import client as glClient
46 import glanceclient.exc as gl1Exceptions
47 from keystoneauth1 import session
48 from keystoneauth1.identity import v2, v3
49 import keystoneclient.exceptions as ksExceptions
50 import keystoneclient.v2_0.client as ksClient_v2
51 import keystoneclient.v3.client as ksClient_v3
52 import netaddr
53 from neutronclient.common import exceptions as neExceptions
54 from neutronclient.neutron import client as neClient
55 from novaclient import client as nClient, exceptions as nvExceptions
56 from osm_ro_plugin import vimconn
57 from requests.exceptions import ConnectionError
58 import yaml
59
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Nova server status -> MANO VM status (keys not listed here are unmapped)
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Neutron network status -> MANO network status
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# SFC classification types this connector accepts (see _validate_classification)
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
server_timeout = 1800
87
88
def catch_any_exception(func):
    """Decorator that maps any exception raised by *func* onto the proper
    vimconn exception via vimconnector._format_exception.

    Note: _format_exception always re-raises, so the except branch never
    returns a value.
    """

    def format_exception(*args, **kwargs):
        try:
            # BUGFIX: the original forwarded "*kwargs", which unpacks only the
            # keyword names as extra positional arguments and drops the values.
            return func(*args, **kwargs)
        except Exception as e:
            vimconnector._format_exception(e)

    return format_exception
97
98
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper tolerant of OpenStack's custom dict subclasses."""

    def represent_data(self, data):
        # PyYAML's safe dumper is designed to reject subclasses of dict
        # (see pyyaml issue 142); downcast such objects to a plain dict
        # before handing them to the base implementation.
        is_dict_subclass = isinstance(data, dict) and data.__class__ != dict
        if is_dict_subclass:
            data = dict(data.items())

        return super().represent_data(data)
108
109
110 class vimconnector(vimconn.VimConnector):
111 def __init__(
112 self,
113 uuid,
114 name,
115 tenant_id,
116 tenant_name,
117 url,
118 url_admin=None,
119 user=None,
120 passwd=None,
121 log_level=None,
122 config={},
123 persistent_info={},
124 ):
125 """using common constructor parameters. In this case
126 'url' is the keystone authorization url,
127 'url_admin' is not use
128 """
129 api_version = config.get("APIversion")
130
131 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
132 raise vimconn.VimConnException(
133 "Invalid value '{}' for config:APIversion. "
134 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
135 )
136
137 vim_type = config.get("vim_type")
138
139 if vim_type and vim_type not in ("vio", "VIO"):
140 raise vimconn.VimConnException(
141 "Invalid value '{}' for config:vim_type."
142 "Allowed values are 'vio' or 'VIO'".format(vim_type)
143 )
144
145 if config.get("dataplane_net_vlan_range") is not None:
146 # validate vlan ranges provided by user
147 self._validate_vlan_ranges(
148 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
149 )
150
151 if config.get("multisegment_vlan_range") is not None:
152 # validate vlan ranges provided by user
153 self._validate_vlan_ranges(
154 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
155 )
156
157 vimconn.VimConnector.__init__(
158 self,
159 uuid,
160 name,
161 tenant_id,
162 tenant_name,
163 url,
164 url_admin,
165 user,
166 passwd,
167 log_level,
168 config,
169 )
170
171 if self.config.get("insecure") and self.config.get("ca_cert"):
172 raise vimconn.VimConnException(
173 "options insecure and ca_cert are mutually exclusive"
174 )
175
176 self.verify = True
177
178 if self.config.get("insecure"):
179 self.verify = False
180
181 if self.config.get("ca_cert"):
182 self.verify = self.config.get("ca_cert")
183
184 if not url:
185 raise TypeError("url param can not be NoneType")
186
187 self.persistent_info = persistent_info
188 self.availability_zone = persistent_info.get("availability_zone", None)
189 self.storage_availability_zone = None
190 self.session = persistent_info.get("session", {"reload_client": True})
191 self.my_tenant_id = self.session.get("my_tenant_id")
192 self.nova = self.session.get("nova")
193 self.neutron = self.session.get("neutron")
194 self.cinder = self.session.get("cinder")
195 self.glance = self.session.get("glance")
196 # self.glancev1 = self.session.get("glancev1")
197 self.keystone = self.session.get("keystone")
198 self.api_version3 = self.session.get("api_version3")
199 self.vim_type = self.config.get("vim_type")
200
201 if self.vim_type:
202 self.vim_type = self.vim_type.upper()
203
204 if self.config.get("use_internal_endpoint"):
205 self.endpoint_type = "internalURL"
206 else:
207 self.endpoint_type = None
208
209 logging.getLogger("urllib3").setLevel(logging.WARNING)
210 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
211 logging.getLogger("novaclient").setLevel(logging.WARNING)
212 self.logger = logging.getLogger("ro.vim.openstack")
213
214 # allow security_groups to be a list or a single string
215 if isinstance(self.config.get("security_groups"), str):
216 self.config["security_groups"] = [self.config["security_groups"]]
217
218 self.security_groups_id = None
219
220 # ###### VIO Specific Changes #########
221 if self.vim_type == "VIO":
222 self.logger = logging.getLogger("ro.vim.vio")
223
224 if log_level:
225 self.logger.setLevel(getattr(logging, log_level))
226
227 def __getitem__(self, index):
228 """Get individuals parameters.
229 Throw KeyError"""
230 if index == "project_domain_id":
231 return self.config.get("project_domain_id")
232 elif index == "user_domain_id":
233 return self.config.get("user_domain_id")
234 else:
235 return vimconn.VimConnector.__getitem__(self, index)
236
237 def __setitem__(self, index, value):
238 """Set individuals parameters and it is marked as dirty so to force connection reload.
239 Throw KeyError"""
240 if index == "project_domain_id":
241 self.config["project_domain_id"] = value
242 elif index == "user_domain_id":
243 self.config["user_domain_id"] = value
244 else:
245 vimconn.VimConnector.__setitem__(self, index, value)
246
247 self.session["reload_client"] = True
248
249 def serialize(self, value):
250 """Serialization of python basic types.
251
252 In the case value is not serializable a message will be logged and a
253 simple representation of the data that cannot be converted back to
254 python is returned.
255 """
256 if isinstance(value, str):
257 return value
258
259 try:
260 return yaml.dump(
261 value, Dumper=SafeDumper, default_flow_style=True, width=256
262 )
263 except yaml.representer.RepresenterError:
264 self.logger.debug(
265 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
266 pformat(value),
267 exc_info=True,
268 )
269
270 return str(value)
271
    def _reload_connection(self):
        """Called before any operation, it check if credentials has changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure

        When session["reload_client"] is set (initial state, or after
        __setitem__ changed a parameter) this rebuilds the keystone, nova,
        neutron, cinder and glance clients and caches them in self.session,
        which is in turn stored in self.persistent_info.
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.session["reload_client"]:
            # decide keystone API version: explicit config wins, otherwise
            # infer it from the auth URL suffix
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # only default the domain ids to "default" when the user gave
                # neither the id nor the name for that domain
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.60"

            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            # use cinder v2 only when the catalog still advertises a
            # "volumev2" endpoint; otherwise fall back to v3
            if sess.get_all_version_data(service_type="volumev2"):
                self.cinder = self.session["cinder"] = cClient.Client(
                    2,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.cinder = self.session["cinder"] = cClient.Client(
                    3,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                # best effort: later operations needing the id will fail with
                # a clearer error (see _get_ids_from_name)
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                # glance client takes an explicit endpoint, so resolve the
                # internal one from the keystone catalog ourselves
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availablity zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
412
413 def __net_os2mano(self, net_list_dict):
414 """Transform the net openstack format to mano format
415 net_list_dict can be a list of dict or a single dict"""
416 if type(net_list_dict) is dict:
417 net_list_ = (net_list_dict,)
418 elif type(net_list_dict) is list:
419 net_list_ = net_list_dict
420 else:
421 raise TypeError("param net_list_dict must be a list or a dictionary")
422 for net in net_list_:
423 if net.get("provider:network_type") == "vlan":
424 net["type"] = "data"
425 else:
426 net["type"] = "bridge"
427
428 def __classification_os2mano(self, class_list_dict):
429 """Transform the openstack format (Flow Classifier) to mano format
430 (Classification) class_list_dict can be a list of dict or a single dict
431 """
432 if isinstance(class_list_dict, dict):
433 class_list_ = [class_list_dict]
434 elif isinstance(class_list_dict, list):
435 class_list_ = class_list_dict
436 else:
437 raise TypeError("param class_list_dict must be a list or a dictionary")
438 for classification in class_list_:
439 id = classification.pop("id")
440 name = classification.pop("name")
441 description = classification.pop("description")
442 project_id = classification.pop("project_id")
443 tenant_id = classification.pop("tenant_id")
444 original_classification = copy.deepcopy(classification)
445 classification.clear()
446 classification["ctype"] = "legacy_flow_classifier"
447 classification["definition"] = original_classification
448 classification["id"] = id
449 classification["name"] = name
450 classification["description"] = description
451 classification["project_id"] = project_id
452 classification["tenant_id"] = tenant_id
453
454 def __sfi_os2mano(self, sfi_list_dict):
455 """Transform the openstack format (Port Pair) to mano format (SFI)
456 sfi_list_dict can be a list of dict or a single dict
457 """
458 if isinstance(sfi_list_dict, dict):
459 sfi_list_ = [sfi_list_dict]
460 elif isinstance(sfi_list_dict, list):
461 sfi_list_ = sfi_list_dict
462 else:
463 raise TypeError("param sfi_list_dict must be a list or a dictionary")
464
465 for sfi in sfi_list_:
466 sfi["ingress_ports"] = []
467 sfi["egress_ports"] = []
468
469 if sfi.get("ingress"):
470 sfi["ingress_ports"].append(sfi["ingress"])
471
472 if sfi.get("egress"):
473 sfi["egress_ports"].append(sfi["egress"])
474
475 del sfi["ingress"]
476 del sfi["egress"]
477 params = sfi.get("service_function_parameters")
478 sfc_encap = False
479
480 if params:
481 correlation = params.get("correlation")
482
483 if correlation:
484 sfc_encap = True
485
486 sfi["sfc_encap"] = sfc_encap
487 del sfi["service_function_parameters"]
488
489 def __sf_os2mano(self, sf_list_dict):
490 """Transform the openstack format (Port Pair Group) to mano format (SF)
491 sf_list_dict can be a list of dict or a single dict
492 """
493 if isinstance(sf_list_dict, dict):
494 sf_list_ = [sf_list_dict]
495 elif isinstance(sf_list_dict, list):
496 sf_list_ = sf_list_dict
497 else:
498 raise TypeError("param sf_list_dict must be a list or a dictionary")
499
500 for sf in sf_list_:
501 del sf["port_pair_group_parameters"]
502 sf["sfis"] = sf["port_pairs"]
503 del sf["port_pairs"]
504
505 def __sfp_os2mano(self, sfp_list_dict):
506 """Transform the openstack format (Port Chain) to mano format (SFP)
507 sfp_list_dict can be a list of dict or a single dict
508 """
509 if isinstance(sfp_list_dict, dict):
510 sfp_list_ = [sfp_list_dict]
511 elif isinstance(sfp_list_dict, list):
512 sfp_list_ = sfp_list_dict
513 else:
514 raise TypeError("param sfp_list_dict must be a list or a dictionary")
515
516 for sfp in sfp_list_:
517 params = sfp.pop("chain_parameters")
518 sfc_encap = False
519
520 if params:
521 correlation = params.get("correlation")
522
523 if correlation:
524 sfc_encap = True
525
526 sfp["sfc_encap"] = sfc_encap
527 sfp["spi"] = sfp.pop("chain_id")
528 sfp["classifications"] = sfp.pop("flow_classifiers")
529 sfp["service_functions"] = sfp.pop("port_pair_groups")
530
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        """Accept any classification definition (placeholder validation).

        Only 'legacy_flow_classifier' is supported at this point, so no real
        validation of *definition* is performed yet.
        """
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.
539
    @staticmethod
    def _format_exception(exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause.

        Always raises a vimconn.* exception; it never returns normally.
        """
        message_error = str(exception)
        tip = ""

        # "not found" errors from any client -> VimConnNotFoundException
        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                nvExceptions.ResourceNotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
                cExceptions.NotFound,
            ),
        ):
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        # transport / connectivity failures -> VimConnConnectionException
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
                cExceptions.ConnectionError,
            ),
        ):
            # matched by class name: SSLError may come from several libraries
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        # bad requests and missing keys -> VimConnException (or a credentials
        # hint when the missing key is the admin-only host attribute)
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
                gl1Exceptions.BadRequest,
                cExceptions.BadRequest,
            ),
        ):
            if message_error == "OS-EXT-SRV-ATTR:host":
                tip = " (If the user does not have non-admin credentials, this attribute will be missing)"
                raise vimconn.VimConnInsufficientCredentials(
                    type(exception).__name__ + ": " + message_error + tip
                )
            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )

        # generic client-side errors -> VimConnUnexpectedResponse
        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
                cExceptions.ClientException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        # already a vimconn exception: re-raise unchanged
        elif isinstance(exception, vimconn.VimConnException):
            raise exception
        else:  # ()
            # unexpected exception type: log with traceback and wrap generically
            logger = logging.getLogger("ro.vim.openstack")
            logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
622
623 def _get_ids_from_name(self):
624 """
625 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
626 :return: None
627 """
628 # get tenant_id if only tenant_name is supplied
629 self._reload_connection()
630
631 if not self.my_tenant_id:
632 raise vimconn.VimConnConnectionException(
633 "Error getting tenant information from name={} id={}".format(
634 self.tenant_name, self.tenant_id
635 )
636 )
637
638 if self.config.get("security_groups") and not self.security_groups_id:
639 # convert from name to id
640 neutron_sg_list = self.neutron.list_security_groups(
641 tenant_id=self.my_tenant_id
642 )["security_groups"]
643
644 self.security_groups_id = []
645 for sg in self.config.get("security_groups"):
646 for neutron_sg in neutron_sg_list:
647 if sg in (neutron_sg["id"], neutron_sg["name"]):
648 self.security_groups_id.append(neutron_sg["id"])
649 break
650 else:
651 self.security_groups_id = None
652
653 raise vimconn.VimConnConnectionException(
654 "Not found security group {} for this tenant".format(sg)
655 )
656
657 def _find_nova_server(self, vm_id):
658 """
659 Returns the VM instance from Openstack and completes it with flavor ID
660 Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
661 """
662 try:
663 self._reload_connection()
664 server = self.nova.servers.find(id=vm_id)
665 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
666 server_dict = server.to_dict()
667 try:
668 if server_dict["flavor"].get("original_name"):
669 server_dict["flavor"]["id"] = self.nova.flavors.find(
670 name=server_dict["flavor"]["original_name"]
671 ).id
672 except nClient.exceptions.NotFound as e:
673 self.logger.warning(str(e.message))
674 return server_dict
675 except (
676 ksExceptions.ClientException,
677 nvExceptions.ClientException,
678 nvExceptions.NotFound,
679 ConnectionError,
680 ) as e:
681 self._format_exception(e)
682
683 def check_vim_connectivity(self):
684 # just get network list to check connectivity and credentials
685 self.get_network_list(filter_dict={})
686
687 def get_tenant_list(self, filter_dict={}):
688 """Obtain tenants of VIM
689 filter_dict can contain the following keys:
690 name: filter by tenant name
691 id: filter by tenant uuid/id
692 <other VIM specific>
693 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
694 """
695 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
696 try:
697 self._reload_connection()
698
699 if self.api_version3:
700 project_class_list = self.keystone.projects.list(
701 name=filter_dict.get("name")
702 )
703 else:
704 project_class_list = self.keystone.tenants.findall(**filter_dict)
705
706 project_list = []
707
708 for project in project_class_list:
709 if filter_dict.get("id") and filter_dict["id"] != project.id:
710 continue
711
712 project_list.append(project.to_dict())
713
714 return project_list
715 except (
716 ksExceptions.ConnectionError,
717 ksExceptions.ClientException,
718 ConnectionError,
719 ) as e:
720 self._format_exception(e)
721
722 def new_tenant(self, tenant_name, tenant_description):
723 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
724 self.logger.debug("Adding a new tenant name: %s", tenant_name)
725 try:
726 self._reload_connection()
727
728 if self.api_version3:
729 project = self.keystone.projects.create(
730 tenant_name,
731 self.config.get("project_domain_id", "default"),
732 description=tenant_description,
733 is_domain=False,
734 )
735 else:
736 project = self.keystone.tenants.create(tenant_name, tenant_description)
737
738 return project.id
739 except (
740 ksExceptions.ConnectionError,
741 ksExceptions.ClientException,
742 ksExceptions.BadRequest,
743 ConnectionError,
744 ) as e:
745 self._format_exception(e)
746
747 def delete_tenant(self, tenant_id):
748 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
749 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
750 try:
751 self._reload_connection()
752
753 if self.api_version3:
754 self.keystone.projects.delete(tenant_id)
755 else:
756 self.keystone.tenants.delete(tenant_id)
757
758 return tenant_id
759
760 except (
761 ksExceptions.ConnectionError,
762 ksExceptions.ClientException,
763 ksExceptions.NotFound,
764 ConnectionError,
765 ) as e:
766 self._format_exception(e)
767
768 def new_network(
769 self,
770 net_name,
771 net_type,
772 ip_profile=None,
773 shared=False,
774 provider_network_profile=None,
775 ):
776 """Adds a tenant network to VIM
777 Params:
778 'net_name': name of the network
779 'net_type': one of:
780 'bridge': overlay isolated network
781 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
782 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
783 'ip_profile': is a dict containing the IP parameters of the network
784 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
785 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
786 'gateway_address': (Optional) ip_schema, that is X.X.X.X
787 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
788 'dhcp_enabled': True or False
789 'dhcp_start_address': ip_schema, first IP to grant
790 'dhcp_count': number of IPs to grant.
791 'shared': if this network can be seen/use by other tenants/organization
792 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
793 physical-network: physnet-label}
794 Returns a tuple with the network identifier and created_items, or raises an exception on error
795 created_items can be None or a dictionary where this method can include key-values that will be passed to
796 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
797 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
798 as not present.
799 """
800 self.logger.debug(
801 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
802 )
803 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
804
805 try:
806 vlan = None
807
808 if provider_network_profile:
809 vlan = provider_network_profile.get("segmentation-id")
810
811 new_net = None
812 created_items = {}
813 self._reload_connection()
814 network_dict = {"name": net_name, "admin_state_up": True}
815
816 if net_type in ("data", "ptp") or provider_network_profile:
817 provider_physical_network = None
818
819 if provider_network_profile and provider_network_profile.get(
820 "physical-network"
821 ):
822 provider_physical_network = provider_network_profile.get(
823 "physical-network"
824 )
825
826 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
827 # or not declared, just ignore the checking
828 if (
829 isinstance(
830 self.config.get("dataplane_physical_net"), (tuple, list)
831 )
832 and provider_physical_network
833 not in self.config["dataplane_physical_net"]
834 ):
835 raise vimconn.VimConnConflictException(
836 "Invalid parameter 'provider-network:physical-network' "
837 "for network creation. '{}' is not one of the declared "
838 "list at VIM_config:dataplane_physical_net".format(
839 provider_physical_network
840 )
841 )
842
843 # use the default dataplane_physical_net
844 if not provider_physical_network:
845 provider_physical_network = self.config.get(
846 "dataplane_physical_net"
847 )
848
849 # if it is non-empty list, use the first value. If it is a string use the value directly
850 if (
851 isinstance(provider_physical_network, (tuple, list))
852 and provider_physical_network
853 ):
854 provider_physical_network = provider_physical_network[0]
855
856 if not provider_physical_network:
857 raise vimconn.VimConnConflictException(
858 "missing information needed for underlay networks. Provide "
859 "'dataplane_physical_net' configuration at VIM or use the NS "
860 "instantiation parameter 'provider-network.physical-network'"
861 " for the VLD"
862 )
863
864 if not self.config.get("multisegment_support"):
865 network_dict[
866 "provider:physical_network"
867 ] = provider_physical_network
868
869 if (
870 provider_network_profile
871 and "network-type" in provider_network_profile
872 ):
873 network_dict[
874 "provider:network_type"
875 ] = provider_network_profile["network-type"]
876 else:
877 network_dict["provider:network_type"] = self.config.get(
878 "dataplane_network_type", "vlan"
879 )
880
881 if vlan:
882 network_dict["provider:segmentation_id"] = vlan
883 else:
884 # Multi-segment case
885 segment_list = []
886 segment1_dict = {
887 "provider:physical_network": "",
888 "provider:network_type": "vxlan",
889 }
890 segment_list.append(segment1_dict)
891 segment2_dict = {
892 "provider:physical_network": provider_physical_network,
893 "provider:network_type": "vlan",
894 }
895
896 if vlan:
897 segment2_dict["provider:segmentation_id"] = vlan
898 elif self.config.get("multisegment_vlan_range"):
899 vlanID = self._generate_multisegment_vlanID()
900 segment2_dict["provider:segmentation_id"] = vlanID
901
902 # else
903 # raise vimconn.VimConnConflictException(
904 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
905 # network")
906 segment_list.append(segment2_dict)
907 network_dict["segments"] = segment_list
908
909 # VIO Specific Changes. It needs a concrete VLAN
910 if self.vim_type == "VIO" and vlan is None:
911 if self.config.get("dataplane_net_vlan_range") is None:
912 raise vimconn.VimConnConflictException(
913 "You must provide 'dataplane_net_vlan_range' in format "
914 "[start_ID - end_ID] at VIM_config for creating underlay "
915 "networks"
916 )
917
918 network_dict["provider:segmentation_id"] = self._generate_vlanID()
919
920 network_dict["shared"] = shared
921
922 if self.config.get("disable_network_port_security"):
923 network_dict["port_security_enabled"] = False
924
925 if self.config.get("neutron_availability_zone_hints"):
926 hints = self.config.get("neutron_availability_zone_hints")
927
928 if isinstance(hints, str):
929 hints = [hints]
930
931 network_dict["availability_zone_hints"] = hints
932
933 new_net = self.neutron.create_network({"network": network_dict})
934 # print new_net
935 # create subnetwork, even if there is no profile
936
937 if not ip_profile:
938 ip_profile = {}
939
940 if not ip_profile.get("subnet_address"):
941 # Fake subnet is required
942 subnet_rand = random.SystemRandom().randint(0, 255)
943 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
944
945 if "ip_version" not in ip_profile:
946 ip_profile["ip_version"] = "IPv4"
947
948 subnet = {
949 "name": net_name + "-subnet",
950 "network_id": new_net["network"]["id"],
951 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
952 "cidr": ip_profile["subnet_address"],
953 }
954
955 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
956 if ip_profile.get("gateway_address"):
957 subnet["gateway_ip"] = ip_profile["gateway_address"]
958 else:
959 subnet["gateway_ip"] = None
960
961 if ip_profile.get("dns_address"):
962 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
963
964 if "dhcp_enabled" in ip_profile:
965 subnet["enable_dhcp"] = (
966 False
967 if ip_profile["dhcp_enabled"] == "false"
968 or ip_profile["dhcp_enabled"] is False
969 else True
970 )
971
972 if ip_profile.get("dhcp_start_address"):
973 subnet["allocation_pools"] = []
974 subnet["allocation_pools"].append(dict())
975 subnet["allocation_pools"][0]["start"] = ip_profile[
976 "dhcp_start_address"
977 ]
978
979 if ip_profile.get("dhcp_count"):
980 # parts = ip_profile["dhcp_start_address"].split(".")
981 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
982 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
983 ip_int += ip_profile["dhcp_count"] - 1
984 ip_str = str(netaddr.IPAddress(ip_int))
985 subnet["allocation_pools"][0]["end"] = ip_str
986
987 if (
988 ip_profile.get("ipv6_address_mode")
989 and ip_profile["ip_version"] != "IPv4"
990 ):
991 subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
992 # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
993 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
994 subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
995
996 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
997 self.neutron.create_subnet({"subnet": subnet})
998
999 if net_type == "data" and self.config.get("multisegment_support"):
1000 if self.config.get("l2gw_support"):
1001 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
1002 for l2gw in l2gw_list:
1003 l2gw_conn = {
1004 "l2_gateway_id": l2gw["id"],
1005 "network_id": new_net["network"]["id"],
1006 "segmentation_id": str(vlanID),
1007 }
1008 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
1009 {"l2_gateway_connection": l2gw_conn}
1010 )
1011 created_items[
1012 "l2gwconn:"
1013 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
1014 ] = True
1015
1016 return new_net["network"]["id"], created_items
1017 except Exception as e:
1018 # delete l2gw connections (if any) before deleting the network
1019 for k, v in created_items.items():
1020 if not v: # skip already deleted
1021 continue
1022
1023 try:
1024 k_item, _, k_id = k.partition(":")
1025
1026 if k_item == "l2gwconn":
1027 self.neutron.delete_l2_gateway_connection(k_id)
1028
1029 except (neExceptions.ConnectionFailed, ConnectionError) as e2:
1030 self.logger.error(
1031 "Error deleting l2 gateway connection: {}: {}".format(
1032 type(e2).__name__, e2
1033 )
1034 )
1035 self._format_exception(e2)
1036 except Exception as e2:
1037 self.logger.error(
1038 "Error deleting l2 gateway connection: {}: {}".format(
1039 type(e2).__name__, e2
1040 )
1041 )
1042
1043 if new_net:
1044 self.neutron.delete_network(new_net["network"]["id"])
1045
1046 self._format_exception(e)
1047
1048 def get_network_list(self, filter_dict={}):
1049 """Obtain tenant networks of VIM
1050 Filter_dict can be:
1051 name: network name
1052 id: network uuid
1053 shared: boolean
1054 tenant_id: tenant
1055 admin_state_up: boolean
1056 status: 'ACTIVE'
1057 Returns the network list of dictionaries
1058 """
1059 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
1060 try:
1061 self._reload_connection()
1062 filter_dict_os = filter_dict.copy()
1063
1064 if self.api_version3 and "tenant_id" in filter_dict_os:
1065 # TODO check
1066 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
1067
1068 net_dict = self.neutron.list_networks(**filter_dict_os)
1069 net_list = net_dict["networks"]
1070 self.__net_os2mano(net_list)
1071
1072 return net_list
1073 except (
1074 neExceptions.ConnectionFailed,
1075 ksExceptions.ClientException,
1076 neExceptions.NeutronException,
1077 ConnectionError,
1078 ) as e:
1079 self._format_exception(e)
1080
1081 def get_network(self, net_id):
1082 """Obtain details of network from VIM
1083 Returns the network information from a network id"""
1084 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1085 filter_dict = {"id": net_id}
1086 net_list = self.get_network_list(filter_dict)
1087
1088 if len(net_list) == 0:
1089 raise vimconn.VimConnNotFoundException(
1090 "Network '{}' not found".format(net_id)
1091 )
1092 elif len(net_list) > 1:
1093 raise vimconn.VimConnConflictException(
1094 "Found more than one network with this criteria"
1095 )
1096
1097 net = net_list[0]
1098 subnets = []
1099 for subnet_id in net.get("subnets", ()):
1100 try:
1101 subnet = self.neutron.show_subnet(subnet_id)
1102 except Exception as e:
1103 self.logger.error(
1104 "osconnector.get_network(): Error getting subnet %s %s"
1105 % (net_id, str(e))
1106 )
1107 subnet = {"id": subnet_id, "fault": str(e)}
1108
1109 subnets.append(subnet)
1110
1111 net["subnets"] = subnets
1112 net["encapsulation"] = net.get("provider:network_type")
1113 net["encapsulation_type"] = net.get("provider:network_type")
1114 net["segmentation_id"] = net.get("provider:segmentation_id")
1115 net["encapsulation_id"] = net.get("provider:segmentation_id")
1116
1117 return net
1118
    @catch_any_exception
    def delete_network(self, net_id, created_items=None):
        """
        Removes a tenant network from VIM and its associated elements
        :param net_id: VIM identifier of the network, provided by method new_network
        :param created_items: dictionary with extra items to be deleted. provided by method new_network
        Returns the network identifier or raises an exception upon error or when network is not found.
        Cleanup order matters: l2gw connections first, then attached ports, then the network itself.
        Connection errors abort via _format_exception; other per-item errors are best-effort (logged only).
        """
        self.logger.debug("Deleting network '%s' from VIM", net_id)

        if created_items is None:
            created_items = {}

        try:
            self._reload_connection()
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    # created_items keys are "<kind>:<id>"; only l2gwconn
                    # entries are handled here.
                    k_item, _, k_id = k.partition(":")
                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)

                except (neExceptions.ConnectionFailed, ConnectionError) as e:
                    # Connectivity loss: no point retrying the rest, raise.
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e).__name__, e
                        )
                    )
                    self._format_exception(e)
                except Exception as e:
                    # Best-effort: log and keep deleting the other items.
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e).__name__, e
                        )
                    )

            # delete VM ports attached to this networks before the network
            ports = self.neutron.list_ports(network_id=net_id)
            for p in ports["ports"]:
                try:
                    self.neutron.delete_port(p["id"])

                except (neExceptions.ConnectionFailed, ConnectionError) as e:
                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))
                    # If there is connection error, it raises.
                    self._format_exception(e)
                except Exception as e:
                    # Best-effort: a failed port delete does not stop cleanup.
                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))

            self.neutron.delete_network(net_id)

            return net_id
        except (neExceptions.NetworkNotFoundClient, neExceptions.NotFound) as e:
            # If network to be deleted is not found, it does not raise.
            self.logger.warning(
                f"Error deleting network: {net_id} is not found, {str(e)}"
            )
1179
1180 def refresh_nets_status(self, net_list):
1181 """Get the status of the networks
1182 Params: the list of network identifiers
1183 Returns a dictionary with:
1184 net_id: #VIM id of this network
1185 status: #Mandatory. Text with one of:
1186 # DELETED (not found at vim)
1187 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1188 # OTHER (Vim reported other status not understood)
1189 # ERROR (VIM indicates an ERROR status)
1190 # ACTIVE, INACTIVE, DOWN (admin down),
1191 # BUILD (on building process)
1192 #
1193 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1194 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1195 """
1196 net_dict = {}
1197
1198 for net_id in net_list:
1199 net = {}
1200
1201 try:
1202 net_vim = self.get_network(net_id)
1203
1204 if net_vim["status"] in netStatus2manoFormat:
1205 net["status"] = netStatus2manoFormat[net_vim["status"]]
1206 else:
1207 net["status"] = "OTHER"
1208 net["error_msg"] = "VIM status reported " + net_vim["status"]
1209
1210 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1211 net["status"] = "DOWN"
1212
1213 net["vim_info"] = self.serialize(net_vim)
1214
1215 if net_vim.get("fault"): # TODO
1216 net["error_msg"] = str(net_vim["fault"])
1217 except vimconn.VimConnNotFoundException as e:
1218 self.logger.error("Exception getting net status: %s", str(e))
1219 net["status"] = "DELETED"
1220 net["error_msg"] = str(e)
1221 except vimconn.VimConnException as e:
1222 self.logger.error("Exception getting net status: %s", str(e))
1223 net["status"] = "VIM_ERROR"
1224 net["error_msg"] = str(e)
1225 net_dict[net_id] = net
1226 return net_dict
1227
1228 def get_flavor(self, flavor_id):
1229 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1230 self.logger.debug("Getting flavor '%s'", flavor_id)
1231 try:
1232 self._reload_connection()
1233 flavor = self.nova.flavors.find(id=flavor_id)
1234 return flavor.to_dict()
1235
1236 except (
1237 nvExceptions.NotFound,
1238 nvExceptions.ClientException,
1239 ksExceptions.ClientException,
1240 ConnectionError,
1241 ) as e:
1242 self._format_exception(e)
1243
1244 def get_flavor_id_from_data(self, flavor_dict):
1245 """Obtain flavor id that match the flavor description
1246 Returns the flavor_id or raises a vimconnNotFoundException
1247 flavor_dict: contains the required ram, vcpus, disk
1248 If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1249 and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1250 vimconnNotFoundException is raised
1251 """
1252 exact_match = False if self.config.get("use_existing_flavors") else True
1253
1254 try:
1255 self._reload_connection()
1256 flavor_candidate_id = None
1257 flavor_candidate_data = (10000, 10000, 10000)
1258 flavor_target = (
1259 flavor_dict["ram"],
1260 flavor_dict["vcpus"],
1261 flavor_dict["disk"],
1262 flavor_dict.get("ephemeral", 0),
1263 flavor_dict.get("swap", 0),
1264 )
1265 # numa=None
1266 extended = flavor_dict.get("extended", {})
1267 if extended:
1268 # TODO
1269 raise vimconn.VimConnNotFoundException(
1270 "Flavor with EPA still not implemented"
1271 )
1272 # if len(numas) > 1:
1273 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1274 # numa=numas[0]
1275 # numas = extended.get("numas")
1276 for flavor in self.nova.flavors.list():
1277 epa = flavor.get_keys()
1278
1279 if epa:
1280 continue
1281 # TODO
1282
1283 flavor_data = (
1284 flavor.ram,
1285 flavor.vcpus,
1286 flavor.disk,
1287 flavor.ephemeral,
1288 flavor.swap if isinstance(flavor.swap, int) else 0,
1289 )
1290 if flavor_data == flavor_target:
1291 return flavor.id
1292 elif (
1293 not exact_match
1294 and flavor_target < flavor_data < flavor_candidate_data
1295 ):
1296 flavor_candidate_id = flavor.id
1297 flavor_candidate_data = flavor_data
1298
1299 if not exact_match and flavor_candidate_id:
1300 return flavor_candidate_id
1301
1302 raise vimconn.VimConnNotFoundException(
1303 "Cannot find any flavor matching '{}'".format(flavor_dict)
1304 )
1305 except (
1306 nvExceptions.NotFound,
1307 nvExceptions.BadRequest,
1308 nvExceptions.ClientException,
1309 ksExceptions.ClientException,
1310 ConnectionError,
1311 ) as e:
1312 self._format_exception(e)
1313
1314 @staticmethod
1315 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1316 """Process resource quota and fill up extra_specs.
1317 Args:
1318 quota (dict): Keeping the quota of resurces
1319 prefix (str) Prefix
1320 extra_specs (dict) Dict to be filled to be used during flavor creation
1321
1322 """
1323 if "limit" in quota:
1324 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1325
1326 if "reserve" in quota:
1327 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1328
1329 if "shares" in quota:
1330 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1331 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1332
1333 @staticmethod
1334 def process_numa_memory(
1335 numa: dict, node_id: Optional[int], extra_specs: dict
1336 ) -> None:
1337 """Set the memory in extra_specs.
1338 Args:
1339 numa (dict): A dictionary which includes numa information
1340 node_id (int): ID of numa node
1341 extra_specs (dict): To be filled.
1342
1343 """
1344 if not numa.get("memory"):
1345 return
1346 memory_mb = numa["memory"] * 1024
1347 memory = "hw:numa_mem.{}".format(node_id)
1348 extra_specs[memory] = int(memory_mb)
1349
1350 @staticmethod
1351 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1352 """Set the cpu in extra_specs.
1353 Args:
1354 numa (dict): A dictionary which includes numa information
1355 node_id (int): ID of numa node
1356 extra_specs (dict): To be filled.
1357
1358 """
1359 if not numa.get("vcpu"):
1360 return
1361 vcpu = numa["vcpu"]
1362 cpu = "hw:numa_cpus.{}".format(node_id)
1363 vcpu = ",".join(map(str, vcpu))
1364 extra_specs[cpu] = vcpu
1365
1366 @staticmethod
1367 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1368 """Fill up extra_specs if numa has paired-threads.
1369 Args:
1370 numa (dict): A dictionary which includes numa information
1371 extra_specs (dict): To be filled.
1372
1373 Returns:
1374 threads (int) Number of virtual cpus
1375
1376 """
1377 if not numa.get("paired-threads"):
1378 return
1379
1380 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1381 threads = numa["paired-threads"] * 2
1382 extra_specs["hw:cpu_thread_policy"] = "require"
1383 extra_specs["hw:cpu_policy"] = "dedicated"
1384 return threads
1385
1386 @staticmethod
1387 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1388 """Fill up extra_specs if numa has cores.
1389 Args:
1390 numa (dict): A dictionary which includes numa information
1391 extra_specs (dict): To be filled.
1392
1393 Returns:
1394 cores (int) Number of virtual cpus
1395
1396 """
1397 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1398 # architecture, or a non-SMT architecture will be emulated
1399 if not numa.get("cores"):
1400 return
1401 cores = numa["cores"]
1402 extra_specs["hw:cpu_thread_policy"] = "isolate"
1403 extra_specs["hw:cpu_policy"] = "dedicated"
1404 return cores
1405
1406 @staticmethod
1407 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1408 """Fill up extra_specs if numa has threads.
1409 Args:
1410 numa (dict): A dictionary which includes numa information
1411 extra_specs (dict): To be filled.
1412
1413 Returns:
1414 threads (int) Number of virtual cpus
1415
1416 """
1417 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1418 if not numa.get("threads"):
1419 return
1420 threads = numa["threads"]
1421 extra_specs["hw:cpu_thread_policy"] = "prefer"
1422 extra_specs["hw:cpu_policy"] = "dedicated"
1423 return threads
1424
1425 def _process_numa_parameters_of_flavor(
1426 self, numas: List, extra_specs: Dict
1427 ) -> None:
1428 """Process numa parameters and fill up extra_specs.
1429
1430 Args:
1431 numas (list): List of dictionary which includes numa information
1432 extra_specs (dict): To be filled.
1433
1434 """
1435 numa_nodes = len(numas)
1436 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1437 cpu_cores, cpu_threads = 0, 0
1438
1439 if self.vim_type == "VIO":
1440 self.process_vio_numa_nodes(numa_nodes, extra_specs)
1441
1442 for numa in numas:
1443 if "id" in numa:
1444 node_id = numa["id"]
1445 # overwrite ram and vcpus
1446 # check if key "memory" is present in numa else use ram value at flavor
1447 self.process_numa_memory(numa, node_id, extra_specs)
1448 self.process_numa_vcpu(numa, node_id, extra_specs)
1449
1450 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1451 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1452
1453 if "paired-threads" in numa:
1454 threads = self.process_numa_paired_threads(numa, extra_specs)
1455 cpu_threads += threads
1456
1457 elif "cores" in numa:
1458 cores = self.process_numa_cores(numa, extra_specs)
1459 cpu_cores += cores
1460
1461 elif "threads" in numa:
1462 threads = self.process_numa_threads(numa, extra_specs)
1463 cpu_threads += threads
1464
1465 if cpu_cores:
1466 extra_specs["hw:cpu_cores"] = str(cpu_cores)
1467 if cpu_threads:
1468 extra_specs["hw:cpu_threads"] = str(cpu_threads)
1469
1470 @staticmethod
1471 def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1472 """According to number of numa nodes, updates the extra_specs for VIO.
1473
1474 Args:
1475
1476 numa_nodes (int): List keeps the numa node numbers
1477 extra_specs (dict): Extra specs dict to be updated
1478
1479 """
1480 # If there are several numas, we do not define specific affinity.
1481 extra_specs["vmware:latency_sensitivity_level"] = "high"
1482
1483 def _change_flavor_name(
1484 self, name: str, name_suffix: int, flavor_data: dict
1485 ) -> str:
1486 """Change the flavor name if the name already exists.
1487
1488 Args:
1489 name (str): Flavor name to be checked
1490 name_suffix (int): Suffix to be appended to name
1491 flavor_data (dict): Flavor dict
1492
1493 Returns:
1494 name (str): New flavor name to be used
1495
1496 """
1497 # Get used names
1498 fl = self.nova.flavors.list()
1499 fl_names = [f.name for f in fl]
1500
1501 while name in fl_names:
1502 name_suffix += 1
1503 name = flavor_data["name"] + "-" + str(name_suffix)
1504
1505 return name
1506
1507 def _process_extended_config_of_flavor(
1508 self, extended: dict, extra_specs: dict
1509 ) -> None:
1510 """Process the extended dict to fill up extra_specs.
1511 Args:
1512
1513 extended (dict): Keeping the extra specification of flavor
1514 extra_specs (dict) Dict to be filled to be used during flavor creation
1515
1516 """
1517 quotas = {
1518 "cpu-quota": "cpu",
1519 "mem-quota": "memory",
1520 "vif-quota": "vif",
1521 "disk-io-quota": "disk_io",
1522 }
1523
1524 page_sizes = {
1525 "LARGE": "large",
1526 "SMALL": "small",
1527 "SIZE_2MB": "2MB",
1528 "SIZE_1GB": "1GB",
1529 "PREFER_LARGE": "any",
1530 }
1531
1532 policies = {
1533 "cpu-pinning-policy": "hw:cpu_policy",
1534 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1535 "mem-policy": "hw:numa_mempolicy",
1536 }
1537
1538 numas = extended.get("numas")
1539 if numas:
1540 self._process_numa_parameters_of_flavor(numas, extra_specs)
1541
1542 for quota, item in quotas.items():
1543 if quota in extended.keys():
1544 self.process_resource_quota(extended.get(quota), item, extra_specs)
1545
1546 # Set the mempage size as specified in the descriptor
1547 if extended.get("mempage-size"):
1548 if extended["mempage-size"] in page_sizes.keys():
1549 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1550 else:
1551 # Normally, validations in NBI should not allow to this condition.
1552 self.logger.debug(
1553 "Invalid mempage-size %s. Will be ignored",
1554 extended.get("mempage-size"),
1555 )
1556
1557 for policy, hw_policy in policies.items():
1558 if extended.get(policy):
1559 extra_specs[hw_policy] = extended[policy].lower()
1560
1561 @staticmethod
1562 def _get_flavor_details(flavor_data: dict) -> Tuple:
1563 """Returns the details of flavor
1564 Args:
1565 flavor_data (dict): Dictionary that includes required flavor details
1566
1567 Returns:
1568 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1569
1570 """
1571 return (
1572 flavor_data.get("ram", 64),
1573 flavor_data.get("vcpus", 1),
1574 {},
1575 flavor_data.get("extended"),
1576 )
1577
    @catch_any_exception
    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        if change_name_if_used is True, it will change name in case of conflict,
        because it is not supported name repetition.

        Args:
            flavor_data (dict): Flavor details to be processed
            change_name_if_used (bool): Change name in case of conflict

        Returns:
            flavor_id (str): flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0
        name = flavor_data["name"]
        # On a name conflict the creation is retried (up to max_retries) with a
        # new suffixed name produced by _change_flavor_name.
        while retry < max_retries:
            retry += 1
            try:
                self._reload_connection()

                if change_name_if_used:
                    # Returns a name not yet present at the VIM.
                    name = self._change_flavor_name(name, name_suffix, flavor_data)

                ram, vcpus, extra_specs, extended = self._get_flavor_details(
                    flavor_data
                )
                if extended:
                    # Maps NUMA/quota/policy settings into extra_specs.
                    self._process_extended_config_of_flavor(extended, extra_specs)

                # Create flavor

                new_flavor = self.nova.flavors.create(
                    name=name,
                    ram=ram,
                    vcpus=vcpus,
                    disk=flavor_data.get("disk", 0),
                    ephemeral=flavor_data.get("ephemeral", 0),
                    swap=flavor_data.get("swap", 0),
                    is_public=flavor_data.get("is_public", True),
                )

                # Add metadata
                if extra_specs:
                    new_flavor.set_keys(extra_specs)

                return new_flavor.id

            except nvExceptions.Conflict as e:
                # Retry with a fresh name; on the last attempt (or when renaming
                # is disabled) translate and raise instead.
                if change_name_if_used and retry < max_retries:
                    continue

                self._format_exception(e)
1634
1635 @catch_any_exception
1636 def delete_flavor(self, flavor_id):
1637 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1638 try:
1639 self._reload_connection()
1640 self.nova.flavors.delete(flavor_id)
1641 return flavor_id
1642
1643 except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
1644 # If flavor is not found, it does not raise.
1645 self.logger.warning(
1646 f"Error deleting flavor: {flavor_id} is not found, {str(e.message)}"
1647 )
1648
1649 def new_image(self, image_dict):
1650 """
1651 Adds a tenant image to VIM. imge_dict is a dictionary with:
1652 name: name
1653 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1654 location: path or URI
1655 public: "yes" or "no"
1656 metadata: metadata of the image
1657 Returns the image_id
1658 """
1659 retry = 0
1660 max_retries = 3
1661
1662 while retry < max_retries:
1663 retry += 1
1664 try:
1665 self._reload_connection()
1666
1667 # determine format http://docs.openstack.org/developer/glance/formats.html
1668 if "disk_format" in image_dict:
1669 disk_format = image_dict["disk_format"]
1670 else: # autodiscover based on extension
1671 if image_dict["location"].endswith(".qcow2"):
1672 disk_format = "qcow2"
1673 elif image_dict["location"].endswith(".vhd"):
1674 disk_format = "vhd"
1675 elif image_dict["location"].endswith(".vmdk"):
1676 disk_format = "vmdk"
1677 elif image_dict["location"].endswith(".vdi"):
1678 disk_format = "vdi"
1679 elif image_dict["location"].endswith(".iso"):
1680 disk_format = "iso"
1681 elif image_dict["location"].endswith(".aki"):
1682 disk_format = "aki"
1683 elif image_dict["location"].endswith(".ari"):
1684 disk_format = "ari"
1685 elif image_dict["location"].endswith(".ami"):
1686 disk_format = "ami"
1687 else:
1688 disk_format = "raw"
1689
1690 self.logger.debug(
1691 "new_image: '%s' loading from '%s'",
1692 image_dict["name"],
1693 image_dict["location"],
1694 )
1695 if self.vim_type == "VIO":
1696 container_format = "bare"
1697 if "container_format" in image_dict:
1698 container_format = image_dict["container_format"]
1699
1700 new_image = self.glance.images.create(
1701 name=image_dict["name"],
1702 container_format=container_format,
1703 disk_format=disk_format,
1704 )
1705 else:
1706 new_image = self.glance.images.create(name=image_dict["name"])
1707
1708 if image_dict["location"].startswith("http"):
1709 # TODO there is not a method to direct download. It must be downloaded locally with requests
1710 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1711 else: # local path
1712 with open(image_dict["location"]) as fimage:
1713 self.glance.images.upload(new_image.id, fimage)
1714 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1715 # image_dict.get("public","yes")=="yes",
1716 # container_format="bare", data=fimage, disk_format=disk_format)
1717
1718 metadata_to_load = image_dict.get("metadata")
1719
1720 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1721 # for openstack
1722 if self.vim_type == "VIO":
1723 metadata_to_load["upload_location"] = image_dict["location"]
1724 else:
1725 metadata_to_load["location"] = image_dict["location"]
1726
1727 self.glance.images.update(new_image.id, **metadata_to_load)
1728
1729 return new_image.id
1730 except (
1731 HTTPException,
1732 gl1Exceptions.HTTPException,
1733 gl1Exceptions.CommunicationError,
1734 ConnectionError,
1735 ) as e:
1736 if retry == max_retries:
1737 continue
1738
1739 self._format_exception(e)
1740 except IOError as e: # can not open the file
1741 raise vimconn.VimConnConnectionException(
1742 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1743 http_code=vimconn.HTTP_Bad_Request,
1744 )
1745 except Exception as e:
1746 self._format_exception(e)
1747
1748 @catch_any_exception
1749 def delete_image(self, image_id):
1750 """Deletes a tenant image from openstack VIM. Returns the old id"""
1751 try:
1752 self._reload_connection()
1753 self.glance.images.delete(image_id)
1754
1755 return image_id
1756 except gl1Exceptions.NotFound as e:
1757 # If image is not found, it does not raise.
1758 self.logger.warning(
1759 f"Error deleting image: {image_id} is not found, {str(e)}"
1760 )
1761
1762 @catch_any_exception
1763 def get_image_id_from_path(self, path):
1764 """Get the image id from image path in the VIM database. Returns the image_id"""
1765 self._reload_connection()
1766 images = self.glance.images.list()
1767
1768 for image in images:
1769 if image.metadata.get("location") == path:
1770 return image.id
1771
1772 raise vimconn.VimConnNotFoundException(
1773 "image with location '{}' not found".format(path)
1774 )
1775
1776 def get_image_list(self, filter_dict={}):
1777 """Obtain tenant images from VIM
1778 Filter_dict can be:
1779 id: image id
1780 name: image name
1781 checksum: image checksum
1782 Returns the image list of dictionaries:
1783 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1784 List can be empty
1785 """
1786 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1787 try:
1788 self._reload_connection()
1789 # filter_dict_os = filter_dict.copy()
1790 # First we filter by the available filter fields: name, id. The others are removed.
1791 image_list = self.glance.images.list()
1792 filtered_list = []
1793
1794 for image in image_list:
1795 try:
1796 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1797 continue
1798
1799 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1800 continue
1801
1802 if (
1803 filter_dict.get("checksum")
1804 and image["checksum"] != filter_dict["checksum"]
1805 ):
1806 continue
1807
1808 filtered_list.append(image.copy())
1809 except gl1Exceptions.HTTPNotFound:
1810 pass
1811
1812 return filtered_list
1813
1814 except (
1815 ksExceptions.ClientException,
1816 nvExceptions.ClientException,
1817 gl1Exceptions.CommunicationError,
1818 ConnectionError,
1819 ) as e:
1820 self._format_exception(e)
1821
1822 def __wait_for_vm(self, vm_id, status):
1823 """wait until vm is in the desired status and return True.
1824 If the VM gets in ERROR status, return false.
1825 If the timeout is reached generate an exception"""
1826 elapsed_time = 0
1827 while elapsed_time < server_timeout:
1828 vm_status = self.nova.servers.get(vm_id).status
1829
1830 if vm_status == status:
1831 return True
1832
1833 if vm_status == "ERROR":
1834 return False
1835
1836 time.sleep(5)
1837 elapsed_time += 5
1838
1839 # if we exceeded the timeout rollback
1840 if elapsed_time >= server_timeout:
1841 raise vimconn.VimConnException(
1842 "Timeout waiting for instance " + vm_id + " to get " + status,
1843 http_code=vimconn.HTTP_Request_Timeout,
1844 )
1845
1846 def _get_openstack_availablity_zones(self):
1847 """
1848 Get from openstack availability zones available
1849 :return:
1850 """
1851 try:
1852 openstack_availability_zone = self.nova.availability_zones.list()
1853 openstack_availability_zone = [
1854 str(zone.zoneName)
1855 for zone in openstack_availability_zone
1856 if zone.zoneName != "internal"
1857 ]
1858
1859 return openstack_availability_zone
1860 except Exception:
1861 return None
1862
1863 def _set_availablity_zones(self):
1864 """
1865 Set vim availablity zone
1866 :return:
1867 """
1868 if "availability_zone" in self.config:
1869 vim_availability_zones = self.config.get("availability_zone")
1870
1871 if isinstance(vim_availability_zones, str):
1872 self.availability_zone = [vim_availability_zones]
1873 elif isinstance(vim_availability_zones, list):
1874 self.availability_zone = vim_availability_zones
1875 else:
1876 self.availability_zone = self._get_openstack_availablity_zones()
1877 if "storage_availability_zone" in self.config:
1878 self.storage_availability_zone = self.config.get(
1879 "storage_availability_zone"
1880 )
1881
1882 def _get_vm_availability_zone(
1883 self, availability_zone_index, availability_zone_list
1884 ):
1885 """
1886 Return thge availability zone to be used by the created VM.
1887 :return: The VIM availability zone to be used or None
1888 """
1889 if availability_zone_index is None:
1890 if not self.config.get("availability_zone"):
1891 return None
1892 elif isinstance(self.config.get("availability_zone"), str):
1893 return self.config["availability_zone"]
1894 else:
1895 # TODO consider using a different parameter at config for default AV and AV list match
1896 return self.config["availability_zone"][0]
1897
1898 vim_availability_zones = self.availability_zone
1899 # check if VIM offer enough availability zones describe in the VNFD
1900 if vim_availability_zones and len(availability_zone_list) <= len(
1901 vim_availability_zones
1902 ):
1903 # check if all the names of NFV AV match VIM AV names
1904 match_by_index = False
1905 for av in availability_zone_list:
1906 if av not in vim_availability_zones:
1907 match_by_index = True
1908 break
1909
1910 if match_by_index:
1911 return vim_availability_zones[availability_zone_index]
1912 else:
1913 return availability_zone_list[availability_zone_index]
1914 else:
1915 raise vimconn.VimConnConflictException(
1916 "No enough availability zones at VIM for this deployment"
1917 )
1918
1919 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1920 """Fill up the security_groups in the port_dict.
1921
1922 Args:
1923 net (dict): Network details
1924 port_dict (dict): Port details
1925
1926 """
1927 if (
1928 self.config.get("security_groups")
1929 and net.get("port_security") is not False
1930 and not self.config.get("no_port_security_extension")
1931 ):
1932 if not self.security_groups_id:
1933 self._get_ids_from_name()
1934
1935 port_dict["security_groups"] = self.security_groups_id
1936
1937 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1938 """Fill up the network binding depending on network type in the port_dict.
1939
1940 Args:
1941 net (dict): Network details
1942 port_dict (dict): Port details
1943
1944 """
1945 if not net.get("type"):
1946 raise vimconn.VimConnException("Type is missing in the network details.")
1947
1948 if net["type"] == "virtual":
1949 pass
1950
1951 # For VF
1952 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1953 port_dict["binding:vnic_type"] = "direct"
1954
1955 # VIO specific Changes
1956 if self.vim_type == "VIO":
1957 # Need to create port with port_security_enabled = False and no-security-groups
1958 port_dict["port_security_enabled"] = False
1959 port_dict["provider_security_groups"] = []
1960 port_dict["security_groups"] = []
1961
1962 else:
1963 # For PT PCI-PASSTHROUGH
1964 port_dict["binding:vnic_type"] = "direct-physical"
1965
1966 @staticmethod
1967 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1968 """Set the "ip" parameter in net dictionary.
1969
1970 Args:
1971 new_port (dict): New created port
1972 net (dict): Network details
1973
1974 """
1975 fixed_ips = new_port["port"].get("fixed_ips")
1976
1977 if fixed_ips:
1978 net["ip"] = fixed_ips[0].get("ip_address")
1979 else:
1980 net["ip"] = None
1981
1982 @staticmethod
1983 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1984 """Fill up the mac_address and fixed_ips in port_dict.
1985
1986 Args:
1987 net (dict): Network details
1988 port_dict (dict): Port details
1989
1990 """
1991 if net.get("mac_address"):
1992 port_dict["mac_address"] = net["mac_address"]
1993
1994 ip_dual_list = []
1995 if ip_list := net.get("ip_address"):
1996 if not isinstance(ip_list, list):
1997 ip_list = [ip_list]
1998 for ip in ip_list:
1999 ip_dict = {"ip_address": ip}
2000 ip_dual_list.append(ip_dict)
2001 port_dict["fixed_ips"] = ip_dual_list
2002 # TODO add "subnet_id": <subnet_id>
2003
2004 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
2005 """Create new port using neutron.
2006
2007 Args:
2008 port_dict (dict): Port details
2009 created_items (dict): All created items
2010 net (dict): Network details
2011
2012 Returns:
2013 new_port (dict): New created port
2014
2015 """
2016 new_port = self.neutron.create_port({"port": port_dict})
2017 created_items["port:" + str(new_port["port"]["id"])] = True
2018 net["mac_address"] = new_port["port"]["mac_address"]
2019 net["vim_id"] = new_port["port"]["id"]
2020
2021 return new_port
2022
2023 def _create_port(
2024 self, net: dict, name: str, created_items: dict
2025 ) -> Tuple[dict, dict]:
2026 """Create port using net details.
2027
2028 Args:
2029 net (dict): Network details
2030 name (str): Name to be used as network name if net dict does not include name
2031 created_items (dict): All created items
2032
2033 Returns:
2034 new_port, port New created port, port dictionary
2035
2036 """
2037
2038 port_dict = {
2039 "network_id": net["net_id"],
2040 "name": net.get("name"),
2041 "admin_state_up": True,
2042 }
2043
2044 if not port_dict["name"]:
2045 port_dict["name"] = name
2046
2047 self._prepare_port_dict_security_groups(net, port_dict)
2048
2049 self._prepare_port_dict_binding(net, port_dict)
2050
2051 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
2052
2053 new_port = self._create_new_port(port_dict, created_items, net)
2054
2055 vimconnector._set_fixed_ip(new_port, net)
2056
2057 port = {"port-id": new_port["port"]["id"]}
2058
2059 if float(self.nova.api_version.get_string()) >= 2.32:
2060 port["tag"] = new_port["port"]["name"]
2061
2062 return new_port, port
2063
2064 def _prepare_network_for_vminstance(
2065 self,
2066 name: str,
2067 net_list: list,
2068 created_items: dict,
2069 net_list_vim: list,
2070 external_network: list,
2071 no_secured_ports: list,
2072 ) -> None:
2073 """Create port and fill up net dictionary for new VM instance creation.
2074
2075 Args:
2076 name (str): Name of network
2077 net_list (list): List of networks
2078 created_items (dict): All created items belongs to a VM
2079 net_list_vim (list): List of ports
2080 external_network (list): List of external-networks
2081 no_secured_ports (list): Port security disabled ports
2082 """
2083
2084 self._reload_connection()
2085
2086 for net in net_list:
2087 # Skip non-connected iface
2088 if not net.get("net_id"):
2089 continue
2090
2091 new_port, port = self._create_port(net, name, created_items)
2092
2093 net_list_vim.append(port)
2094
2095 if net.get("floating_ip", False):
2096 net["exit_on_floating_ip_error"] = True
2097 external_network.append(net)
2098
2099 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2100 net["exit_on_floating_ip_error"] = False
2101 external_network.append(net)
2102 net["floating_ip"] = self.config.get("use_floating_ip")
2103
2104 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2105 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2106 if net.get("port_security") is False and not self.config.get(
2107 "no_port_security_extension"
2108 ):
2109 no_secured_ports.append(
2110 (
2111 new_port["port"]["id"],
2112 net.get("port_security_disable_strategy"),
2113 )
2114 )
2115
2116 def _prepare_persistent_root_volumes(
2117 self,
2118 name: str,
2119 storage_av_zone: list,
2120 disk: dict,
2121 base_disk_index: int,
2122 block_device_mapping: dict,
2123 existing_vim_volumes: list,
2124 created_items: dict,
2125 ) -> Optional[str]:
2126 """Prepare persistent root volumes for new VM instance.
2127
2128 Args:
2129 name (str): Name of VM instance
2130 storage_av_zone (list): Storage of availability zones
2131 disk (dict): Disk details
2132 base_disk_index (int): Disk index
2133 block_device_mapping (dict): Block device details
2134 existing_vim_volumes (list): Existing disk details
2135 created_items (dict): All created items belongs to VM
2136
2137 Returns:
2138 boot_volume_id (str): ID of boot volume
2139
2140 """
2141 # Disk may include only vim_volume_id or only vim_id."
2142 # Use existing persistent root volume finding with volume_id or vim_id
2143 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2144 if disk.get(key_id):
2145 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2146 existing_vim_volumes.append({"id": disk[key_id]})
2147 else:
2148 # Create persistent root volume
2149 volume = self.cinder.volumes.create(
2150 size=disk["size"],
2151 name=name + "vd" + chr(base_disk_index),
2152 imageRef=disk["image_id"],
2153 # Make sure volume is in the same AZ as the VM to be attached to
2154 availability_zone=storage_av_zone,
2155 )
2156 boot_volume_id = volume.id
2157 self.update_block_device_mapping(
2158 volume=volume,
2159 block_device_mapping=block_device_mapping,
2160 base_disk_index=base_disk_index,
2161 disk=disk,
2162 created_items=created_items,
2163 )
2164
2165 return boot_volume_id
2166
2167 @staticmethod
2168 def update_block_device_mapping(
2169 volume: object,
2170 block_device_mapping: dict,
2171 base_disk_index: int,
2172 disk: dict,
2173 created_items: dict,
2174 ) -> None:
2175 """Add volume information to block device mapping dict.
2176 Args:
2177 volume (object): Created volume object
2178 block_device_mapping (dict): Block device details
2179 base_disk_index (int): Disk index
2180 disk (dict): Disk details
2181 created_items (dict): All created items belongs to VM
2182 """
2183 if not volume:
2184 raise vimconn.VimConnException("Volume is empty.")
2185
2186 if not hasattr(volume, "id"):
2187 raise vimconn.VimConnException(
2188 "Created volume is not valid, does not have id attribute."
2189 )
2190
2191 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2192 if disk.get("multiattach"): # multiattach volumes do not belong to VDUs
2193 return
2194 volume_txt = "volume:" + str(volume.id)
2195 if disk.get("keep"):
2196 volume_txt += ":keep"
2197 created_items[volume_txt] = True
2198
2199 @catch_any_exception
2200 def new_shared_volumes(self, shared_volume_data) -> (str, str):
2201 availability_zone = (
2202 self.storage_availability_zone
2203 if self.storage_availability_zone
2204 else self._get_vm_availability_zone
2205 )
2206 volume = self.cinder.volumes.create(
2207 size=shared_volume_data["size"],
2208 name=shared_volume_data["name"],
2209 volume_type="multiattach",
2210 availability_zone=availability_zone,
2211 )
2212 return volume.name, volume.id
2213
2214 def _prepare_shared_volumes(
2215 self,
2216 name: str,
2217 disk: dict,
2218 base_disk_index: int,
2219 block_device_mapping: dict,
2220 existing_vim_volumes: list,
2221 created_items: dict,
2222 ):
2223 volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
2224 if volumes.get(disk["name"]):
2225 sv_id = volumes[disk["name"]]
2226 max_retries = 3
2227 vol_status = ""
2228 # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
2229 while max_retries:
2230 max_retries -= 1
2231 volume = self.cinder.volumes.get(sv_id)
2232 vol_status = volume.status
2233 if volume.status not in ("in-use", "available"):
2234 time.sleep(5)
2235 continue
2236 self.update_block_device_mapping(
2237 volume=volume,
2238 block_device_mapping=block_device_mapping,
2239 base_disk_index=base_disk_index,
2240 disk=disk,
2241 created_items=created_items,
2242 )
2243 return
2244 raise vimconn.VimConnException(
2245 "Shared volume is not prepared, status is: {}".format(vol_status),
2246 http_code=vimconn.HTTP_Internal_Server_Error,
2247 )
2248
2249 def _prepare_non_root_persistent_volumes(
2250 self,
2251 name: str,
2252 disk: dict,
2253 storage_av_zone: list,
2254 block_device_mapping: dict,
2255 base_disk_index: int,
2256 existing_vim_volumes: list,
2257 created_items: dict,
2258 ) -> None:
2259 """Prepare persistent volumes for new VM instance.
2260
2261 Args:
2262 name (str): Name of VM instance
2263 disk (dict): Disk details
2264 storage_av_zone (list): Storage of availability zones
2265 block_device_mapping (dict): Block device details
2266 base_disk_index (int): Disk index
2267 existing_vim_volumes (list): Existing disk details
2268 created_items (dict): All created items belongs to VM
2269 """
2270 # Non-root persistent volumes
2271 # Disk may include only vim_volume_id or only vim_id."
2272 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2273 if disk.get(key_id):
2274 # Use existing persistent volume
2275 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2276 existing_vim_volumes.append({"id": disk[key_id]})
2277 else:
2278 volume_name = f"{name}vd{chr(base_disk_index)}"
2279 volume = self.cinder.volumes.create(
2280 size=disk["size"],
2281 name=volume_name,
2282 # Make sure volume is in the same AZ as the VM to be attached to
2283 availability_zone=storage_av_zone,
2284 )
2285 self.update_block_device_mapping(
2286 volume=volume,
2287 block_device_mapping=block_device_mapping,
2288 base_disk_index=base_disk_index,
2289 disk=disk,
2290 created_items=created_items,
2291 )
2292
2293 def _wait_for_created_volumes_availability(
2294 self, elapsed_time: int, created_items: dict
2295 ) -> Optional[int]:
2296 """Wait till created volumes become available.
2297
2298 Args:
2299 elapsed_time (int): Passed time while waiting
2300 created_items (dict): All created items belongs to VM
2301
2302 Returns:
2303 elapsed_time (int): Time spent while waiting
2304
2305 """
2306 while elapsed_time < volume_timeout:
2307 for created_item in created_items:
2308 v, volume_id = (
2309 created_item.split(":")[0],
2310 created_item.split(":")[1],
2311 )
2312 if v == "volume":
2313 volume = self.cinder.volumes.get(volume_id)
2314 if (
2315 volume.volume_type == "multiattach"
2316 and volume.status == "in-use"
2317 ):
2318 return elapsed_time
2319 elif volume.status != "available":
2320 break
2321 else:
2322 # All ready: break from while
2323 break
2324
2325 time.sleep(5)
2326 elapsed_time += 5
2327
2328 return elapsed_time
2329
2330 def _wait_for_existing_volumes_availability(
2331 self, elapsed_time: int, existing_vim_volumes: list
2332 ) -> Optional[int]:
2333 """Wait till existing volumes become available.
2334
2335 Args:
2336 elapsed_time (int): Passed time while waiting
2337 existing_vim_volumes (list): Existing volume details
2338
2339 Returns:
2340 elapsed_time (int): Time spent while waiting
2341
2342 """
2343
2344 while elapsed_time < volume_timeout:
2345 for volume in existing_vim_volumes:
2346 v = self.cinder.volumes.get(volume["id"])
2347 if v.volume_type == "multiattach" and v.status == "in-use":
2348 return elapsed_time
2349 elif v.status != "available":
2350 break
2351 else: # all ready: break from while
2352 break
2353
2354 time.sleep(5)
2355 elapsed_time += 5
2356
2357 return elapsed_time
2358
2359 def _prepare_disk_for_vminstance(
2360 self,
2361 name: str,
2362 existing_vim_volumes: list,
2363 created_items: dict,
2364 storage_av_zone: list,
2365 block_device_mapping: dict,
2366 disk_list: list = None,
2367 ) -> None:
2368 """Prepare all volumes for new VM instance.
2369
2370 Args:
2371 name (str): Name of Instance
2372 existing_vim_volumes (list): List of existing volumes
2373 created_items (dict): All created items belongs to VM
2374 storage_av_zone (list): Storage availability zone
2375 block_device_mapping (dict): Block devices to be attached to VM
2376 disk_list (list): List of disks
2377
2378 """
2379 # Create additional volumes in case these are present in disk_list
2380 base_disk_index = ord("b")
2381 boot_volume_id = None
2382 elapsed_time = 0
2383 for disk in disk_list:
2384 if "image_id" in disk:
2385 # Root persistent volume
2386 base_disk_index = ord("a")
2387 boot_volume_id = self._prepare_persistent_root_volumes(
2388 name=name,
2389 storage_av_zone=storage_av_zone,
2390 disk=disk,
2391 base_disk_index=base_disk_index,
2392 block_device_mapping=block_device_mapping,
2393 existing_vim_volumes=existing_vim_volumes,
2394 created_items=created_items,
2395 )
2396 elif disk.get("multiattach"):
2397 self._prepare_shared_volumes(
2398 name=name,
2399 disk=disk,
2400 base_disk_index=base_disk_index,
2401 block_device_mapping=block_device_mapping,
2402 existing_vim_volumes=existing_vim_volumes,
2403 created_items=created_items,
2404 )
2405 else:
2406 # Non-root persistent volume
2407 self._prepare_non_root_persistent_volumes(
2408 name=name,
2409 disk=disk,
2410 storage_av_zone=storage_av_zone,
2411 block_device_mapping=block_device_mapping,
2412 base_disk_index=base_disk_index,
2413 existing_vim_volumes=existing_vim_volumes,
2414 created_items=created_items,
2415 )
2416 base_disk_index += 1
2417
2418 # Wait until created volumes are with status available
2419 elapsed_time = self._wait_for_created_volumes_availability(
2420 elapsed_time, created_items
2421 )
2422 # Wait until existing volumes in vim are with status available
2423 elapsed_time = self._wait_for_existing_volumes_availability(
2424 elapsed_time, existing_vim_volumes
2425 )
2426 # If we exceeded the timeout rollback
2427 if elapsed_time >= volume_timeout:
2428 raise vimconn.VimConnException(
2429 "Timeout creating volumes for instance " + name,
2430 http_code=vimconn.HTTP_Request_Timeout,
2431 )
2432 if boot_volume_id:
2433 self.cinder.volumes.set_bootable(boot_volume_id, True)
2434
2435 def _find_the_external_network_for_floating_ip(self):
2436 """Get the external network ip in order to create floating IP.
2437
2438 Returns:
2439 pool_id (str): External network pool ID
2440
2441 """
2442
2443 # Find the external network
2444 external_nets = list()
2445
2446 for net in self.neutron.list_networks()["networks"]:
2447 if net["router:external"]:
2448 external_nets.append(net)
2449
2450 if len(external_nets) == 0:
2451 raise vimconn.VimConnException(
2452 "Cannot create floating_ip automatically since "
2453 "no external network is present",
2454 http_code=vimconn.HTTP_Conflict,
2455 )
2456
2457 if len(external_nets) > 1:
2458 raise vimconn.VimConnException(
2459 "Cannot create floating_ip automatically since "
2460 "multiple external networks are present",
2461 http_code=vimconn.HTTP_Conflict,
2462 )
2463
2464 # Pool ID
2465 return external_nets[0].get("id")
2466
2467 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2468 """Trigger neutron to create a new floating IP using external network ID.
2469
2470 Args:
2471 param (dict): Input parameters to create a floating IP
2472 created_items (dict): All created items belongs to new VM instance
2473
2474 Raises:
2475
2476 VimConnException
2477 """
2478 try:
2479 self.logger.debug("Creating floating IP")
2480 new_floating_ip = self.neutron.create_floatingip(param)
2481 free_floating_ip = new_floating_ip["floatingip"]["id"]
2482 created_items["floating_ip:" + str(free_floating_ip)] = True
2483
2484 except Exception as e:
2485 raise vimconn.VimConnException(
2486 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2487 http_code=vimconn.HTTP_Conflict,
2488 )
2489
2490 def _create_floating_ip(
2491 self, floating_network: dict, server: object, created_items: dict
2492 ) -> None:
2493 """Get the available Pool ID and create a new floating IP.
2494
2495 Args:
2496 floating_network (dict): Dict including external network ID
2497 server (object): Server object
2498 created_items (dict): All created items belongs to new VM instance
2499
2500 """
2501
2502 # Pool_id is available
2503 if (
2504 isinstance(floating_network["floating_ip"], str)
2505 and floating_network["floating_ip"].lower() != "true"
2506 ):
2507 pool_id = floating_network["floating_ip"]
2508
2509 # Find the Pool_id
2510 else:
2511 pool_id = self._find_the_external_network_for_floating_ip()
2512
2513 param = {
2514 "floatingip": {
2515 "floating_network_id": pool_id,
2516 "tenant_id": server.tenant_id,
2517 }
2518 }
2519
2520 self._neutron_create_float_ip(param, created_items)
2521
2522 def _find_floating_ip(
2523 self,
2524 server: object,
2525 floating_ips: list,
2526 floating_network: dict,
2527 ) -> Optional[str]:
2528 """Find the available free floating IPs if there are.
2529
2530 Args:
2531 server (object): Server object
2532 floating_ips (list): List of floating IPs
2533 floating_network (dict): Details of floating network such as ID
2534
2535 Returns:
2536 free_floating_ip (str): Free floating ip address
2537
2538 """
2539 for fip in floating_ips:
2540 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2541 continue
2542
2543 if isinstance(floating_network["floating_ip"], str):
2544 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2545 continue
2546
2547 return fip["id"]
2548
2549 def _assign_floating_ip(
2550 self, free_floating_ip: str, floating_network: dict
2551 ) -> Dict:
2552 """Assign the free floating ip address to port.
2553
2554 Args:
2555 free_floating_ip (str): Floating IP to be assigned
2556 floating_network (dict): ID of floating network
2557
2558 Returns:
2559 fip (dict) (dict): Floating ip details
2560
2561 """
2562 # The vim_id key contains the neutron.port_id
2563 self.neutron.update_floatingip(
2564 free_floating_ip,
2565 {"floatingip": {"port_id": floating_network["vim_id"]}},
2566 )
2567 # For race condition ensure not re-assigned to other VM after 5 seconds
2568 time.sleep(5)
2569
2570 return self.neutron.show_floatingip(free_floating_ip)
2571
2572 def _get_free_floating_ip(
2573 self, server: object, floating_network: dict
2574 ) -> Optional[str]:
2575 """Get the free floating IP address.
2576
2577 Args:
2578 server (object): Server Object
2579 floating_network (dict): Floating network details
2580
2581 Returns:
2582 free_floating_ip (str): Free floating ip addr
2583
2584 """
2585
2586 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2587
2588 # Randomize
2589 random.shuffle(floating_ips)
2590
2591 return self._find_floating_ip(server, floating_ips, floating_network)
2592
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        Args:
            external_network (list): ID of External network
            server (object): Server Object
            created_items (dict): All created items belongs to new VM instance
            vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException

        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network
                    )

                    if not free_floating_ip:
                        # No free IP found: create one. NOTE(review): free_floating_ip
                        # stays None here, so show_floatingip below raises and the
                        # inner except retries; the new IP is picked up on the next
                        # pass of the while loop.
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            # Lost the race: the IP was attached elsewhere; pick another.
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        # Re-check after assignment (plus the 5s settle delay)
                        # that the IP still points at our port.
                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # Best-effort networks (exit_on_floating_ip_error False) only
                # log the failure and move on; mandatory ones abort the deploy.
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2681
2682 def _update_port_security_for_vminstance(
2683 self,
2684 no_secured_ports: list,
2685 server: object,
2686 ) -> None:
2687 """Updates the port security according to no_secured_ports list.
2688
2689 Args:
2690 no_secured_ports (list): List of ports that security will be disabled
2691 server (object): Server Object
2692
2693 Raises:
2694 VimConnException
2695
2696 """
2697 # Wait until the VM is active and then disable the port-security
2698 if no_secured_ports:
2699 self.__wait_for_vm(server.id, "ACTIVE")
2700
2701 for port in no_secured_ports:
2702 port_update = {
2703 "port": {"port_security_enabled": False, "security_groups": None}
2704 }
2705
2706 if port[1] == "allow-address-pairs":
2707 port_update = {
2708 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2709 }
2710
2711 try:
2712 self.neutron.update_port(port[0], port_update)
2713
2714 except Exception:
2715 raise vimconn.VimConnException(
2716 "It was not possible to disable port security for port {}".format(
2717 port[0]
2718 )
2719 )
2720
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name (str): name of VM
            description (str): description
            start (bool): indicates if VM must start or boot in pause mode. Ignored
            image_id (str) image uuid
            flavor_id (str) flavor uuid
            affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
            net_list (list): list of interfaces, each one is a dictionary with:
                name: name of network
                net_id: network uuid to connect
                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
                model: interface model, ignored #TODO
                mac_address: used for SR-IOV ifaces #TODO for other types
                use: 'data', 'bridge',  'mgmt'
                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id: filled/added by this function
                floating_ip: True/False (or it can be None)
                port_security: True/False
            cloud_config (dict): (optional) dictionary with:
                key-pairs: (optional) list of strings with the public key to be inserted to the default user
                users: (optional) list of users to be inserted, each item is a dict with:
                    name: (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data: (optional) string is a text script to be passed directly to cloud-init
                config-files: (optional). List of files to be transferred. Each item is a dict with:
                    dest: (mandatory) string with the destination absolute path
                    encoding: (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content : (mandatory) string with the content of the file
                    permissions: (optional) string with file permissions, typically octal notation '0644'
                    owner: (optional) file owner, string with the format 'owner:group'
                boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
                image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size: (mandatory) string with the size of the disk in GB
                vim_id: (optional) should use this existing volume id
            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
                availability_zone_index is None
                #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.

        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )
        # Rollback bookkeeping: everything created before a failure is recorded
        # in created_items so delete_vminstance can undo it.
        server = None
        created_items = {}
        net_list_vim = []
        # list of external networks to be connected to instance, later on used to create floating_ip
        external_network = []
        # List of ports with port-security disabled
        no_secured_ports = []
        block_device_mapping = {}
        existing_vim_volumes = []
        server_group_id = None
        # NOTE: "scheduller" is a historical misspelling kept for consistency.
        scheduller_hints = {}

        try:
            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            # Volumes go to the dedicated storage AZ when one is configured;
            # otherwise they follow the VM availability zone.
            storage_av_zone = (
                self.storage_availability_zone
                if self.storage_availability_zone
                else vm_av_zone
            )

            if disk_list:
                # Prepare disks
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    storage_av_zone=storage_av_zone,
                    block_device_mapping=block_device_mapping,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )
            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            vm_start_time = time.time()

            self._update_port_security_for_vminstance(no_secured_ports, server)

            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items

        except Exception as e:
            server_id = None
            if server:
                server_id = server.id

            # Best-effort rollback: strip the ":keep" markers so persistent
            # volumes created for this failed VM can also be removed.
            try:
                created_items = self.remove_keep_tag_from_persistent_volumes(
                    created_items
                )

                self.delete_vminstance(server_id, created_items)

            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
2910
2911 @staticmethod
2912 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2913 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2914
2915 Args:
2916 created_items (dict): All created items belongs to VM
2917
2918 Returns:
2919 updated_created_items (dict): Dict which does not include keep flag for volumes.
2920
2921 """
2922 return {
2923 key.replace(":keep", ""): value for (key, value) in created_items.items()
2924 }
2925
    def get_vminstance(self, vm_id):
        """Returns the VM instance information from VIM.

        Args:
            vm_id: VIM identifier of the server

        Returns:
            The server found by _find_nova_server for the given id.
        """
        return self._find_nova_server(vm_id)
2929
2930 @catch_any_exception
2931 def get_vminstance_console(self, vm_id, console_type="vnc"):
2932 """
2933 Get a console for the virtual machine
2934 Params:
2935 vm_id: uuid of the VM
2936 console_type, can be:
2937 "novnc" (by default), "xvpvnc" for VNC types,
2938 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2939 Returns dict with the console parameters:
2940 protocol: ssh, ftp, http, https, ...
2941 server: usually ip address
2942 port: the http, ssh, ... port
2943 suffix: extra text, e.g. the http path and query string
2944 """
2945 self.logger.debug("Getting VM CONSOLE from VIM")
2946 self._reload_connection()
2947 server = self.nova.servers.find(id=vm_id)
2948
2949 if console_type is None or console_type == "novnc":
2950 console_dict = server.get_vnc_console("novnc")
2951 elif console_type == "xvpvnc":
2952 console_dict = server.get_vnc_console(console_type)
2953 elif console_type == "rdp-html5":
2954 console_dict = server.get_rdp_console(console_type)
2955 elif console_type == "spice-html5":
2956 console_dict = server.get_spice_console(console_type)
2957 else:
2958 raise vimconn.VimConnException(
2959 "console type '{}' not allowed".format(console_type),
2960 http_code=vimconn.HTTP_Bad_Request,
2961 )
2962
2963 console_dict1 = console_dict.get("console")
2964
2965 if console_dict1:
2966 console_url = console_dict1.get("url")
2967
2968 if console_url:
2969 # parse console_url
2970 protocol_index = console_url.find("//")
2971 suffix_index = (
2972 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2973 )
2974 port_index = (
2975 console_url[protocol_index + 2 : suffix_index].find(":")
2976 + protocol_index
2977 + 2
2978 )
2979
2980 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2981 return (
2982 -vimconn.HTTP_Internal_Server_Error,
2983 "Unexpected response from VIM",
2984 )
2985
2986 console_dict = {
2987 "protocol": console_url[0:protocol_index],
2988 "server": console_url[protocol_index + 2 : port_index],
2989 "port": console_url[port_index:suffix_index],
2990 "suffix": console_url[suffix_index + 1 :],
2991 }
2992 protocol_index += 2
2993
2994 return console_dict
2995 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2996
2997 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2998 """Neutron delete ports by id.
2999 Args:
3000 k_id (str): Port id in the VIM
3001 """
3002 try:
3003 self.neutron.delete_port(k_id)
3004
3005 except (neExceptions.ConnectionFailed, ConnectionError) as e:
3006 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
3007 # If there is connection error, raise.
3008 self._format_exception(e)
3009 except Exception as e:
3010 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
3011
3012 def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
3013 """Cinder delete volume by id.
3014 Args:
3015 shared_volume_vim_id (str): ID of shared volume in VIM
3016 """
3017 elapsed_time = 0
3018 try:
3019 while elapsed_time < server_timeout:
3020 vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
3021 if vol_status == "available":
3022 self.cinder.volumes.delete(shared_volume_vim_id)
3023 return True
3024
3025 time.sleep(5)
3026 elapsed_time += 5
3027
3028 if elapsed_time >= server_timeout:
3029 raise vimconn.VimConnException(
3030 "Timeout waiting for volume "
3031 + shared_volume_vim_id
3032 + " to be available",
3033 http_code=vimconn.HTTP_Request_Timeout,
3034 )
3035
3036 except Exception as e:
3037 self.logger.error(
3038 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3039 )
3040 self._format_exception(e)
3041
3042 def _delete_volumes_by_id_wth_cinder(
3043 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
3044 ) -> bool:
3045 """Cinder delete volume by id.
3046 Args:
3047 k (str): Full item name in created_items
3048 k_id (str): ID of floating ip in VIM
3049 volumes_to_hold (list): Volumes not to delete
3050 created_items (dict): All created items belongs to VM
3051 """
3052 try:
3053 if k_id in volumes_to_hold:
3054 return False
3055
3056 if self.cinder.volumes.get(k_id).status != "available":
3057 return True
3058
3059 else:
3060 self.cinder.volumes.delete(k_id)
3061 created_items[k] = None
3062
3063 except (cExceptions.ConnectionError, ConnectionError) as e:
3064 self.logger.error(
3065 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3066 )
3067 self._format_exception(e)
3068 except Exception as e:
3069 self.logger.error(
3070 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3071 )
3072
3073 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
3074 """Neutron delete floating ip by id.
3075 Args:
3076 k (str): Full item name in created_items
3077 k_id (str): ID of floating ip in VIM
3078 created_items (dict): All created items belongs to VM
3079 """
3080 try:
3081 self.neutron.delete_floatingip(k_id)
3082 created_items[k] = None
3083
3084 except (neExceptions.ConnectionFailed, ConnectionError) as e:
3085 self.logger.error(
3086 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3087 )
3088 self._format_exception(e)
3089 except Exception as e:
3090 self.logger.error(
3091 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3092 )
3093
3094 @staticmethod
3095 def _get_item_name_id(k: str) -> Tuple[str, str]:
3096 k_item, _, k_id = k.partition(":")
3097 return k_item, k_id
3098
3099 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
3100 """Delete VM ports attached to the networks before deleting virtual machine.
3101 Args:
3102 created_items (dict): All created items belongs to VM
3103 """
3104
3105 for k, v in created_items.items():
3106 if not v: # skip already deleted
3107 continue
3108
3109 try:
3110 k_item, k_id = self._get_item_name_id(k)
3111 if k_item == "port":
3112 self._delete_ports_by_id_wth_neutron(k_id)
3113
3114 except (neExceptions.ConnectionFailed, ConnectionError) as e:
3115 self.logger.error(
3116 "Error deleting port: {}: {}".format(type(e).__name__, e)
3117 )
3118 self._format_exception(e)
3119 except Exception as e:
3120 self.logger.error(
3121 "Error deleting port: {}: {}".format(type(e).__name__, e)
3122 )
3123
    def _delete_created_items(
        self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
    ) -> bool:
        """Delete Volumes and floating ip if they exist in created_items.

        One best-effort pass over created_items: volumes are deleted via
        Cinder (unless held or not yet "available") and floating IPs via
        Neutron. Successfully deleted entries are set to None inside the
        helpers (value mutation only, so iterating items() here is safe).

        Args:
            created_items (dict): All created items belongs to VM
            volumes_to_hold (list): Volumes not to delete
            keep_waiting (bool): Wait flag carried in from the caller

        Returns:
            bool: True if at least one volume was not yet deletable and the
                caller should poll again.
        """
        for k, v in created_items.items():
            if not v:  # skip already deleted
                continue

            try:
                k_item, k_id = self._get_item_name_id(k)
                if k_item == "volume":
                    # True means the volume is still not "available".
                    unavailable_vol = self._delete_volumes_by_id_wth_cinder(
                        k, k_id, volumes_to_hold, created_items
                    )

                    if unavailable_vol:
                        keep_waiting = True

                elif k_item == "floating_ip":
                    self._delete_floating_ip_by_id(k, k_id, created_items)

            except (
                cExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
                ConnectionError,
                AttributeError,
                TypeError,
            ) as e:
                # Connectivity/programming errors are fatal: translate and raise.
                self.logger.error("Error deleting {}: {}".format(k, e))
                self._format_exception(e)

            except Exception as e:
                # Anything else: log and continue with the next item.
                self.logger.error("Error deleting {}: {}".format(k, e))

        return keep_waiting
3159
3160 @staticmethod
3161 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3162 """Remove the volumes which has key flag from created_items
3163
3164 Args:
3165 created_items (dict): All created items belongs to VM
3166
3167 Returns:
3168 created_items (dict): Persistent volumes eliminated created_items
3169 """
3170 return {
3171 key: value
3172 for (key, value) in created_items.items()
3173 if len(key.split(":")) == 2
3174 }
3175
    @catch_any_exception
    def delete_vminstance(
        self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
    ) -> None:
        """Removes a VM instance from VIM. Returns the old identifier.

        Ports are deleted first, then the server itself, then the remaining
        items (volumes, floating IPs) with a polling loop, because volumes
        can only be deleted once they are detached and "available".

        Args:
            vm_id (str): Identifier of VM instance
            created_items (dict): All created items belongs to VM
            volumes_to_hold (list): Volumes_to_hold
        """
        if created_items is None:
            created_items = {}
        if volumes_to_hold is None:
            volumes_to_hold = []

        try:
            # Drop "keep"-flagged entries so persistent volumes survive deletion.
            created_items = self._extract_items_wth_keep_flag_from_created_items(
                created_items
            )

            self._reload_connection()

            # Delete VM ports attached to the networks before the virtual machine
            if created_items:
                self._delete_vm_ports_attached_to_network(created_items)

            if vm_id:
                self.nova.servers.delete(vm_id)

            # Although having detached, volumes should have in active status before deleting.
            # We ensure in this loop
            keep_waiting = True
            elapsed_time = 0

            while keep_waiting and elapsed_time < volume_timeout:
                keep_waiting = False

                # Delete volumes and floating IP.
                keep_waiting = self._delete_created_items(
                    created_items, volumes_to_hold, keep_waiting
                )

                if keep_waiting:
                    time.sleep(1)
                    elapsed_time += 1
        except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
            # If VM does not exist, it does not raise
            self.logger.warning(f"Error deleting VM: {vm_id} is not found, {str(e)}")
3224
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports.

        Params: the list of VM identifiers
        Returns a dictionary indexed by vm_id with:
            status:    one of DELETED (not found at vim), VIM_ERROR (cannot
                       connect to VIM or VIM response error), OTHER (VIM
                       reported a status not understood), ERROR, ACTIVE,
                       PAUSED, SUSPENDED, INACTIVE, CREATING
            error_msg: text with VIM error message, if any
            vim_info:  plain information obtained from vim (serialized)
            interfaces: list of dicts with vim_info, mac_address,
                       vim_net_id, vim_interface_id, ip_address (";"-joined
                       floating + fixed IPs), compute_node (admin creds
                       only), pci (admin creds only) and vlan
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )
        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                # Map the VIM status to the MANO vocabulary; unknown ones
                # are reported as OTHER with the raw status in error_msg.
                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # Strip potentially sensitive cloud-init payload before serializing.
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                # the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            # Floating IPs are optional; ignore lookup failures.
                            pass

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    # Interface details are best-effort; the VM status entry
                    # is still returned without them.
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
3366
3367 @catch_any_exception
3368 def action_vminstance(self, vm_id, action_dict, created_items={}):
3369 """Send and action over a VM instance from VIM
3370 Returns None or the console dict if the action was successfully sent to the VIM
3371 """
3372 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3373 self._reload_connection()
3374 server = self.nova.servers.find(id=vm_id)
3375 if "start" in action_dict:
3376 if action_dict["start"] == "rebuild":
3377 server.rebuild()
3378 else:
3379 if server.status == "PAUSED":
3380 server.unpause()
3381 elif server.status == "SUSPENDED":
3382 server.resume()
3383 elif server.status == "SHUTOFF":
3384 server.start()
3385 else:
3386 self.logger.debug(
3387 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3388 )
3389 raise vimconn.VimConnException(
3390 "Cannot 'start' instance while it is in active state",
3391 http_code=vimconn.HTTP_Bad_Request,
3392 )
3393 elif "pause" in action_dict:
3394 server.pause()
3395 elif "resume" in action_dict:
3396 server.resume()
3397 elif "shutoff" in action_dict or "shutdown" in action_dict:
3398 self.logger.debug("server status %s", server.status)
3399 if server.status == "ACTIVE":
3400 server.stop()
3401 else:
3402 self.logger.debug("ERROR: VM is not in Active state")
3403 raise vimconn.VimConnException(
3404 "VM is not in active state, stop operation is not allowed",
3405 http_code=vimconn.HTTP_Bad_Request,
3406 )
3407 elif "forceOff" in action_dict:
3408 server.stop() # TODO
3409 elif "terminate" in action_dict:
3410 server.delete()
3411 elif "createImage" in action_dict:
3412 server.create_image()
3413 # "path":path_schema,
3414 # "description":description_schema,
3415 # "name":name_schema,
3416 # "metadata":metadata_schema,
3417 # "imageRef": id_schema,
3418 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3419 elif "rebuild" in action_dict:
3420 server.rebuild(server.image["id"])
3421 elif "reboot" in action_dict:
3422 server.reboot() # reboot_type="SOFT"
3423 elif "console" in action_dict:
3424 console_type = action_dict["console"]
3425
3426 if console_type is None or console_type == "novnc":
3427 console_dict = server.get_vnc_console("novnc")
3428 elif console_type == "xvpvnc":
3429 console_dict = server.get_vnc_console(console_type)
3430 elif console_type == "rdp-html5":
3431 console_dict = server.get_rdp_console(console_type)
3432 elif console_type == "spice-html5":
3433 console_dict = server.get_spice_console(console_type)
3434 else:
3435 raise vimconn.VimConnException(
3436 "console type '{}' not allowed".format(console_type),
3437 http_code=vimconn.HTTP_Bad_Request,
3438 )
3439
3440 try:
3441 console_url = console_dict["console"]["url"]
3442 # parse console_url
3443 protocol_index = console_url.find("//")
3444 suffix_index = (
3445 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3446 )
3447 port_index = (
3448 console_url[protocol_index + 2 : suffix_index].find(":")
3449 + protocol_index
3450 + 2
3451 )
3452
3453 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3454 raise vimconn.VimConnException(
3455 "Unexpected response from VIM " + str(console_dict)
3456 )
3457
3458 console_dict2 = {
3459 "protocol": console_url[0:protocol_index],
3460 "server": console_url[protocol_index + 2 : port_index],
3461 "port": int(console_url[port_index + 1 : suffix_index]),
3462 "suffix": console_url[suffix_index + 1 :],
3463 }
3464
3465 return console_dict2
3466 except Exception:
3467 raise vimconn.VimConnException(
3468 "Unexpected response from VIM " + str(console_dict)
3469 )
3470
3471 return None
3472
3473 # ###### VIO Specific Changes #########
3474 def _generate_vlanID(self):
3475 """
3476 Method to get unused vlanID
3477 Args:
3478 None
3479 Returns:
3480 vlanID
3481 """
3482 # Get used VLAN IDs
3483 usedVlanIDs = []
3484 networks = self.get_network_list()
3485
3486 for net in networks:
3487 if net.get("provider:segmentation_id"):
3488 usedVlanIDs.append(net.get("provider:segmentation_id"))
3489
3490 used_vlanIDs = set(usedVlanIDs)
3491
3492 # find unused VLAN ID
3493 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3494 try:
3495 start_vlanid, end_vlanid = map(
3496 int, vlanID_range.replace(" ", "").split("-")
3497 )
3498
3499 for vlanID in range(start_vlanid, end_vlanid + 1):
3500 if vlanID not in used_vlanIDs:
3501 return vlanID
3502 except Exception as exp:
3503 raise vimconn.VimConnException(
3504 "Exception {} occurred while generating VLAN ID.".format(exp)
3505 )
3506 else:
3507 raise vimconn.VimConnConflictException(
3508 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3509 self.config.get("dataplane_net_vlan_range")
3510 )
3511 )
3512
3513 def _generate_multisegment_vlanID(self):
3514 """
3515 Method to get unused vlanID
3516 Args:
3517 None
3518 Returns:
3519 vlanID
3520 """
3521 # Get used VLAN IDs
3522 usedVlanIDs = []
3523 networks = self.get_network_list()
3524 for net in networks:
3525 if net.get("provider:network_type") == "vlan" and net.get(
3526 "provider:segmentation_id"
3527 ):
3528 usedVlanIDs.append(net.get("provider:segmentation_id"))
3529 elif net.get("segments"):
3530 for segment in net.get("segments"):
3531 if segment.get("provider:network_type") == "vlan" and segment.get(
3532 "provider:segmentation_id"
3533 ):
3534 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3535
3536 used_vlanIDs = set(usedVlanIDs)
3537
3538 # find unused VLAN ID
3539 for vlanID_range in self.config.get("multisegment_vlan_range"):
3540 try:
3541 start_vlanid, end_vlanid = map(
3542 int, vlanID_range.replace(" ", "").split("-")
3543 )
3544
3545 for vlanID in range(start_vlanid, end_vlanid + 1):
3546 if vlanID not in used_vlanIDs:
3547 return vlanID
3548 except Exception as exp:
3549 raise vimconn.VimConnException(
3550 "Exception {} occurred while generating VLAN ID.".format(exp)
3551 )
3552 else:
3553 raise vimconn.VimConnConflictException(
3554 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3555 self.config.get("multisegment_vlan_range")
3556 )
3557 )
3558
3559 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3560 """
3561 Method to validate user given vlanID ranges
3562 Args: None
3563 Returns: None
3564 """
3565 for vlanID_range in input_vlan_range:
3566 vlan_range = vlanID_range.replace(" ", "")
3567 # validate format
3568 vlanID_pattern = r"(\d)*-(\d)*$"
3569 match_obj = re.match(vlanID_pattern, vlan_range)
3570 if not match_obj:
3571 raise vimconn.VimConnConflictException(
3572 "Invalid VLAN range for {}: {}.You must provide "
3573 "'{}' in format [start_ID - end_ID].".format(
3574 text_vlan_range, vlanID_range, text_vlan_range
3575 )
3576 )
3577
3578 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3579 if start_vlanid <= 0:
3580 raise vimconn.VimConnConflictException(
3581 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3582 "networks valid IDs are 1 to 4094 ".format(
3583 text_vlan_range, vlanID_range
3584 )
3585 )
3586
3587 if end_vlanid > 4094:
3588 raise vimconn.VimConnConflictException(
3589 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3590 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3591 text_vlan_range, vlanID_range
3592 )
3593 )
3594
3595 if start_vlanid > end_vlanid:
3596 raise vimconn.VimConnConflictException(
3597 "Invalid VLAN range for {}: {}. You must provide '{}'"
3598 " in format start_ID - end_ID and start_ID < end_ID ".format(
3599 text_vlan_range, vlanID_range, text_vlan_range
3600 )
3601 )
3602
3603 def get_hosts_info(self):
3604 """Get the information of deployed hosts
3605 Returns the hosts content"""
3606 if self.debug:
3607 print("osconnector: Getting Host info from VIM")
3608
3609 try:
3610 h_list = []
3611 self._reload_connection()
3612 hypervisors = self.nova.hypervisors.list()
3613
3614 for hype in hypervisors:
3615 h_list.append(hype.to_dict())
3616
3617 return 1, {"hosts": h_list}
3618 except nvExceptions.NotFound as e:
3619 error_value = -vimconn.HTTP_Not_Found
3620 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3621 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3622 error_value = -vimconn.HTTP_Bad_Request
3623 error_text = (
3624 type(e).__name__
3625 + ": "
3626 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3627 )
3628
3629 # TODO insert exception vimconn.HTTP_Unauthorized
3630 # if reaching here is because an exception
3631 self.logger.debug("get_hosts_info " + error_text)
3632
3633 return error_value, error_text
3634
3635 def get_hosts(self, vim_tenant):
3636 """Get the hosts and deployed instances
3637 Returns the hosts content"""
3638 r, hype_dict = self.get_hosts_info()
3639
3640 if r < 0:
3641 return r, hype_dict
3642
3643 hypervisors = hype_dict["hosts"]
3644
3645 try:
3646 servers = self.nova.servers.list()
3647 for hype in hypervisors:
3648 for server in servers:
3649 if (
3650 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3651 == hype["hypervisor_hostname"]
3652 ):
3653 if "vm" in hype:
3654 hype["vm"].append(server.id)
3655 else:
3656 hype["vm"] = [server.id]
3657
3658 return 1, hype_dict
3659 except nvExceptions.NotFound as e:
3660 error_value = -vimconn.HTTP_Not_Found
3661 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3662 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3663 error_value = -vimconn.HTTP_Bad_Request
3664 error_text = (
3665 type(e).__name__
3666 + ": "
3667 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3668 )
3669
3670 # TODO insert exception vimconn.HTTP_Unauthorized
3671 # if reaching here is because an exception
3672 self.logger.debug("get_hosts " + error_text)
3673
3674 return error_value, error_text
3675
3676 @catch_any_exception
3677 def new_affinity_group(self, affinity_group_data):
3678 """Adds a server group to VIM
3679 affinity_group_data contains a dictionary with information, keys:
3680 name: name in VIM for the server group
3681 type: affinity or anti-affinity
3682 scope: Only nfvi-node allowed
3683 Returns the server group identifier"""
3684 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3685 name = affinity_group_data["name"]
3686 policy = affinity_group_data["type"]
3687 self._reload_connection()
3688 new_server_group = self.nova.server_groups.create(name, policy)
3689 return new_server_group.id
3690
3691 @catch_any_exception
3692 def get_affinity_group(self, affinity_group_id):
3693 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3694 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3695 self._reload_connection()
3696 server_group = self.nova.server_groups.find(id=affinity_group_id)
3697 return server_group.to_dict()
3698
3699 @catch_any_exception
3700 def delete_affinity_group(self, affinity_group_id):
3701 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3702 self.logger.debug("Getting server group '%s'", affinity_group_id)
3703 self._reload_connection()
3704 self.nova.server_groups.delete(affinity_group_id)
3705 return affinity_group_id
3706
3707 @catch_any_exception
3708 def get_vdu_state(self, vm_id, host_is_required=False) -> list:
3709 """Getting the state of a VDU.
3710 Args:
3711 vm_id (str): ID of an instance
3712 host_is_required (Boolean): If the VIM account is non-admin, host info does not appear in server_dict
3713 and if this is set to True, it raises KeyError.
3714 Returns:
3715 vdu_data (list): VDU details including state, flavor, host_info, AZ
3716 """
3717 self.logger.debug("Getting the status of VM")
3718 self.logger.debug("VIM VM ID %s", vm_id)
3719 self._reload_connection()
3720 server_dict = self._find_nova_server(vm_id)
3721 srv_attr = "OS-EXT-SRV-ATTR:host"
3722 host_info = (
3723 server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
3724 )
3725 vdu_data = [
3726 server_dict["status"],
3727 server_dict["flavor"]["id"],
3728 host_info,
3729 server_dict["OS-EXT-AZ:availability_zone"],
3730 ]
3731 self.logger.debug("vdu_data %s", vdu_data)
3732 return vdu_data
3733
3734 def check_compute_availability(self, host, server_flavor_details):
3735 self._reload_connection()
3736 hypervisor_search = self.nova.hypervisors.search(
3737 hypervisor_match=host, servers=True
3738 )
3739 for hypervisor in hypervisor_search:
3740 hypervisor_id = hypervisor.to_dict()["id"]
3741 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
3742 hypervisor_dict = hypervisor_details.to_dict()
3743 hypervisor_temp = json.dumps(hypervisor_dict)
3744 hypervisor_json = json.loads(hypervisor_temp)
3745 resources_available = [
3746 hypervisor_json["free_ram_mb"],
3747 hypervisor_json["disk_available_least"],
3748 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
3749 ]
3750 compute_available = all(
3751 x > y for x, y in zip(resources_available, server_flavor_details)
3752 )
3753 if compute_available:
3754 return host
3755
3756 def check_availability_zone(
3757 self, old_az, server_flavor_details, old_host, host=None
3758 ):
3759 self._reload_connection()
3760 az_check = {"zone_check": False, "compute_availability": None}
3761 aggregates_list = self.nova.aggregates.list()
3762 for aggregate in aggregates_list:
3763 aggregate_details = aggregate.to_dict()
3764 aggregate_temp = json.dumps(aggregate_details)
3765 aggregate_json = json.loads(aggregate_temp)
3766 if aggregate_json["availability_zone"] == old_az:
3767 hosts_list = aggregate_json["hosts"]
3768 if host is not None:
3769 if host in hosts_list:
3770 az_check["zone_check"] = True
3771 available_compute_id = self.check_compute_availability(
3772 host, server_flavor_details
3773 )
3774 if available_compute_id is not None:
3775 az_check["compute_availability"] = available_compute_id
3776 else:
3777 for check_host in hosts_list:
3778 if check_host != old_host:
3779 available_compute_id = self.check_compute_availability(
3780 check_host, server_flavor_details
3781 )
3782 if available_compute_id is not None:
3783 az_check["zone_check"] = True
3784 az_check["compute_availability"] = available_compute_id
3785 break
3786 else:
3787 az_check["zone_check"] = True
3788 return az_check
3789
3790 @catch_any_exception
3791 def migrate_instance(self, vm_id, compute_host=None):
3792 """
3793 Migrate a vdu
3794 param:
3795 vm_id: ID of an instance
3796 compute_host: Host to migrate the vdu to
3797 """
3798 self._reload_connection()
3799 vm_state = False
3800 instance_state = self.get_vdu_state(vm_id, host_is_required=True)
3801 server_flavor_id = instance_state[1]
3802 server_hypervisor_name = instance_state[2]
3803 server_availability_zone = instance_state[3]
3804 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3805 server_flavor_details = [
3806 server_flavor["ram"],
3807 server_flavor["disk"],
3808 server_flavor["vcpus"],
3809 ]
3810 if compute_host == server_hypervisor_name:
3811 raise vimconn.VimConnException(
3812 "Unable to migrate instance '{}' to the same host '{}'".format(
3813 vm_id, compute_host
3814 ),
3815 http_code=vimconn.HTTP_Bad_Request,
3816 )
3817 az_status = self.check_availability_zone(
3818 server_availability_zone,
3819 server_flavor_details,
3820 server_hypervisor_name,
3821 compute_host,
3822 )
3823 availability_zone_check = az_status["zone_check"]
3824 available_compute_id = az_status.get("compute_availability")
3825
3826 if availability_zone_check is False:
3827 raise vimconn.VimConnException(
3828 "Unable to migrate instance '{}' to a different availability zone".format(
3829 vm_id
3830 ),
3831 http_code=vimconn.HTTP_Bad_Request,
3832 )
3833 if available_compute_id is not None:
3834 # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
3835 self.nova.servers.live_migrate(
3836 server=vm_id,
3837 host=available_compute_id,
3838 block_migration=True,
3839 )
3840 state = "MIGRATING"
3841 changed_compute_host = ""
3842 if state == "MIGRATING":
3843 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
3844 changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[
3845 2
3846 ]
3847 if vm_state and changed_compute_host == available_compute_id:
3848 self.logger.debug(
3849 "Instance '{}' migrated to the new compute host '{}'".format(
3850 vm_id, changed_compute_host
3851 )
3852 )
3853 return state, available_compute_id
3854 else:
3855 raise vimconn.VimConnException(
3856 "Migration Failed. Instance '{}' not moved to the new host {}".format(
3857 vm_id, available_compute_id
3858 ),
3859 http_code=vimconn.HTTP_Bad_Request,
3860 )
3861 else:
3862 raise vimconn.VimConnException(
3863 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
3864 available_compute_id
3865 ),
3866 http_code=vimconn.HTTP_Bad_Request,
3867 )
3868
    @catch_any_exception
    def resize_instance(self, vm_id, new_flavor_id):
        """Resize an instance to a new flavor.

        param:
            vm_id : ID of an instance
            new_flavor_id : Flavor id to be resized
        Return the status of a resized instance

        Raises nvExceptions.BadRequest when the instance is not in
        ACTIVE/SHUTOFF state, when the new flavor has a smaller disk, or
        when the VM ends up in ERROR after the resize.
        """
        self._reload_connection()
        self.logger.debug("resize the flavor of an instance")
        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
        if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
            if old_flavor_disk > new_flavor_disk:
                # Shrinking the disk during resize is not supported.
                raise nvExceptions.BadRequest(
                    400,
                    message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                )
            else:
                self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                # Wait for Nova to reach VERIFY_RESIZE before confirming.
                vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
                if vm_state:
                    instance_resized_status = self.confirm_resize(vm_id)
                    return instance_resized_status
                else:
                    raise nvExceptions.BadRequest(
                        409,
                        message="Cannot 'resize' vm_state is in ERROR",
                    )

        else:
            self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
            raise nvExceptions.BadRequest(
                409,
                message="Cannot 'resize' instance while it is in vm_state resized",
            )
3908
3909 def confirm_resize(self, vm_id):
3910 """
3911 Confirm the resize of an instance
3912 param:
3913 vm_id: ID of an instance
3914 """
3915 self._reload_connection()
3916 self.nova.servers.confirm_resize(server=vm_id)
3917 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3918 self.__wait_for_vm(vm_id, "ACTIVE")
3919 instance_status = self.get_vdu_state(vm_id)[0]
3920 return instance_status
3921
3922 def get_monitoring_data(self):
3923 try:
3924 self.logger.debug("Getting servers and ports data from Openstack VIMs.")
3925 self._reload_connection()
3926 all_servers = self.nova.servers.list(detailed=True)
3927 try:
3928 for server in all_servers:
3929 if server.flavor.get("original_name"):
3930 server.flavor["id"] = self.nova.flavors.find(
3931 name=server.flavor["original_name"]
3932 ).id
3933 except nClient.exceptions.NotFound as e:
3934 self.logger.warning(str(e.message))
3935 all_ports = self.neutron.list_ports()
3936 return all_servers, all_ports
3937 except Exception as e:
3938 raise vimconn.VimConnException(
3939 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
3940 )