9d474371b3bc94af33660a173009effc02159c23
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Mapping from Nova (OpenStack) server status values to the MANO status
# vocabulary used by the RO module.
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Mapping from Neutron (OpenStack) network status values to the MANO status
# vocabulary.
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# Only the legacy Neutron flow classifier is supported for SFC classifications
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
server_timeout = 1800
86
87
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that tolerates dict subclasses.

    OpenStack client libraries often return custom subclasses of ``dict``,
    which pyyaml's safe dumper refuses to represent (see pyyaml issue 142).
    Such values are downgraded to a plain ``dict`` before being represented.
    """

    def represent_data(self, data):
        # Normalize any dict subclass back to a plain dict; plain dicts and
        # non-dict values pass through untouched.
        if isinstance(data, dict) and type(data) is not dict:
            data = dict(data.items())

        return super().represent_data(data)
97
98
99 class vimconnector(vimconn.VimConnector):
100 def __init__(
101 self,
102 uuid,
103 name,
104 tenant_id,
105 tenant_name,
106 url,
107 url_admin=None,
108 user=None,
109 passwd=None,
110 log_level=None,
111 config={},
112 persistent_info={},
113 ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
117 """
118 api_version = config.get("APIversion")
119
120 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124 )
125
126 vim_type = config.get("vim_type")
127
128 if vim_type and vim_type not in ("vio", "VIO"):
129 raise vimconn.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132 )
133
134 if config.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self._validate_vlan_ranges(
137 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138 )
139
140 if config.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self._validate_vlan_ranges(
143 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144 )
145
146 vimconn.VimConnector.__init__(
147 self,
148 uuid,
149 name,
150 tenant_id,
151 tenant_name,
152 url,
153 url_admin,
154 user,
155 passwd,
156 log_level,
157 config,
158 )
159
160 if self.config.get("insecure") and self.config.get("ca_cert"):
161 raise vimconn.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
163 )
164
165 self.verify = True
166
167 if self.config.get("insecure"):
168 self.verify = False
169
170 if self.config.get("ca_cert"):
171 self.verify = self.config.get("ca_cert")
172
173 if not url:
174 raise TypeError("url param can not be NoneType")
175
176 self.persistent_info = persistent_info
177 self.availability_zone = persistent_info.get("availability_zone", None)
178 self.session = persistent_info.get("session", {"reload_client": True})
179 self.my_tenant_id = self.session.get("my_tenant_id")
180 self.nova = self.session.get("nova")
181 self.neutron = self.session.get("neutron")
182 self.cinder = self.session.get("cinder")
183 self.glance = self.session.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self.keystone = self.session.get("keystone")
186 self.api_version3 = self.session.get("api_version3")
187 self.vim_type = self.config.get("vim_type")
188
189 if self.vim_type:
190 self.vim_type = self.vim_type.upper()
191
192 if self.config.get("use_internal_endpoint"):
193 self.endpoint_type = "internalURL"
194 else:
195 self.endpoint_type = None
196
197 logging.getLogger("urllib3").setLevel(logging.WARNING)
198 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 logging.getLogger("novaclient").setLevel(logging.WARNING)
200 self.logger = logging.getLogger("ro.vim.openstack")
201
202 # allow security_groups to be a list or a single string
203 if isinstance(self.config.get("security_groups"), str):
204 self.config["security_groups"] = [self.config["security_groups"]]
205
206 self.security_groups_id = None
207
208 # ###### VIO Specific Changes #########
209 if self.vim_type == "VIO":
210 self.logger = logging.getLogger("ro.vim.vio")
211
212 if log_level:
213 self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individuals parameters.
217 Throw KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
227 Throw KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
    def _reload_connection(self):
        """Called before any operation, it check if credentials has changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.session["reload_client"]:
            # Resolve the identity API version: explicit config wins, otherwise
            # it is inferred from the auth_url suffix.
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # Domain ids default to "default" only when no domain
                # information (id or name) was configured at all.
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            # One keystoneauth session is shared by all service clients below;
            # self.verify carries True/False/CA-bundle-path (set in __init__).
            sess = session.Session(auth=auth, verify=self.verify)
            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.60"

            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            # Prefer cinder v2 when the catalog advertises it, else fall back to v3
            if sess.get_all_version_data(service_type="volumev2"):
                self.cinder = self.session["cinder"] = cClient.Client(
                    2,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.cinder = self.session["cinder"] = cClient.Client(
                    3,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )

            # Best-effort: keep going even if the project id cannot be resolved
            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            # Glance does not take endpoint_type, so the internal endpoint must
            # be resolved manually from the keystone catalog when requested.
            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availablity zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
400
401 def __net_os2mano(self, net_list_dict):
402 """Transform the net openstack format to mano format
403 net_list_dict can be a list of dict or a single dict"""
404 if type(net_list_dict) is dict:
405 net_list_ = (net_list_dict,)
406 elif type(net_list_dict) is list:
407 net_list_ = net_list_dict
408 else:
409 raise TypeError("param net_list_dict must be a list or a dictionary")
410 for net in net_list_:
411 if net.get("provider:network_type") == "vlan":
412 net["type"] = "data"
413 else:
414 net["type"] = "bridge"
415
416 def __classification_os2mano(self, class_list_dict):
417 """Transform the openstack format (Flow Classifier) to mano format
418 (Classification) class_list_dict can be a list of dict or a single dict
419 """
420 if isinstance(class_list_dict, dict):
421 class_list_ = [class_list_dict]
422 elif isinstance(class_list_dict, list):
423 class_list_ = class_list_dict
424 else:
425 raise TypeError("param class_list_dict must be a list or a dictionary")
426 for classification in class_list_:
427 id = classification.pop("id")
428 name = classification.pop("name")
429 description = classification.pop("description")
430 project_id = classification.pop("project_id")
431 tenant_id = classification.pop("tenant_id")
432 original_classification = copy.deepcopy(classification)
433 classification.clear()
434 classification["ctype"] = "legacy_flow_classifier"
435 classification["definition"] = original_classification
436 classification["id"] = id
437 classification["name"] = name
438 classification["description"] = description
439 classification["project_id"] = project_id
440 classification["tenant_id"] = tenant_id
441
442 def __sfi_os2mano(self, sfi_list_dict):
443 """Transform the openstack format (Port Pair) to mano format (SFI)
444 sfi_list_dict can be a list of dict or a single dict
445 """
446 if isinstance(sfi_list_dict, dict):
447 sfi_list_ = [sfi_list_dict]
448 elif isinstance(sfi_list_dict, list):
449 sfi_list_ = sfi_list_dict
450 else:
451 raise TypeError("param sfi_list_dict must be a list or a dictionary")
452
453 for sfi in sfi_list_:
454 sfi["ingress_ports"] = []
455 sfi["egress_ports"] = []
456
457 if sfi.get("ingress"):
458 sfi["ingress_ports"].append(sfi["ingress"])
459
460 if sfi.get("egress"):
461 sfi["egress_ports"].append(sfi["egress"])
462
463 del sfi["ingress"]
464 del sfi["egress"]
465 params = sfi.get("service_function_parameters")
466 sfc_encap = False
467
468 if params:
469 correlation = params.get("correlation")
470
471 if correlation:
472 sfc_encap = True
473
474 sfi["sfc_encap"] = sfc_encap
475 del sfi["service_function_parameters"]
476
477 def __sf_os2mano(self, sf_list_dict):
478 """Transform the openstack format (Port Pair Group) to mano format (SF)
479 sf_list_dict can be a list of dict or a single dict
480 """
481 if isinstance(sf_list_dict, dict):
482 sf_list_ = [sf_list_dict]
483 elif isinstance(sf_list_dict, list):
484 sf_list_ = sf_list_dict
485 else:
486 raise TypeError("param sf_list_dict must be a list or a dictionary")
487
488 for sf in sf_list_:
489 del sf["port_pair_group_parameters"]
490 sf["sfis"] = sf["port_pairs"]
491 del sf["port_pairs"]
492
493 def __sfp_os2mano(self, sfp_list_dict):
494 """Transform the openstack format (Port Chain) to mano format (SFP)
495 sfp_list_dict can be a list of dict or a single dict
496 """
497 if isinstance(sfp_list_dict, dict):
498 sfp_list_ = [sfp_list_dict]
499 elif isinstance(sfp_list_dict, list):
500 sfp_list_ = sfp_list_dict
501 else:
502 raise TypeError("param sfp_list_dict must be a list or a dictionary")
503
504 for sfp in sfp_list_:
505 params = sfp.pop("chain_parameters")
506 sfc_encap = False
507
508 if params:
509 correlation = params.get("correlation")
510
511 if correlation:
512 sfc_encap = True
513
514 sfp["sfc_encap"] = sfc_encap
515 sfp["spi"] = sfp.pop("chain_id")
516 sfp["classifications"] = sfp.pop("flow_classifiers")
517 sfp["service_functions"] = sfp.pop("port_pair_groups")
518
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        """Validate an SFC classification of the given type.

        Currently a no-op that always returns True: only the
        "legacy_flow_classifier" type exists and its definition is not
        inspected. The "type" parameter shadows the builtin; kept as-is for
        interface compatibility.
        """
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.
527
    def _format_exception(self, exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
        message_error = str(exception)
        tip = ""

        # NOTE: the isinstance chain below is order-sensitive: the specific
        # "not found" and transport-level families must be matched before the
        # generic client exceptions near the bottom.
        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
            ),
        ):
            # Missing resource on the VIM -> NotFound
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
            ),
        ):
            # Transport or TLS problems -> connection error; SSLError usually
            # means the certificate could not be verified.
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
            ),
        ):
            # A bare "OS-EXT-SRV-ATTR:host" KeyError means the server entry
            # lacked the admin-only host attribute.
            if message_error == "OS-EXT-SRV-ATTR:host":
                tip = " (If the user does not have non-admin credentials, this attribute will be missing)"
                raise vimconn.VimConnInsufficientCredentials(
                    type(exception).__name__ + ": " + message_error + tip
                )
            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )

        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, vimconn.VimConnException):
            # Already a vimconn exception: propagate untouched
            raise exception
        else:  # ()
            # Unknown exception type: log with traceback and report it as a
            # connection problem.
            self.logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnConnectionException(
                type(exception).__name__ + ": " + message_error
            )
602
    def _get_ids_from_name(self):
        """
        Obtain ids from name of tenant and security_groups. Store at self.security_groups_id
        :return: None
        :raises vimconn.VimConnConnectionException: if the tenant id cannot be
            resolved or a configured security group does not exist.
        """
        # get tenant_id if only tenant_name is supplied
        self._reload_connection()

        if not self.my_tenant_id:
            raise vimconn.VimConnConnectionException(
                "Error getting tenant information from name={} id={}".format(
                    self.tenant_name, self.tenant_id
                )
            )

        if self.config.get("security_groups") and not self.security_groups_id:
            # convert from name to id
            neutron_sg_list = self.neutron.list_security_groups(
                tenant_id=self.my_tenant_id
            )["security_groups"]

            self.security_groups_id = []
            for sg in self.config.get("security_groups"):
                # each configured entry may be either a group name or a group id
                for neutron_sg in neutron_sg_list:
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    # for/else: no neutron group matched this entry; reset the
                    # cache so the lookup is retried on the next call.
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )
636
637 def _find_nova_server(self, vm_id):
638 """
639 Returns the VM instance from Openstack and completes it with flavor ID
640 Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
641 """
642 try:
643 self._reload_connection()
644 server = self.nova.servers.find(id=vm_id)
645 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
646 server_dict = server.to_dict()
647 try:
648 if server_dict["flavor"].get("original_name"):
649 server_dict["flavor"]["id"] = self.nova.flavors.find(
650 name=server_dict["flavor"]["original_name"]
651 ).id
652 except nClient.exceptions.NotFound as e:
653 self.logger.warning(str(e.message))
654 return server_dict
655 except (
656 ksExceptions.ClientException,
657 nvExceptions.ClientException,
658 nvExceptions.NotFound,
659 ConnectionError,
660 ) as e:
661 self._format_exception(e)
662
    def check_vim_connectivity(self):
        """Check VIM reachability and credential validity.

        Issues a cheap read-only Neutron request; any connectivity or
        authentication problem surfaces as a vimconn exception raised by it.
        """
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})
666
667 def get_tenant_list(self, filter_dict={}):
668 """Obtain tenants of VIM
669 filter_dict can contain the following keys:
670 name: filter by tenant name
671 id: filter by tenant uuid/id
672 <other VIM specific>
673 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
674 """
675 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
676
677 try:
678 self._reload_connection()
679
680 if self.api_version3:
681 project_class_list = self.keystone.projects.list(
682 name=filter_dict.get("name")
683 )
684 else:
685 project_class_list = self.keystone.tenants.findall(**filter_dict)
686
687 project_list = []
688
689 for project in project_class_list:
690 if filter_dict.get("id") and filter_dict["id"] != project.id:
691 continue
692
693 project_list.append(project.to_dict())
694
695 return project_list
696 except (
697 ksExceptions.ConnectionError,
698 ksExceptions.ClientException,
699 ConnectionError,
700 ) as e:
701 self._format_exception(e)
702
703 def new_tenant(self, tenant_name, tenant_description):
704 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
705 self.logger.debug("Adding a new tenant name: %s", tenant_name)
706
707 try:
708 self._reload_connection()
709
710 if self.api_version3:
711 project = self.keystone.projects.create(
712 tenant_name,
713 self.config.get("project_domain_id", "default"),
714 description=tenant_description,
715 is_domain=False,
716 )
717 else:
718 project = self.keystone.tenants.create(tenant_name, tenant_description)
719
720 return project.id
721 except (
722 ksExceptions.ConnectionError,
723 ksExceptions.ClientException,
724 ksExceptions.BadRequest,
725 ConnectionError,
726 ) as e:
727 self._format_exception(e)
728
729 def delete_tenant(self, tenant_id):
730 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
731 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
732
733 try:
734 self._reload_connection()
735
736 if self.api_version3:
737 self.keystone.projects.delete(tenant_id)
738 else:
739 self.keystone.tenants.delete(tenant_id)
740
741 return tenant_id
742 except (
743 ksExceptions.ConnectionError,
744 ksExceptions.ClientException,
745 ksExceptions.NotFound,
746 ConnectionError,
747 ) as e:
748 self._format_exception(e)
749
    def new_network(
        self,
        net_name,
        net_type,
        ip_profile=None,
        shared=False,
        provider_network_profile=None,
    ):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data': underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/use by other tenants/organization
            'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
                                                             physical-network: physnet-label}
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
        )
        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))

        try:
            vlan = None

            if provider_network_profile:
                vlan = provider_network_profile.get("segmentation-id")

            new_net = None
            # NOTE(review): if an exception is raised before this assignment
            # (e.g. by provider_network_profile.get above on a non-dict), the
            # cleanup in the except clause references an unbound
            # "created_items" — confirm whether that path is reachable.
            created_items = {}
            self._reload_connection()
            network_dict = {"name": net_name, "admin_state_up": True}

            # Underlay ("data"/"ptp") networks and explicit provider profiles
            # need provider attributes (physical network, type, segmentation id)
            if net_type in ("data", "ptp") or provider_network_profile:
                provider_physical_network = None

                if provider_network_profile and provider_network_profile.get(
                    "physical-network"
                ):
                    provider_physical_network = provider_network_profile.get(
                        "physical-network"
                    )

                # provider-network must be one of the dataplane_physical_net if this is a list. If it is string
                # or not declared, just ignore the checking
                if (
                    isinstance(
                        self.config.get("dataplane_physical_net"), (tuple, list)
                    )
                    and provider_physical_network
                    not in self.config["dataplane_physical_net"]
                ):
                    raise vimconn.VimConnConflictException(
                        "Invalid parameter 'provider-network:physical-network' "
                        "for network creation. '{}' is not one of the declared "
                        "list at VIM_config:dataplane_physical_net".format(
                            provider_physical_network
                        )
                    )

                # use the default dataplane_physical_net
                if not provider_physical_network:
                    provider_physical_network = self.config.get(
                        "dataplane_physical_net"
                    )

                # if it is non empty list, use the first value. If it is a string use the value directly
                if (
                    isinstance(provider_physical_network, (tuple, list))
                    and provider_physical_network
                ):
                    provider_physical_network = provider_physical_network[0]

                if not provider_physical_network:
                    raise vimconn.VimConnConflictException(
                        "missing information needed for underlay networks. Provide "
                        "'dataplane_physical_net' configuration at VIM or use the NS "
                        "instantiation parameter 'provider-network.physical-network'"
                        " for the VLD"
                    )

                if not self.config.get("multisegment_support"):
                    # Single-segment provider network
                    network_dict[
                        "provider:physical_network"
                    ] = provider_physical_network

                    if (
                        provider_network_profile
                        and "network-type" in provider_network_profile
                    ):
                        network_dict[
                            "provider:network_type"
                        ] = provider_network_profile["network-type"]
                    else:
                        network_dict["provider:network_type"] = self.config.get(
                            "dataplane_network_type", "vlan"
                        )

                    if vlan:
                        network_dict["provider:segmentation_id"] = vlan
                else:
                    # Multi-segment case: one vxlan segment plus one vlan
                    # segment on the physical network
                    segment_list = []
                    segment1_dict = {
                        "provider:physical_network": "",
                        "provider:network_type": "vxlan",
                    }
                    segment_list.append(segment1_dict)
                    segment2_dict = {
                        "provider:physical_network": provider_physical_network,
                        "provider:network_type": "vlan",
                    }

                    if vlan:
                        segment2_dict["provider:segmentation_id"] = vlan
                    elif self.config.get("multisegment_vlan_range"):
                        vlanID = self._generate_multisegment_vlanID()
                        segment2_dict["provider:segmentation_id"] = vlanID

                    # else
                    #     raise vimconn.VimConnConflictException(
                    #         "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
                    #         network")
                    segment_list.append(segment2_dict)
                    network_dict["segments"] = segment_list

                # VIO Specific Changes. It needs a concrete VLAN
                if self.vim_type == "VIO" and vlan is None:
                    if self.config.get("dataplane_net_vlan_range") is None:
                        raise vimconn.VimConnConflictException(
                            "You must provide 'dataplane_net_vlan_range' in format "
                            "[start_ID - end_ID] at VIM_config for creating underlay "
                            "networks"
                        )

                    network_dict["provider:segmentation_id"] = self._generate_vlanID()

            network_dict["shared"] = shared

            if self.config.get("disable_network_port_security"):
                network_dict["port_security_enabled"] = False

            if self.config.get("neutron_availability_zone_hints"):
                hints = self.config.get("neutron_availability_zone_hints")

                if isinstance(hints, str):
                    hints = [hints]

                network_dict["availability_zone_hints"] = hints

            new_net = self.neutron.create_network({"network": network_dict})
            # print new_net
            # create subnetwork, even if there is no profile

            if not ip_profile:
                ip_profile = {}

            if not ip_profile.get("subnet_address"):
                # Fake subnet is required
                subnet_rand = random.SystemRandom().randint(0, 255)
                ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)

            if "ip_version" not in ip_profile:
                ip_profile["ip_version"] = "IPv4"

            subnet = {
                "name": net_name + "-subnet",
                "network_id": new_net["network"]["id"],
                "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
                "cidr": ip_profile["subnet_address"],
            }

            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
            if ip_profile.get("gateway_address"):
                subnet["gateway_ip"] = ip_profile["gateway_address"]
            else:
                subnet["gateway_ip"] = None

            if ip_profile.get("dns_address"):
                subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")

            if "dhcp_enabled" in ip_profile:
                # accepts both the string "false" and the boolean False
                subnet["enable_dhcp"] = (
                    False
                    if ip_profile["dhcp_enabled"] == "false"
                    or ip_profile["dhcp_enabled"] is False
                    else True
                )

            if ip_profile.get("dhcp_start_address"):
                subnet["allocation_pools"] = []
                subnet["allocation_pools"].append(dict())
                subnet["allocation_pools"][0]["start"] = ip_profile[
                    "dhcp_start_address"
                ]

            if ip_profile.get("dhcp_count"):
                # pool end = start + count - 1, computed via netaddr
                # parts = ip_profile["dhcp_start_address"].split(".")
                # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
                ip_int += ip_profile["dhcp_count"] - 1
                ip_str = str(netaddr.IPAddress(ip_int))
                subnet["allocation_pools"][0]["end"] = ip_str

            if (
                ip_profile.get("ipv6_address_mode")
                and ip_profile["ip_version"] != "IPv4"
            ):
                subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
                # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
                # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
                subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]

            # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
            self.neutron.create_subnet({"subnet": subnet})

            if net_type == "data" and self.config.get("multisegment_support"):
                if self.config.get("l2gw_support"):
                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
                    for l2gw in l2gw_list:
                        # NOTE(review): "vlanID" is only bound when it was
                        # generated from multisegment_vlan_range above; if the
                        # segmentation id came from the provider profile
                        # ("vlan"), str(vlanID) raises NameError — confirm
                        # whether "vlan" should be used here as a fallback.
                        l2gw_conn = {
                            "l2_gateway_id": l2gw["id"],
                            "network_id": new_net["network"]["id"],
                            "segmentation_id": str(vlanID),
                        }
                        new_l2gw_conn = self.neutron.create_l2_gateway_connection(
                            {"l2_gateway_connection": l2gw_conn}
                        )
                        created_items[
                            "l2gwconn:"
                            + str(new_l2gw_conn["l2_gateway_connection"]["id"])
                        ] = True

            return new_net["network"]["id"], created_items
        except Exception as e:
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )

            if new_net:
                self.neutron.delete_network(new_net["network"]["id"])

            self._format_exception(e)
1021
1022 def get_network_list(self, filter_dict={}):
1023 """Obtain tenant networks of VIM
1024 Filter_dict can be:
1025 name: network name
1026 id: network uuid
1027 shared: boolean
1028 tenant_id: tenant
1029 admin_state_up: boolean
1030 status: 'ACTIVE'
1031 Returns the network list of dictionaries
1032 """
1033 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
1034
1035 try:
1036 self._reload_connection()
1037 filter_dict_os = filter_dict.copy()
1038
1039 if self.api_version3 and "tenant_id" in filter_dict_os:
1040 # TODO check
1041 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
1042
1043 net_dict = self.neutron.list_networks(**filter_dict_os)
1044 net_list = net_dict["networks"]
1045 self.__net_os2mano(net_list)
1046
1047 return net_list
1048 except (
1049 neExceptions.ConnectionFailed,
1050 ksExceptions.ClientException,
1051 neExceptions.NeutronException,
1052 ConnectionError,
1053 ) as e:
1054 self._format_exception(e)
1055
    def get_network(self, net_id):
        """Obtain details of network from VIM.

        Looks the network up via get_network_list() and enriches the result
        with its full subnet dicts and the neutron provider:* attributes.

        Args:
            net_id (str): VIM network identifier

        Returns:
            dict: network information, with "subnets" expanded and the
                "encapsulation*"/"segmentation_id" fields filled from the
                provider attributes

        Raises:
            vimconn.VimConnNotFoundException: if no network matches net_id
            vimconn.VimConnConflictException: if more than one network matches
        """
        self.logger.debug(" Getting tenant network %s from VIM", net_id)
        filter_dict = {"id": net_id}
        net_list = self.get_network_list(filter_dict)

        if len(net_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Network '{}' not found".format(net_id)
            )
        elif len(net_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one network with this criteria"
            )

        net = net_list[0]
        subnets = []
        for subnet_id in net.get("subnets", ()):
            try:
                subnet = self.neutron.show_subnet(subnet_id)
            except Exception as e:
                # Best effort: keep a placeholder carrying the fault instead of
                # failing the whole lookup over one broken subnet
                self.logger.error(
                    "osconnector.get_network(): Error getting subnet %s %s"
                    % (net_id, str(e))
                )
                subnet = {"id": subnet_id, "fault": str(e)}

            subnets.append(subnet)

        net["subnets"] = subnets
        # expose the provider attributes under the connector's own field names
        net["encapsulation"] = net.get("provider:network_type")
        net["encapsulation_type"] = net.get("provider:network_type")
        net["segmentation_id"] = net.get("provider:segmentation_id")
        net["encapsulation_id"] = net.get("provider:segmentation_id")

        return net
1093
1094 def delete_network(self, net_id, created_items=None):
1095 """
1096 Removes a tenant network from VIM and its associated elements
1097 :param net_id: VIM identifier of the network, provided by method new_network
1098 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1099 Returns the network identifier or raises an exception upon error or when network is not found
1100 """
1101 self.logger.debug("Deleting network '%s' from VIM", net_id)
1102
1103 if created_items is None:
1104 created_items = {}
1105
1106 try:
1107 self._reload_connection()
1108 # delete l2gw connections (if any) before deleting the network
1109 for k, v in created_items.items():
1110 if not v: # skip already deleted
1111 continue
1112
1113 try:
1114 k_item, _, k_id = k.partition(":")
1115 if k_item == "l2gwconn":
1116 self.neutron.delete_l2_gateway_connection(k_id)
1117 except Exception as e:
1118 self.logger.error(
1119 "Error deleting l2 gateway connection: {}: {}".format(
1120 type(e).__name__, e
1121 )
1122 )
1123
1124 # delete VM ports attached to this networks before the network
1125 ports = self.neutron.list_ports(network_id=net_id)
1126 for p in ports["ports"]:
1127 try:
1128 self.neutron.delete_port(p["id"])
1129 except Exception as e:
1130 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1131
1132 self.neutron.delete_network(net_id)
1133
1134 return net_id
1135 except (
1136 neExceptions.ConnectionFailed,
1137 neExceptions.NetworkNotFoundClient,
1138 neExceptions.NeutronException,
1139 ksExceptions.ClientException,
1140 neExceptions.NeutronException,
1141 ConnectionError,
1142 ) as e:
1143 self._format_exception(e)
1144
    def refresh_nets_status(self, net_list):
        """Get the status of the networks.

        Params: the list of network identifiers
        Returns a dictionary with:
            net_id:         #VIM id of this network
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, INACTIVE, DOWN (admin down),
                            #  BUILD (on building process)
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
        """
        net_dict = {}

        for net_id in net_list:
            net = {}

            try:
                net_vim = self.get_network(net_id)

                # translate the VIM status into the MANO vocabulary
                if net_vim["status"] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[net_vim["status"]]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim["status"]

                # an administratively-down network is reported as DOWN even if ACTIVE
                if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                    net["status"] = "DOWN"

                net["vim_info"] = self.serialize(net_vim)

                if net_vim.get("fault"):  # TODO
                    net["error_msg"] = str(net_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "DELETED"
                net["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                # any other VIM problem is reported as VIM_ERROR, not propagated
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "VIM_ERROR"
                net["error_msg"] = str(e)
            net_dict[net_id] = net
        return net_dict
1192
1193 def get_flavor(self, flavor_id):
1194 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1195 self.logger.debug("Getting flavor '%s'", flavor_id)
1196
1197 try:
1198 self._reload_connection()
1199 flavor = self.nova.flavors.find(id=flavor_id)
1200 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1201
1202 return flavor.to_dict()
1203 except (
1204 nvExceptions.NotFound,
1205 nvExceptions.ClientException,
1206 ksExceptions.ClientException,
1207 ConnectionError,
1208 ) as e:
1209 self._format_exception(e)
1210
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that matches the flavor description.

        If 'use_existing_flavors' is set to True at config, the closest flavor
        that provides same or more ram, vcpus and disk is returned. Otherwise a
        flavor with exactly the same ram, vcpus and disk is required.

        Args:
            flavor_dict (dict): contains the required ram, vcpus, disk and
                optionally ephemeral, swap and extended (EPA)

        Returns:
            str: the flavor_id

        Raises:
            vimconn.VimConnNotFoundException: when no flavor matches, or when
                "extended" (EPA) requirements are present (not implemented)
        """
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            # sentinel "worst candidate": any realistic flavor compares smaller
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
            # if len(numas) > 1:
            #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
            # numa=numas[0]
            # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                # flavors carrying extra_specs (EPA) are not comparable here
                if epa:
                    continue
                # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    # swap may come back as "" when unset; normalize to 0
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    # NOTE(review): tuple comparison is lexicographic, so this
                    # effectively orders candidates by ram first — confirm this
                    # "closest flavor" ordering is intended
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
1279
1280 @staticmethod
1281 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1282 """Process resource quota and fill up extra_specs.
1283 Args:
1284 quota (dict): Keeping the quota of resurces
1285 prefix (str) Prefix
1286 extra_specs (dict) Dict to be filled to be used during flavor creation
1287
1288 """
1289 if "limit" in quota:
1290 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1291
1292 if "reserve" in quota:
1293 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1294
1295 if "shares" in quota:
1296 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1297 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1298
1299 @staticmethod
1300 def process_numa_memory(
1301 numa: dict, node_id: Optional[int], extra_specs: dict
1302 ) -> None:
1303 """Set the memory in extra_specs.
1304 Args:
1305 numa (dict): A dictionary which includes numa information
1306 node_id (int): ID of numa node
1307 extra_specs (dict): To be filled.
1308
1309 """
1310 if not numa.get("memory"):
1311 return
1312 memory_mb = numa["memory"] * 1024
1313 memory = "hw:numa_mem.{}".format(node_id)
1314 extra_specs[memory] = int(memory_mb)
1315
1316 @staticmethod
1317 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1318 """Set the cpu in extra_specs.
1319 Args:
1320 numa (dict): A dictionary which includes numa information
1321 node_id (int): ID of numa node
1322 extra_specs (dict): To be filled.
1323
1324 """
1325 if not numa.get("vcpu"):
1326 return
1327 vcpu = numa["vcpu"]
1328 cpu = "hw:numa_cpus.{}".format(node_id)
1329 vcpu = ",".join(map(str, vcpu))
1330 extra_specs[cpu] = vcpu
1331
1332 @staticmethod
1333 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1334 """Fill up extra_specs if numa has paired-threads.
1335 Args:
1336 numa (dict): A dictionary which includes numa information
1337 extra_specs (dict): To be filled.
1338
1339 Returns:
1340 threads (int) Number of virtual cpus
1341
1342 """
1343 if not numa.get("paired-threads"):
1344 return
1345
1346 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1347 threads = numa["paired-threads"] * 2
1348 extra_specs["hw:cpu_thread_policy"] = "require"
1349 extra_specs["hw:cpu_policy"] = "dedicated"
1350 return threads
1351
1352 @staticmethod
1353 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1354 """Fill up extra_specs if numa has cores.
1355 Args:
1356 numa (dict): A dictionary which includes numa information
1357 extra_specs (dict): To be filled.
1358
1359 Returns:
1360 cores (int) Number of virtual cpus
1361
1362 """
1363 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1364 # architecture, or a non-SMT architecture will be emulated
1365 if not numa.get("cores"):
1366 return
1367 cores = numa["cores"]
1368 extra_specs["hw:cpu_thread_policy"] = "isolate"
1369 extra_specs["hw:cpu_policy"] = "dedicated"
1370 return cores
1371
1372 @staticmethod
1373 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1374 """Fill up extra_specs if numa has threads.
1375 Args:
1376 numa (dict): A dictionary which includes numa information
1377 extra_specs (dict): To be filled.
1378
1379 Returns:
1380 threads (int) Number of virtual cpus
1381
1382 """
1383 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1384 if not numa.get("threads"):
1385 return
1386 threads = numa["threads"]
1387 extra_specs["hw:cpu_thread_policy"] = "prefer"
1388 extra_specs["hw:cpu_policy"] = "dedicated"
1389 return threads
1390
1391 def _process_numa_parameters_of_flavor(
1392 self, numas: List, extra_specs: Dict
1393 ) -> None:
1394 """Process numa parameters and fill up extra_specs.
1395
1396 Args:
1397 numas (list): List of dictionary which includes numa information
1398 extra_specs (dict): To be filled.
1399
1400 """
1401 numa_nodes = len(numas)
1402 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1403 cpu_cores, cpu_threads = 0, 0
1404
1405 if self.vim_type == "VIO":
1406 self.process_vio_numa_nodes(numa_nodes, extra_specs)
1407
1408 for numa in numas:
1409 if "id" in numa:
1410 node_id = numa["id"]
1411 # overwrite ram and vcpus
1412 # check if key "memory" is present in numa else use ram value at flavor
1413 self.process_numa_memory(numa, node_id, extra_specs)
1414 self.process_numa_vcpu(numa, node_id, extra_specs)
1415
1416 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1417 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1418
1419 if "paired-threads" in numa:
1420 threads = self.process_numa_paired_threads(numa, extra_specs)
1421 cpu_threads += threads
1422
1423 elif "cores" in numa:
1424 cores = self.process_numa_cores(numa, extra_specs)
1425 cpu_cores += cores
1426
1427 elif "threads" in numa:
1428 threads = self.process_numa_threads(numa, extra_specs)
1429 cpu_threads += threads
1430
1431 if cpu_cores:
1432 extra_specs["hw:cpu_cores"] = str(cpu_cores)
1433 if cpu_threads:
1434 extra_specs["hw:cpu_threads"] = str(cpu_threads)
1435
1436 @staticmethod
1437 def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1438 """According to number of numa nodes, updates the extra_specs for VIO.
1439
1440 Args:
1441
1442 numa_nodes (int): List keeps the numa node numbers
1443 extra_specs (dict): Extra specs dict to be updated
1444
1445 """
1446 # If there are several numas, we do not define specific affinity.
1447 extra_specs["vmware:latency_sensitivity_level"] = "high"
1448
1449 def _change_flavor_name(
1450 self, name: str, name_suffix: int, flavor_data: dict
1451 ) -> str:
1452 """Change the flavor name if the name already exists.
1453
1454 Args:
1455 name (str): Flavor name to be checked
1456 name_suffix (int): Suffix to be appended to name
1457 flavor_data (dict): Flavor dict
1458
1459 Returns:
1460 name (str): New flavor name to be used
1461
1462 """
1463 # Get used names
1464 fl = self.nova.flavors.list()
1465 fl_names = [f.name for f in fl]
1466
1467 while name in fl_names:
1468 name_suffix += 1
1469 name = flavor_data["name"] + "-" + str(name_suffix)
1470
1471 return name
1472
1473 def _process_extended_config_of_flavor(
1474 self, extended: dict, extra_specs: dict
1475 ) -> None:
1476 """Process the extended dict to fill up extra_specs.
1477 Args:
1478
1479 extended (dict): Keeping the extra specification of flavor
1480 extra_specs (dict) Dict to be filled to be used during flavor creation
1481
1482 """
1483 quotas = {
1484 "cpu-quota": "cpu",
1485 "mem-quota": "memory",
1486 "vif-quota": "vif",
1487 "disk-io-quota": "disk_io",
1488 }
1489
1490 page_sizes = {
1491 "LARGE": "large",
1492 "SMALL": "small",
1493 "SIZE_2MB": "2MB",
1494 "SIZE_1GB": "1GB",
1495 "PREFER_LARGE": "any",
1496 }
1497
1498 policies = {
1499 "cpu-pinning-policy": "hw:cpu_policy",
1500 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1501 "mem-policy": "hw:numa_mempolicy",
1502 }
1503
1504 numas = extended.get("numas")
1505 if numas:
1506 self._process_numa_parameters_of_flavor(numas, extra_specs)
1507
1508 for quota, item in quotas.items():
1509 if quota in extended.keys():
1510 self.process_resource_quota(extended.get(quota), item, extra_specs)
1511
1512 # Set the mempage size as specified in the descriptor
1513 if extended.get("mempage-size"):
1514 if extended["mempage-size"] in page_sizes.keys():
1515 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1516 else:
1517 # Normally, validations in NBI should not allow to this condition.
1518 self.logger.debug(
1519 "Invalid mempage-size %s. Will be ignored",
1520 extended.get("mempage-size"),
1521 )
1522
1523 for policy, hw_policy in policies.items():
1524 if extended.get(policy):
1525 extra_specs[hw_policy] = extended[policy].lower()
1526
1527 @staticmethod
1528 def _get_flavor_details(flavor_data: dict) -> Tuple:
1529 """Returns the details of flavor
1530 Args:
1531 flavor_data (dict): Dictionary that includes required flavor details
1532
1533 Returns:
1534 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1535
1536 """
1537 return (
1538 flavor_data.get("ram", 64),
1539 flavor_data.get("vcpus", 1),
1540 {},
1541 flavor_data.get("extended"),
1542 )
1543
    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.

        if change_name_if_used is True, it will change name in case of conflict,
        because it is not supported name repetition.

        Args:
            flavor_data (dict): Flavor details to be processed
            change_name_if_used (bool): Change name in case of conflict

        Returns:
            flavor_id (str): flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        # probe for a free name; the helper increments its own
                        # copy of name_suffix while searching
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )
                    if extended:
                        self._process_extended_config_of_flavor(extended, extra_specs)

                    # Create flavor

                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    # name clash: retry while attempts remain (a fresh name is
                    # computed at the top of the loop)
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)

        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
1609
1610 def delete_flavor(self, flavor_id):
1611 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1612 try:
1613 self._reload_connection()
1614 self.nova.flavors.delete(flavor_id)
1615
1616 return flavor_id
1617 # except nvExceptions.BadRequest as e:
1618 except (
1619 nvExceptions.NotFound,
1620 ksExceptions.ClientException,
1621 nvExceptions.ClientException,
1622 ConnectionError,
1623 ) as e:
1624 self._format_exception(e)
1625
1626 def new_image(self, image_dict):
1627 """
1628 Adds a tenant image to VIM. imge_dict is a dictionary with:
1629 name: name
1630 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1631 location: path or URI
1632 public: "yes" or "no"
1633 metadata: metadata of the image
1634 Returns the image_id
1635 """
1636 retry = 0
1637 max_retries = 3
1638
1639 while retry < max_retries:
1640 retry += 1
1641 try:
1642 self._reload_connection()
1643
1644 # determine format http://docs.openstack.org/developer/glance/formats.html
1645 if "disk_format" in image_dict:
1646 disk_format = image_dict["disk_format"]
1647 else: # autodiscover based on extension
1648 if image_dict["location"].endswith(".qcow2"):
1649 disk_format = "qcow2"
1650 elif image_dict["location"].endswith(".vhd"):
1651 disk_format = "vhd"
1652 elif image_dict["location"].endswith(".vmdk"):
1653 disk_format = "vmdk"
1654 elif image_dict["location"].endswith(".vdi"):
1655 disk_format = "vdi"
1656 elif image_dict["location"].endswith(".iso"):
1657 disk_format = "iso"
1658 elif image_dict["location"].endswith(".aki"):
1659 disk_format = "aki"
1660 elif image_dict["location"].endswith(".ari"):
1661 disk_format = "ari"
1662 elif image_dict["location"].endswith(".ami"):
1663 disk_format = "ami"
1664 else:
1665 disk_format = "raw"
1666
1667 self.logger.debug(
1668 "new_image: '%s' loading from '%s'",
1669 image_dict["name"],
1670 image_dict["location"],
1671 )
1672 if self.vim_type == "VIO":
1673 container_format = "bare"
1674 if "container_format" in image_dict:
1675 container_format = image_dict["container_format"]
1676
1677 new_image = self.glance.images.create(
1678 name=image_dict["name"],
1679 container_format=container_format,
1680 disk_format=disk_format,
1681 )
1682 else:
1683 new_image = self.glance.images.create(name=image_dict["name"])
1684
1685 if image_dict["location"].startswith("http"):
1686 # TODO there is not a method to direct download. It must be downloaded locally with requests
1687 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1688 else: # local path
1689 with open(image_dict["location"]) as fimage:
1690 self.glance.images.upload(new_image.id, fimage)
1691 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1692 # image_dict.get("public","yes")=="yes",
1693 # container_format="bare", data=fimage, disk_format=disk_format)
1694
1695 metadata_to_load = image_dict.get("metadata")
1696
1697 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1698 # for openstack
1699 if self.vim_type == "VIO":
1700 metadata_to_load["upload_location"] = image_dict["location"]
1701 else:
1702 metadata_to_load["location"] = image_dict["location"]
1703
1704 self.glance.images.update(new_image.id, **metadata_to_load)
1705
1706 return new_image.id
1707 except (
1708 nvExceptions.Conflict,
1709 ksExceptions.ClientException,
1710 nvExceptions.ClientException,
1711 ) as e:
1712 self._format_exception(e)
1713 except (
1714 HTTPException,
1715 gl1Exceptions.HTTPException,
1716 gl1Exceptions.CommunicationError,
1717 ConnectionError,
1718 ) as e:
1719 if retry == max_retries:
1720 continue
1721
1722 self._format_exception(e)
1723 except IOError as e: # can not open the file
1724 raise vimconn.VimConnConnectionException(
1725 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1726 http_code=vimconn.HTTP_Bad_Request,
1727 )
1728
1729 def delete_image(self, image_id):
1730 """Deletes a tenant image from openstack VIM. Returns the old id"""
1731 try:
1732 self._reload_connection()
1733 self.glance.images.delete(image_id)
1734
1735 return image_id
1736 except (
1737 nvExceptions.NotFound,
1738 ksExceptions.ClientException,
1739 nvExceptions.ClientException,
1740 gl1Exceptions.CommunicationError,
1741 gl1Exceptions.HTTPNotFound,
1742 ConnectionError,
1743 ) as e: # TODO remove
1744 self._format_exception(e)
1745
1746 def get_image_id_from_path(self, path):
1747 """Get the image id from image path in the VIM database. Returns the image_id"""
1748 try:
1749 self._reload_connection()
1750 images = self.glance.images.list()
1751
1752 for image in images:
1753 if image.metadata.get("location") == path:
1754 return image.id
1755
1756 raise vimconn.VimConnNotFoundException(
1757 "image with location '{}' not found".format(path)
1758 )
1759 except (
1760 ksExceptions.ClientException,
1761 nvExceptions.ClientException,
1762 gl1Exceptions.CommunicationError,
1763 ConnectionError,
1764 ) as e:
1765 self._format_exception(e)
1766
1767 def get_image_list(self, filter_dict={}):
1768 """Obtain tenant images from VIM
1769 Filter_dict can be:
1770 id: image id
1771 name: image name
1772 checksum: image checksum
1773 Returns the image list of dictionaries:
1774 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1775 List can be empty
1776 """
1777 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1778
1779 try:
1780 self._reload_connection()
1781 # filter_dict_os = filter_dict.copy()
1782 # First we filter by the available filter fields: name, id. The others are removed.
1783 image_list = self.glance.images.list()
1784 filtered_list = []
1785
1786 for image in image_list:
1787 try:
1788 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1789 continue
1790
1791 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1792 continue
1793
1794 if (
1795 filter_dict.get("checksum")
1796 and image["checksum"] != filter_dict["checksum"]
1797 ):
1798 continue
1799
1800 filtered_list.append(image.copy())
1801 except gl1Exceptions.HTTPNotFound:
1802 pass
1803
1804 return filtered_list
1805 except (
1806 ksExceptions.ClientException,
1807 nvExceptions.ClientException,
1808 gl1Exceptions.CommunicationError,
1809 ConnectionError,
1810 ) as e:
1811 self._format_exception(e)
1812
1813 def __wait_for_vm(self, vm_id, status):
1814 """wait until vm is in the desired status and return True.
1815 If the VM gets in ERROR status, return false.
1816 If the timeout is reached generate an exception"""
1817 elapsed_time = 0
1818 while elapsed_time < server_timeout:
1819 vm_status = self.nova.servers.get(vm_id).status
1820
1821 if vm_status == status:
1822 return True
1823
1824 if vm_status == "ERROR":
1825 return False
1826
1827 time.sleep(5)
1828 elapsed_time += 5
1829
1830 # if we exceeded the timeout rollback
1831 if elapsed_time >= server_timeout:
1832 raise vimconn.VimConnException(
1833 "Timeout waiting for instance " + vm_id + " to get " + status,
1834 http_code=vimconn.HTTP_Request_Timeout,
1835 )
1836
1837 def _get_openstack_availablity_zones(self):
1838 """
1839 Get from openstack availability zones available
1840 :return:
1841 """
1842 try:
1843 openstack_availability_zone = self.nova.availability_zones.list()
1844 openstack_availability_zone = [
1845 str(zone.zoneName)
1846 for zone in openstack_availability_zone
1847 if zone.zoneName != "internal"
1848 ]
1849
1850 return openstack_availability_zone
1851 except Exception:
1852 return None
1853
1854 def _set_availablity_zones(self):
1855 """
1856 Set vim availablity zone
1857 :return:
1858 """
1859 if "availability_zone" in self.config:
1860 vim_availability_zones = self.config.get("availability_zone")
1861
1862 if isinstance(vim_availability_zones, str):
1863 self.availability_zone = [vim_availability_zones]
1864 elif isinstance(vim_availability_zones, list):
1865 self.availability_zone = vim_availability_zones
1866 else:
1867 self.availability_zone = self._get_openstack_availablity_zones()
1868
1869 def _get_vm_availability_zone(
1870 self, availability_zone_index, availability_zone_list
1871 ):
1872 """
1873 Return thge availability zone to be used by the created VM.
1874 :return: The VIM availability zone to be used or None
1875 """
1876 if availability_zone_index is None:
1877 if not self.config.get("availability_zone"):
1878 return None
1879 elif isinstance(self.config.get("availability_zone"), str):
1880 return self.config["availability_zone"]
1881 else:
1882 # TODO consider using a different parameter at config for default AV and AV list match
1883 return self.config["availability_zone"][0]
1884
1885 vim_availability_zones = self.availability_zone
1886 # check if VIM offer enough availability zones describe in the VNFD
1887 if vim_availability_zones and len(availability_zone_list) <= len(
1888 vim_availability_zones
1889 ):
1890 # check if all the names of NFV AV match VIM AV names
1891 match_by_index = False
1892 for av in availability_zone_list:
1893 if av not in vim_availability_zones:
1894 match_by_index = True
1895 break
1896
1897 if match_by_index:
1898 return vim_availability_zones[availability_zone_index]
1899 else:
1900 return availability_zone_list[availability_zone_index]
1901 else:
1902 raise vimconn.VimConnConflictException(
1903 "No enough availability zones at VIM for this deployment"
1904 )
1905
1906 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1907 """Fill up the security_groups in the port_dict.
1908
1909 Args:
1910 net (dict): Network details
1911 port_dict (dict): Port details
1912
1913 """
1914 if (
1915 self.config.get("security_groups")
1916 and net.get("port_security") is not False
1917 and not self.config.get("no_port_security_extension")
1918 ):
1919 if not self.security_groups_id:
1920 self._get_ids_from_name()
1921
1922 port_dict["security_groups"] = self.security_groups_id
1923
1924 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1925 """Fill up the network binding depending on network type in the port_dict.
1926
1927 Args:
1928 net (dict): Network details
1929 port_dict (dict): Port details
1930
1931 """
1932 if not net.get("type"):
1933 raise vimconn.VimConnException("Type is missing in the network details.")
1934
1935 if net["type"] == "virtual":
1936 pass
1937
1938 # For VF
1939 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1940 port_dict["binding:vnic_type"] = "direct"
1941
1942 # VIO specific Changes
1943 if self.vim_type == "VIO":
1944 # Need to create port with port_security_enabled = False and no-security-groups
1945 port_dict["port_security_enabled"] = False
1946 port_dict["provider_security_groups"] = []
1947 port_dict["security_groups"] = []
1948
1949 else:
1950 # For PT PCI-PASSTHROUGH
1951 port_dict["binding:vnic_type"] = "direct-physical"
1952
1953 @staticmethod
1954 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1955 """Set the "ip" parameter in net dictionary.
1956
1957 Args:
1958 new_port (dict): New created port
1959 net (dict): Network details
1960
1961 """
1962 fixed_ips = new_port["port"].get("fixed_ips")
1963
1964 if fixed_ips:
1965 net["ip"] = fixed_ips[0].get("ip_address")
1966 else:
1967 net["ip"] = None
1968
1969 @staticmethod
1970 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1971 """Fill up the mac_address and fixed_ips in port_dict.
1972
1973 Args:
1974 net (dict): Network details
1975 port_dict (dict): Port details
1976
1977 """
1978 if net.get("mac_address"):
1979 port_dict["mac_address"] = net["mac_address"]
1980
1981 ip_dual_list = []
1982 if ip_list := net.get("ip_address"):
1983 if not isinstance(ip_list, list):
1984 ip_list = [ip_list]
1985 for ip in ip_list:
1986 ip_dict = {"ip_address": ip}
1987 ip_dual_list.append(ip_dict)
1988 port_dict["fixed_ips"] = ip_dual_list
1989 # TODO add "subnet_id": <subnet_id>
1990
1991 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1992 """Create new port using neutron.
1993
1994 Args:
1995 port_dict (dict): Port details
1996 created_items (dict): All created items
1997 net (dict): Network details
1998
1999 Returns:
2000 new_port (dict): New created port
2001
2002 """
2003 new_port = self.neutron.create_port({"port": port_dict})
2004 created_items["port:" + str(new_port["port"]["id"])] = True
2005 net["mac_address"] = new_port["port"]["mac_address"]
2006 net["vim_id"] = new_port["port"]["id"]
2007
2008 return new_port
2009
2010 def _create_port(
2011 self, net: dict, name: str, created_items: dict
2012 ) -> Tuple[dict, dict]:
2013 """Create port using net details.
2014
2015 Args:
2016 net (dict): Network details
2017 name (str): Name to be used as network name if net dict does not include name
2018 created_items (dict): All created items
2019
2020 Returns:
2021 new_port, port New created port, port dictionary
2022
2023 """
2024
2025 port_dict = {
2026 "network_id": net["net_id"],
2027 "name": net.get("name"),
2028 "admin_state_up": True,
2029 }
2030
2031 if not port_dict["name"]:
2032 port_dict["name"] = name
2033
2034 self._prepare_port_dict_security_groups(net, port_dict)
2035
2036 self._prepare_port_dict_binding(net, port_dict)
2037
2038 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
2039
2040 new_port = self._create_new_port(port_dict, created_items, net)
2041
2042 vimconnector._set_fixed_ip(new_port, net)
2043
2044 port = {"port-id": new_port["port"]["id"]}
2045
2046 if float(self.nova.api_version.get_string()) >= 2.32:
2047 port["tag"] = new_port["port"]["name"]
2048
2049 return new_port, port
2050
2051 def _prepare_network_for_vminstance(
2052 self,
2053 name: str,
2054 net_list: list,
2055 created_items: dict,
2056 net_list_vim: list,
2057 external_network: list,
2058 no_secured_ports: list,
2059 ) -> None:
2060 """Create port and fill up net dictionary for new VM instance creation.
2061
2062 Args:
2063 name (str): Name of network
2064 net_list (list): List of networks
2065 created_items (dict): All created items belongs to a VM
2066 net_list_vim (list): List of ports
2067 external_network (list): List of external-networks
2068 no_secured_ports (list): Port security disabled ports
2069 """
2070
2071 self._reload_connection()
2072
2073 for net in net_list:
2074 # Skip non-connected iface
2075 if not net.get("net_id"):
2076 continue
2077
2078 new_port, port = self._create_port(net, name, created_items)
2079
2080 net_list_vim.append(port)
2081
2082 if net.get("floating_ip", False):
2083 net["exit_on_floating_ip_error"] = True
2084 external_network.append(net)
2085
2086 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2087 net["exit_on_floating_ip_error"] = False
2088 external_network.append(net)
2089 net["floating_ip"] = self.config.get("use_floating_ip")
2090
2091 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2092 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2093 if net.get("port_security") is False and not self.config.get(
2094 "no_port_security_extension"
2095 ):
2096 no_secured_ports.append(
2097 (
2098 new_port["port"]["id"],
2099 net.get("port_security_disable_strategy"),
2100 )
2101 )
2102
2103 def _prepare_persistent_root_volumes(
2104 self,
2105 name: str,
2106 vm_av_zone: list,
2107 disk: dict,
2108 base_disk_index: int,
2109 block_device_mapping: dict,
2110 existing_vim_volumes: list,
2111 created_items: dict,
2112 ) -> Optional[str]:
2113 """Prepare persistent root volumes for new VM instance.
2114
2115 Args:
2116 name (str): Name of VM instance
2117 vm_av_zone (list): List of availability zones
2118 disk (dict): Disk details
2119 base_disk_index (int): Disk index
2120 block_device_mapping (dict): Block device details
2121 existing_vim_volumes (list): Existing disk details
2122 created_items (dict): All created items belongs to VM
2123
2124 Returns:
2125 boot_volume_id (str): ID of boot volume
2126
2127 """
2128 # Disk may include only vim_volume_id or only vim_id."
2129 # Use existing persistent root volume finding with volume_id or vim_id
2130 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2131
2132 if disk.get(key_id):
2133 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2134 existing_vim_volumes.append({"id": disk[key_id]})
2135
2136 else:
2137 # Create persistent root volume
2138 volume = self.cinder.volumes.create(
2139 size=disk["size"],
2140 name=name + "vd" + chr(base_disk_index),
2141 imageRef=disk["image_id"],
2142 # Make sure volume is in the same AZ as the VM to be attached to
2143 availability_zone=vm_av_zone,
2144 )
2145 boot_volume_id = volume.id
2146 self.update_block_device_mapping(
2147 volume=volume,
2148 block_device_mapping=block_device_mapping,
2149 base_disk_index=base_disk_index,
2150 disk=disk,
2151 created_items=created_items,
2152 )
2153
2154 return boot_volume_id
2155
2156 @staticmethod
2157 def update_block_device_mapping(
2158 volume: object,
2159 block_device_mapping: dict,
2160 base_disk_index: int,
2161 disk: dict,
2162 created_items: dict,
2163 ) -> None:
2164 """Add volume information to block device mapping dict.
2165 Args:
2166 volume (object): Created volume object
2167 block_device_mapping (dict): Block device details
2168 base_disk_index (int): Disk index
2169 disk (dict): Disk details
2170 created_items (dict): All created items belongs to VM
2171 """
2172 if not volume:
2173 raise vimconn.VimConnException("Volume is empty.")
2174
2175 if not hasattr(volume, "id"):
2176 raise vimconn.VimConnException(
2177 "Created volume is not valid, does not have id attribute."
2178 )
2179
2180 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2181 if disk.get("multiattach"): # multiattach volumes do not belong to VDUs
2182 return
2183 volume_txt = "volume:" + str(volume.id)
2184 if disk.get("keep"):
2185 volume_txt += ":keep"
2186 created_items[volume_txt] = True
2187
2188 def new_shared_volumes(self, shared_volume_data) -> (str, str):
2189 try:
2190 volume = self.cinder.volumes.create(
2191 size=shared_volume_data["size"],
2192 name=shared_volume_data["name"],
2193 volume_type="multiattach",
2194 )
2195 return (volume.name, volume.id)
2196 except (ConnectionError, KeyError) as e:
2197 self._format_exception(e)
2198
2199 def _prepare_shared_volumes(
2200 self,
2201 name: str,
2202 disk: dict,
2203 base_disk_index: int,
2204 block_device_mapping: dict,
2205 existing_vim_volumes: list,
2206 created_items: dict,
2207 ):
2208 volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
2209 if volumes.get(disk["name"]):
2210 sv_id = volumes[disk["name"]]
2211 max_retries = 3
2212 vol_status = ""
2213 # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
2214 while max_retries:
2215 max_retries -= 1
2216 volume = self.cinder.volumes.get(sv_id)
2217 vol_status = volume.status
2218 if volume.status not in ("in-use", "available"):
2219 time.sleep(5)
2220 continue
2221 self.update_block_device_mapping(
2222 volume=volume,
2223 block_device_mapping=block_device_mapping,
2224 base_disk_index=base_disk_index,
2225 disk=disk,
2226 created_items=created_items,
2227 )
2228 return
2229 raise vimconn.VimConnException(
2230 "Shared volume is not prepared, status is: {}".format(vol_status),
2231 http_code=vimconn.HTTP_Internal_Server_Error,
2232 )
2233
2234 def _prepare_non_root_persistent_volumes(
2235 self,
2236 name: str,
2237 disk: dict,
2238 vm_av_zone: list,
2239 block_device_mapping: dict,
2240 base_disk_index: int,
2241 existing_vim_volumes: list,
2242 created_items: dict,
2243 ) -> None:
2244 """Prepare persistent volumes for new VM instance.
2245
2246 Args:
2247 name (str): Name of VM instance
2248 disk (dict): Disk details
2249 vm_av_zone (list): List of availability zones
2250 block_device_mapping (dict): Block device details
2251 base_disk_index (int): Disk index
2252 existing_vim_volumes (list): Existing disk details
2253 created_items (dict): All created items belongs to VM
2254 """
2255 # Non-root persistent volumes
2256 # Disk may include only vim_volume_id or only vim_id."
2257 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2258 if disk.get(key_id):
2259 # Use existing persistent volume
2260 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2261 existing_vim_volumes.append({"id": disk[key_id]})
2262 else:
2263 volume_name = f"{name}vd{chr(base_disk_index)}"
2264 volume = self.cinder.volumes.create(
2265 size=disk["size"],
2266 name=volume_name,
2267 # Make sure volume is in the same AZ as the VM to be attached to
2268 availability_zone=vm_av_zone,
2269 )
2270 self.update_block_device_mapping(
2271 volume=volume,
2272 block_device_mapping=block_device_mapping,
2273 base_disk_index=base_disk_index,
2274 disk=disk,
2275 created_items=created_items,
2276 )
2277
2278 def _wait_for_created_volumes_availability(
2279 self, elapsed_time: int, created_items: dict
2280 ) -> Optional[int]:
2281 """Wait till created volumes become available.
2282
2283 Args:
2284 elapsed_time (int): Passed time while waiting
2285 created_items (dict): All created items belongs to VM
2286
2287 Returns:
2288 elapsed_time (int): Time spent while waiting
2289
2290 """
2291 while elapsed_time < volume_timeout:
2292 for created_item in created_items:
2293 v, volume_id = (
2294 created_item.split(":")[0],
2295 created_item.split(":")[1],
2296 )
2297 if v == "volume":
2298 volume = self.cinder.volumes.get(volume_id)
2299 if (
2300 volume.volume_type == "multiattach"
2301 and volume.status == "in-use"
2302 ):
2303 return elapsed_time
2304 elif volume.status != "available":
2305 break
2306 else:
2307 # All ready: break from while
2308 break
2309
2310 time.sleep(5)
2311 elapsed_time += 5
2312
2313 return elapsed_time
2314
2315 def _wait_for_existing_volumes_availability(
2316 self, elapsed_time: int, existing_vim_volumes: list
2317 ) -> Optional[int]:
2318 """Wait till existing volumes become available.
2319
2320 Args:
2321 elapsed_time (int): Passed time while waiting
2322 existing_vim_volumes (list): Existing volume details
2323
2324 Returns:
2325 elapsed_time (int): Time spent while waiting
2326
2327 """
2328
2329 while elapsed_time < volume_timeout:
2330 for volume in existing_vim_volumes:
2331 v = self.cinder.volumes.get(volume["id"])
2332 if v.volume_type == "multiattach" and v.status == "in-use":
2333 return elapsed_time
2334 elif v.status != "available":
2335 break
2336 else: # all ready: break from while
2337 break
2338
2339 time.sleep(5)
2340 elapsed_time += 5
2341
2342 return elapsed_time
2343
2344 def _prepare_disk_for_vminstance(
2345 self,
2346 name: str,
2347 existing_vim_volumes: list,
2348 created_items: dict,
2349 vm_av_zone: list,
2350 block_device_mapping: dict,
2351 disk_list: list = None,
2352 ) -> None:
2353 """Prepare all volumes for new VM instance.
2354
2355 Args:
2356 name (str): Name of Instance
2357 existing_vim_volumes (list): List of existing volumes
2358 created_items (dict): All created items belongs to VM
2359 vm_av_zone (list): VM availability zone
2360 block_device_mapping (dict): Block devices to be attached to VM
2361 disk_list (list): List of disks
2362
2363 """
2364 # Create additional volumes in case these are present in disk_list
2365 base_disk_index = ord("b")
2366 boot_volume_id = None
2367 elapsed_time = 0
2368 for disk in disk_list:
2369 if "image_id" in disk:
2370 # Root persistent volume
2371 base_disk_index = ord("a")
2372 boot_volume_id = self._prepare_persistent_root_volumes(
2373 name=name,
2374 vm_av_zone=vm_av_zone,
2375 disk=disk,
2376 base_disk_index=base_disk_index,
2377 block_device_mapping=block_device_mapping,
2378 existing_vim_volumes=existing_vim_volumes,
2379 created_items=created_items,
2380 )
2381 elif disk.get("multiattach"):
2382 self._prepare_shared_volumes(
2383 name=name,
2384 disk=disk,
2385 base_disk_index=base_disk_index,
2386 block_device_mapping=block_device_mapping,
2387 existing_vim_volumes=existing_vim_volumes,
2388 created_items=created_items,
2389 )
2390 else:
2391 # Non-root persistent volume
2392 self._prepare_non_root_persistent_volumes(
2393 name=name,
2394 disk=disk,
2395 vm_av_zone=vm_av_zone,
2396 block_device_mapping=block_device_mapping,
2397 base_disk_index=base_disk_index,
2398 existing_vim_volumes=existing_vim_volumes,
2399 created_items=created_items,
2400 )
2401 base_disk_index += 1
2402
2403 # Wait until created volumes are with status available
2404 elapsed_time = self._wait_for_created_volumes_availability(
2405 elapsed_time, created_items
2406 )
2407 # Wait until existing volumes in vim are with status available
2408 elapsed_time = self._wait_for_existing_volumes_availability(
2409 elapsed_time, existing_vim_volumes
2410 )
2411 # If we exceeded the timeout rollback
2412 if elapsed_time >= volume_timeout:
2413 raise vimconn.VimConnException(
2414 "Timeout creating volumes for instance " + name,
2415 http_code=vimconn.HTTP_Request_Timeout,
2416 )
2417 if boot_volume_id:
2418 self.cinder.volumes.set_bootable(boot_volume_id, True)
2419
2420 def _find_the_external_network_for_floating_ip(self):
2421 """Get the external network ip in order to create floating IP.
2422
2423 Returns:
2424 pool_id (str): External network pool ID
2425
2426 """
2427
2428 # Find the external network
2429 external_nets = list()
2430
2431 for net in self.neutron.list_networks()["networks"]:
2432 if net["router:external"]:
2433 external_nets.append(net)
2434
2435 if len(external_nets) == 0:
2436 raise vimconn.VimConnException(
2437 "Cannot create floating_ip automatically since "
2438 "no external network is present",
2439 http_code=vimconn.HTTP_Conflict,
2440 )
2441
2442 if len(external_nets) > 1:
2443 raise vimconn.VimConnException(
2444 "Cannot create floating_ip automatically since "
2445 "multiple external networks are present",
2446 http_code=vimconn.HTTP_Conflict,
2447 )
2448
2449 # Pool ID
2450 return external_nets[0].get("id")
2451
2452 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2453 """Trigger neutron to create a new floating IP using external network ID.
2454
2455 Args:
2456 param (dict): Input parameters to create a floating IP
2457 created_items (dict): All created items belongs to new VM instance
2458
2459 Raises:
2460
2461 VimConnException
2462 """
2463 try:
2464 self.logger.debug("Creating floating IP")
2465 new_floating_ip = self.neutron.create_floatingip(param)
2466 free_floating_ip = new_floating_ip["floatingip"]["id"]
2467 created_items["floating_ip:" + str(free_floating_ip)] = True
2468
2469 except Exception as e:
2470 raise vimconn.VimConnException(
2471 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2472 http_code=vimconn.HTTP_Conflict,
2473 )
2474
2475 def _create_floating_ip(
2476 self, floating_network: dict, server: object, created_items: dict
2477 ) -> None:
2478 """Get the available Pool ID and create a new floating IP.
2479
2480 Args:
2481 floating_network (dict): Dict including external network ID
2482 server (object): Server object
2483 created_items (dict): All created items belongs to new VM instance
2484
2485 """
2486
2487 # Pool_id is available
2488 if (
2489 isinstance(floating_network["floating_ip"], str)
2490 and floating_network["floating_ip"].lower() != "true"
2491 ):
2492 pool_id = floating_network["floating_ip"]
2493
2494 # Find the Pool_id
2495 else:
2496 pool_id = self._find_the_external_network_for_floating_ip()
2497
2498 param = {
2499 "floatingip": {
2500 "floating_network_id": pool_id,
2501 "tenant_id": server.tenant_id,
2502 }
2503 }
2504
2505 self._neutron_create_float_ip(param, created_items)
2506
2507 def _find_floating_ip(
2508 self,
2509 server: object,
2510 floating_ips: list,
2511 floating_network: dict,
2512 ) -> Optional[str]:
2513 """Find the available free floating IPs if there are.
2514
2515 Args:
2516 server (object): Server object
2517 floating_ips (list): List of floating IPs
2518 floating_network (dict): Details of floating network such as ID
2519
2520 Returns:
2521 free_floating_ip (str): Free floating ip address
2522
2523 """
2524 for fip in floating_ips:
2525 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2526 continue
2527
2528 if isinstance(floating_network["floating_ip"], str):
2529 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2530 continue
2531
2532 return fip["id"]
2533
2534 def _assign_floating_ip(
2535 self, free_floating_ip: str, floating_network: dict
2536 ) -> Dict:
2537 """Assign the free floating ip address to port.
2538
2539 Args:
2540 free_floating_ip (str): Floating IP to be assigned
2541 floating_network (dict): ID of floating network
2542
2543 Returns:
2544 fip (dict) (dict): Floating ip details
2545
2546 """
2547 # The vim_id key contains the neutron.port_id
2548 self.neutron.update_floatingip(
2549 free_floating_ip,
2550 {"floatingip": {"port_id": floating_network["vim_id"]}},
2551 )
2552 # For race condition ensure not re-assigned to other VM after 5 seconds
2553 time.sleep(5)
2554
2555 return self.neutron.show_floatingip(free_floating_ip)
2556
2557 def _get_free_floating_ip(
2558 self, server: object, floating_network: dict
2559 ) -> Optional[str]:
2560 """Get the free floating IP address.
2561
2562 Args:
2563 server (object): Server Object
2564 floating_network (dict): Floating network details
2565
2566 Returns:
2567 free_floating_ip (str): Free floating ip addr
2568
2569 """
2570
2571 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2572
2573 # Randomize
2574 random.shuffle(floating_ips)
2575
2576 return self._find_floating_ip(server, floating_ips, floating_network)
2577
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        For each requested external network: find a free floating IP (or have
        neutron create one) and attach it to the VM port, retrying on the race
        conditions that appear when several RO instances run in HA.

        Args:
            external_network (list): ID of External network
            server (object): Server Object
            created_items (dict): All created items belongs to new VM instance
            vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException: when the floating IP cannot be assigned and the
            network was marked with exit_on_floating_ip_error
        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network
                    )

                    if not free_floating_ip:
                        # No free IP: ask neutron for a new one; it is picked up
                        # by _get_free_floating_ip on the next loop iteration
                        # (the show_floatingip(None) below fails and retries)
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # Optional floating IPs must not abort the whole VM creation
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2666
2667 def _update_port_security_for_vminstance(
2668 self,
2669 no_secured_ports: list,
2670 server: object,
2671 ) -> None:
2672 """Updates the port security according to no_secured_ports list.
2673
2674 Args:
2675 no_secured_ports (list): List of ports that security will be disabled
2676 server (object): Server Object
2677
2678 Raises:
2679 VimConnException
2680
2681 """
2682 # Wait until the VM is active and then disable the port-security
2683 if no_secured_ports:
2684 self.__wait_for_vm(server.id, "ACTIVE")
2685
2686 for port in no_secured_ports:
2687 port_update = {
2688 "port": {"port_security_enabled": False, "security_groups": None}
2689 }
2690
2691 if port[1] == "allow-address-pairs":
2692 port_update = {
2693 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2694 }
2695
2696 try:
2697 self.neutron.update_port(port[0], port_update)
2698
2699 except Exception:
2700 raise vimconn.VimConnException(
2701 "It was not possible to disable port security for port {}".format(
2702 port[0]
2703 )
2704 )
2705
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name (str): name of VM
            description (str): description
            start (bool): indicates if VM must start or boot in pause mode. Ignored
            image_id (str): image uuid
            flavor_id (str): flavor uuid
            affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignore if empty.
            net_list (list): list of interfaces, each one is a dictionary with:
                name: name of network
                net_id: network uuid to connect
                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
                model: interface model, ignored #TODO
                mac_address: used for SR-IOV ifaces #TODO for other types
                use: 'data', 'bridge', 'mgmt'
                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id: filled/added by this function
                floating_ip: True/False (or it can be None)
                port_security: True/False
            cloud_config (dict): (optional) dictionary with:
                key-pairs: (optional) list of strings with the public key to be inserted to the default user
                users: (optional) list of users to be inserted, each item is a dict with:
                    name: (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data: (optional) string is a text script to be passed directly to cloud-init
                config-files: (optional). List of files to be transferred. Each item is a dict with:
                    dest: (mandatory) string with the destination absolute path
                    encoding: (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content : (mandatory) string with the content of the file
                    permissions: (optional) string with file permissions, typically octal notation '0644'
                    owner: (optional) file owner, string with the format 'owner:group'
                boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
                image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size: (mandatory) string with the size of the disk in GB
                vim_id: (optional) should use this existing volume id
            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
            availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
                availability_zone_index is None
                #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.

        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )

        try:
            server = None
            created_items = {}
            net_list_vim = []
            # list of external networks to be connected to instance, later on used to create floating_ip
            external_network = []
            # List of ports with port-security disabled
            no_secured_ports = []
            block_device_mapping = {}
            existing_vim_volumes = []
            server_group_id = None
            # NOTE: historical misspelling kept; passed as nova scheduler_hints below
            scheduller_hints = {}

            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            if disk_list:
                # Prepare disks
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    vm_av_zone=vm_av_zone,
                    block_device_mapping=block_device_mapping,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )
            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            vm_start_time = time.time()

            self._update_port_security_for_vminstance(no_secured_ports, server)

            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items

        except Exception as e:
            server_id = None
            if server:
                server_id = server.id

            try:
                # Rollback: drop the ":keep" protection so every created item,
                # including protected persistent volumes, can be removed
                created_items = self.remove_keep_tag_from_persistent_volumes(
                    created_items
                )

                self.delete_vminstance(server_id, created_items)

            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
2890
2891 @staticmethod
2892 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2893 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2894
2895 Args:
2896 created_items (dict): All created items belongs to VM
2897
2898 Returns:
2899 updated_created_items (dict): Dict which does not include keep flag for volumes.
2900
2901 """
2902 return {
2903 key.replace(":keep", ""): value for (key, value) in created_items.items()
2904 }
2905
2906 def get_vminstance(self, vm_id):
2907 """Returns the VM instance information from VIM"""
2908 return self._find_nova_server(vm_id)
2909
2910 def get_vminstance_console(self, vm_id, console_type="vnc"):
2911 """
2912 Get a console for the virtual machine
2913 Params:
2914 vm_id: uuid of the VM
2915 console_type, can be:
2916 "novnc" (by default), "xvpvnc" for VNC types,
2917 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2918 Returns dict with the console parameters:
2919 protocol: ssh, ftp, http, https, ...
2920 server: usually ip address
2921 port: the http, ssh, ... port
2922 suffix: extra text, e.g. the http path and query string
2923 """
2924 self.logger.debug("Getting VM CONSOLE from VIM")
2925
2926 try:
2927 self._reload_connection()
2928 server = self.nova.servers.find(id=vm_id)
2929
2930 if console_type is None or console_type == "novnc":
2931 console_dict = server.get_vnc_console("novnc")
2932 elif console_type == "xvpvnc":
2933 console_dict = server.get_vnc_console(console_type)
2934 elif console_type == "rdp-html5":
2935 console_dict = server.get_rdp_console(console_type)
2936 elif console_type == "spice-html5":
2937 console_dict = server.get_spice_console(console_type)
2938 else:
2939 raise vimconn.VimConnException(
2940 "console type '{}' not allowed".format(console_type),
2941 http_code=vimconn.HTTP_Bad_Request,
2942 )
2943
2944 console_dict1 = console_dict.get("console")
2945
2946 if console_dict1:
2947 console_url = console_dict1.get("url")
2948
2949 if console_url:
2950 # parse console_url
2951 protocol_index = console_url.find("//")
2952 suffix_index = (
2953 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2954 )
2955 port_index = (
2956 console_url[protocol_index + 2 : suffix_index].find(":")
2957 + protocol_index
2958 + 2
2959 )
2960
2961 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2962 return (
2963 -vimconn.HTTP_Internal_Server_Error,
2964 "Unexpected response from VIM",
2965 )
2966
2967 console_dict = {
2968 "protocol": console_url[0:protocol_index],
2969 "server": console_url[protocol_index + 2 : port_index],
2970 "port": console_url[port_index:suffix_index],
2971 "suffix": console_url[suffix_index + 1 :],
2972 }
2973 protocol_index += 2
2974
2975 return console_dict
2976 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2977 except (
2978 nvExceptions.NotFound,
2979 ksExceptions.ClientException,
2980 nvExceptions.ClientException,
2981 nvExceptions.BadRequest,
2982 ConnectionError,
2983 ) as e:
2984 self._format_exception(e)
2985
2986 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2987 """Neutron delete ports by id.
2988 Args:
2989 k_id (str): Port id in the VIM
2990 """
2991 try:
2992 self.neutron.delete_port(k_id)
2993
2994 except Exception as e:
2995 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2996
2997 def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
2998 """Cinder delete volume by id.
2999 Args:
3000 shared_volume_vim_id (str): ID of shared volume in VIM
3001 """
3002 elapsed_time = 0
3003 try:
3004 while elapsed_time < server_timeout:
3005 vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
3006 if vol_status == "available":
3007 self.cinder.volumes.delete(shared_volume_vim_id)
3008 return True
3009
3010 time.sleep(5)
3011 elapsed_time += 5
3012
3013 if elapsed_time >= server_timeout:
3014 raise vimconn.VimConnException(
3015 "Timeout waiting for volume "
3016 + shared_volume_vim_id
3017 + " to be available",
3018 http_code=vimconn.HTTP_Request_Timeout,
3019 )
3020
3021 except Exception as e:
3022 self.logger.error(
3023 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3024 )
3025 self._format_exception(e)
3026
3027 def _delete_volumes_by_id_wth_cinder(
3028 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
3029 ) -> bool:
3030 """Cinder delete volume by id.
3031 Args:
3032 k (str): Full item name in created_items
3033 k_id (str): ID of floating ip in VIM
3034 volumes_to_hold (list): Volumes not to delete
3035 created_items (dict): All created items belongs to VM
3036 """
3037 try:
3038 if k_id in volumes_to_hold:
3039 return
3040
3041 if self.cinder.volumes.get(k_id).status != "available":
3042 return True
3043
3044 else:
3045 self.cinder.volumes.delete(k_id)
3046 created_items[k] = None
3047
3048 except Exception as e:
3049 self.logger.error(
3050 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3051 )
3052
3053 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
3054 """Neutron delete floating ip by id.
3055 Args:
3056 k (str): Full item name in created_items
3057 k_id (str): ID of floating ip in VIM
3058 created_items (dict): All created items belongs to VM
3059 """
3060 try:
3061 self.neutron.delete_floatingip(k_id)
3062 created_items[k] = None
3063
3064 except Exception as e:
3065 self.logger.error(
3066 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3067 )
3068
3069 @staticmethod
3070 def _get_item_name_id(k: str) -> Tuple[str, str]:
3071 k_item, _, k_id = k.partition(":")
3072 return k_item, k_id
3073
3074 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
3075 """Delete VM ports attached to the networks before deleting virtual machine.
3076 Args:
3077 created_items (dict): All created items belongs to VM
3078 """
3079
3080 for k, v in created_items.items():
3081 if not v: # skip already deleted
3082 continue
3083
3084 try:
3085 k_item, k_id = self._get_item_name_id(k)
3086 if k_item == "port":
3087 self._delete_ports_by_id_wth_neutron(k_id)
3088
3089 except Exception as e:
3090 self.logger.error(
3091 "Error deleting port: {}: {}".format(type(e).__name__, e)
3092 )
3093
3094 def _delete_created_items(
3095 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
3096 ) -> bool:
3097 """Delete Volumes and floating ip if they exist in created_items."""
3098 for k, v in created_items.items():
3099 if not v: # skip already deleted
3100 continue
3101
3102 try:
3103 k_item, k_id = self._get_item_name_id(k)
3104 if k_item == "volume":
3105 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
3106 k, k_id, volumes_to_hold, created_items
3107 )
3108
3109 if unavailable_vol:
3110 keep_waiting = True
3111
3112 elif k_item == "floating_ip":
3113 self._delete_floating_ip_by_id(k, k_id, created_items)
3114
3115 except Exception as e:
3116 self.logger.error("Error deleting {}: {}".format(k, e))
3117
3118 return keep_waiting
3119
3120 @staticmethod
3121 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3122 """Remove the volumes which has key flag from created_items
3123
3124 Args:
3125 created_items (dict): All created items belongs to VM
3126
3127 Returns:
3128 created_items (dict): Persistent volumes eliminated created_items
3129 """
3130 return {
3131 key: value
3132 for (key, value) in created_items.items()
3133 if len(key.split(":")) == 2
3134 }
3135
3136 def delete_vminstance(
3137 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3138 ) -> None:
3139 """Removes a VM instance from VIM. Returns the old identifier.
3140 Args:
3141 vm_id (str): Identifier of VM instance
3142 created_items (dict): All created items belongs to VM
3143 volumes_to_hold (list): Volumes_to_hold
3144 """
3145 if created_items is None:
3146 created_items = {}
3147 if volumes_to_hold is None:
3148 volumes_to_hold = []
3149
3150 try:
3151 created_items = self._extract_items_wth_keep_flag_from_created_items(
3152 created_items
3153 )
3154
3155 self._reload_connection()
3156
3157 # Delete VM ports attached to the networks before the virtual machine
3158 if created_items:
3159 self._delete_vm_ports_attached_to_network(created_items)
3160
3161 if vm_id:
3162 self.nova.servers.delete(vm_id)
3163
3164 # Although having detached, volumes should have in active status before deleting.
3165 # We ensure in this loop
3166 keep_waiting = True
3167 elapsed_time = 0
3168
3169 while keep_waiting and elapsed_time < volume_timeout:
3170 keep_waiting = False
3171
3172 # Delete volumes and floating IP.
3173 keep_waiting = self._delete_created_items(
3174 created_items, volumes_to_hold, keep_waiting
3175 )
3176
3177 if keep_waiting:
3178 time.sleep(1)
3179 elapsed_time += 1
3180
3181 except (
3182 nvExceptions.NotFound,
3183 ksExceptions.ClientException,
3184 nvExceptions.ClientException,
3185 ConnectionError,
3186 ) as e:
3187 self._format_exception(e)
3188
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this Virtual Machine
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                            #  CREATING (on building process), ERROR
                            #  ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
                    vim_net_id:       #network id where this interface is connected
                    vim_interface_id: #interface/port VIM id
                    ip_address:       #null, or text with IPv4, IPv6 address
                    compute_node:     #identification of compute node where PF,VF interface is allocated
                    pci:              #PCI address of the NIC that hosts the PF,VF
                    vlan:             #physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                # Map the VIM status to the MANO vocabulary; anything unknown
                # is reported as OTHER with the raw status in error_msg.
                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # Strip user_data (cloud-init payload) before serializing:
                # it can be large and may contain sensitive content.
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                #  the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            # Best effort: missing floating IP info is not fatal.
                            pass

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    # Interface information is optional; keep the VM entry with
                    # whatever status was collected so far.
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
3331
3332 def action_vminstance(self, vm_id, action_dict, created_items={}):
3333 """Send and action over a VM instance from VIM
3334 Returns None or the console dict if the action was successfully sent to the VIM
3335 """
3336 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3337
3338 try:
3339 self._reload_connection()
3340 server = self.nova.servers.find(id=vm_id)
3341
3342 if "start" in action_dict:
3343 if action_dict["start"] == "rebuild":
3344 server.rebuild()
3345 else:
3346 if server.status == "PAUSED":
3347 server.unpause()
3348 elif server.status == "SUSPENDED":
3349 server.resume()
3350 elif server.status == "SHUTOFF":
3351 server.start()
3352 else:
3353 self.logger.debug(
3354 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3355 )
3356 raise vimconn.VimConnException(
3357 "Cannot 'start' instance while it is in active state",
3358 http_code=vimconn.HTTP_Bad_Request,
3359 )
3360
3361 elif "pause" in action_dict:
3362 server.pause()
3363 elif "resume" in action_dict:
3364 server.resume()
3365 elif "shutoff" in action_dict or "shutdown" in action_dict:
3366 self.logger.debug("server status %s", server.status)
3367 if server.status == "ACTIVE":
3368 server.stop()
3369 else:
3370 self.logger.debug("ERROR: VM is not in Active state")
3371 raise vimconn.VimConnException(
3372 "VM is not in active state, stop operation is not allowed",
3373 http_code=vimconn.HTTP_Bad_Request,
3374 )
3375 elif "forceOff" in action_dict:
3376 server.stop() # TODO
3377 elif "terminate" in action_dict:
3378 server.delete()
3379 elif "createImage" in action_dict:
3380 server.create_image()
3381 # "path":path_schema,
3382 # "description":description_schema,
3383 # "name":name_schema,
3384 # "metadata":metadata_schema,
3385 # "imageRef": id_schema,
3386 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3387 elif "rebuild" in action_dict:
3388 server.rebuild(server.image["id"])
3389 elif "reboot" in action_dict:
3390 server.reboot() # reboot_type="SOFT"
3391 elif "console" in action_dict:
3392 console_type = action_dict["console"]
3393
3394 if console_type is None or console_type == "novnc":
3395 console_dict = server.get_vnc_console("novnc")
3396 elif console_type == "xvpvnc":
3397 console_dict = server.get_vnc_console(console_type)
3398 elif console_type == "rdp-html5":
3399 console_dict = server.get_rdp_console(console_type)
3400 elif console_type == "spice-html5":
3401 console_dict = server.get_spice_console(console_type)
3402 else:
3403 raise vimconn.VimConnException(
3404 "console type '{}' not allowed".format(console_type),
3405 http_code=vimconn.HTTP_Bad_Request,
3406 )
3407
3408 try:
3409 console_url = console_dict["console"]["url"]
3410 # parse console_url
3411 protocol_index = console_url.find("//")
3412 suffix_index = (
3413 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3414 )
3415 port_index = (
3416 console_url[protocol_index + 2 : suffix_index].find(":")
3417 + protocol_index
3418 + 2
3419 )
3420
3421 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3422 raise vimconn.VimConnException(
3423 "Unexpected response from VIM " + str(console_dict)
3424 )
3425
3426 console_dict2 = {
3427 "protocol": console_url[0:protocol_index],
3428 "server": console_url[protocol_index + 2 : port_index],
3429 "port": int(console_url[port_index + 1 : suffix_index]),
3430 "suffix": console_url[suffix_index + 1 :],
3431 }
3432
3433 return console_dict2
3434 except Exception:
3435 raise vimconn.VimConnException(
3436 "Unexpected response from VIM " + str(console_dict)
3437 )
3438
3439 return None
3440 except (
3441 ksExceptions.ClientException,
3442 nvExceptions.ClientException,
3443 nvExceptions.NotFound,
3444 ConnectionError,
3445 ) as e:
3446 self._format_exception(e)
3447 # TODO insert exception vimconn.HTTP_Unauthorized
3448
3449 # ###### VIO Specific Changes #########
3450 def _generate_vlanID(self):
3451 """
3452 Method to get unused vlanID
3453 Args:
3454 None
3455 Returns:
3456 vlanID
3457 """
3458 # Get used VLAN IDs
3459 usedVlanIDs = []
3460 networks = self.get_network_list()
3461
3462 for net in networks:
3463 if net.get("provider:segmentation_id"):
3464 usedVlanIDs.append(net.get("provider:segmentation_id"))
3465
3466 used_vlanIDs = set(usedVlanIDs)
3467
3468 # find unused VLAN ID
3469 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3470 try:
3471 start_vlanid, end_vlanid = map(
3472 int, vlanID_range.replace(" ", "").split("-")
3473 )
3474
3475 for vlanID in range(start_vlanid, end_vlanid + 1):
3476 if vlanID not in used_vlanIDs:
3477 return vlanID
3478 except Exception as exp:
3479 raise vimconn.VimConnException(
3480 "Exception {} occurred while generating VLAN ID.".format(exp)
3481 )
3482 else:
3483 raise vimconn.VimConnConflictException(
3484 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3485 self.config.get("dataplane_net_vlan_range")
3486 )
3487 )
3488
3489 def _generate_multisegment_vlanID(self):
3490 """
3491 Method to get unused vlanID
3492 Args:
3493 None
3494 Returns:
3495 vlanID
3496 """
3497 # Get used VLAN IDs
3498 usedVlanIDs = []
3499 networks = self.get_network_list()
3500 for net in networks:
3501 if net.get("provider:network_type") == "vlan" and net.get(
3502 "provider:segmentation_id"
3503 ):
3504 usedVlanIDs.append(net.get("provider:segmentation_id"))
3505 elif net.get("segments"):
3506 for segment in net.get("segments"):
3507 if segment.get("provider:network_type") == "vlan" and segment.get(
3508 "provider:segmentation_id"
3509 ):
3510 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3511
3512 used_vlanIDs = set(usedVlanIDs)
3513
3514 # find unused VLAN ID
3515 for vlanID_range in self.config.get("multisegment_vlan_range"):
3516 try:
3517 start_vlanid, end_vlanid = map(
3518 int, vlanID_range.replace(" ", "").split("-")
3519 )
3520
3521 for vlanID in range(start_vlanid, end_vlanid + 1):
3522 if vlanID not in used_vlanIDs:
3523 return vlanID
3524 except Exception as exp:
3525 raise vimconn.VimConnException(
3526 "Exception {} occurred while generating VLAN ID.".format(exp)
3527 )
3528 else:
3529 raise vimconn.VimConnConflictException(
3530 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3531 self.config.get("multisegment_vlan_range")
3532 )
3533 )
3534
3535 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3536 """
3537 Method to validate user given vlanID ranges
3538 Args: None
3539 Returns: None
3540 """
3541 for vlanID_range in input_vlan_range:
3542 vlan_range = vlanID_range.replace(" ", "")
3543 # validate format
3544 vlanID_pattern = r"(\d)*-(\d)*$"
3545 match_obj = re.match(vlanID_pattern, vlan_range)
3546 if not match_obj:
3547 raise vimconn.VimConnConflictException(
3548 "Invalid VLAN range for {}: {}.You must provide "
3549 "'{}' in format [start_ID - end_ID].".format(
3550 text_vlan_range, vlanID_range, text_vlan_range
3551 )
3552 )
3553
3554 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3555 if start_vlanid <= 0:
3556 raise vimconn.VimConnConflictException(
3557 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3558 "networks valid IDs are 1 to 4094 ".format(
3559 text_vlan_range, vlanID_range
3560 )
3561 )
3562
3563 if end_vlanid > 4094:
3564 raise vimconn.VimConnConflictException(
3565 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3566 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3567 text_vlan_range, vlanID_range
3568 )
3569 )
3570
3571 if start_vlanid > end_vlanid:
3572 raise vimconn.VimConnConflictException(
3573 "Invalid VLAN range for {}: {}. You must provide '{}'"
3574 " in format start_ID - end_ID and start_ID < end_ID ".format(
3575 text_vlan_range, vlanID_range, text_vlan_range
3576 )
3577 )
3578
3579 def get_hosts_info(self):
3580 """Get the information of deployed hosts
3581 Returns the hosts content"""
3582 if self.debug:
3583 print("osconnector: Getting Host info from VIM")
3584
3585 try:
3586 h_list = []
3587 self._reload_connection()
3588 hypervisors = self.nova.hypervisors.list()
3589
3590 for hype in hypervisors:
3591 h_list.append(hype.to_dict())
3592
3593 return 1, {"hosts": h_list}
3594 except nvExceptions.NotFound as e:
3595 error_value = -vimconn.HTTP_Not_Found
3596 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3597 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3598 error_value = -vimconn.HTTP_Bad_Request
3599 error_text = (
3600 type(e).__name__
3601 + ": "
3602 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3603 )
3604
3605 # TODO insert exception vimconn.HTTP_Unauthorized
3606 # if reaching here is because an exception
3607 self.logger.debug("get_hosts_info " + error_text)
3608
3609 return error_value, error_text
3610
3611 def get_hosts(self, vim_tenant):
3612 """Get the hosts and deployed instances
3613 Returns the hosts content"""
3614 r, hype_dict = self.get_hosts_info()
3615
3616 if r < 0:
3617 return r, hype_dict
3618
3619 hypervisors = hype_dict["hosts"]
3620
3621 try:
3622 servers = self.nova.servers.list()
3623 for hype in hypervisors:
3624 for server in servers:
3625 if (
3626 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3627 == hype["hypervisor_hostname"]
3628 ):
3629 if "vm" in hype:
3630 hype["vm"].append(server.id)
3631 else:
3632 hype["vm"] = [server.id]
3633
3634 return 1, hype_dict
3635 except nvExceptions.NotFound as e:
3636 error_value = -vimconn.HTTP_Not_Found
3637 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3638 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3639 error_value = -vimconn.HTTP_Bad_Request
3640 error_text = (
3641 type(e).__name__
3642 + ": "
3643 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3644 )
3645
3646 # TODO insert exception vimconn.HTTP_Unauthorized
3647 # if reaching here is because an exception
3648 self.logger.debug("get_hosts " + error_text)
3649
3650 return error_value, error_text
3651
3652 def new_affinity_group(self, affinity_group_data):
3653 """Adds a server group to VIM
3654 affinity_group_data contains a dictionary with information, keys:
3655 name: name in VIM for the server group
3656 type: affinity or anti-affinity
3657 scope: Only nfvi-node allowed
3658 Returns the server group identifier"""
3659 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3660
3661 try:
3662 name = affinity_group_data["name"]
3663 policy = affinity_group_data["type"]
3664
3665 self._reload_connection()
3666 new_server_group = self.nova.server_groups.create(name, policy)
3667
3668 return new_server_group.id
3669 except (
3670 ksExceptions.ClientException,
3671 nvExceptions.ClientException,
3672 ConnectionError,
3673 KeyError,
3674 ) as e:
3675 self._format_exception(e)
3676
3677 def get_affinity_group(self, affinity_group_id):
3678 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3679 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3680 try:
3681 self._reload_connection()
3682 server_group = self.nova.server_groups.find(id=affinity_group_id)
3683
3684 return server_group.to_dict()
3685 except (
3686 nvExceptions.NotFound,
3687 nvExceptions.ClientException,
3688 ksExceptions.ClientException,
3689 ConnectionError,
3690 ) as e:
3691 self._format_exception(e)
3692
3693 def delete_affinity_group(self, affinity_group_id):
3694 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3695 self.logger.debug("Getting server group '%s'", affinity_group_id)
3696 try:
3697 self._reload_connection()
3698 self.nova.server_groups.delete(affinity_group_id)
3699
3700 return affinity_group_id
3701 except (
3702 nvExceptions.NotFound,
3703 ksExceptions.ClientException,
3704 nvExceptions.ClientException,
3705 ConnectionError,
3706 ) as e:
3707 self._format_exception(e)
3708
3709 def get_vdu_state(self, vm_id, host_is_required=False) -> list:
3710 """Getting the state of a VDU.
3711 Args:
3712 vm_id (str): ID of an instance
3713 host_is_required (Boolean): If the VIM account is non-admin, host info does not appear in server_dict
3714 and if this is set to True, it raises KeyError.
3715 Returns:
3716 vdu_data (list): VDU details including state, flavor, host_info, AZ
3717 """
3718 self.logger.debug("Getting the status of VM")
3719 self.logger.debug("VIM VM ID %s", vm_id)
3720 try:
3721 self._reload_connection()
3722 server_dict = self._find_nova_server(vm_id)
3723 srv_attr = "OS-EXT-SRV-ATTR:host"
3724 host_info = (
3725 server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
3726 )
3727 vdu_data = [
3728 server_dict["status"],
3729 server_dict["flavor"]["id"],
3730 host_info,
3731 server_dict["OS-EXT-AZ:availability_zone"],
3732 ]
3733 self.logger.debug("vdu_data %s", vdu_data)
3734 return vdu_data
3735
3736 except Exception as e:
3737 self._format_exception(e)
3738
3739 def check_compute_availability(self, host, server_flavor_details):
3740 self._reload_connection()
3741 hypervisor_search = self.nova.hypervisors.search(
3742 hypervisor_match=host, servers=True
3743 )
3744 for hypervisor in hypervisor_search:
3745 hypervisor_id = hypervisor.to_dict()["id"]
3746 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
3747 hypervisor_dict = hypervisor_details.to_dict()
3748 hypervisor_temp = json.dumps(hypervisor_dict)
3749 hypervisor_json = json.loads(hypervisor_temp)
3750 resources_available = [
3751 hypervisor_json["free_ram_mb"],
3752 hypervisor_json["disk_available_least"],
3753 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
3754 ]
3755 compute_available = all(
3756 x > y for x, y in zip(resources_available, server_flavor_details)
3757 )
3758 if compute_available:
3759 return host
3760
3761 def check_availability_zone(
3762 self, old_az, server_flavor_details, old_host, host=None
3763 ):
3764 self._reload_connection()
3765 az_check = {"zone_check": False, "compute_availability": None}
3766 aggregates_list = self.nova.aggregates.list()
3767 for aggregate in aggregates_list:
3768 aggregate_details = aggregate.to_dict()
3769 aggregate_temp = json.dumps(aggregate_details)
3770 aggregate_json = json.loads(aggregate_temp)
3771 if aggregate_json["availability_zone"] == old_az:
3772 hosts_list = aggregate_json["hosts"]
3773 if host is not None:
3774 if host in hosts_list:
3775 az_check["zone_check"] = True
3776 available_compute_id = self.check_compute_availability(
3777 host, server_flavor_details
3778 )
3779 if available_compute_id is not None:
3780 az_check["compute_availability"] = available_compute_id
3781 else:
3782 for check_host in hosts_list:
3783 if check_host != old_host:
3784 available_compute_id = self.check_compute_availability(
3785 check_host, server_flavor_details
3786 )
3787 if available_compute_id is not None:
3788 az_check["zone_check"] = True
3789 az_check["compute_availability"] = available_compute_id
3790 break
3791 else:
3792 az_check["zone_check"] = True
3793 return az_check
3794
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu
        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to

        Returns ("MIGRATING", target_host) on success; on failure the raised
        VimConnException is routed through _format_exception.
        """
        self._reload_connection()
        vm_state = False
        # [status, flavor_id, hypervisor_host, availability_zone]
        instance_state = self.get_vdu_state(vm_id, host_is_required=True)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]
        try:
            # Resource demand of the instance: [ram, disk, vcpus].
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]
            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            # Find a target host within the same availability zone with
            # enough free resources (explicit compute_host is validated too).
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            if available_compute_id is not None:
                # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                )
                state = "MIGRATING"
                changed_compute_host = ""
                if state == "MIGRATING":
                    # Wait for the instance to become ACTIVE again, then
                    # re-read its hypervisor to verify the move happened.
                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                    changed_compute_host = self.get_vdu_state(
                        vm_id, host_is_required=True
                    )[2]
                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )
                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                        available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
3879
    def resize_instance(self, vm_id, new_flavor_id):
        """
        For resizing the vm based on the given
        flavor details
        param:
            vm_id : ID of an instance
            new_flavor_id : Flavor id to be resized
        Return the status of a resized instance

        The instance must be ACTIVE or SHUTOFF, and the new flavor's disk
        must not be smaller than the current one (Nova cannot shrink disks).
        """
        self._reload_connection()
        self.logger.debug("resize the flavor of an instance")
        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
        # Compare disk sizes up front: resizing to a smaller disk is rejected.
        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
        try:
            if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
                if old_flavor_disk > new_flavor_disk:
                    raise nvExceptions.BadRequest(
                        400,
                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                    )
                else:
                    # Start the resize, wait for VERIFY_RESIZE, then confirm.
                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
                    if vm_state:
                        instance_resized_status = self.confirm_resize(vm_id)
                        return instance_resized_status
                    else:
                        raise nvExceptions.BadRequest(
                            409,
                            message="Cannot 'resize' vm_state is in ERROR",
                        )

            else:
                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.BadRequest(
                    409,
                    message="Cannot 'resize' instance while it is in vm_state resized",
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
3925
    def confirm_resize(self, vm_id):
        """
        Confirm the resize of an instance
        param:
            vm_id: ID of an instance

        Returns the instance status after confirmation (re-read from the VIM).
        """
        self._reload_connection()
        self.nova.servers.confirm_resize(server=vm_id)
        # If the confirmation has not landed yet, wait until the instance
        # leaves VERIFY_RESIZE and becomes ACTIVE.
        if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
            self.__wait_for_vm(vm_id, "ACTIVE")
        instance_status = self.get_vdu_state(vm_id)[0]
        return instance_status
3938
    def get_monitoring_data(self):
        """Collect all servers and all ports from the VIM for monitoring.

        Returns:
            tuple: (list of nova servers, dict of neutron ports)

        Raises:
            VimConnException: when the VIM cannot be queried.
        """
        try:
            self.logger.debug("Getting servers and ports data from Openstack VIMs.")
            self._reload_connection()
            all_servers = self.nova.servers.list(detailed=True)
            try:
                # Newer Nova embeds only the flavor's original_name; resolve
                # it back to the flavor id expected by consumers.
                for server in all_servers:
                    if server.flavor.get("original_name"):
                        server.flavor["id"] = self.nova.flavors.find(
                            name=server.flavor["original_name"]
                        ).id
            except nClient.exceptions.NotFound as e:
                # NOTE(review): relies on the exception exposing `.message`
                # (novaclient-specific) — confirm against the client version.
                self.logger.warning(str(e.message))
            all_ports = self.neutron.list_ports()
            return all_servers, all_ports
        except (
            vimconn.VimConnException,
            vimconn.VimConnNotFoundException,
            vimconn.VimConnConnectionException,
        ) as e:
            raise vimconn.VimConnException(
                f"Exception in monitoring while getting VMs and ports status: {str(e)}"
            )