Feature 10979: Static IPv6 Dual Stack Assignment
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
59 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 __date__ = "$22-sep-2017 23:59:59$"
61
62 """contain the openstack virtual machine status to openmano status"""
63 vmStatus2manoFormat = {
64 "ACTIVE": "ACTIVE",
65 "PAUSED": "PAUSED",
66 "SUSPENDED": "SUSPENDED",
67 "SHUTOFF": "INACTIVE",
68 "BUILD": "BUILD",
69 "ERROR": "ERROR",
70 "DELETED": "DELETED",
71 }
72 netStatus2manoFormat = {
73 "ACTIVE": "ACTIVE",
74 "PAUSED": "PAUSED",
75 "INACTIVE": "INACTIVE",
76 "BUILD": "BUILD",
77 "ERROR": "ERROR",
78 "DELETED": "DELETED",
79 }
80
81 supportedClassificationTypes = ["legacy_flow_classifier"]
82
83 # global var to have a timeout creating and deleting volumes
84 volume_timeout = 1800
85 server_timeout = 1800
86
87
88 class SafeDumper(yaml.SafeDumper):
89 def represent_data(self, data):
90 # Openstack APIs use custom subclasses of dict and YAML safe dumper
91 # is designed to not handle that (reference issue 142 of pyyaml)
92 if isinstance(data, dict) and data.__class__ != dict:
93 # A simple solution is to convert those items back to dicts
94 data = dict(data.items())
95
96 return super(SafeDumper, self).represent_data(data)
97
98
99 class vimconnector(vimconn.VimConnector):
100 def __init__(
101 self,
102 uuid,
103 name,
104 tenant_id,
105 tenant_name,
106 url,
107 url_admin=None,
108 user=None,
109 passwd=None,
110 log_level=None,
111 config={},
112 persistent_info={},
113 ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
117 """
118 api_version = config.get("APIversion")
119
120 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124 )
125
126 vim_type = config.get("vim_type")
127
128 if vim_type and vim_type not in ("vio", "VIO"):
129 raise vimconn.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132 )
133
134 if config.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self._validate_vlan_ranges(
137 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138 )
139
140 if config.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self._validate_vlan_ranges(
143 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144 )
145
146 vimconn.VimConnector.__init__(
147 self,
148 uuid,
149 name,
150 tenant_id,
151 tenant_name,
152 url,
153 url_admin,
154 user,
155 passwd,
156 log_level,
157 config,
158 )
159
160 if self.config.get("insecure") and self.config.get("ca_cert"):
161 raise vimconn.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
163 )
164
165 self.verify = True
166
167 if self.config.get("insecure"):
168 self.verify = False
169
170 if self.config.get("ca_cert"):
171 self.verify = self.config.get("ca_cert")
172
173 if not url:
174 raise TypeError("url param can not be NoneType")
175
176 self.persistent_info = persistent_info
177 self.availability_zone = persistent_info.get("availability_zone", None)
178 self.session = persistent_info.get("session", {"reload_client": True})
179 self.my_tenant_id = self.session.get("my_tenant_id")
180 self.nova = self.session.get("nova")
181 self.neutron = self.session.get("neutron")
182 self.cinder = self.session.get("cinder")
183 self.glance = self.session.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self.keystone = self.session.get("keystone")
186 self.api_version3 = self.session.get("api_version3")
187 self.vim_type = self.config.get("vim_type")
188
189 if self.vim_type:
190 self.vim_type = self.vim_type.upper()
191
192 if self.config.get("use_internal_endpoint"):
193 self.endpoint_type = "internalURL"
194 else:
195 self.endpoint_type = None
196
197 logging.getLogger("urllib3").setLevel(logging.WARNING)
198 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 logging.getLogger("novaclient").setLevel(logging.WARNING)
200 self.logger = logging.getLogger("ro.vim.openstack")
201
202 # allow security_groups to be a list or a single string
203 if isinstance(self.config.get("security_groups"), str):
204 self.config["security_groups"] = [self.config["security_groups"]]
205
206 self.security_groups_id = None
207
208 # ###### VIO Specific Changes #########
209 if self.vim_type == "VIO":
210 self.logger = logging.getLogger("ro.vim.vio")
211
212 if log_level:
213 self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individuals parameters.
217 Throw KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
227 Throw KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
260 def _reload_connection(self):
261 """Called before any operation, it check if credentials has changed
262 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
263 """
264 # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
265 if self.session["reload_client"]:
266 if self.config.get("APIversion"):
267 self.api_version3 = (
268 self.config["APIversion"] == "v3.3"
269 or self.config["APIversion"] == "3"
270 )
271 else: # get from ending auth_url that end with v3 or with v2.0
272 self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
273 "/v3/"
274 )
275
276 self.session["api_version3"] = self.api_version3
277
278 if self.api_version3:
279 if self.config.get("project_domain_id") or self.config.get(
280 "project_domain_name"
281 ):
282 project_domain_id_default = None
283 else:
284 project_domain_id_default = "default"
285
286 if self.config.get("user_domain_id") or self.config.get(
287 "user_domain_name"
288 ):
289 user_domain_id_default = None
290 else:
291 user_domain_id_default = "default"
292 auth = v3.Password(
293 auth_url=self.url,
294 username=self.user,
295 password=self.passwd,
296 project_name=self.tenant_name,
297 project_id=self.tenant_id,
298 project_domain_id=self.config.get(
299 "project_domain_id", project_domain_id_default
300 ),
301 user_domain_id=self.config.get(
302 "user_domain_id", user_domain_id_default
303 ),
304 project_domain_name=self.config.get("project_domain_name"),
305 user_domain_name=self.config.get("user_domain_name"),
306 )
307 else:
308 auth = v2.Password(
309 auth_url=self.url,
310 username=self.user,
311 password=self.passwd,
312 tenant_name=self.tenant_name,
313 tenant_id=self.tenant_id,
314 )
315
316 sess = session.Session(auth=auth, verify=self.verify)
317 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318 # Titanium cloud and StarlingX
319 region_name = self.config.get("region_name")
320
321 if self.api_version3:
322 self.keystone = ksClient_v3.Client(
323 session=sess,
324 endpoint_type=self.endpoint_type,
325 region_name=region_name,
326 )
327 else:
328 self.keystone = ksClient_v2.Client(
329 session=sess, endpoint_type=self.endpoint_type
330 )
331
332 self.session["keystone"] = self.keystone
333 # In order to enable microversion functionality an explicit microversion must be specified in "config".
334 # This implementation approach is due to the warning message in
335 # https://developer.openstack.org/api-guide/compute/microversions.html
336 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337 # always require an specific microversion.
338 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
339 version = self.config.get("microversion")
340
341 if not version:
342 version = "2.1"
343
344 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345 # Titanium cloud and StarlingX
346 self.nova = self.session["nova"] = nClient.Client(
347 str(version),
348 session=sess,
349 endpoint_type=self.endpoint_type,
350 region_name=region_name,
351 )
352 self.neutron = self.session["neutron"] = neClient.Client(
353 "2.0",
354 session=sess,
355 endpoint_type=self.endpoint_type,
356 region_name=region_name,
357 )
358
359 if sess.get_all_version_data(service_type="volumev2"):
360 self.cinder = self.session["cinder"] = cClient.Client(
361 2,
362 session=sess,
363 endpoint_type=self.endpoint_type,
364 region_name=region_name,
365 )
366 else:
367 self.cinder = self.session["cinder"] = cClient.Client(
368 3,
369 session=sess,
370 endpoint_type=self.endpoint_type,
371 region_name=region_name,
372 )
373
374 try:
375 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
376 except Exception:
377 self.logger.error("Cannot get project_id from session", exc_info=True)
378
379 if self.endpoint_type == "internalURL":
380 glance_service_id = self.keystone.services.list(name="glance")[0].id
381 glance_endpoint = self.keystone.endpoints.list(
382 glance_service_id, interface="internal"
383 )[0].url
384 else:
385 glance_endpoint = None
386
387 self.glance = self.session["glance"] = glClient.Client(
388 2, session=sess, endpoint=glance_endpoint
389 )
390 # using version 1 of glance client in new_image()
391 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
392 # endpoint=glance_endpoint)
393 self.session["reload_client"] = False
394 self.persistent_info["session"] = self.session
395 # add availablity zone info inside self.persistent_info
396 self._set_availablity_zones()
397 self.persistent_info["availability_zone"] = self.availability_zone
398 # force to get again security_groups_ids next time they are needed
399 self.security_groups_id = None
400
401 def __net_os2mano(self, net_list_dict):
402 """Transform the net openstack format to mano format
403 net_list_dict can be a list of dict or a single dict"""
404 if type(net_list_dict) is dict:
405 net_list_ = (net_list_dict,)
406 elif type(net_list_dict) is list:
407 net_list_ = net_list_dict
408 else:
409 raise TypeError("param net_list_dict must be a list or a dictionary")
410 for net in net_list_:
411 if net.get("provider:network_type") == "vlan":
412 net["type"] = "data"
413 else:
414 net["type"] = "bridge"
415
416 def __classification_os2mano(self, class_list_dict):
417 """Transform the openstack format (Flow Classifier) to mano format
418 (Classification) class_list_dict can be a list of dict or a single dict
419 """
420 if isinstance(class_list_dict, dict):
421 class_list_ = [class_list_dict]
422 elif isinstance(class_list_dict, list):
423 class_list_ = class_list_dict
424 else:
425 raise TypeError("param class_list_dict must be a list or a dictionary")
426 for classification in class_list_:
427 id = classification.pop("id")
428 name = classification.pop("name")
429 description = classification.pop("description")
430 project_id = classification.pop("project_id")
431 tenant_id = classification.pop("tenant_id")
432 original_classification = copy.deepcopy(classification)
433 classification.clear()
434 classification["ctype"] = "legacy_flow_classifier"
435 classification["definition"] = original_classification
436 classification["id"] = id
437 classification["name"] = name
438 classification["description"] = description
439 classification["project_id"] = project_id
440 classification["tenant_id"] = tenant_id
441
442 def __sfi_os2mano(self, sfi_list_dict):
443 """Transform the openstack format (Port Pair) to mano format (SFI)
444 sfi_list_dict can be a list of dict or a single dict
445 """
446 if isinstance(sfi_list_dict, dict):
447 sfi_list_ = [sfi_list_dict]
448 elif isinstance(sfi_list_dict, list):
449 sfi_list_ = sfi_list_dict
450 else:
451 raise TypeError("param sfi_list_dict must be a list or a dictionary")
452
453 for sfi in sfi_list_:
454 sfi["ingress_ports"] = []
455 sfi["egress_ports"] = []
456
457 if sfi.get("ingress"):
458 sfi["ingress_ports"].append(sfi["ingress"])
459
460 if sfi.get("egress"):
461 sfi["egress_ports"].append(sfi["egress"])
462
463 del sfi["ingress"]
464 del sfi["egress"]
465 params = sfi.get("service_function_parameters")
466 sfc_encap = False
467
468 if params:
469 correlation = params.get("correlation")
470
471 if correlation:
472 sfc_encap = True
473
474 sfi["sfc_encap"] = sfc_encap
475 del sfi["service_function_parameters"]
476
477 def __sf_os2mano(self, sf_list_dict):
478 """Transform the openstack format (Port Pair Group) to mano format (SF)
479 sf_list_dict can be a list of dict or a single dict
480 """
481 if isinstance(sf_list_dict, dict):
482 sf_list_ = [sf_list_dict]
483 elif isinstance(sf_list_dict, list):
484 sf_list_ = sf_list_dict
485 else:
486 raise TypeError("param sf_list_dict must be a list or a dictionary")
487
488 for sf in sf_list_:
489 del sf["port_pair_group_parameters"]
490 sf["sfis"] = sf["port_pairs"]
491 del sf["port_pairs"]
492
493 def __sfp_os2mano(self, sfp_list_dict):
494 """Transform the openstack format (Port Chain) to mano format (SFP)
495 sfp_list_dict can be a list of dict or a single dict
496 """
497 if isinstance(sfp_list_dict, dict):
498 sfp_list_ = [sfp_list_dict]
499 elif isinstance(sfp_list_dict, list):
500 sfp_list_ = sfp_list_dict
501 else:
502 raise TypeError("param sfp_list_dict must be a list or a dictionary")
503
504 for sfp in sfp_list_:
505 params = sfp.pop("chain_parameters")
506 sfc_encap = False
507
508 if params:
509 correlation = params.get("correlation")
510
511 if correlation:
512 sfc_encap = True
513
514 sfp["sfc_encap"] = sfc_encap
515 sfp["spi"] = sfp.pop("chain_id")
516 sfp["classifications"] = sfp.pop("flow_classifiers")
517 sfp["service_functions"] = sfp.pop("port_pair_groups")
518
519 # placeholder for now; read TODO note below
520 def _validate_classification(self, type, definition):
521 # only legacy_flow_classifier Type is supported at this point
522 return True
523 # TODO(igordcard): this method should be an abstract method of an
524 # abstract Classification class to be implemented by the specific
525 # Types. Also, abstract vimconnector should call the validation
526 # method before the implemented VIM connectors are called.
527
528 def _format_exception(self, exception):
529 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
530 message_error = str(exception)
531 tip = ""
532
533 if isinstance(
534 exception,
535 (
536 neExceptions.NetworkNotFoundClient,
537 nvExceptions.NotFound,
538 ksExceptions.NotFound,
539 gl1Exceptions.HTTPNotFound,
540 ),
541 ):
542 raise vimconn.VimConnNotFoundException(
543 type(exception).__name__ + ": " + message_error
544 )
545 elif isinstance(
546 exception,
547 (
548 HTTPException,
549 gl1Exceptions.HTTPException,
550 gl1Exceptions.CommunicationError,
551 ConnectionError,
552 ksExceptions.ConnectionError,
553 neExceptions.ConnectionFailed,
554 ),
555 ):
556 if type(exception).__name__ == "SSLError":
557 tip = " (maybe option 'insecure' must be added to the VIM)"
558
559 raise vimconn.VimConnConnectionException(
560 "Invalid URL or credentials{}: {}".format(tip, message_error)
561 )
562 elif isinstance(
563 exception,
564 (
565 KeyError,
566 nvExceptions.BadRequest,
567 ksExceptions.BadRequest,
568 ),
569 ):
570 raise vimconn.VimConnException(
571 type(exception).__name__ + ": " + message_error
572 )
573 elif isinstance(
574 exception,
575 (
576 nvExceptions.ClientException,
577 ksExceptions.ClientException,
578 neExceptions.NeutronException,
579 ),
580 ):
581 raise vimconn.VimConnUnexpectedResponse(
582 type(exception).__name__ + ": " + message_error
583 )
584 elif isinstance(exception, nvExceptions.Conflict):
585 raise vimconn.VimConnConflictException(
586 type(exception).__name__ + ": " + message_error
587 )
588 elif isinstance(exception, vimconn.VimConnException):
589 raise exception
590 else: # ()
591 self.logger.error("General Exception " + message_error, exc_info=True)
592
593 raise vimconn.VimConnConnectionException(
594 type(exception).__name__ + ": " + message_error
595 )
596
597 def _get_ids_from_name(self):
598 """
599 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
600 :return: None
601 """
602 # get tenant_id if only tenant_name is supplied
603 self._reload_connection()
604
605 if not self.my_tenant_id:
606 raise vimconn.VimConnConnectionException(
607 "Error getting tenant information from name={} id={}".format(
608 self.tenant_name, self.tenant_id
609 )
610 )
611
612 if self.config.get("security_groups") and not self.security_groups_id:
613 # convert from name to id
614 neutron_sg_list = self.neutron.list_security_groups(
615 tenant_id=self.my_tenant_id
616 )["security_groups"]
617
618 self.security_groups_id = []
619 for sg in self.config.get("security_groups"):
620 for neutron_sg in neutron_sg_list:
621 if sg in (neutron_sg["id"], neutron_sg["name"]):
622 self.security_groups_id.append(neutron_sg["id"])
623 break
624 else:
625 self.security_groups_id = None
626
627 raise vimconn.VimConnConnectionException(
628 "Not found security group {} for this tenant".format(sg)
629 )
630
631 def check_vim_connectivity(self):
632 # just get network list to check connectivity and credentials
633 self.get_network_list(filter_dict={})
634
635 def get_tenant_list(self, filter_dict={}):
636 """Obtain tenants of VIM
637 filter_dict can contain the following keys:
638 name: filter by tenant name
639 id: filter by tenant uuid/id
640 <other VIM specific>
641 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
642 """
643 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
644
645 try:
646 self._reload_connection()
647
648 if self.api_version3:
649 project_class_list = self.keystone.projects.list(
650 name=filter_dict.get("name")
651 )
652 else:
653 project_class_list = self.keystone.tenants.findall(**filter_dict)
654
655 project_list = []
656
657 for project in project_class_list:
658 if filter_dict.get("id") and filter_dict["id"] != project.id:
659 continue
660
661 project_list.append(project.to_dict())
662
663 return project_list
664 except (
665 ksExceptions.ConnectionError,
666 ksExceptions.ClientException,
667 ConnectionError,
668 ) as e:
669 self._format_exception(e)
670
671 def new_tenant(self, tenant_name, tenant_description):
672 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
673 self.logger.debug("Adding a new tenant name: %s", tenant_name)
674
675 try:
676 self._reload_connection()
677
678 if self.api_version3:
679 project = self.keystone.projects.create(
680 tenant_name,
681 self.config.get("project_domain_id", "default"),
682 description=tenant_description,
683 is_domain=False,
684 )
685 else:
686 project = self.keystone.tenants.create(tenant_name, tenant_description)
687
688 return project.id
689 except (
690 ksExceptions.ConnectionError,
691 ksExceptions.ClientException,
692 ksExceptions.BadRequest,
693 ConnectionError,
694 ) as e:
695 self._format_exception(e)
696
697 def delete_tenant(self, tenant_id):
698 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
699 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
700
701 try:
702 self._reload_connection()
703
704 if self.api_version3:
705 self.keystone.projects.delete(tenant_id)
706 else:
707 self.keystone.tenants.delete(tenant_id)
708
709 return tenant_id
710 except (
711 ksExceptions.ConnectionError,
712 ksExceptions.ClientException,
713 ksExceptions.NotFound,
714 ConnectionError,
715 ) as e:
716 self._format_exception(e)
717
718 def new_network(
719 self,
720 net_name,
721 net_type,
722 ip_profile=None,
723 shared=False,
724 provider_network_profile=None,
725 ):
726 """Adds a tenant network to VIM
727 Params:
728 'net_name': name of the network
729 'net_type': one of:
730 'bridge': overlay isolated network
731 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
732 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
733 'ip_profile': is a dict containing the IP parameters of the network
734 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
735 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
736 'gateway_address': (Optional) ip_schema, that is X.X.X.X
737 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
738 'dhcp_enabled': True or False
739 'dhcp_start_address': ip_schema, first IP to grant
740 'dhcp_count': number of IPs to grant.
741 'shared': if this network can be seen/use by other tenants/organization
742 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
743 physical-network: physnet-label}
744 Returns a tuple with the network identifier and created_items, or raises an exception on error
745 created_items can be None or a dictionary where this method can include key-values that will be passed to
746 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
747 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
748 as not present.
749 """
750 self.logger.debug(
751 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
752 )
753 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
754
755 try:
756 vlan = None
757
758 if provider_network_profile:
759 vlan = provider_network_profile.get("segmentation-id")
760
761 new_net = None
762 created_items = {}
763 self._reload_connection()
764 network_dict = {"name": net_name, "admin_state_up": True}
765
766 if net_type in ("data", "ptp") or provider_network_profile:
767 provider_physical_network = None
768
769 if provider_network_profile and provider_network_profile.get(
770 "physical-network"
771 ):
772 provider_physical_network = provider_network_profile.get(
773 "physical-network"
774 )
775
776 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
777 # or not declared, just ignore the checking
778 if (
779 isinstance(
780 self.config.get("dataplane_physical_net"), (tuple, list)
781 )
782 and provider_physical_network
783 not in self.config["dataplane_physical_net"]
784 ):
785 raise vimconn.VimConnConflictException(
786 "Invalid parameter 'provider-network:physical-network' "
787 "for network creation. '{}' is not one of the declared "
788 "list at VIM_config:dataplane_physical_net".format(
789 provider_physical_network
790 )
791 )
792
793 # use the default dataplane_physical_net
794 if not provider_physical_network:
795 provider_physical_network = self.config.get(
796 "dataplane_physical_net"
797 )
798
799 # if it is non empty list, use the first value. If it is a string use the value directly
800 if (
801 isinstance(provider_physical_network, (tuple, list))
802 and provider_physical_network
803 ):
804 provider_physical_network = provider_physical_network[0]
805
806 if not provider_physical_network:
807 raise vimconn.VimConnConflictException(
808 "missing information needed for underlay networks. Provide "
809 "'dataplane_physical_net' configuration at VIM or use the NS "
810 "instantiation parameter 'provider-network.physical-network'"
811 " for the VLD"
812 )
813
814 if not self.config.get("multisegment_support"):
815 network_dict[
816 "provider:physical_network"
817 ] = provider_physical_network
818
819 if (
820 provider_network_profile
821 and "network-type" in provider_network_profile
822 ):
823 network_dict[
824 "provider:network_type"
825 ] = provider_network_profile["network-type"]
826 else:
827 network_dict["provider:network_type"] = self.config.get(
828 "dataplane_network_type", "vlan"
829 )
830
831 if vlan:
832 network_dict["provider:segmentation_id"] = vlan
833 else:
834 # Multi-segment case
835 segment_list = []
836 segment1_dict = {
837 "provider:physical_network": "",
838 "provider:network_type": "vxlan",
839 }
840 segment_list.append(segment1_dict)
841 segment2_dict = {
842 "provider:physical_network": provider_physical_network,
843 "provider:network_type": "vlan",
844 }
845
846 if vlan:
847 segment2_dict["provider:segmentation_id"] = vlan
848 elif self.config.get("multisegment_vlan_range"):
849 vlanID = self._generate_multisegment_vlanID()
850 segment2_dict["provider:segmentation_id"] = vlanID
851
852 # else
853 # raise vimconn.VimConnConflictException(
854 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
855 # network")
856 segment_list.append(segment2_dict)
857 network_dict["segments"] = segment_list
858
859 # VIO Specific Changes. It needs a concrete VLAN
860 if self.vim_type == "VIO" and vlan is None:
861 if self.config.get("dataplane_net_vlan_range") is None:
862 raise vimconn.VimConnConflictException(
863 "You must provide 'dataplane_net_vlan_range' in format "
864 "[start_ID - end_ID] at VIM_config for creating underlay "
865 "networks"
866 )
867
868 network_dict["provider:segmentation_id"] = self._generate_vlanID()
869
870 network_dict["shared"] = shared
871
872 if self.config.get("disable_network_port_security"):
873 network_dict["port_security_enabled"] = False
874
875 if self.config.get("neutron_availability_zone_hints"):
876 hints = self.config.get("neutron_availability_zone_hints")
877
878 if isinstance(hints, str):
879 hints = [hints]
880
881 network_dict["availability_zone_hints"] = hints
882
883 new_net = self.neutron.create_network({"network": network_dict})
884 # print new_net
885 # create subnetwork, even if there is no profile
886
887 if not ip_profile:
888 ip_profile = {}
889
890 if not ip_profile.get("subnet_address"):
891 # Fake subnet is required
892 subnet_rand = random.SystemRandom().randint(0, 255)
893 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
894
895 if "ip_version" not in ip_profile:
896 ip_profile["ip_version"] = "IPv4"
897
898 subnet = {
899 "name": net_name + "-subnet",
900 "network_id": new_net["network"]["id"],
901 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
902 "cidr": ip_profile["subnet_address"],
903 }
904
905 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
906 if ip_profile.get("gateway_address"):
907 subnet["gateway_ip"] = ip_profile["gateway_address"]
908 else:
909 subnet["gateway_ip"] = None
910
911 if ip_profile.get("dns_address"):
912 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
913
914 if "dhcp_enabled" in ip_profile:
915 subnet["enable_dhcp"] = (
916 False
917 if ip_profile["dhcp_enabled"] == "false"
918 or ip_profile["dhcp_enabled"] is False
919 else True
920 )
921
922 if ip_profile.get("dhcp_start_address"):
923 subnet["allocation_pools"] = []
924 subnet["allocation_pools"].append(dict())
925 subnet["allocation_pools"][0]["start"] = ip_profile[
926 "dhcp_start_address"
927 ]
928
929 if ip_profile.get("dhcp_count"):
930 # parts = ip_profile["dhcp_start_address"].split(".")
931 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
932 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
933 ip_int += ip_profile["dhcp_count"] - 1
934 ip_str = str(netaddr.IPAddress(ip_int))
935 subnet["allocation_pools"][0]["end"] = ip_str
936
937 if (
938 ip_profile.get("ipv6_address_mode")
939 and ip_profile["ip_version"] != "IPv4"
940 ):
941 subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
942 # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
943 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
944 subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
945
946 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
947 self.neutron.create_subnet({"subnet": subnet})
948
949 if net_type == "data" and self.config.get("multisegment_support"):
950 if self.config.get("l2gw_support"):
951 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
952 for l2gw in l2gw_list:
953 l2gw_conn = {
954 "l2_gateway_id": l2gw["id"],
955 "network_id": new_net["network"]["id"],
956 "segmentation_id": str(vlanID),
957 }
958 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
959 {"l2_gateway_connection": l2gw_conn}
960 )
961 created_items[
962 "l2gwconn:"
963 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
964 ] = True
965
966 return new_net["network"]["id"], created_items
967 except Exception as e:
968 # delete l2gw connections (if any) before deleting the network
969 for k, v in created_items.items():
970 if not v: # skip already deleted
971 continue
972
973 try:
974 k_item, _, k_id = k.partition(":")
975
976 if k_item == "l2gwconn":
977 self.neutron.delete_l2_gateway_connection(k_id)
978 except Exception as e2:
979 self.logger.error(
980 "Error deleting l2 gateway connection: {}: {}".format(
981 type(e2).__name__, e2
982 )
983 )
984
985 if new_net:
986 self.neutron.delete_network(new_net["network"]["id"])
987
988 self._format_exception(e)
989
990 def get_network_list(self, filter_dict={}):
991 """Obtain tenant networks of VIM
992 Filter_dict can be:
993 name: network name
994 id: network uuid
995 shared: boolean
996 tenant_id: tenant
997 admin_state_up: boolean
998 status: 'ACTIVE'
999 Returns the network list of dictionaries
1000 """
1001 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
1002
1003 try:
1004 self._reload_connection()
1005 filter_dict_os = filter_dict.copy()
1006
1007 if self.api_version3 and "tenant_id" in filter_dict_os:
1008 # TODO check
1009 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
1010
1011 net_dict = self.neutron.list_networks(**filter_dict_os)
1012 net_list = net_dict["networks"]
1013 self.__net_os2mano(net_list)
1014
1015 return net_list
1016 except (
1017 neExceptions.ConnectionFailed,
1018 ksExceptions.ClientException,
1019 neExceptions.NeutronException,
1020 ConnectionError,
1021 ) as e:
1022 self._format_exception(e)
1023
1024 def get_network(self, net_id):
1025 """Obtain details of network from VIM
1026 Returns the network information from a network id"""
1027 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1028 filter_dict = {"id": net_id}
1029 net_list = self.get_network_list(filter_dict)
1030
1031 if len(net_list) == 0:
1032 raise vimconn.VimConnNotFoundException(
1033 "Network '{}' not found".format(net_id)
1034 )
1035 elif len(net_list) > 1:
1036 raise vimconn.VimConnConflictException(
1037 "Found more than one network with this criteria"
1038 )
1039
1040 net = net_list[0]
1041 subnets = []
1042 for subnet_id in net.get("subnets", ()):
1043 try:
1044 subnet = self.neutron.show_subnet(subnet_id)
1045 except Exception as e:
1046 self.logger.error(
1047 "osconnector.get_network(): Error getting subnet %s %s"
1048 % (net_id, str(e))
1049 )
1050 subnet = {"id": subnet_id, "fault": str(e)}
1051
1052 subnets.append(subnet)
1053
1054 net["subnets"] = subnets
1055 net["encapsulation"] = net.get("provider:network_type")
1056 net["encapsulation_type"] = net.get("provider:network_type")
1057 net["segmentation_id"] = net.get("provider:segmentation_id")
1058 net["encapsulation_id"] = net.get("provider:segmentation_id")
1059
1060 return net
1061
1062 def delete_network(self, net_id, created_items=None):
1063 """
1064 Removes a tenant network from VIM and its associated elements
1065 :param net_id: VIM identifier of the network, provided by method new_network
1066 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1067 Returns the network identifier or raises an exception upon error or when network is not found
1068 """
1069 self.logger.debug("Deleting network '%s' from VIM", net_id)
1070
1071 if created_items is None:
1072 created_items = {}
1073
1074 try:
1075 self._reload_connection()
1076 # delete l2gw connections (if any) before deleting the network
1077 for k, v in created_items.items():
1078 if not v: # skip already deleted
1079 continue
1080
1081 try:
1082 k_item, _, k_id = k.partition(":")
1083 if k_item == "l2gwconn":
1084 self.neutron.delete_l2_gateway_connection(k_id)
1085 except Exception as e:
1086 self.logger.error(
1087 "Error deleting l2 gateway connection: {}: {}".format(
1088 type(e).__name__, e
1089 )
1090 )
1091
1092 # delete VM ports attached to this networks before the network
1093 ports = self.neutron.list_ports(network_id=net_id)
1094 for p in ports["ports"]:
1095 try:
1096 self.neutron.delete_port(p["id"])
1097 except Exception as e:
1098 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1099
1100 self.neutron.delete_network(net_id)
1101
1102 return net_id
1103 except (
1104 neExceptions.ConnectionFailed,
1105 neExceptions.NetworkNotFoundClient,
1106 neExceptions.NeutronException,
1107 ksExceptions.ClientException,
1108 neExceptions.NeutronException,
1109 ConnectionError,
1110 ) as e:
1111 self._format_exception(e)
1112
1113 def refresh_nets_status(self, net_list):
1114 """Get the status of the networks
1115 Params: the list of network identifiers
1116 Returns a dictionary with:
1117 net_id: #VIM id of this network
1118 status: #Mandatory. Text with one of:
1119 # DELETED (not found at vim)
1120 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1121 # OTHER (Vim reported other status not understood)
1122 # ERROR (VIM indicates an ERROR status)
1123 # ACTIVE, INACTIVE, DOWN (admin down),
1124 # BUILD (on building process)
1125 #
1126 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1127 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1128 """
1129 net_dict = {}
1130
1131 for net_id in net_list:
1132 net = {}
1133
1134 try:
1135 net_vim = self.get_network(net_id)
1136
1137 if net_vim["status"] in netStatus2manoFormat:
1138 net["status"] = netStatus2manoFormat[net_vim["status"]]
1139 else:
1140 net["status"] = "OTHER"
1141 net["error_msg"] = "VIM status reported " + net_vim["status"]
1142
1143 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1144 net["status"] = "DOWN"
1145
1146 net["vim_info"] = self.serialize(net_vim)
1147
1148 if net_vim.get("fault"): # TODO
1149 net["error_msg"] = str(net_vim["fault"])
1150 except vimconn.VimConnNotFoundException as e:
1151 self.logger.error("Exception getting net status: %s", str(e))
1152 net["status"] = "DELETED"
1153 net["error_msg"] = str(e)
1154 except vimconn.VimConnException as e:
1155 self.logger.error("Exception getting net status: %s", str(e))
1156 net["status"] = "VIM_ERROR"
1157 net["error_msg"] = str(e)
1158 net_dict[net_id] = net
1159 return net_dict
1160
1161 def get_flavor(self, flavor_id):
1162 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1163 self.logger.debug("Getting flavor '%s'", flavor_id)
1164
1165 try:
1166 self._reload_connection()
1167 flavor = self.nova.flavors.find(id=flavor_id)
1168 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1169
1170 return flavor.to_dict()
1171 except (
1172 nvExceptions.NotFound,
1173 nvExceptions.ClientException,
1174 ksExceptions.ClientException,
1175 ConnectionError,
1176 ) as e:
1177 self._format_exception(e)
1178
1179 def get_flavor_id_from_data(self, flavor_dict):
1180 """Obtain flavor id that match the flavor description
1181 Returns the flavor_id or raises a vimconnNotFoundException
1182 flavor_dict: contains the required ram, vcpus, disk
1183 If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1184 and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1185 vimconnNotFoundException is raised
1186 """
1187 exact_match = False if self.config.get("use_existing_flavors") else True
1188
1189 try:
1190 self._reload_connection()
1191 flavor_candidate_id = None
1192 flavor_candidate_data = (10000, 10000, 10000)
1193 flavor_target = (
1194 flavor_dict["ram"],
1195 flavor_dict["vcpus"],
1196 flavor_dict["disk"],
1197 flavor_dict.get("ephemeral", 0),
1198 flavor_dict.get("swap", 0),
1199 )
1200 # numa=None
1201 extended = flavor_dict.get("extended", {})
1202 if extended:
1203 # TODO
1204 raise vimconn.VimConnNotFoundException(
1205 "Flavor with EPA still not implemented"
1206 )
1207 # if len(numas) > 1:
1208 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1209 # numa=numas[0]
1210 # numas = extended.get("numas")
1211 for flavor in self.nova.flavors.list():
1212 epa = flavor.get_keys()
1213
1214 if epa:
1215 continue
1216 # TODO
1217
1218 flavor_data = (
1219 flavor.ram,
1220 flavor.vcpus,
1221 flavor.disk,
1222 flavor.ephemeral,
1223 flavor.swap if isinstance(flavor.swap, int) else 0,
1224 )
1225 if flavor_data == flavor_target:
1226 return flavor.id
1227 elif (
1228 not exact_match
1229 and flavor_target < flavor_data < flavor_candidate_data
1230 ):
1231 flavor_candidate_id = flavor.id
1232 flavor_candidate_data = flavor_data
1233
1234 if not exact_match and flavor_candidate_id:
1235 return flavor_candidate_id
1236
1237 raise vimconn.VimConnNotFoundException(
1238 "Cannot find any flavor matching '{}'".format(flavor_dict)
1239 )
1240 except (
1241 nvExceptions.NotFound,
1242 nvExceptions.ClientException,
1243 ksExceptions.ClientException,
1244 ConnectionError,
1245 ) as e:
1246 self._format_exception(e)
1247
1248 @staticmethod
1249 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1250 """Process resource quota and fill up extra_specs.
1251 Args:
1252 quota (dict): Keeping the quota of resurces
1253 prefix (str) Prefix
1254 extra_specs (dict) Dict to be filled to be used during flavor creation
1255
1256 """
1257 if "limit" in quota:
1258 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1259
1260 if "reserve" in quota:
1261 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1262
1263 if "shares" in quota:
1264 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1265 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1266
1267 @staticmethod
1268 def process_numa_memory(
1269 numa: dict, node_id: Optional[int], extra_specs: dict
1270 ) -> None:
1271 """Set the memory in extra_specs.
1272 Args:
1273 numa (dict): A dictionary which includes numa information
1274 node_id (int): ID of numa node
1275 extra_specs (dict): To be filled.
1276
1277 """
1278 if not numa.get("memory"):
1279 return
1280 memory_mb = numa["memory"] * 1024
1281 memory = "hw:numa_mem.{}".format(node_id)
1282 extra_specs[memory] = int(memory_mb)
1283
1284 @staticmethod
1285 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1286 """Set the cpu in extra_specs.
1287 Args:
1288 numa (dict): A dictionary which includes numa information
1289 node_id (int): ID of numa node
1290 extra_specs (dict): To be filled.
1291
1292 """
1293 if not numa.get("vcpu"):
1294 return
1295 vcpu = numa["vcpu"]
1296 cpu = "hw:numa_cpus.{}".format(node_id)
1297 vcpu = ",".join(map(str, vcpu))
1298 extra_specs[cpu] = vcpu
1299
1300 @staticmethod
1301 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1302 """Fill up extra_specs if numa has paired-threads.
1303 Args:
1304 numa (dict): A dictionary which includes numa information
1305 extra_specs (dict): To be filled.
1306
1307 Returns:
1308 threads (int) Number of virtual cpus
1309
1310 """
1311 if not numa.get("paired-threads"):
1312 return
1313
1314 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1315 threads = numa["paired-threads"] * 2
1316 extra_specs["hw:cpu_thread_policy"] = "require"
1317 extra_specs["hw:cpu_policy"] = "dedicated"
1318 return threads
1319
1320 @staticmethod
1321 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1322 """Fill up extra_specs if numa has cores.
1323 Args:
1324 numa (dict): A dictionary which includes numa information
1325 extra_specs (dict): To be filled.
1326
1327 Returns:
1328 cores (int) Number of virtual cpus
1329
1330 """
1331 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1332 # architecture, or a non-SMT architecture will be emulated
1333 if not numa.get("cores"):
1334 return
1335 cores = numa["cores"]
1336 extra_specs["hw:cpu_thread_policy"] = "isolate"
1337 extra_specs["hw:cpu_policy"] = "dedicated"
1338 return cores
1339
1340 @staticmethod
1341 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1342 """Fill up extra_specs if numa has threads.
1343 Args:
1344 numa (dict): A dictionary which includes numa information
1345 extra_specs (dict): To be filled.
1346
1347 Returns:
1348 threads (int) Number of virtual cpus
1349
1350 """
1351 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1352 if not numa.get("threads"):
1353 return
1354 threads = numa["threads"]
1355 extra_specs["hw:cpu_thread_policy"] = "prefer"
1356 extra_specs["hw:cpu_policy"] = "dedicated"
1357 return threads
1358
1359 def _process_numa_parameters_of_flavor(
1360 self, numas: List, extra_specs: Dict
1361 ) -> None:
1362 """Process numa parameters and fill up extra_specs.
1363
1364 Args:
1365 numas (list): List of dictionary which includes numa information
1366 extra_specs (dict): To be filled.
1367
1368 """
1369 numa_nodes = len(numas)
1370 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1371 cpu_cores, cpu_threads = 0, 0
1372
1373 if self.vim_type == "VIO":
1374 self.process_vio_numa_nodes(numa_nodes, extra_specs)
1375
1376 for numa in numas:
1377 if "id" in numa:
1378 node_id = numa["id"]
1379 # overwrite ram and vcpus
1380 # check if key "memory" is present in numa else use ram value at flavor
1381 self.process_numa_memory(numa, node_id, extra_specs)
1382 self.process_numa_vcpu(numa, node_id, extra_specs)
1383
1384 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1385 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1386
1387 if "paired-threads" in numa:
1388 threads = self.process_numa_paired_threads(numa, extra_specs)
1389 cpu_threads += threads
1390
1391 elif "cores" in numa:
1392 cores = self.process_numa_cores(numa, extra_specs)
1393 cpu_cores += cores
1394
1395 elif "threads" in numa:
1396 threads = self.process_numa_threads(numa, extra_specs)
1397 cpu_threads += threads
1398
1399 if cpu_cores:
1400 extra_specs["hw:cpu_cores"] = str(cpu_cores)
1401 if cpu_threads:
1402 extra_specs["hw:cpu_threads"] = str(cpu_threads)
1403
1404 @staticmethod
1405 def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1406 """According to number of numa nodes, updates the extra_specs for VIO.
1407
1408 Args:
1409
1410 numa_nodes (int): List keeps the numa node numbers
1411 extra_specs (dict): Extra specs dict to be updated
1412
1413 """
1414 # If there are several numas, we do not define specific affinity.
1415 extra_specs["vmware:latency_sensitivity_level"] = "high"
1416
1417 def _change_flavor_name(
1418 self, name: str, name_suffix: int, flavor_data: dict
1419 ) -> str:
1420 """Change the flavor name if the name already exists.
1421
1422 Args:
1423 name (str): Flavor name to be checked
1424 name_suffix (int): Suffix to be appended to name
1425 flavor_data (dict): Flavor dict
1426
1427 Returns:
1428 name (str): New flavor name to be used
1429
1430 """
1431 # Get used names
1432 fl = self.nova.flavors.list()
1433 fl_names = [f.name for f in fl]
1434
1435 while name in fl_names:
1436 name_suffix += 1
1437 name = flavor_data["name"] + "-" + str(name_suffix)
1438
1439 return name
1440
1441 def _process_extended_config_of_flavor(
1442 self, extended: dict, extra_specs: dict
1443 ) -> None:
1444 """Process the extended dict to fill up extra_specs.
1445 Args:
1446
1447 extended (dict): Keeping the extra specification of flavor
1448 extra_specs (dict) Dict to be filled to be used during flavor creation
1449
1450 """
1451 quotas = {
1452 "cpu-quota": "cpu",
1453 "mem-quota": "memory",
1454 "vif-quota": "vif",
1455 "disk-io-quota": "disk_io",
1456 }
1457
1458 page_sizes = {
1459 "LARGE": "large",
1460 "SMALL": "small",
1461 "SIZE_2MB": "2MB",
1462 "SIZE_1GB": "1GB",
1463 "PREFER_LARGE": "any",
1464 }
1465
1466 policies = {
1467 "cpu-pinning-policy": "hw:cpu_policy",
1468 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1469 "mem-policy": "hw:numa_mempolicy",
1470 }
1471
1472 numas = extended.get("numas")
1473 if numas:
1474 self._process_numa_parameters_of_flavor(numas, extra_specs)
1475
1476 for quota, item in quotas.items():
1477 if quota in extended.keys():
1478 self.process_resource_quota(extended.get(quota), item, extra_specs)
1479
1480 # Set the mempage size as specified in the descriptor
1481 if extended.get("mempage-size"):
1482 if extended["mempage-size"] in page_sizes.keys():
1483 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1484 else:
1485 # Normally, validations in NBI should not allow to this condition.
1486 self.logger.debug(
1487 "Invalid mempage-size %s. Will be ignored",
1488 extended.get("mempage-size"),
1489 )
1490
1491 for policy, hw_policy in policies.items():
1492 if extended.get(policy):
1493 extra_specs[hw_policy] = extended[policy].lower()
1494
1495 @staticmethod
1496 def _get_flavor_details(flavor_data: dict) -> Tuple:
1497 """Returns the details of flavor
1498 Args:
1499 flavor_data (dict): Dictionary that includes required flavor details
1500
1501 Returns:
1502 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1503
1504 """
1505 return (
1506 flavor_data.get("ram", 64),
1507 flavor_data.get("vcpus", 1),
1508 {},
1509 flavor_data.get("extended"),
1510 )
1511
1512 def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
1513 """Adds a tenant flavor to openstack VIM.
1514 if change_name_if_used is True, it will change name in case of conflict,
1515 because it is not supported name repetition.
1516
1517 Args:
1518 flavor_data (dict): Flavor details to be processed
1519 change_name_if_used (bool): Change name in case of conflict
1520
1521 Returns:
1522 flavor_id (str): flavor identifier
1523
1524 """
1525 self.logger.debug("Adding flavor '%s'", str(flavor_data))
1526 retry = 0
1527 max_retries = 3
1528 name_suffix = 0
1529
1530 try:
1531 name = flavor_data["name"]
1532 while retry < max_retries:
1533 retry += 1
1534 try:
1535 self._reload_connection()
1536
1537 if change_name_if_used:
1538 name = self._change_flavor_name(name, name_suffix, flavor_data)
1539
1540 ram, vcpus, extra_specs, extended = self._get_flavor_details(
1541 flavor_data
1542 )
1543 if extended:
1544 self._process_extended_config_of_flavor(extended, extra_specs)
1545
1546 # Create flavor
1547
1548 new_flavor = self.nova.flavors.create(
1549 name=name,
1550 ram=ram,
1551 vcpus=vcpus,
1552 disk=flavor_data.get("disk", 0),
1553 ephemeral=flavor_data.get("ephemeral", 0),
1554 swap=flavor_data.get("swap", 0),
1555 is_public=flavor_data.get("is_public", True),
1556 )
1557
1558 # Add metadata
1559 if extra_specs:
1560 new_flavor.set_keys(extra_specs)
1561
1562 return new_flavor.id
1563
1564 except nvExceptions.Conflict as e:
1565 if change_name_if_used and retry < max_retries:
1566 continue
1567
1568 self._format_exception(e)
1569
1570 except (
1571 ksExceptions.ClientException,
1572 nvExceptions.ClientException,
1573 ConnectionError,
1574 KeyError,
1575 ) as e:
1576 self._format_exception(e)
1577
1578 def delete_flavor(self, flavor_id):
1579 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1580 try:
1581 self._reload_connection()
1582 self.nova.flavors.delete(flavor_id)
1583
1584 return flavor_id
1585 # except nvExceptions.BadRequest as e:
1586 except (
1587 nvExceptions.NotFound,
1588 ksExceptions.ClientException,
1589 nvExceptions.ClientException,
1590 ConnectionError,
1591 ) as e:
1592 self._format_exception(e)
1593
1594 def new_image(self, image_dict):
1595 """
1596 Adds a tenant image to VIM. imge_dict is a dictionary with:
1597 name: name
1598 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1599 location: path or URI
1600 public: "yes" or "no"
1601 metadata: metadata of the image
1602 Returns the image_id
1603 """
1604 retry = 0
1605 max_retries = 3
1606
1607 while retry < max_retries:
1608 retry += 1
1609 try:
1610 self._reload_connection()
1611
1612 # determine format http://docs.openstack.org/developer/glance/formats.html
1613 if "disk_format" in image_dict:
1614 disk_format = image_dict["disk_format"]
1615 else: # autodiscover based on extension
1616 if image_dict["location"].endswith(".qcow2"):
1617 disk_format = "qcow2"
1618 elif image_dict["location"].endswith(".vhd"):
1619 disk_format = "vhd"
1620 elif image_dict["location"].endswith(".vmdk"):
1621 disk_format = "vmdk"
1622 elif image_dict["location"].endswith(".vdi"):
1623 disk_format = "vdi"
1624 elif image_dict["location"].endswith(".iso"):
1625 disk_format = "iso"
1626 elif image_dict["location"].endswith(".aki"):
1627 disk_format = "aki"
1628 elif image_dict["location"].endswith(".ari"):
1629 disk_format = "ari"
1630 elif image_dict["location"].endswith(".ami"):
1631 disk_format = "ami"
1632 else:
1633 disk_format = "raw"
1634
1635 self.logger.debug(
1636 "new_image: '%s' loading from '%s'",
1637 image_dict["name"],
1638 image_dict["location"],
1639 )
1640 if self.vim_type == "VIO":
1641 container_format = "bare"
1642 if "container_format" in image_dict:
1643 container_format = image_dict["container_format"]
1644
1645 new_image = self.glance.images.create(
1646 name=image_dict["name"],
1647 container_format=container_format,
1648 disk_format=disk_format,
1649 )
1650 else:
1651 new_image = self.glance.images.create(name=image_dict["name"])
1652
1653 if image_dict["location"].startswith("http"):
1654 # TODO there is not a method to direct download. It must be downloaded locally with requests
1655 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1656 else: # local path
1657 with open(image_dict["location"]) as fimage:
1658 self.glance.images.upload(new_image.id, fimage)
1659 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1660 # image_dict.get("public","yes")=="yes",
1661 # container_format="bare", data=fimage, disk_format=disk_format)
1662
1663 metadata_to_load = image_dict.get("metadata")
1664
1665 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1666 # for openstack
1667 if self.vim_type == "VIO":
1668 metadata_to_load["upload_location"] = image_dict["location"]
1669 else:
1670 metadata_to_load["location"] = image_dict["location"]
1671
1672 self.glance.images.update(new_image.id, **metadata_to_load)
1673
1674 return new_image.id
1675 except (
1676 nvExceptions.Conflict,
1677 ksExceptions.ClientException,
1678 nvExceptions.ClientException,
1679 ) as e:
1680 self._format_exception(e)
1681 except (
1682 HTTPException,
1683 gl1Exceptions.HTTPException,
1684 gl1Exceptions.CommunicationError,
1685 ConnectionError,
1686 ) as e:
1687 if retry == max_retries:
1688 continue
1689
1690 self._format_exception(e)
1691 except IOError as e: # can not open the file
1692 raise vimconn.VimConnConnectionException(
1693 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1694 http_code=vimconn.HTTP_Bad_Request,
1695 )
1696
1697 def delete_image(self, image_id):
1698 """Deletes a tenant image from openstack VIM. Returns the old id"""
1699 try:
1700 self._reload_connection()
1701 self.glance.images.delete(image_id)
1702
1703 return image_id
1704 except (
1705 nvExceptions.NotFound,
1706 ksExceptions.ClientException,
1707 nvExceptions.ClientException,
1708 gl1Exceptions.CommunicationError,
1709 gl1Exceptions.HTTPNotFound,
1710 ConnectionError,
1711 ) as e: # TODO remove
1712 self._format_exception(e)
1713
1714 def get_image_id_from_path(self, path):
1715 """Get the image id from image path in the VIM database. Returns the image_id"""
1716 try:
1717 self._reload_connection()
1718 images = self.glance.images.list()
1719
1720 for image in images:
1721 if image.metadata.get("location") == path:
1722 return image.id
1723
1724 raise vimconn.VimConnNotFoundException(
1725 "image with location '{}' not found".format(path)
1726 )
1727 except (
1728 ksExceptions.ClientException,
1729 nvExceptions.ClientException,
1730 gl1Exceptions.CommunicationError,
1731 ConnectionError,
1732 ) as e:
1733 self._format_exception(e)
1734
1735 def get_image_list(self, filter_dict={}):
1736 """Obtain tenant images from VIM
1737 Filter_dict can be:
1738 id: image id
1739 name: image name
1740 checksum: image checksum
1741 Returns the image list of dictionaries:
1742 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1743 List can be empty
1744 """
1745 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1746
1747 try:
1748 self._reload_connection()
1749 # filter_dict_os = filter_dict.copy()
1750 # First we filter by the available filter fields: name, id. The others are removed.
1751 image_list = self.glance.images.list()
1752 filtered_list = []
1753
1754 for image in image_list:
1755 try:
1756 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1757 continue
1758
1759 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1760 continue
1761
1762 if (
1763 filter_dict.get("checksum")
1764 and image["checksum"] != filter_dict["checksum"]
1765 ):
1766 continue
1767
1768 filtered_list.append(image.copy())
1769 except gl1Exceptions.HTTPNotFound:
1770 pass
1771
1772 return filtered_list
1773 except (
1774 ksExceptions.ClientException,
1775 nvExceptions.ClientException,
1776 gl1Exceptions.CommunicationError,
1777 ConnectionError,
1778 ) as e:
1779 self._format_exception(e)
1780
1781 def __wait_for_vm(self, vm_id, status):
1782 """wait until vm is in the desired status and return True.
1783 If the VM gets in ERROR status, return false.
1784 If the timeout is reached generate an exception"""
1785 elapsed_time = 0
1786 while elapsed_time < server_timeout:
1787 vm_status = self.nova.servers.get(vm_id).status
1788
1789 if vm_status == status:
1790 return True
1791
1792 if vm_status == "ERROR":
1793 return False
1794
1795 time.sleep(5)
1796 elapsed_time += 5
1797
1798 # if we exceeded the timeout rollback
1799 if elapsed_time >= server_timeout:
1800 raise vimconn.VimConnException(
1801 "Timeout waiting for instance " + vm_id + " to get " + status,
1802 http_code=vimconn.HTTP_Request_Timeout,
1803 )
1804
1805 def _get_openstack_availablity_zones(self):
1806 """
1807 Get from openstack availability zones available
1808 :return:
1809 """
1810 try:
1811 openstack_availability_zone = self.nova.availability_zones.list()
1812 openstack_availability_zone = [
1813 str(zone.zoneName)
1814 for zone in openstack_availability_zone
1815 if zone.zoneName != "internal"
1816 ]
1817
1818 return openstack_availability_zone
1819 except Exception:
1820 return None
1821
1822 def _set_availablity_zones(self):
1823 """
1824 Set vim availablity zone
1825 :return:
1826 """
1827 if "availability_zone" in self.config:
1828 vim_availability_zones = self.config.get("availability_zone")
1829
1830 if isinstance(vim_availability_zones, str):
1831 self.availability_zone = [vim_availability_zones]
1832 elif isinstance(vim_availability_zones, list):
1833 self.availability_zone = vim_availability_zones
1834 else:
1835 self.availability_zone = self._get_openstack_availablity_zones()
1836
1837 def _get_vm_availability_zone(
1838 self, availability_zone_index, availability_zone_list
1839 ):
1840 """
1841 Return thge availability zone to be used by the created VM.
1842 :return: The VIM availability zone to be used or None
1843 """
1844 if availability_zone_index is None:
1845 if not self.config.get("availability_zone"):
1846 return None
1847 elif isinstance(self.config.get("availability_zone"), str):
1848 return self.config["availability_zone"]
1849 else:
1850 # TODO consider using a different parameter at config for default AV and AV list match
1851 return self.config["availability_zone"][0]
1852
1853 vim_availability_zones = self.availability_zone
1854 # check if VIM offer enough availability zones describe in the VNFD
1855 if vim_availability_zones and len(availability_zone_list) <= len(
1856 vim_availability_zones
1857 ):
1858 # check if all the names of NFV AV match VIM AV names
1859 match_by_index = False
1860 for av in availability_zone_list:
1861 if av not in vim_availability_zones:
1862 match_by_index = True
1863 break
1864
1865 if match_by_index:
1866 return vim_availability_zones[availability_zone_index]
1867 else:
1868 return availability_zone_list[availability_zone_index]
1869 else:
1870 raise vimconn.VimConnConflictException(
1871 "No enough availability zones at VIM for this deployment"
1872 )
1873
1874 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1875 """Fill up the security_groups in the port_dict.
1876
1877 Args:
1878 net (dict): Network details
1879 port_dict (dict): Port details
1880
1881 """
1882 if (
1883 self.config.get("security_groups")
1884 and net.get("port_security") is not False
1885 and not self.config.get("no_port_security_extension")
1886 ):
1887 if not self.security_groups_id:
1888 self._get_ids_from_name()
1889
1890 port_dict["security_groups"] = self.security_groups_id
1891
1892 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1893 """Fill up the network binding depending on network type in the port_dict.
1894
1895 Args:
1896 net (dict): Network details
1897 port_dict (dict): Port details
1898
1899 """
1900 if not net.get("type"):
1901 raise vimconn.VimConnException("Type is missing in the network details.")
1902
1903 if net["type"] == "virtual":
1904 pass
1905
1906 # For VF
1907 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1908 port_dict["binding:vnic_type"] = "direct"
1909
1910 # VIO specific Changes
1911 if self.vim_type == "VIO":
1912 # Need to create port with port_security_enabled = False and no-security-groups
1913 port_dict["port_security_enabled"] = False
1914 port_dict["provider_security_groups"] = []
1915 port_dict["security_groups"] = []
1916
1917 else:
1918 # For PT PCI-PASSTHROUGH
1919 port_dict["binding:vnic_type"] = "direct-physical"
1920
1921 @staticmethod
1922 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1923 """Set the "ip" parameter in net dictionary.
1924
1925 Args:
1926 new_port (dict): New created port
1927 net (dict): Network details
1928
1929 """
1930 fixed_ips = new_port["port"].get("fixed_ips")
1931
1932 if fixed_ips:
1933 net["ip"] = fixed_ips[0].get("ip_address")
1934 else:
1935 net["ip"] = None
1936
1937 @staticmethod
1938 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1939 """Fill up the mac_address and fixed_ips in port_dict.
1940
1941 Args:
1942 net (dict): Network details
1943 port_dict (dict): Port details
1944
1945 """
1946 if net.get("mac_address"):
1947 port_dict["mac_address"] = net["mac_address"]
1948
1949 ip_dual_list = []
1950 if ip_list := net.get("ip_address"):
1951 if not isinstance(ip_list, list):
1952 ip_list = [ip_list]
1953 for ip in ip_list:
1954 ip_dict = {"ip_address": ip}
1955 ip_dual_list.append(ip_dict)
1956 port_dict["fixed_ips"] = ip_dual_list
1957 # TODO add "subnet_id": <subnet_id>
1958
1959 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1960 """Create new port using neutron.
1961
1962 Args:
1963 port_dict (dict): Port details
1964 created_items (dict): All created items
1965 net (dict): Network details
1966
1967 Returns:
1968 new_port (dict): New created port
1969
1970 """
1971 new_port = self.neutron.create_port({"port": port_dict})
1972 created_items["port:" + str(new_port["port"]["id"])] = True
1973 net["mac_address"] = new_port["port"]["mac_address"]
1974 net["vim_id"] = new_port["port"]["id"]
1975
1976 return new_port
1977
1978 def _create_port(
1979 self, net: dict, name: str, created_items: dict
1980 ) -> Tuple[dict, dict]:
1981 """Create port using net details.
1982
1983 Args:
1984 net (dict): Network details
1985 name (str): Name to be used as network name if net dict does not include name
1986 created_items (dict): All created items
1987
1988 Returns:
1989 new_port, port New created port, port dictionary
1990
1991 """
1992
1993 port_dict = {
1994 "network_id": net["net_id"],
1995 "name": net.get("name"),
1996 "admin_state_up": True,
1997 }
1998
1999 if not port_dict["name"]:
2000 port_dict["name"] = name
2001
2002 self._prepare_port_dict_security_groups(net, port_dict)
2003
2004 self._prepare_port_dict_binding(net, port_dict)
2005
2006 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
2007
2008 new_port = self._create_new_port(port_dict, created_items, net)
2009
2010 vimconnector._set_fixed_ip(new_port, net)
2011
2012 port = {"port-id": new_port["port"]["id"]}
2013
2014 if float(self.nova.api_version.get_string()) >= 2.32:
2015 port["tag"] = new_port["port"]["name"]
2016
2017 return new_port, port
2018
2019 def _prepare_network_for_vminstance(
2020 self,
2021 name: str,
2022 net_list: list,
2023 created_items: dict,
2024 net_list_vim: list,
2025 external_network: list,
2026 no_secured_ports: list,
2027 ) -> None:
2028 """Create port and fill up net dictionary for new VM instance creation.
2029
2030 Args:
2031 name (str): Name of network
2032 net_list (list): List of networks
2033 created_items (dict): All created items belongs to a VM
2034 net_list_vim (list): List of ports
2035 external_network (list): List of external-networks
2036 no_secured_ports (list): Port security disabled ports
2037 """
2038
2039 self._reload_connection()
2040
2041 for net in net_list:
2042 # Skip non-connected iface
2043 if not net.get("net_id"):
2044 continue
2045
2046 new_port, port = self._create_port(net, name, created_items)
2047
2048 net_list_vim.append(port)
2049
2050 if net.get("floating_ip", False):
2051 net["exit_on_floating_ip_error"] = True
2052 external_network.append(net)
2053
2054 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2055 net["exit_on_floating_ip_error"] = False
2056 external_network.append(net)
2057 net["floating_ip"] = self.config.get("use_floating_ip")
2058
2059 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2060 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2061 if net.get("port_security") is False and not self.config.get(
2062 "no_port_security_extension"
2063 ):
2064 no_secured_ports.append(
2065 (
2066 new_port["port"]["id"],
2067 net.get("port_security_disable_strategy"),
2068 )
2069 )
2070
2071 def _prepare_persistent_root_volumes(
2072 self,
2073 name: str,
2074 vm_av_zone: list,
2075 disk: dict,
2076 base_disk_index: int,
2077 block_device_mapping: dict,
2078 existing_vim_volumes: list,
2079 created_items: dict,
2080 ) -> Optional[str]:
2081 """Prepare persistent root volumes for new VM instance.
2082
2083 Args:
2084 name (str): Name of VM instance
2085 vm_av_zone (list): List of availability zones
2086 disk (dict): Disk details
2087 base_disk_index (int): Disk index
2088 block_device_mapping (dict): Block device details
2089 existing_vim_volumes (list): Existing disk details
2090 created_items (dict): All created items belongs to VM
2091
2092 Returns:
2093 boot_volume_id (str): ID of boot volume
2094
2095 """
2096 # Disk may include only vim_volume_id or only vim_id."
2097 # Use existing persistent root volume finding with volume_id or vim_id
2098 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2099
2100 if disk.get(key_id):
2101 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2102 existing_vim_volumes.append({"id": disk[key_id]})
2103
2104 else:
2105 # Create persistent root volume
2106 volume = self.cinder.volumes.create(
2107 size=disk["size"],
2108 name=name + "vd" + chr(base_disk_index),
2109 imageRef=disk["image_id"],
2110 # Make sure volume is in the same AZ as the VM to be attached to
2111 availability_zone=vm_av_zone,
2112 )
2113 boot_volume_id = volume.id
2114 self.update_block_device_mapping(
2115 volume=volume,
2116 block_device_mapping=block_device_mapping,
2117 base_disk_index=base_disk_index,
2118 disk=disk,
2119 created_items=created_items,
2120 )
2121
2122 return boot_volume_id
2123
2124 @staticmethod
2125 def update_block_device_mapping(
2126 volume: object,
2127 block_device_mapping: dict,
2128 base_disk_index: int,
2129 disk: dict,
2130 created_items: dict,
2131 ) -> None:
2132 """Add volume information to block device mapping dict.
2133 Args:
2134 volume (object): Created volume object
2135 block_device_mapping (dict): Block device details
2136 base_disk_index (int): Disk index
2137 disk (dict): Disk details
2138 created_items (dict): All created items belongs to VM
2139 """
2140 if not volume:
2141 raise vimconn.VimConnException("Volume is empty.")
2142
2143 if not hasattr(volume, "id"):
2144 raise vimconn.VimConnException(
2145 "Created volume is not valid, does not have id attribute."
2146 )
2147
2148 volume_txt = "volume:" + str(volume.id)
2149 if disk.get("keep"):
2150 volume_txt += ":keep"
2151 created_items[volume_txt] = True
2152 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2153
2154 def _prepare_non_root_persistent_volumes(
2155 self,
2156 name: str,
2157 disk: dict,
2158 vm_av_zone: list,
2159 block_device_mapping: dict,
2160 base_disk_index: int,
2161 existing_vim_volumes: list,
2162 created_items: dict,
2163 ) -> None:
2164 """Prepare persistent volumes for new VM instance.
2165
2166 Args:
2167 name (str): Name of VM instance
2168 disk (dict): Disk details
2169 vm_av_zone (list): List of availability zones
2170 block_device_mapping (dict): Block device details
2171 base_disk_index (int): Disk index
2172 existing_vim_volumes (list): Existing disk details
2173 created_items (dict): All created items belongs to VM
2174 """
2175 # Non-root persistent volumes
2176 # Disk may include only vim_volume_id or only vim_id."
2177 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2178
2179 if disk.get(key_id):
2180 # Use existing persistent volume
2181 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2182 existing_vim_volumes.append({"id": disk[key_id]})
2183
2184 else:
2185 # Create persistent volume
2186 volume = self.cinder.volumes.create(
2187 size=disk["size"],
2188 name=name + "vd" + chr(base_disk_index),
2189 # Make sure volume is in the same AZ as the VM to be attached to
2190 availability_zone=vm_av_zone,
2191 )
2192 self.update_block_device_mapping(
2193 volume=volume,
2194 block_device_mapping=block_device_mapping,
2195 base_disk_index=base_disk_index,
2196 disk=disk,
2197 created_items=created_items,
2198 )
2199
2200 def _wait_for_created_volumes_availability(
2201 self, elapsed_time: int, created_items: dict
2202 ) -> Optional[int]:
2203 """Wait till created volumes become available.
2204
2205 Args:
2206 elapsed_time (int): Passed time while waiting
2207 created_items (dict): All created items belongs to VM
2208
2209 Returns:
2210 elapsed_time (int): Time spent while waiting
2211
2212 """
2213
2214 while elapsed_time < volume_timeout:
2215 for created_item in created_items:
2216 v, volume_id = (
2217 created_item.split(":")[0],
2218 created_item.split(":")[1],
2219 )
2220 if v == "volume":
2221 if self.cinder.volumes.get(volume_id).status != "available":
2222 break
2223 else:
2224 # All ready: break from while
2225 break
2226
2227 time.sleep(5)
2228 elapsed_time += 5
2229
2230 return elapsed_time
2231
2232 def _wait_for_existing_volumes_availability(
2233 self, elapsed_time: int, existing_vim_volumes: list
2234 ) -> Optional[int]:
2235 """Wait till existing volumes become available.
2236
2237 Args:
2238 elapsed_time (int): Passed time while waiting
2239 existing_vim_volumes (list): Existing volume details
2240
2241 Returns:
2242 elapsed_time (int): Time spent while waiting
2243
2244 """
2245
2246 while elapsed_time < volume_timeout:
2247 for volume in existing_vim_volumes:
2248 if self.cinder.volumes.get(volume["id"]).status != "available":
2249 break
2250 else: # all ready: break from while
2251 break
2252
2253 time.sleep(5)
2254 elapsed_time += 5
2255
2256 return elapsed_time
2257
2258 def _prepare_disk_for_vminstance(
2259 self,
2260 name: str,
2261 existing_vim_volumes: list,
2262 created_items: dict,
2263 vm_av_zone: list,
2264 block_device_mapping: dict,
2265 disk_list: list = None,
2266 ) -> None:
2267 """Prepare all volumes for new VM instance.
2268
2269 Args:
2270 name (str): Name of Instance
2271 existing_vim_volumes (list): List of existing volumes
2272 created_items (dict): All created items belongs to VM
2273 vm_av_zone (list): VM availability zone
2274 block_device_mapping (dict): Block devices to be attached to VM
2275 disk_list (list): List of disks
2276
2277 """
2278 # Create additional volumes in case these are present in disk_list
2279 base_disk_index = ord("b")
2280 boot_volume_id = None
2281 elapsed_time = 0
2282
2283 for disk in disk_list:
2284 if "image_id" in disk:
2285 # Root persistent volume
2286 base_disk_index = ord("a")
2287 boot_volume_id = self._prepare_persistent_root_volumes(
2288 name=name,
2289 vm_av_zone=vm_av_zone,
2290 disk=disk,
2291 base_disk_index=base_disk_index,
2292 block_device_mapping=block_device_mapping,
2293 existing_vim_volumes=existing_vim_volumes,
2294 created_items=created_items,
2295 )
2296 else:
2297 # Non-root persistent volume
2298 self._prepare_non_root_persistent_volumes(
2299 name=name,
2300 disk=disk,
2301 vm_av_zone=vm_av_zone,
2302 block_device_mapping=block_device_mapping,
2303 base_disk_index=base_disk_index,
2304 existing_vim_volumes=existing_vim_volumes,
2305 created_items=created_items,
2306 )
2307 base_disk_index += 1
2308
2309 # Wait until created volumes are with status available
2310 elapsed_time = self._wait_for_created_volumes_availability(
2311 elapsed_time, created_items
2312 )
2313 # Wait until existing volumes in vim are with status available
2314 elapsed_time = self._wait_for_existing_volumes_availability(
2315 elapsed_time, existing_vim_volumes
2316 )
2317 # If we exceeded the timeout rollback
2318 if elapsed_time >= volume_timeout:
2319 raise vimconn.VimConnException(
2320 "Timeout creating volumes for instance " + name,
2321 http_code=vimconn.HTTP_Request_Timeout,
2322 )
2323 if boot_volume_id:
2324 self.cinder.volumes.set_bootable(boot_volume_id, True)
2325
2326 def _find_the_external_network_for_floating_ip(self):
2327 """Get the external network ip in order to create floating IP.
2328
2329 Returns:
2330 pool_id (str): External network pool ID
2331
2332 """
2333
2334 # Find the external network
2335 external_nets = list()
2336
2337 for net in self.neutron.list_networks()["networks"]:
2338 if net["router:external"]:
2339 external_nets.append(net)
2340
2341 if len(external_nets) == 0:
2342 raise vimconn.VimConnException(
2343 "Cannot create floating_ip automatically since "
2344 "no external network is present",
2345 http_code=vimconn.HTTP_Conflict,
2346 )
2347
2348 if len(external_nets) > 1:
2349 raise vimconn.VimConnException(
2350 "Cannot create floating_ip automatically since "
2351 "multiple external networks are present",
2352 http_code=vimconn.HTTP_Conflict,
2353 )
2354
2355 # Pool ID
2356 return external_nets[0].get("id")
2357
2358 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2359 """Trigger neutron to create a new floating IP using external network ID.
2360
2361 Args:
2362 param (dict): Input parameters to create a floating IP
2363 created_items (dict): All created items belongs to new VM instance
2364
2365 Raises:
2366
2367 VimConnException
2368 """
2369 try:
2370 self.logger.debug("Creating floating IP")
2371 new_floating_ip = self.neutron.create_floatingip(param)
2372 free_floating_ip = new_floating_ip["floatingip"]["id"]
2373 created_items["floating_ip:" + str(free_floating_ip)] = True
2374
2375 except Exception as e:
2376 raise vimconn.VimConnException(
2377 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2378 http_code=vimconn.HTTP_Conflict,
2379 )
2380
2381 def _create_floating_ip(
2382 self, floating_network: dict, server: object, created_items: dict
2383 ) -> None:
2384 """Get the available Pool ID and create a new floating IP.
2385
2386 Args:
2387 floating_network (dict): Dict including external network ID
2388 server (object): Server object
2389 created_items (dict): All created items belongs to new VM instance
2390
2391 """
2392
2393 # Pool_id is available
2394 if (
2395 isinstance(floating_network["floating_ip"], str)
2396 and floating_network["floating_ip"].lower() != "true"
2397 ):
2398 pool_id = floating_network["floating_ip"]
2399
2400 # Find the Pool_id
2401 else:
2402 pool_id = self._find_the_external_network_for_floating_ip()
2403
2404 param = {
2405 "floatingip": {
2406 "floating_network_id": pool_id,
2407 "tenant_id": server.tenant_id,
2408 }
2409 }
2410
2411 self._neutron_create_float_ip(param, created_items)
2412
2413 def _find_floating_ip(
2414 self,
2415 server: object,
2416 floating_ips: list,
2417 floating_network: dict,
2418 ) -> Optional[str]:
2419 """Find the available free floating IPs if there are.
2420
2421 Args:
2422 server (object): Server object
2423 floating_ips (list): List of floating IPs
2424 floating_network (dict): Details of floating network such as ID
2425
2426 Returns:
2427 free_floating_ip (str): Free floating ip address
2428
2429 """
2430 for fip in floating_ips:
2431 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2432 continue
2433
2434 if isinstance(floating_network["floating_ip"], str):
2435 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2436 continue
2437
2438 return fip["id"]
2439
2440 def _assign_floating_ip(
2441 self, free_floating_ip: str, floating_network: dict
2442 ) -> Dict:
2443 """Assign the free floating ip address to port.
2444
2445 Args:
2446 free_floating_ip (str): Floating IP to be assigned
2447 floating_network (dict): ID of floating network
2448
2449 Returns:
2450 fip (dict) (dict): Floating ip details
2451
2452 """
2453 # The vim_id key contains the neutron.port_id
2454 self.neutron.update_floatingip(
2455 free_floating_ip,
2456 {"floatingip": {"port_id": floating_network["vim_id"]}},
2457 )
2458 # For race condition ensure not re-assigned to other VM after 5 seconds
2459 time.sleep(5)
2460
2461 return self.neutron.show_floatingip(free_floating_ip)
2462
2463 def _get_free_floating_ip(
2464 self, server: object, floating_network: dict
2465 ) -> Optional[str]:
2466 """Get the free floating IP address.
2467
2468 Args:
2469 server (object): Server Object
2470 floating_network (dict): Floating network details
2471
2472 Returns:
2473 free_floating_ip (str): Free floating ip addr
2474
2475 """
2476
2477 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2478
2479 # Randomize
2480 random.shuffle(floating_ips)
2481
2482 return self._find_floating_ip(server, floating_ips, floating_network)
2483
2484 def _prepare_external_network_for_vminstance(
2485 self,
2486 external_network: list,
2487 server: object,
2488 created_items: dict,
2489 vm_start_time: float,
2490 ) -> None:
2491 """Assign floating IP address for VM instance.
2492
2493 Args:
2494 external_network (list): ID of External network
2495 server (object): Server Object
2496 created_items (dict): All created items belongs to new VM instance
2497 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2498
2499 Raises:
2500 VimConnException
2501
2502 """
2503 for floating_network in external_network:
2504 try:
2505 assigned = False
2506 floating_ip_retries = 3
2507 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2508 # several times
2509 while not assigned:
2510 free_floating_ip = self._get_free_floating_ip(
2511 server, floating_network
2512 )
2513
2514 if not free_floating_ip:
2515 self._create_floating_ip(
2516 floating_network, server, created_items
2517 )
2518
2519 try:
2520 # For race condition ensure not already assigned
2521 fip = self.neutron.show_floatingip(free_floating_ip)
2522
2523 if fip["floatingip"].get("port_id"):
2524 continue
2525
2526 # Assign floating ip
2527 fip = self._assign_floating_ip(
2528 free_floating_ip, floating_network
2529 )
2530
2531 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2532 self.logger.warning(
2533 "floating_ip {} re-assigned to other port".format(
2534 free_floating_ip
2535 )
2536 )
2537 continue
2538
2539 self.logger.debug(
2540 "Assigned floating_ip {} to VM {}".format(
2541 free_floating_ip, server.id
2542 )
2543 )
2544
2545 assigned = True
2546
2547 except Exception as e:
2548 # Openstack need some time after VM creation to assign an IP. So retry if fails
2549 vm_status = self.nova.servers.get(server.id).status
2550
2551 if vm_status not in ("ACTIVE", "ERROR"):
2552 if time.time() - vm_start_time < server_timeout:
2553 time.sleep(5)
2554 continue
2555 elif floating_ip_retries > 0:
2556 floating_ip_retries -= 1
2557 continue
2558
2559 raise vimconn.VimConnException(
2560 "Cannot create floating_ip: {} {}".format(
2561 type(e).__name__, e
2562 ),
2563 http_code=vimconn.HTTP_Conflict,
2564 )
2565
2566 except Exception as e:
2567 if not floating_network["exit_on_floating_ip_error"]:
2568 self.logger.error("Cannot create floating_ip. %s", str(e))
2569 continue
2570
2571 raise
2572
2573 def _update_port_security_for_vminstance(
2574 self,
2575 no_secured_ports: list,
2576 server: object,
2577 ) -> None:
2578 """Updates the port security according to no_secured_ports list.
2579
2580 Args:
2581 no_secured_ports (list): List of ports that security will be disabled
2582 server (object): Server Object
2583
2584 Raises:
2585 VimConnException
2586
2587 """
2588 # Wait until the VM is active and then disable the port-security
2589 if no_secured_ports:
2590 self.__wait_for_vm(server.id, "ACTIVE")
2591
2592 for port in no_secured_ports:
2593 port_update = {
2594 "port": {"port_security_enabled": False, "security_groups": None}
2595 }
2596
2597 if port[1] == "allow-address-pairs":
2598 port_update = {
2599 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2600 }
2601
2602 try:
2603 self.neutron.update_port(port[0], port_update)
2604
2605 except Exception:
2606 raise vimconn.VimConnException(
2607 "It was not possible to disable port security for port {}".format(
2608 port[0]
2609 )
2610 )
2611
2612 def new_vminstance(
2613 self,
2614 name: str,
2615 description: str,
2616 start: bool,
2617 image_id: str,
2618 flavor_id: str,
2619 affinity_group_list: list,
2620 net_list: list,
2621 cloud_config=None,
2622 disk_list=None,
2623 availability_zone_index=None,
2624 availability_zone_list=None,
2625 ) -> tuple:
2626 """Adds a VM instance to VIM.
2627
2628 Args:
2629 name (str): name of VM
2630 description (str): description
2631 start (bool): indicates if VM must start or boot in pause mode. Ignored
2632 image_id (str) image uuid
2633 flavor_id (str) flavor uuid
2634 affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
2635 net_list (list): list of interfaces, each one is a dictionary with:
2636 name: name of network
2637 net_id: network uuid to connect
2638 vpci: virtual vcpi to assign, ignored because openstack lack #TODO
2639 model: interface model, ignored #TODO
2640 mac_address: used for SR-IOV ifaces #TODO for other types
2641 use: 'data', 'bridge', 'mgmt'
2642 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2643 vim_id: filled/added by this function
2644 floating_ip: True/False (or it can be None)
2645 port_security: True/False
2646 cloud_config (dict): (optional) dictionary with:
2647 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2648 users: (optional) list of users to be inserted, each item is a dict with:
2649 name: (mandatory) user name,
2650 key-pairs: (optional) list of strings with the public key to be inserted to the user
2651 user-data: (optional) string is a text script to be passed directly to cloud-init
2652 config-files: (optional). List of files to be transferred. Each item is a dict with:
2653 dest: (mandatory) string with the destination absolute path
2654 encoding: (optional, by default text). Can be one of:
2655 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2656 content : (mandatory) string with the content of the file
2657 permissions: (optional) string with file permissions, typically octal notation '0644'
2658 owner: (optional) file owner, string with the format 'owner:group'
2659 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2660 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2661 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2662 size: (mandatory) string with the size of the disk in GB
2663 vim_id: (optional) should use this existing volume id
2664 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
2665 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2666 availability_zone_index is None
2667 #TODO ip, security groups
2668
2669 Returns:
2670 A tuple with the instance identifier and created_items or raises an exception on error
2671 created_items can be None or a dictionary where this method can include key-values that will be passed to
2672 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2673 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2674 as not present.
2675
2676 """
2677 self.logger.debug(
2678 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2679 image_id,
2680 flavor_id,
2681 str(net_list),
2682 )
2683
2684 try:
2685 server = None
2686 created_items = {}
2687 net_list_vim = []
2688 # list of external networks to be connected to instance, later on used to create floating_ip
2689 external_network = []
2690 # List of ports with port-security disabled
2691 no_secured_ports = []
2692 block_device_mapping = {}
2693 existing_vim_volumes = []
2694 server_group_id = None
2695 scheduller_hints = {}
2696
2697 # Check the Openstack Connection
2698 self._reload_connection()
2699
2700 # Prepare network list
2701 self._prepare_network_for_vminstance(
2702 name=name,
2703 net_list=net_list,
2704 created_items=created_items,
2705 net_list_vim=net_list_vim,
2706 external_network=external_network,
2707 no_secured_ports=no_secured_ports,
2708 )
2709
2710 # Cloud config
2711 config_drive, userdata = self._create_user_data(cloud_config)
2712
2713 # Get availability Zone
2714 vm_av_zone = self._get_vm_availability_zone(
2715 availability_zone_index, availability_zone_list
2716 )
2717
2718 if disk_list:
2719 # Prepare disks
2720 self._prepare_disk_for_vminstance(
2721 name=name,
2722 existing_vim_volumes=existing_vim_volumes,
2723 created_items=created_items,
2724 vm_av_zone=vm_av_zone,
2725 block_device_mapping=block_device_mapping,
2726 disk_list=disk_list,
2727 )
2728
2729 if affinity_group_list:
2730 # Only first id on the list will be used. Openstack restriction
2731 server_group_id = affinity_group_list[0]["affinity_group_id"]
2732 scheduller_hints["group"] = server_group_id
2733
2734 self.logger.debug(
2735 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2736 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2737 "block_device_mapping={}, server_group={})".format(
2738 name,
2739 image_id,
2740 flavor_id,
2741 net_list_vim,
2742 self.config.get("security_groups"),
2743 vm_av_zone,
2744 self.config.get("keypair"),
2745 userdata,
2746 config_drive,
2747 block_device_mapping,
2748 server_group_id,
2749 )
2750 )
2751
2752 # Create VM
2753 server = self.nova.servers.create(
2754 name=name,
2755 image=image_id,
2756 flavor=flavor_id,
2757 nics=net_list_vim,
2758 security_groups=self.config.get("security_groups"),
2759 # TODO remove security_groups in future versions. Already at neutron port
2760 availability_zone=vm_av_zone,
2761 key_name=self.config.get("keypair"),
2762 userdata=userdata,
2763 config_drive=config_drive,
2764 block_device_mapping=block_device_mapping,
2765 scheduler_hints=scheduller_hints,
2766 )
2767
2768 vm_start_time = time.time()
2769
2770 self._update_port_security_for_vminstance(no_secured_ports, server)
2771
2772 self._prepare_external_network_for_vminstance(
2773 external_network=external_network,
2774 server=server,
2775 created_items=created_items,
2776 vm_start_time=vm_start_time,
2777 )
2778
2779 return server.id, created_items
2780
2781 except Exception as e:
2782 server_id = None
2783 if server:
2784 server_id = server.id
2785
2786 try:
2787 created_items = self.remove_keep_tag_from_persistent_volumes(
2788 created_items
2789 )
2790
2791 self.delete_vminstance(server_id, created_items)
2792
2793 except Exception as e2:
2794 self.logger.error("new_vminstance rollback fail {}".format(e2))
2795
2796 self._format_exception(e)
2797
2798 @staticmethod
2799 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2800 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2801
2802 Args:
2803 created_items (dict): All created items belongs to VM
2804
2805 Returns:
2806 updated_created_items (dict): Dict which does not include keep flag for volumes.
2807
2808 """
2809 return {
2810 key.replace(":keep", ""): value for (key, value) in created_items.items()
2811 }
2812
2813 def get_vminstance(self, vm_id):
2814 """Returns the VM instance information from VIM"""
2815 # self.logger.debug("Getting VM from VIM")
2816 try:
2817 self._reload_connection()
2818 server = self.nova.servers.find(id=vm_id)
2819 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2820
2821 return server.to_dict()
2822 except (
2823 ksExceptions.ClientException,
2824 nvExceptions.ClientException,
2825 nvExceptions.NotFound,
2826 ConnectionError,
2827 ) as e:
2828 self._format_exception(e)
2829
2830 def get_vminstance_console(self, vm_id, console_type="vnc"):
2831 """
2832 Get a console for the virtual machine
2833 Params:
2834 vm_id: uuid of the VM
2835 console_type, can be:
2836 "novnc" (by default), "xvpvnc" for VNC types,
2837 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2838 Returns dict with the console parameters:
2839 protocol: ssh, ftp, http, https, ...
2840 server: usually ip address
2841 port: the http, ssh, ... port
2842 suffix: extra text, e.g. the http path and query string
2843 """
2844 self.logger.debug("Getting VM CONSOLE from VIM")
2845
2846 try:
2847 self._reload_connection()
2848 server = self.nova.servers.find(id=vm_id)
2849
2850 if console_type is None or console_type == "novnc":
2851 console_dict = server.get_vnc_console("novnc")
2852 elif console_type == "xvpvnc":
2853 console_dict = server.get_vnc_console(console_type)
2854 elif console_type == "rdp-html5":
2855 console_dict = server.get_rdp_console(console_type)
2856 elif console_type == "spice-html5":
2857 console_dict = server.get_spice_console(console_type)
2858 else:
2859 raise vimconn.VimConnException(
2860 "console type '{}' not allowed".format(console_type),
2861 http_code=vimconn.HTTP_Bad_Request,
2862 )
2863
2864 console_dict1 = console_dict.get("console")
2865
2866 if console_dict1:
2867 console_url = console_dict1.get("url")
2868
2869 if console_url:
2870 # parse console_url
2871 protocol_index = console_url.find("//")
2872 suffix_index = (
2873 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2874 )
2875 port_index = (
2876 console_url[protocol_index + 2 : suffix_index].find(":")
2877 + protocol_index
2878 + 2
2879 )
2880
2881 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2882 return (
2883 -vimconn.HTTP_Internal_Server_Error,
2884 "Unexpected response from VIM",
2885 )
2886
2887 console_dict = {
2888 "protocol": console_url[0:protocol_index],
2889 "server": console_url[protocol_index + 2 : port_index],
2890 "port": console_url[port_index:suffix_index],
2891 "suffix": console_url[suffix_index + 1 :],
2892 }
2893 protocol_index += 2
2894
2895 return console_dict
2896 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2897 except (
2898 nvExceptions.NotFound,
2899 ksExceptions.ClientException,
2900 nvExceptions.ClientException,
2901 nvExceptions.BadRequest,
2902 ConnectionError,
2903 ) as e:
2904 self._format_exception(e)
2905
2906 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2907 """Neutron delete ports by id.
2908 Args:
2909 k_id (str): Port id in the VIM
2910 """
2911 try:
2912 port_dict = self.neutron.list_ports()
2913 existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
2914
2915 if k_id in existing_ports:
2916 self.neutron.delete_port(k_id)
2917
2918 except Exception as e:
2919 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2920
2921 def _delete_volumes_by_id_wth_cinder(
2922 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2923 ) -> bool:
2924 """Cinder delete volume by id.
2925 Args:
2926 k (str): Full item name in created_items
2927 k_id (str): ID of floating ip in VIM
2928 volumes_to_hold (list): Volumes not to delete
2929 created_items (dict): All created items belongs to VM
2930 """
2931 try:
2932 if k_id in volumes_to_hold:
2933 return
2934
2935 if self.cinder.volumes.get(k_id).status != "available":
2936 return True
2937
2938 else:
2939 self.cinder.volumes.delete(k_id)
2940 created_items[k] = None
2941
2942 except Exception as e:
2943 self.logger.error(
2944 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2945 )
2946
2947 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2948 """Neutron delete floating ip by id.
2949 Args:
2950 k (str): Full item name in created_items
2951 k_id (str): ID of floating ip in VIM
2952 created_items (dict): All created items belongs to VM
2953 """
2954 try:
2955 self.neutron.delete_floatingip(k_id)
2956 created_items[k] = None
2957
2958 except Exception as e:
2959 self.logger.error(
2960 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2961 )
2962
2963 @staticmethod
2964 def _get_item_name_id(k: str) -> Tuple[str, str]:
2965 k_item, _, k_id = k.partition(":")
2966 return k_item, k_id
2967
2968 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2969 """Delete VM ports attached to the networks before deleting virtual machine.
2970 Args:
2971 created_items (dict): All created items belongs to VM
2972 """
2973
2974 for k, v in created_items.items():
2975 if not v: # skip already deleted
2976 continue
2977
2978 try:
2979 k_item, k_id = self._get_item_name_id(k)
2980 if k_item == "port":
2981 self._delete_ports_by_id_wth_neutron(k_id)
2982
2983 except Exception as e:
2984 self.logger.error(
2985 "Error deleting port: {}: {}".format(type(e).__name__, e)
2986 )
2987
2988 def _delete_created_items(
2989 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2990 ) -> bool:
2991 """Delete Volumes and floating ip if they exist in created_items."""
2992 for k, v in created_items.items():
2993 if not v: # skip already deleted
2994 continue
2995
2996 try:
2997 k_item, k_id = self._get_item_name_id(k)
2998
2999 if k_item == "volume":
3000 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
3001 k, k_id, volumes_to_hold, created_items
3002 )
3003
3004 if unavailable_vol:
3005 keep_waiting = True
3006
3007 elif k_item == "floating_ip":
3008 self._delete_floating_ip_by_id(k, k_id, created_items)
3009
3010 except Exception as e:
3011 self.logger.error("Error deleting {}: {}".format(k, e))
3012
3013 return keep_waiting
3014
3015 @staticmethod
3016 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3017 """Remove the volumes which has key flag from created_items
3018
3019 Args:
3020 created_items (dict): All created items belongs to VM
3021
3022 Returns:
3023 created_items (dict): Persistent volumes eliminated created_items
3024 """
3025 return {
3026 key: value
3027 for (key, value) in created_items.items()
3028 if len(key.split(":")) == 2
3029 }
3030
3031 def delete_vminstance(
3032 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3033 ) -> None:
3034 """Removes a VM instance from VIM. Returns the old identifier.
3035 Args:
3036 vm_id (str): Identifier of VM instance
3037 created_items (dict): All created items belongs to VM
3038 volumes_to_hold (list): Volumes_to_hold
3039 """
3040 if created_items is None:
3041 created_items = {}
3042 if volumes_to_hold is None:
3043 volumes_to_hold = []
3044
3045 try:
3046 created_items = self._extract_items_wth_keep_flag_from_created_items(
3047 created_items
3048 )
3049
3050 self._reload_connection()
3051
3052 # Delete VM ports attached to the networks before the virtual machine
3053 if created_items:
3054 self._delete_vm_ports_attached_to_network(created_items)
3055
3056 if vm_id:
3057 self.nova.servers.delete(vm_id)
3058
3059 # Although having detached, volumes should have in active status before deleting.
3060 # We ensure in this loop
3061 keep_waiting = True
3062 elapsed_time = 0
3063
3064 while keep_waiting and elapsed_time < volume_timeout:
3065 keep_waiting = False
3066
3067 # Delete volumes and floating IP.
3068 keep_waiting = self._delete_created_items(
3069 created_items, volumes_to_hold, keep_waiting
3070 )
3071
3072 if keep_waiting:
3073 time.sleep(1)
3074 elapsed_time += 1
3075
3076 except (
3077 nvExceptions.NotFound,
3078 ksExceptions.ClientException,
3079 nvExceptions.ClientException,
3080 ConnectionError,
3081 ) as e:
3082 self._format_exception(e)
3083
3084 def refresh_vms_status(self, vm_list):
3085 """Get the status of the virtual machines and their interfaces/ports
3086 Params: the list of VM identifiers
3087 Returns a dictionary with:
3088 vm_id: #VIM id of this Virtual Machine
3089 status: #Mandatory. Text with one of:
3090 # DELETED (not found at vim)
3091 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3092 # OTHER (Vim reported other status not understood)
3093 # ERROR (VIM indicates an ERROR status)
3094 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3095 # CREATING (on building process), ERROR
3096 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
3097 #
3098 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3099 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3100 interfaces:
3101 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3102 mac_address: #Text format XX:XX:XX:XX:XX:XX
3103 vim_net_id: #network id where this interface is connected
3104 vim_interface_id: #interface/port VIM id
3105 ip_address: #null, or text with IPv4, IPv6 address
3106 compute_node: #identification of compute node where PF,VF interface is allocated
3107 pci: #PCI address of the NIC that hosts the PF,VF
3108 vlan: #physical VLAN used for VF
3109 """
3110 vm_dict = {}
3111 self.logger.debug(
3112 "refresh_vms status: Getting tenant VM instance information from VIM"
3113 )
3114
3115 for vm_id in vm_list:
3116 vm = {}
3117
3118 try:
3119 vm_vim = self.get_vminstance(vm_id)
3120
3121 if vm_vim["status"] in vmStatus2manoFormat:
3122 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3123 else:
3124 vm["status"] = "OTHER"
3125 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3126
3127 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3128 vm_vim.pop("user_data", None)
3129 vm["vim_info"] = self.serialize(vm_vim)
3130
3131 vm["interfaces"] = []
3132 if vm_vim.get("fault"):
3133 vm["error_msg"] = str(vm_vim["fault"])
3134
3135 # get interfaces
3136 try:
3137 self._reload_connection()
3138 port_dict = self.neutron.list_ports(device_id=vm_id)
3139
3140 for port in port_dict["ports"]:
3141 interface = {}
3142 interface["vim_info"] = self.serialize(port)
3143 interface["mac_address"] = port.get("mac_address")
3144 interface["vim_net_id"] = port["network_id"]
3145 interface["vim_interface_id"] = port["id"]
3146 # check if OS-EXT-SRV-ATTR:host is there,
3147 # in case of non-admin credentials, it will be missing
3148
3149 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3150 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3151
3152 interface["pci"] = None
3153
3154 # check if binding:profile is there,
3155 # in case of non-admin credentials, it will be missing
3156 if port.get("binding:profile"):
3157 if port["binding:profile"].get("pci_slot"):
3158 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3159 # the slot to 0x00
3160 # TODO: This is just a workaround valid for niantinc. Find a better way to do so
3161 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3162 pci = port["binding:profile"]["pci_slot"]
3163 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3164 interface["pci"] = pci
3165
3166 interface["vlan"] = None
3167
3168 if port.get("binding:vif_details"):
3169 interface["vlan"] = port["binding:vif_details"].get("vlan")
3170
3171 # Get vlan from network in case not present in port for those old openstacks and cases where
3172 # it is needed vlan at PT
3173 if not interface["vlan"]:
3174 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3175 network = self.neutron.show_network(port["network_id"])
3176
3177 if (
3178 network["network"].get("provider:network_type")
3179 == "vlan"
3180 ):
3181 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3182 interface["vlan"] = network["network"].get(
3183 "provider:segmentation_id"
3184 )
3185
3186 ips = []
3187 # look for floating ip address
3188 try:
3189 floating_ip_dict = self.neutron.list_floatingips(
3190 port_id=port["id"]
3191 )
3192
3193 if floating_ip_dict.get("floatingips"):
3194 ips.append(
3195 floating_ip_dict["floatingips"][0].get(
3196 "floating_ip_address"
3197 )
3198 )
3199 except Exception:
3200 pass
3201
3202 for subnet in port["fixed_ips"]:
3203 ips.append(subnet["ip_address"])
3204
3205 interface["ip_address"] = ";".join(ips)
3206 vm["interfaces"].append(interface)
3207 except Exception as e:
3208 self.logger.error(
3209 "Error getting vm interface information {}: {}".format(
3210 type(e).__name__, e
3211 ),
3212 exc_info=True,
3213 )
3214 except vimconn.VimConnNotFoundException as e:
3215 self.logger.error("Exception getting vm status: %s", str(e))
3216 vm["status"] = "DELETED"
3217 vm["error_msg"] = str(e)
3218 except vimconn.VimConnException as e:
3219 self.logger.error("Exception getting vm status: %s", str(e))
3220 vm["status"] = "VIM_ERROR"
3221 vm["error_msg"] = str(e)
3222
3223 vm_dict[vm_id] = vm
3224
3225 return vm_dict
3226
3227 def action_vminstance(self, vm_id, action_dict, created_items={}):
3228 """Send and action over a VM instance from VIM
3229 Returns None or the console dict if the action was successfully sent to the VIM
3230 """
3231 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3232
3233 try:
3234 self._reload_connection()
3235 server = self.nova.servers.find(id=vm_id)
3236
3237 if "start" in action_dict:
3238 if action_dict["start"] == "rebuild":
3239 server.rebuild()
3240 else:
3241 if server.status == "PAUSED":
3242 server.unpause()
3243 elif server.status == "SUSPENDED":
3244 server.resume()
3245 elif server.status == "SHUTOFF":
3246 server.start()
3247 else:
3248 self.logger.debug(
3249 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3250 )
3251 raise vimconn.VimConnException(
3252 "Cannot 'start' instance while it is in active state",
3253 http_code=vimconn.HTTP_Bad_Request,
3254 )
3255
3256 elif "pause" in action_dict:
3257 server.pause()
3258 elif "resume" in action_dict:
3259 server.resume()
3260 elif "shutoff" in action_dict or "shutdown" in action_dict:
3261 self.logger.debug("server status %s", server.status)
3262 if server.status == "ACTIVE":
3263 server.stop()
3264 else:
3265 self.logger.debug("ERROR: VM is not in Active state")
3266 raise vimconn.VimConnException(
3267 "VM is not in active state, stop operation is not allowed",
3268 http_code=vimconn.HTTP_Bad_Request,
3269 )
3270 elif "forceOff" in action_dict:
3271 server.stop() # TODO
3272 elif "terminate" in action_dict:
3273 server.delete()
3274 elif "createImage" in action_dict:
3275 server.create_image()
3276 # "path":path_schema,
3277 # "description":description_schema,
3278 # "name":name_schema,
3279 # "metadata":metadata_schema,
3280 # "imageRef": id_schema,
3281 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3282 elif "rebuild" in action_dict:
3283 server.rebuild(server.image["id"])
3284 elif "reboot" in action_dict:
3285 server.reboot() # reboot_type="SOFT"
3286 elif "console" in action_dict:
3287 console_type = action_dict["console"]
3288
3289 if console_type is None or console_type == "novnc":
3290 console_dict = server.get_vnc_console("novnc")
3291 elif console_type == "xvpvnc":
3292 console_dict = server.get_vnc_console(console_type)
3293 elif console_type == "rdp-html5":
3294 console_dict = server.get_rdp_console(console_type)
3295 elif console_type == "spice-html5":
3296 console_dict = server.get_spice_console(console_type)
3297 else:
3298 raise vimconn.VimConnException(
3299 "console type '{}' not allowed".format(console_type),
3300 http_code=vimconn.HTTP_Bad_Request,
3301 )
3302
3303 try:
3304 console_url = console_dict["console"]["url"]
3305 # parse console_url
3306 protocol_index = console_url.find("//")
3307 suffix_index = (
3308 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3309 )
3310 port_index = (
3311 console_url[protocol_index + 2 : suffix_index].find(":")
3312 + protocol_index
3313 + 2
3314 )
3315
3316 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3317 raise vimconn.VimConnException(
3318 "Unexpected response from VIM " + str(console_dict)
3319 )
3320
3321 console_dict2 = {
3322 "protocol": console_url[0:protocol_index],
3323 "server": console_url[protocol_index + 2 : port_index],
3324 "port": int(console_url[port_index + 1 : suffix_index]),
3325 "suffix": console_url[suffix_index + 1 :],
3326 }
3327
3328 return console_dict2
3329 except Exception:
3330 raise vimconn.VimConnException(
3331 "Unexpected response from VIM " + str(console_dict)
3332 )
3333
3334 return None
3335 except (
3336 ksExceptions.ClientException,
3337 nvExceptions.ClientException,
3338 nvExceptions.NotFound,
3339 ConnectionError,
3340 ) as e:
3341 self._format_exception(e)
3342 # TODO insert exception vimconn.HTTP_Unauthorized
3343
3344 # ###### VIO Specific Changes #########
3345 def _generate_vlanID(self):
3346 """
3347 Method to get unused vlanID
3348 Args:
3349 None
3350 Returns:
3351 vlanID
3352 """
3353 # Get used VLAN IDs
3354 usedVlanIDs = []
3355 networks = self.get_network_list()
3356
3357 for net in networks:
3358 if net.get("provider:segmentation_id"):
3359 usedVlanIDs.append(net.get("provider:segmentation_id"))
3360
3361 used_vlanIDs = set(usedVlanIDs)
3362
3363 # find unused VLAN ID
3364 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3365 try:
3366 start_vlanid, end_vlanid = map(
3367 int, vlanID_range.replace(" ", "").split("-")
3368 )
3369
3370 for vlanID in range(start_vlanid, end_vlanid + 1):
3371 if vlanID not in used_vlanIDs:
3372 return vlanID
3373 except Exception as exp:
3374 raise vimconn.VimConnException(
3375 "Exception {} occurred while generating VLAN ID.".format(exp)
3376 )
3377 else:
3378 raise vimconn.VimConnConflictException(
3379 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3380 self.config.get("dataplane_net_vlan_range")
3381 )
3382 )
3383
3384 def _generate_multisegment_vlanID(self):
3385 """
3386 Method to get unused vlanID
3387 Args:
3388 None
3389 Returns:
3390 vlanID
3391 """
3392 # Get used VLAN IDs
3393 usedVlanIDs = []
3394 networks = self.get_network_list()
3395 for net in networks:
3396 if net.get("provider:network_type") == "vlan" and net.get(
3397 "provider:segmentation_id"
3398 ):
3399 usedVlanIDs.append(net.get("provider:segmentation_id"))
3400 elif net.get("segments"):
3401 for segment in net.get("segments"):
3402 if segment.get("provider:network_type") == "vlan" and segment.get(
3403 "provider:segmentation_id"
3404 ):
3405 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3406
3407 used_vlanIDs = set(usedVlanIDs)
3408
3409 # find unused VLAN ID
3410 for vlanID_range in self.config.get("multisegment_vlan_range"):
3411 try:
3412 start_vlanid, end_vlanid = map(
3413 int, vlanID_range.replace(" ", "").split("-")
3414 )
3415
3416 for vlanID in range(start_vlanid, end_vlanid + 1):
3417 if vlanID not in used_vlanIDs:
3418 return vlanID
3419 except Exception as exp:
3420 raise vimconn.VimConnException(
3421 "Exception {} occurred while generating VLAN ID.".format(exp)
3422 )
3423 else:
3424 raise vimconn.VimConnConflictException(
3425 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3426 self.config.get("multisegment_vlan_range")
3427 )
3428 )
3429
3430 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3431 """
3432 Method to validate user given vlanID ranges
3433 Args: None
3434 Returns: None
3435 """
3436 for vlanID_range in input_vlan_range:
3437 vlan_range = vlanID_range.replace(" ", "")
3438 # validate format
3439 vlanID_pattern = r"(\d)*-(\d)*$"
3440 match_obj = re.match(vlanID_pattern, vlan_range)
3441 if not match_obj:
3442 raise vimconn.VimConnConflictException(
3443 "Invalid VLAN range for {}: {}.You must provide "
3444 "'{}' in format [start_ID - end_ID].".format(
3445 text_vlan_range, vlanID_range, text_vlan_range
3446 )
3447 )
3448
3449 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3450 if start_vlanid <= 0:
3451 raise vimconn.VimConnConflictException(
3452 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3453 "networks valid IDs are 1 to 4094 ".format(
3454 text_vlan_range, vlanID_range
3455 )
3456 )
3457
3458 if end_vlanid > 4094:
3459 raise vimconn.VimConnConflictException(
3460 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3461 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3462 text_vlan_range, vlanID_range
3463 )
3464 )
3465
3466 if start_vlanid > end_vlanid:
3467 raise vimconn.VimConnConflictException(
3468 "Invalid VLAN range for {}: {}. You must provide '{}'"
3469 " in format start_ID - end_ID and start_ID < end_ID ".format(
3470 text_vlan_range, vlanID_range, text_vlan_range
3471 )
3472 )
3473
3474 def get_hosts_info(self):
3475 """Get the information of deployed hosts
3476 Returns the hosts content"""
3477 if self.debug:
3478 print("osconnector: Getting Host info from VIM")
3479
3480 try:
3481 h_list = []
3482 self._reload_connection()
3483 hypervisors = self.nova.hypervisors.list()
3484
3485 for hype in hypervisors:
3486 h_list.append(hype.to_dict())
3487
3488 return 1, {"hosts": h_list}
3489 except nvExceptions.NotFound as e:
3490 error_value = -vimconn.HTTP_Not_Found
3491 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3492 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3493 error_value = -vimconn.HTTP_Bad_Request
3494 error_text = (
3495 type(e).__name__
3496 + ": "
3497 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3498 )
3499
3500 # TODO insert exception vimconn.HTTP_Unauthorized
3501 # if reaching here is because an exception
3502 self.logger.debug("get_hosts_info " + error_text)
3503
3504 return error_value, error_text
3505
3506 def get_hosts(self, vim_tenant):
3507 """Get the hosts and deployed instances
3508 Returns the hosts content"""
3509 r, hype_dict = self.get_hosts_info()
3510
3511 if r < 0:
3512 return r, hype_dict
3513
3514 hypervisors = hype_dict["hosts"]
3515
3516 try:
3517 servers = self.nova.servers.list()
3518 for hype in hypervisors:
3519 for server in servers:
3520 if (
3521 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3522 == hype["hypervisor_hostname"]
3523 ):
3524 if "vm" in hype:
3525 hype["vm"].append(server.id)
3526 else:
3527 hype["vm"] = [server.id]
3528
3529 return 1, hype_dict
3530 except nvExceptions.NotFound as e:
3531 error_value = -vimconn.HTTP_Not_Found
3532 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3533 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3534 error_value = -vimconn.HTTP_Bad_Request
3535 error_text = (
3536 type(e).__name__
3537 + ": "
3538 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3539 )
3540
3541 # TODO insert exception vimconn.HTTP_Unauthorized
3542 # if reaching here is because an exception
3543 self.logger.debug("get_hosts " + error_text)
3544
3545 return error_value, error_text
3546
3547 def new_affinity_group(self, affinity_group_data):
3548 """Adds a server group to VIM
3549 affinity_group_data contains a dictionary with information, keys:
3550 name: name in VIM for the server group
3551 type: affinity or anti-affinity
3552 scope: Only nfvi-node allowed
3553 Returns the server group identifier"""
3554 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3555
3556 try:
3557 name = affinity_group_data["name"]
3558 policy = affinity_group_data["type"]
3559
3560 self._reload_connection()
3561 new_server_group = self.nova.server_groups.create(name, policy)
3562
3563 return new_server_group.id
3564 except (
3565 ksExceptions.ClientException,
3566 nvExceptions.ClientException,
3567 ConnectionError,
3568 KeyError,
3569 ) as e:
3570 self._format_exception(e)
3571
3572 def get_affinity_group(self, affinity_group_id):
3573 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3574 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3575 try:
3576 self._reload_connection()
3577 server_group = self.nova.server_groups.find(id=affinity_group_id)
3578
3579 return server_group.to_dict()
3580 except (
3581 nvExceptions.NotFound,
3582 nvExceptions.ClientException,
3583 ksExceptions.ClientException,
3584 ConnectionError,
3585 ) as e:
3586 self._format_exception(e)
3587
3588 def delete_affinity_group(self, affinity_group_id):
3589 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3590 self.logger.debug("Getting server group '%s'", affinity_group_id)
3591 try:
3592 self._reload_connection()
3593 self.nova.server_groups.delete(affinity_group_id)
3594
3595 return affinity_group_id
3596 except (
3597 nvExceptions.NotFound,
3598 ksExceptions.ClientException,
3599 nvExceptions.ClientException,
3600 ConnectionError,
3601 ) as e:
3602 self._format_exception(e)
3603
3604 def get_vdu_state(self, vm_id):
3605 """
3606 Getting the state of a vdu
3607 param:
3608 vm_id: ID of an instance
3609 """
3610 self.logger.debug("Getting the status of VM")
3611 self.logger.debug("VIM VM ID %s", vm_id)
3612 self._reload_connection()
3613 server = self.nova.servers.find(id=vm_id)
3614 server_dict = server.to_dict()
3615 vdu_data = [
3616 server_dict["status"],
3617 server_dict["flavor"]["id"],
3618 server_dict["OS-EXT-SRV-ATTR:host"],
3619 server_dict["OS-EXT-AZ:availability_zone"],
3620 ]
3621 self.logger.debug("vdu_data %s", vdu_data)
3622 return vdu_data
3623
3624 def check_compute_availability(self, host, server_flavor_details):
3625 self._reload_connection()
3626 hypervisor_search = self.nova.hypervisors.search(
3627 hypervisor_match=host, servers=True
3628 )
3629 for hypervisor in hypervisor_search:
3630 hypervisor_id = hypervisor.to_dict()["id"]
3631 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
3632 hypervisor_dict = hypervisor_details.to_dict()
3633 hypervisor_temp = json.dumps(hypervisor_dict)
3634 hypervisor_json = json.loads(hypervisor_temp)
3635 resources_available = [
3636 hypervisor_json["free_ram_mb"],
3637 hypervisor_json["disk_available_least"],
3638 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
3639 ]
3640 compute_available = all(
3641 x > y for x, y in zip(resources_available, server_flavor_details)
3642 )
3643 if compute_available:
3644 return host
3645
3646 def check_availability_zone(
3647 self, old_az, server_flavor_details, old_host, host=None
3648 ):
3649 self._reload_connection()
3650 az_check = {"zone_check": False, "compute_availability": None}
3651 aggregates_list = self.nova.aggregates.list()
3652 for aggregate in aggregates_list:
3653 aggregate_details = aggregate.to_dict()
3654 aggregate_temp = json.dumps(aggregate_details)
3655 aggregate_json = json.loads(aggregate_temp)
3656 if aggregate_json["availability_zone"] == old_az:
3657 hosts_list = aggregate_json["hosts"]
3658 if host is not None:
3659 if host in hosts_list:
3660 az_check["zone_check"] = True
3661 available_compute_id = self.check_compute_availability(
3662 host, server_flavor_details
3663 )
3664 if available_compute_id is not None:
3665 az_check["compute_availability"] = available_compute_id
3666 else:
3667 for check_host in hosts_list:
3668 if check_host != old_host:
3669 available_compute_id = self.check_compute_availability(
3670 check_host, server_flavor_details
3671 )
3672 if available_compute_id is not None:
3673 az_check["zone_check"] = True
3674 az_check["compute_availability"] = available_compute_id
3675 break
3676 else:
3677 az_check["zone_check"] = True
3678 return az_check
3679
3680 def migrate_instance(self, vm_id, compute_host=None):
3681 """
3682 Migrate a vdu
3683 param:
3684 vm_id: ID of an instance
3685 compute_host: Host to migrate the vdu to
3686 """
3687 self._reload_connection()
3688 vm_state = False
3689 instance_state = self.get_vdu_state(vm_id)
3690 server_flavor_id = instance_state[1]
3691 server_hypervisor_name = instance_state[2]
3692 server_availability_zone = instance_state[3]
3693 try:
3694 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3695 server_flavor_details = [
3696 server_flavor["ram"],
3697 server_flavor["disk"],
3698 server_flavor["vcpus"],
3699 ]
3700 if compute_host == server_hypervisor_name:
3701 raise vimconn.VimConnException(
3702 "Unable to migrate instance '{}' to the same host '{}'".format(
3703 vm_id, compute_host
3704 ),
3705 http_code=vimconn.HTTP_Bad_Request,
3706 )
3707 az_status = self.check_availability_zone(
3708 server_availability_zone,
3709 server_flavor_details,
3710 server_hypervisor_name,
3711 compute_host,
3712 )
3713 availability_zone_check = az_status["zone_check"]
3714 available_compute_id = az_status.get("compute_availability")
3715
3716 if availability_zone_check is False:
3717 raise vimconn.VimConnException(
3718 "Unable to migrate instance '{}' to a different availability zone".format(
3719 vm_id
3720 ),
3721 http_code=vimconn.HTTP_Bad_Request,
3722 )
3723 if available_compute_id is not None:
3724 self.nova.servers.live_migrate(
3725 server=vm_id,
3726 host=available_compute_id,
3727 block_migration=True,
3728 disk_over_commit=False,
3729 )
3730 state = "MIGRATING"
3731 changed_compute_host = ""
3732 if state == "MIGRATING":
3733 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
3734 changed_compute_host = self.get_vdu_state(vm_id)[2]
3735 if vm_state and changed_compute_host == available_compute_id:
3736 self.logger.debug(
3737 "Instance '{}' migrated to the new compute host '{}'".format(
3738 vm_id, changed_compute_host
3739 )
3740 )
3741 return state, available_compute_id
3742 else:
3743 raise vimconn.VimConnException(
3744 "Migration Failed. Instance '{}' not moved to the new host {}".format(
3745 vm_id, available_compute_id
3746 ),
3747 http_code=vimconn.HTTP_Bad_Request,
3748 )
3749 else:
3750 raise vimconn.VimConnException(
3751 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
3752 available_compute_id
3753 ),
3754 http_code=vimconn.HTTP_Bad_Request,
3755 )
3756 except (
3757 nvExceptions.BadRequest,
3758 nvExceptions.ClientException,
3759 nvExceptions.NotFound,
3760 ) as e:
3761 self._format_exception(e)
3762
3763 def resize_instance(self, vm_id, new_flavor_id):
3764 """
3765 For resizing the vm based on the given
3766 flavor details
3767 param:
3768 vm_id : ID of an instance
3769 new_flavor_id : Flavor id to be resized
3770 Return the status of a resized instance
3771 """
3772 self._reload_connection()
3773 self.logger.debug("resize the flavor of an instance")
3774 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
3775 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
3776 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
3777 try:
3778 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
3779 if old_flavor_disk > new_flavor_disk:
3780 raise nvExceptions.BadRequest(
3781 400,
3782 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
3783 )
3784 else:
3785 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
3786 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
3787 if vm_state:
3788 instance_resized_status = self.confirm_resize(vm_id)
3789 return instance_resized_status
3790 else:
3791 raise nvExceptions.BadRequest(
3792 409,
3793 message="Cannot 'resize' vm_state is in ERROR",
3794 )
3795
3796 else:
3797 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
3798 raise nvExceptions.BadRequest(
3799 409,
3800 message="Cannot 'resize' instance while it is in vm_state resized",
3801 )
3802 except (
3803 nvExceptions.BadRequest,
3804 nvExceptions.ClientException,
3805 nvExceptions.NotFound,
3806 ) as e:
3807 self._format_exception(e)
3808
3809 def confirm_resize(self, vm_id):
3810 """
3811 Confirm the resize of an instance
3812 param:
3813 vm_id: ID of an instance
3814 """
3815 self._reload_connection()
3816 self.nova.servers.confirm_resize(server=vm_id)
3817 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3818 self.__wait_for_vm(vm_id, "ACTIVE")
3819 instance_status = self.get_vdu_state(vm_id)[0]
3820 return instance_status
3821
3822 def get_monitoring_data(self):
3823 try:
3824 self.logger.debug("Getting servers and ports data from Openstack VIMs.")
3825 self._reload_connection()
3826 all_servers = self.nova.servers.list(detailed=True)
3827 all_ports = self.neutron.list_ports()
3828 return all_servers, all_ports
3829 except (
3830 vimconn.VimConnException,
3831 vimconn.VimConnNotFoundException,
3832 vimconn.VimConnConnectionException,
3833 ) as e:
3834 raise vimconn.VimConnException(
3835 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
3836 )