Feature 10972: Support of volume multi-attach
File: RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py (repo: osm/RO.git)
# -*- coding: utf-8 -*-

##
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """

import copy
from http.client import HTTPException
import json
import logging
from pprint import pformat
import random
import re
import time
from typing import Dict, List, Optional, Tuple

from cinderclient import client as cClient
from glanceclient import client as glClient
import glanceclient.exc as gl1Exceptions
from keystoneauth1 import session
from keystoneauth1.identity import v2, v3
import keystoneclient.exceptions as ksExceptions
import keystoneclient.v2_0.client as ksClient_v2
import keystoneclient.v3.client as ksClient_v3
import netaddr
from neutronclient.common import exceptions as neExceptions
from neutronclient.neutron import client as neClient
from novaclient import client as nClient, exceptions as nvExceptions
from osm_ro_plugin import vimconn
from requests.exceptions import ConnectionError
import yaml

__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

62 """contain the openstack virtual machine status to openmano status"""
63 vmStatus2manoFormat = {
64 "ACTIVE": "ACTIVE",
65 "PAUSED": "PAUSED",
66 "SUSPENDED": "SUSPENDED",
67 "SHUTOFF": "INACTIVE",
68 "BUILD": "BUILD",
69 "ERROR": "ERROR",
70 "DELETED": "DELETED",
71 }
72 netStatus2manoFormat = {
73 "ACTIVE": "ACTIVE",
74 "PAUSED": "PAUSED",
75 "INACTIVE": "INACTIVE",
76 "BUILD": "BUILD",
77 "ERROR": "ERROR",
78 "DELETED": "DELETED",
79 }
80
81 supportedClassificationTypes = ["legacy_flow_classifier"]
82
83 # global var to have a timeout creating and deleting volumes
84 volume_timeout = 1800
85 server_timeout = 1800
86

class SafeDumper(yaml.SafeDumper):
    def represent_data(self, data):
        # OpenStack APIs use custom subclasses of dict, and the YAML safe dumper
        # is designed not to handle them (see pyyaml issue 142)
        if isinstance(data, dict) and data.__class__ != dict:
            # A simple solution is to convert those items back to dicts
            data = dict(data.items())

        return super(SafeDumper, self).represent_data(data)


class vimconnector(vimconn.VimConnector):
    def __init__(
        self,
        uuid,
        name,
        tenant_id,
        tenant_name,
        url,
        url_admin=None,
        user=None,
        passwd=None,
        log_level=None,
        config={},
        persistent_info={},
    ):
        """using common constructor parameters. In this case
        'url' is the keystone authorization url,
        'url_admin' is not used
        """
        api_version = config.get("APIversion")

        if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:APIversion. "
                "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
            )

        vim_type = config.get("vim_type")

        if vim_type and vim_type not in ("vio", "VIO"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:vim_type. "
                "Allowed values are 'vio' or 'VIO'".format(vim_type)
            )

        if config.get("dataplane_net_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
            )

        if config.get("multisegment_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("multisegment_vlan_range"), "multisegment_vlan_range"
            )

        vimconn.VimConnector.__init__(
            self,
            uuid,
            name,
            tenant_id,
            tenant_name,
            url,
            url_admin,
            user,
            passwd,
            log_level,
            config,
        )

        if self.config.get("insecure") and self.config.get("ca_cert"):
            raise vimconn.VimConnException(
                "options insecure and ca_cert are mutually exclusive"
            )

        self.verify = True

        if self.config.get("insecure"):
            self.verify = False

        if self.config.get("ca_cert"):
            self.verify = self.config.get("ca_cert")

        if not url:
            raise TypeError("url param can not be NoneType")

        self.persistent_info = persistent_info
        self.availability_zone = persistent_info.get("availability_zone", None)
        self.session = persistent_info.get("session", {"reload_client": True})
        self.my_tenant_id = self.session.get("my_tenant_id")
        self.nova = self.session.get("nova")
        self.neutron = self.session.get("neutron")
        self.cinder = self.session.get("cinder")
        self.glance = self.session.get("glance")
        # self.glancev1 = self.session.get("glancev1")
        self.keystone = self.session.get("keystone")
        self.api_version3 = self.session.get("api_version3")
        self.vim_type = self.config.get("vim_type")

        if self.vim_type:
            self.vim_type = self.vim_type.upper()

        if self.config.get("use_internal_endpoint"):
            self.endpoint_type = "internalURL"
        else:
            self.endpoint_type = None

        logging.getLogger("urllib3").setLevel(logging.WARNING)
        logging.getLogger("keystoneauth").setLevel(logging.WARNING)
        logging.getLogger("novaclient").setLevel(logging.WARNING)
        self.logger = logging.getLogger("ro.vim.openstack")

        # allow security_groups to be a list or a single string
        if isinstance(self.config.get("security_groups"), str):
            self.config["security_groups"] = [self.config["security_groups"]]

        self.security_groups_id = None

        # ###### VIO Specific Changes #########
        if self.vim_type == "VIO":
            self.logger = logging.getLogger("ro.vim.vio")

        if log_level:
            self.logger.setLevel(getattr(logging, log_level))

    def __getitem__(self, index):
        """Get individual parameters.
        Throws KeyError"""
        if index == "project_domain_id":
            return self.config.get("project_domain_id")
        elif index == "user_domain_id":
            return self.config.get("user_domain_id")
        else:
            return vimconn.VimConnector.__getitem__(self, index)

    def __setitem__(self, index, value):
        """Set individual parameters and mark the session as dirty to force a connection reload.
        Throws KeyError"""
        if index == "project_domain_id":
            self.config["project_domain_id"] = value
        elif index == "user_domain_id":
            self.config["user_domain_id"] = value
        else:
            vimconn.VimConnector.__setitem__(self, index, value)

        self.session["reload_client"] = True

    def serialize(self, value):
        """Serialization of python basic types.

        In case the value is not serializable, a message will be logged and a
        simple representation of the data that cannot be converted back to
        python is returned.
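
        Example (illustrative): serialize({"a": 1}) returns the one-line YAML
        document '{a: 1}'.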
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
260 def _reload_connection(self):
261 """Called before any operation, it check if credentials has changed
262 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
263 """
264 # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
265 if self.session["reload_client"]:
266 if self.config.get("APIversion"):
267 self.api_version3 = (
268 self.config["APIversion"] == "v3.3"
269 or self.config["APIversion"] == "3"
270 )
271 else: # get from ending auth_url that end with v3 or with v2.0
272 self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
273 "/v3/"
274 )
275
276 self.session["api_version3"] = self.api_version3
277
278 if self.api_version3:
279 if self.config.get("project_domain_id") or self.config.get(
280 "project_domain_name"
281 ):
282 project_domain_id_default = None
283 else:
284 project_domain_id_default = "default"
285
286 if self.config.get("user_domain_id") or self.config.get(
287 "user_domain_name"
288 ):
289 user_domain_id_default = None
290 else:
291 user_domain_id_default = "default"
292 auth = v3.Password(
293 auth_url=self.url,
294 username=self.user,
295 password=self.passwd,
296 project_name=self.tenant_name,
297 project_id=self.tenant_id,
298 project_domain_id=self.config.get(
299 "project_domain_id", project_domain_id_default
300 ),
301 user_domain_id=self.config.get(
302 "user_domain_id", user_domain_id_default
303 ),
304 project_domain_name=self.config.get("project_domain_name"),
305 user_domain_name=self.config.get("user_domain_name"),
306 )
307 else:
308 auth = v2.Password(
309 auth_url=self.url,
310 username=self.user,
311 password=self.passwd,
312 tenant_name=self.tenant_name,
313 tenant_id=self.tenant_id,
314 )
315
316 sess = session.Session(auth=auth, verify=self.verify)
317 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318 # Titanium cloud and StarlingX
319 region_name = self.config.get("region_name")
320
321 if self.api_version3:
322 self.keystone = ksClient_v3.Client(
323 session=sess,
324 endpoint_type=self.endpoint_type,
325 region_name=region_name,
326 )
327 else:
328 self.keystone = ksClient_v2.Client(
329 session=sess, endpoint_type=self.endpoint_type
330 )
331
332 self.session["keystone"] = self.keystone
333 # In order to enable microversion functionality an explicit microversion must be specified in "config".
334 # This implementation approach is due to the warning message in
335 # https://developer.openstack.org/api-guide/compute/microversions.html
336 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337 # always require an specific microversion.
338 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
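            # Example (illustrative) datacenter config snippet:
            #     config:
            #         microversion: "2.32"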
            version = self.config.get("microversion")

            if not version:
                version = "2.60"

            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            if sess.get_all_version_data(service_type="volumev2"):
                self.cinder = self.session["cinder"] = cClient.Client(
                    2,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.cinder = self.session["cinder"] = cClient.Client(
                    3,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availability zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None

    def __net_os2mano(self, net_list_dict):
        """Transform the net openstack format to mano format
        net_list_dict can be a list of dicts or a single dict"""
        if type(net_list_dict) is dict:
            net_list_ = (net_list_dict,)
        elif type(net_list_dict) is list:
            net_list_ = net_list_dict
        else:
            raise TypeError("param net_list_dict must be a list or a dictionary")

        for net in net_list_:
            if net.get("provider:network_type") == "vlan":
                net["type"] = "data"
            else:
                net["type"] = "bridge"

    def __classification_os2mano(self, class_list_dict):
        """Transform the openstack format (Flow Classifier) to mano format
        (Classification) class_list_dict can be a list of dicts or a single dict
        """
        if isinstance(class_list_dict, dict):
            class_list_ = [class_list_dict]
        elif isinstance(class_list_dict, list):
            class_list_ = class_list_dict
        else:
            raise TypeError("param class_list_dict must be a list or a dictionary")

        for classification in class_list_:
            id = classification.pop("id")
            name = classification.pop("name")
            description = classification.pop("description")
            project_id = classification.pop("project_id")
            tenant_id = classification.pop("tenant_id")
            original_classification = copy.deepcopy(classification)
            classification.clear()
            classification["ctype"] = "legacy_flow_classifier"
            classification["definition"] = original_classification
            classification["id"] = id
            classification["name"] = name
            classification["description"] = description
            classification["project_id"] = project_id
            classification["tenant_id"] = tenant_id

    def __sfi_os2mano(self, sfi_list_dict):
        """Transform the openstack format (Port Pair) to mano format (SFI)
        sfi_list_dict can be a list of dicts or a single dict
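
        Example (illustrative): a Port Pair {"ingress": "p1", "egress": "p2",
        "service_function_parameters": {"correlation": "mpls"}} becomes
        {"ingress_ports": ["p1"], "egress_ports": ["p2"], "sfc_encap": True}.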
445 """
446 if isinstance(sfi_list_dict, dict):
447 sfi_list_ = [sfi_list_dict]
448 elif isinstance(sfi_list_dict, list):
449 sfi_list_ = sfi_list_dict
450 else:
451 raise TypeError("param sfi_list_dict must be a list or a dictionary")
452
453 for sfi in sfi_list_:
454 sfi["ingress_ports"] = []
455 sfi["egress_ports"] = []
456
457 if sfi.get("ingress"):
458 sfi["ingress_ports"].append(sfi["ingress"])
459
460 if sfi.get("egress"):
461 sfi["egress_ports"].append(sfi["egress"])
462
463 del sfi["ingress"]
464 del sfi["egress"]
465 params = sfi.get("service_function_parameters")
466 sfc_encap = False
467
468 if params:
469 correlation = params.get("correlation")
470
471 if correlation:
472 sfc_encap = True
473
474 sfi["sfc_encap"] = sfc_encap
475 del sfi["service_function_parameters"]
476
477 def __sf_os2mano(self, sf_list_dict):
478 """Transform the openstack format (Port Pair Group) to mano format (SF)
479 sf_list_dict can be a list of dict or a single dict
480 """
481 if isinstance(sf_list_dict, dict):
482 sf_list_ = [sf_list_dict]
483 elif isinstance(sf_list_dict, list):
484 sf_list_ = sf_list_dict
485 else:
486 raise TypeError("param sf_list_dict must be a list or a dictionary")
487
488 for sf in sf_list_:
489 del sf["port_pair_group_parameters"]
490 sf["sfis"] = sf["port_pairs"]
491 del sf["port_pairs"]
492
493 def __sfp_os2mano(self, sfp_list_dict):
494 """Transform the openstack format (Port Chain) to mano format (SFP)
495 sfp_list_dict can be a list of dict or a single dict
496 """
497 if isinstance(sfp_list_dict, dict):
498 sfp_list_ = [sfp_list_dict]
499 elif isinstance(sfp_list_dict, list):
500 sfp_list_ = sfp_list_dict
501 else:
502 raise TypeError("param sfp_list_dict must be a list or a dictionary")
503
504 for sfp in sfp_list_:
505 params = sfp.pop("chain_parameters")
506 sfc_encap = False
507
508 if params:
509 correlation = params.get("correlation")
510
511 if correlation:
512 sfc_encap = True
513
514 sfp["sfc_encap"] = sfc_encap
515 sfp["spi"] = sfp.pop("chain_id")
516 sfp["classifications"] = sfp.pop("flow_classifiers")
517 sfp["service_functions"] = sfp.pop("port_pair_groups")
518
519 # placeholder for now; read TODO note below
520 def _validate_classification(self, type, definition):
521 # only legacy_flow_classifier Type is supported at this point
522 return True
523 # TODO(igordcard): this method should be an abstract method of an
524 # abstract Classification class to be implemented by the specific
525 # Types. Also, abstract vimconnector should call the validation
526 # method before the implemented VIM connectors are called.
527
528 def _format_exception(self, exception):
529 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
530 message_error = str(exception)
531 tip = ""
532
533 if isinstance(
534 exception,
535 (
536 neExceptions.NetworkNotFoundClient,
537 nvExceptions.NotFound,
538 ksExceptions.NotFound,
539 gl1Exceptions.HTTPNotFound,
540 ),
541 ):
542 raise vimconn.VimConnNotFoundException(
543 type(exception).__name__ + ": " + message_error
544 )
545 elif isinstance(
546 exception,
547 (
548 HTTPException,
549 gl1Exceptions.HTTPException,
550 gl1Exceptions.CommunicationError,
551 ConnectionError,
552 ksExceptions.ConnectionError,
553 neExceptions.ConnectionFailed,
554 ),
555 ):
556 if type(exception).__name__ == "SSLError":
557 tip = " (maybe option 'insecure' must be added to the VIM)"
558
559 raise vimconn.VimConnConnectionException(
560 "Invalid URL or credentials{}: {}".format(tip, message_error)
561 )
562 elif isinstance(
563 exception,
564 (
565 KeyError,
566 nvExceptions.BadRequest,
567 ksExceptions.BadRequest,
568 ),
569 ):
570 raise vimconn.VimConnException(
571 type(exception).__name__ + ": " + message_error
572 )
573 elif isinstance(
574 exception,
575 (
576 nvExceptions.ClientException,
577 ksExceptions.ClientException,
578 neExceptions.NeutronException,
579 ),
580 ):
581 raise vimconn.VimConnUnexpectedResponse(
582 type(exception).__name__ + ": " + message_error
583 )
584 elif isinstance(exception, nvExceptions.Conflict):
585 raise vimconn.VimConnConflictException(
586 type(exception).__name__ + ": " + message_error
587 )
588 elif isinstance(exception, vimconn.VimConnException):
589 raise exception
590 else: # ()
591 self.logger.error("General Exception " + message_error, exc_info=True)
592
593 raise vimconn.VimConnConnectionException(
594 type(exception).__name__ + ": " + message_error
595 )
596
597 def _get_ids_from_name(self):
598 """
599 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
600 :return: None
601 """
602 # get tenant_id if only tenant_name is supplied
603 self._reload_connection()
604
605 if not self.my_tenant_id:
606 raise vimconn.VimConnConnectionException(
607 "Error getting tenant information from name={} id={}".format(
608 self.tenant_name, self.tenant_id
609 )
610 )
611
612 if self.config.get("security_groups") and not self.security_groups_id:
613 # convert from name to id
614 neutron_sg_list = self.neutron.list_security_groups(
615 tenant_id=self.my_tenant_id
616 )["security_groups"]
617
618 self.security_groups_id = []
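            # the inner for/else below raises only when no neutron security group
            # matches the configured name or id (the else clause runs when the
            # loop finishes without a break)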
            for sg in self.config.get("security_groups"):
                for neutron_sg in neutron_sg_list:
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )

    def check_vim_connectivity(self):
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})

    def get_tenant_list(self, filter_dict={}):
        """Obtain tenants of VIM
        filter_dict can contain the following keys:
            name: filter by tenant name
            id: filter by tenant uuid/id
            <other VIM specific>
        Returns the tenant list of dictionaries: [{'name': '<name>', 'id': '<id>', ...}, ...]
        """
        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()

            if self.api_version3:
                project_class_list = self.keystone.projects.list(
                    name=filter_dict.get("name")
                )
            else:
                project_class_list = self.keystone.tenants.findall(**filter_dict)

            project_list = []

            for project in project_class_list:
                if filter_dict.get("id") and filter_dict["id"] != project.id:
                    continue

                project_list.append(project.to_dict())

            return project_list
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_tenant(self, tenant_name, tenant_description):
        """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
        self.logger.debug("Adding a new tenant name: %s", tenant_name)

        try:
            self._reload_connection()

            if self.api_version3:
                project = self.keystone.projects.create(
                    tenant_name,
                    self.config.get("project_domain_id", "default"),
                    description=tenant_description,
                    is_domain=False,
                )
            else:
                project = self.keystone.tenants.create(tenant_name, tenant_description)

            return project.id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_tenant(self, tenant_id):
        """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
        self.logger.debug("Deleting tenant %s from VIM", tenant_id)

        try:
            self._reload_connection()

            if self.api_version3:
                self.keystone.projects.delete(tenant_id)
            else:
                self.keystone.tenants.delete(tenant_id)

            return tenant_id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_network(
        self,
        net_name,
        net_type,
        ip_profile=None,
        shared=False,
        provider_network_profile=None,
    ):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data': underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X.X.X.X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/used by other tenants/organizations
            'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
                                                             physical-network: physnet-label}
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
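
        Example (illustrative) call with hypothetical values:
            net_id, created_items = vim.new_network(
                "mgmt-net", "bridge",
                ip_profile={"subnet_address": "10.0.0.0/24", "dhcp_enabled": True})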
749 """
750 self.logger.debug(
751 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
752 )
753 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
754
755 try:
756 vlan = None
757
758 if provider_network_profile:
759 vlan = provider_network_profile.get("segmentation-id")
760
761 new_net = None
762 created_items = {}
763 self._reload_connection()
764 network_dict = {"name": net_name, "admin_state_up": True}
765
766 if net_type in ("data", "ptp") or provider_network_profile:
767 provider_physical_network = None
768
769 if provider_network_profile and provider_network_profile.get(
770 "physical-network"
771 ):
772 provider_physical_network = provider_network_profile.get(
773 "physical-network"
774 )
775
776 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
777 # or not declared, just ignore the checking
778 if (
779 isinstance(
780 self.config.get("dataplane_physical_net"), (tuple, list)
781 )
782 and provider_physical_network
783 not in self.config["dataplane_physical_net"]
784 ):
785 raise vimconn.VimConnConflictException(
786 "Invalid parameter 'provider-network:physical-network' "
787 "for network creation. '{}' is not one of the declared "
788 "list at VIM_config:dataplane_physical_net".format(
789 provider_physical_network
790 )
791 )
792
793 # use the default dataplane_physical_net
794 if not provider_physical_network:
795 provider_physical_network = self.config.get(
796 "dataplane_physical_net"
797 )
798
799 # if it is non empty list, use the first value. If it is a string use the value directly
800 if (
801 isinstance(provider_physical_network, (tuple, list))
802 and provider_physical_network
803 ):
804 provider_physical_network = provider_physical_network[0]
805
806 if not provider_physical_network:
807 raise vimconn.VimConnConflictException(
808 "missing information needed for underlay networks. Provide "
809 "'dataplane_physical_net' configuration at VIM or use the NS "
810 "instantiation parameter 'provider-network.physical-network'"
811 " for the VLD"
812 )
813
814 if not self.config.get("multisegment_support"):
815 network_dict[
816 "provider:physical_network"
817 ] = provider_physical_network
818
819 if (
820 provider_network_profile
821 and "network-type" in provider_network_profile
822 ):
823 network_dict[
824 "provider:network_type"
825 ] = provider_network_profile["network-type"]
826 else:
827 network_dict["provider:network_type"] = self.config.get(
828 "dataplane_network_type", "vlan"
829 )
830
831 if vlan:
832 network_dict["provider:segmentation_id"] = vlan
833 else:
834 # Multi-segment case
835 segment_list = []
836 segment1_dict = {
837 "provider:physical_network": "",
838 "provider:network_type": "vxlan",
839 }
840 segment_list.append(segment1_dict)
841 segment2_dict = {
842 "provider:physical_network": provider_physical_network,
843 "provider:network_type": "vlan",
844 }
845
846 if vlan:
847 segment2_dict["provider:segmentation_id"] = vlan
848 elif self.config.get("multisegment_vlan_range"):
849 vlanID = self._generate_multisegment_vlanID()
850 segment2_dict["provider:segmentation_id"] = vlanID
851
852 # else
853 # raise vimconn.VimConnConflictException(
854 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
855 # network")
856 segment_list.append(segment2_dict)
857 network_dict["segments"] = segment_list
858
859 # VIO Specific Changes. It needs a concrete VLAN
860 if self.vim_type == "VIO" and vlan is None:
861 if self.config.get("dataplane_net_vlan_range") is None:
862 raise vimconn.VimConnConflictException(
863 "You must provide 'dataplane_net_vlan_range' in format "
864 "[start_ID - end_ID] at VIM_config for creating underlay "
865 "networks"
866 )
867
868 network_dict["provider:segmentation_id"] = self._generate_vlanID()
869
870 network_dict["shared"] = shared
871
872 if self.config.get("disable_network_port_security"):
873 network_dict["port_security_enabled"] = False
874
875 if self.config.get("neutron_availability_zone_hints"):
876 hints = self.config.get("neutron_availability_zone_hints")
877
878 if isinstance(hints, str):
879 hints = [hints]
880
881 network_dict["availability_zone_hints"] = hints
882
883 new_net = self.neutron.create_network({"network": network_dict})
884 # print new_net
885 # create subnetwork, even if there is no profile
886
887 if not ip_profile:
888 ip_profile = {}
889
890 if not ip_profile.get("subnet_address"):
891 # Fake subnet is required
892 subnet_rand = random.SystemRandom().randint(0, 255)
893 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
894
895 if "ip_version" not in ip_profile:
896 ip_profile["ip_version"] = "IPv4"
897
898 subnet = {
899 "name": net_name + "-subnet",
900 "network_id": new_net["network"]["id"],
901 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
902 "cidr": ip_profile["subnet_address"],
903 }
904
905 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
906 if ip_profile.get("gateway_address"):
907 subnet["gateway_ip"] = ip_profile["gateway_address"]
908 else:
909 subnet["gateway_ip"] = None
910
911 if ip_profile.get("dns_address"):
912 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
913
914 if "dhcp_enabled" in ip_profile:
915 subnet["enable_dhcp"] = (
916 False
917 if ip_profile["dhcp_enabled"] == "false"
918 or ip_profile["dhcp_enabled"] is False
919 else True
920 )
921
922 if ip_profile.get("dhcp_start_address"):
923 subnet["allocation_pools"] = []
924 subnet["allocation_pools"].append(dict())
925 subnet["allocation_pools"][0]["start"] = ip_profile[
926 "dhcp_start_address"
927 ]
928
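            # Worked example (illustrative): with dhcp_start_address "10.0.0.10" and
            # dhcp_count 50, the pool end computed below is "10.0.0.59" (start + count - 1)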
929 if ip_profile.get("dhcp_count"):
930 # parts = ip_profile["dhcp_start_address"].split(".")
931 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
932 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
933 ip_int += ip_profile["dhcp_count"] - 1
934 ip_str = str(netaddr.IPAddress(ip_int))
935 subnet["allocation_pools"][0]["end"] = ip_str
936
937 if (
938 ip_profile.get("ipv6_address_mode")
939 and ip_profile["ip_version"] != "IPv4"
940 ):
941 subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
942 # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
943 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
944 subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
945
946 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
947 self.neutron.create_subnet({"subnet": subnet})
948
949 if net_type == "data" and self.config.get("multisegment_support"):
950 if self.config.get("l2gw_support"):
951 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
952 for l2gw in l2gw_list:
953 l2gw_conn = {
954 "l2_gateway_id": l2gw["id"],
955 "network_id": new_net["network"]["id"],
956 "segmentation_id": str(vlanID),
957 }
958 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
959 {"l2_gateway_connection": l2gw_conn}
960 )
961 created_items[
962 "l2gwconn:"
963 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
964 ] = True
965
966 return new_net["network"]["id"], created_items
967 except Exception as e:
968 # delete l2gw connections (if any) before deleting the network
969 for k, v in created_items.items():
970 if not v: # skip already deleted
971 continue
972
973 try:
974 k_item, _, k_id = k.partition(":")
975
976 if k_item == "l2gwconn":
977 self.neutron.delete_l2_gateway_connection(k_id)
978 except Exception as e2:
979 self.logger.error(
980 "Error deleting l2 gateway connection: {}: {}".format(
981 type(e2).__name__, e2
982 )
983 )
984
985 if new_net:
986 self.neutron.delete_network(new_net["network"]["id"])
987
988 self._format_exception(e)
989
990 def get_network_list(self, filter_dict={}):
991 """Obtain tenant networks of VIM
992 Filter_dict can be:
993 name: network name
994 id: network uuid
995 shared: boolean
996 tenant_id: tenant
997 admin_state_up: boolean
998 status: 'ACTIVE'
999 Returns the network list of dictionaries
1000 """
1001 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
1002
1003 try:
1004 self._reload_connection()
1005 filter_dict_os = filter_dict.copy()
1006
1007 if self.api_version3 and "tenant_id" in filter_dict_os:
1008 # TODO check
1009 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
1010
1011 net_dict = self.neutron.list_networks(**filter_dict_os)
1012 net_list = net_dict["networks"]
1013 self.__net_os2mano(net_list)
1014
1015 return net_list
1016 except (
1017 neExceptions.ConnectionFailed,
1018 ksExceptions.ClientException,
1019 neExceptions.NeutronException,
1020 ConnectionError,
1021 ) as e:
1022 self._format_exception(e)
1023
1024 def get_network(self, net_id):
1025 """Obtain details of network from VIM
1026 Returns the network information from a network id"""
1027 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1028 filter_dict = {"id": net_id}
1029 net_list = self.get_network_list(filter_dict)
1030
1031 if len(net_list) == 0:
1032 raise vimconn.VimConnNotFoundException(
1033 "Network '{}' not found".format(net_id)
1034 )
1035 elif len(net_list) > 1:
1036 raise vimconn.VimConnConflictException(
1037 "Found more than one network with this criteria"
1038 )
1039
1040 net = net_list[0]
1041 subnets = []
1042 for subnet_id in net.get("subnets", ()):
1043 try:
1044 subnet = self.neutron.show_subnet(subnet_id)
1045 except Exception as e:
1046 self.logger.error(
1047 "osconnector.get_network(): Error getting subnet %s %s"
1048 % (net_id, str(e))
1049 )
1050 subnet = {"id": subnet_id, "fault": str(e)}
1051
1052 subnets.append(subnet)
1053
1054 net["subnets"] = subnets
1055 net["encapsulation"] = net.get("provider:network_type")
1056 net["encapsulation_type"] = net.get("provider:network_type")
1057 net["segmentation_id"] = net.get("provider:segmentation_id")
1058 net["encapsulation_id"] = net.get("provider:segmentation_id")
1059
1060 return net
1061
1062 def delete_network(self, net_id, created_items=None):
1063 """
1064 Removes a tenant network from VIM and its associated elements
1065 :param net_id: VIM identifier of the network, provided by method new_network
1066 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1067 Returns the network identifier or raises an exception upon error or when network is not found
1068 """
1069 self.logger.debug("Deleting network '%s' from VIM", net_id)
1070
1071 if created_items is None:
1072 created_items = {}
1073
1074 try:
1075 self._reload_connection()
1076 # delete l2gw connections (if any) before deleting the network
1077 for k, v in created_items.items():
1078 if not v: # skip already deleted
1079 continue
1080
1081 try:
1082 k_item, _, k_id = k.partition(":")
1083 if k_item == "l2gwconn":
1084 self.neutron.delete_l2_gateway_connection(k_id)
1085 except Exception as e:
1086 self.logger.error(
1087 "Error deleting l2 gateway connection: {}: {}".format(
1088 type(e).__name__, e
1089 )
1090 )
1091
1092 # delete VM ports attached to this networks before the network
1093 ports = self.neutron.list_ports(network_id=net_id)
1094 for p in ports["ports"]:
1095 try:
1096 self.neutron.delete_port(p["id"])
1097 except Exception as e:
1098 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1099
1100 self.neutron.delete_network(net_id)
1101
1102 return net_id
1103 except (
1104 neExceptions.ConnectionFailed,
1105 neExceptions.NetworkNotFoundClient,
1106 neExceptions.NeutronException,
1107 ksExceptions.ClientException,
1108 neExceptions.NeutronException,
1109 ConnectionError,
1110 ) as e:
1111 self._format_exception(e)
1112
1113 def refresh_nets_status(self, net_list):
1114 """Get the status of the networks
1115 Params: the list of network identifiers
1116 Returns a dictionary with:
1117 net_id: #VIM id of this network
1118 status: #Mandatory. Text with one of:
1119 # DELETED (not found at vim)
1120 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1121 # OTHER (Vim reported other status not understood)
1122 # ERROR (VIM indicates an ERROR status)
1123 # ACTIVE, INACTIVE, DOWN (admin down),
1124 # BUILD (on building process)
1125 #
1126 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1127 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1128 """
1129 net_dict = {}
1130
1131 for net_id in net_list:
1132 net = {}
1133
1134 try:
1135 net_vim = self.get_network(net_id)
1136
1137 if net_vim["status"] in netStatus2manoFormat:
1138 net["status"] = netStatus2manoFormat[net_vim["status"]]
1139 else:
1140 net["status"] = "OTHER"
1141 net["error_msg"] = "VIM status reported " + net_vim["status"]
1142
1143 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1144 net["status"] = "DOWN"
1145
1146 net["vim_info"] = self.serialize(net_vim)
1147
1148 if net_vim.get("fault"): # TODO
1149 net["error_msg"] = str(net_vim["fault"])
1150 except vimconn.VimConnNotFoundException as e:
1151 self.logger.error("Exception getting net status: %s", str(e))
1152 net["status"] = "DELETED"
1153 net["error_msg"] = str(e)
1154 except vimconn.VimConnException as e:
1155 self.logger.error("Exception getting net status: %s", str(e))
1156 net["status"] = "VIM_ERROR"
1157 net["error_msg"] = str(e)
1158 net_dict[net_id] = net
1159 return net_dict
1160
1161 def get_flavor(self, flavor_id):
1162 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1163 self.logger.debug("Getting flavor '%s'", flavor_id)
1164
1165 try:
1166 self._reload_connection()
1167 flavor = self.nova.flavors.find(id=flavor_id)
1168 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1169
1170 return flavor.to_dict()
1171 except (
1172 nvExceptions.NotFound,
1173 nvExceptions.ClientException,
1174 ksExceptions.ClientException,
1175 ConnectionError,
1176 ) as e:
1177 self._format_exception(e)
1178
1179 def get_flavor_id_from_data(self, flavor_dict):
1180 """Obtain flavor id that match the flavor description
1181 Returns the flavor_id or raises a vimconnNotFoundException
1182 flavor_dict: contains the required ram, vcpus, disk
1183 If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1184 and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1185 vimconnNotFoundException is raised
1186 """
1187 exact_match = False if self.config.get("use_existing_flavors") else True
1188
1189 try:
1190 self._reload_connection()
1191 flavor_candidate_id = None
1192 flavor_candidate_data = (10000, 10000, 10000)
1193 flavor_target = (
1194 flavor_dict["ram"],
1195 flavor_dict["vcpus"],
1196 flavor_dict["disk"],
1197 flavor_dict.get("ephemeral", 0),
1198 flavor_dict.get("swap", 0),
1199 )
1200 # numa=None
1201 extended = flavor_dict.get("extended", {})
1202 if extended:
1203 # TODO
1204 raise vimconn.VimConnNotFoundException(
1205 "Flavor with EPA still not implemented"
1206 )
1207 # if len(numas) > 1:
1208 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1209 # numa=numas[0]
1210 # numas = extended.get("numas")
1211 for flavor in self.nova.flavors.list():
1212 epa = flavor.get_keys()
1213
1214 if epa:
1215 continue
1216 # TODO
1217
1218 flavor_data = (
1219 flavor.ram,
1220 flavor.vcpus,
1221 flavor.disk,
1222 flavor.ephemeral,
1223 flavor.swap if isinstance(flavor.swap, int) else 0,
1224 )
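
                # Python compares these tuples lexicographically (ram first, then
                # vcpus, disk, ephemeral, swap), so the closest candidate is the
                # smallest tuple that is still greater than the target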
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    @staticmethod
    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
        """Process resource quota and fill up extra_specs.
        Args:
            quota (dict): Keeping the quota of resources
            prefix (str): Prefix
            extra_specs (dict): Dict to be filled to be used during flavor creation

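        Example (illustrative): quota={"limit": 1000, "reserve": 500, "shares": 2}
        with prefix "cpu" yields extra_specs entries "quota:cpu_limit": 1000,
        "quota:cpu_reservation": 500, "quota:cpu_shares_level": "custom" and
        "quota:cpu_shares_share": 2.
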
1256 """
1257 if "limit" in quota:
1258 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1259
1260 if "reserve" in quota:
1261 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1262
1263 if "shares" in quota:
1264 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1265 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1266
1267 @staticmethod
1268 def process_numa_memory(
1269 numa: dict, node_id: Optional[int], extra_specs: dict
1270 ) -> None:
1271 """Set the memory in extra_specs.
1272 Args:
1273 numa (dict): A dictionary which includes numa information
1274 node_id (int): ID of numa node
1275 extra_specs (dict): To be filled.
1276
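        Example (illustrative): numa={"memory": 2} (in GB) with node_id=0 sets
        extra_specs["hw:numa_mem.0"] = 2048 (in MB).
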
1277 """
1278 if not numa.get("memory"):
1279 return
1280 memory_mb = numa["memory"] * 1024
1281 memory = "hw:numa_mem.{}".format(node_id)
1282 extra_specs[memory] = int(memory_mb)
1283
1284 @staticmethod
1285 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1286 """Set the cpu in extra_specs.
1287 Args:
1288 numa (dict): A dictionary which includes numa information
1289 node_id (int): ID of numa node
1290 extra_specs (dict): To be filled.
1291
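        Example (illustrative): numa={"vcpu": [0, 1]} with node_id=1 sets
        extra_specs["hw:numa_cpus.1"] = "0,1".
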
1292 """
1293 if not numa.get("vcpu"):
1294 return
1295 vcpu = numa["vcpu"]
1296 cpu = "hw:numa_cpus.{}".format(node_id)
1297 vcpu = ",".join(map(str, vcpu))
1298 extra_specs[cpu] = vcpu
1299
1300 @staticmethod
1301 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1302 """Fill up extra_specs if numa has paired-threads.
1303 Args:
1304 numa (dict): A dictionary which includes numa information
1305 extra_specs (dict): To be filled.
1306
1307 Returns:
1308 threads (int) Number of virtual cpus
1309
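        Example (illustrative): numa={"paired-threads": 2} returns 4 (2 pairs =
        4 vcpus) and sets hw:cpu_thread_policy to "require".
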
1310 """
1311 if not numa.get("paired-threads"):
1312 return
1313
1314 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1315 threads = numa["paired-threads"] * 2
1316 extra_specs["hw:cpu_thread_policy"] = "require"
1317 extra_specs["hw:cpu_policy"] = "dedicated"
1318 return threads
1319
1320 @staticmethod
1321 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1322 """Fill up extra_specs if numa has cores.
1323 Args:
1324 numa (dict): A dictionary which includes numa information
1325 extra_specs (dict): To be filled.
1326
1327 Returns:
1328 cores (int) Number of virtual cpus
1329
1330 """
1331 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1332 # architecture, or a non-SMT architecture will be emulated
1333 if not numa.get("cores"):
1334 return
1335 cores = numa["cores"]
1336 extra_specs["hw:cpu_thread_policy"] = "isolate"
1337 extra_specs["hw:cpu_policy"] = "dedicated"
1338 return cores
1339
1340 @staticmethod
1341 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1342 """Fill up extra_specs if numa has threads.
1343 Args:
1344 numa (dict): A dictionary which includes numa information
1345 extra_specs (dict): To be filled.
1346
1347 Returns:
1348 threads (int) Number of virtual cpus
1349
1350 """
1351 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1352 if not numa.get("threads"):
1353 return
1354 threads = numa["threads"]
1355 extra_specs["hw:cpu_thread_policy"] = "prefer"
1356 extra_specs["hw:cpu_policy"] = "dedicated"
1357 return threads
1358
1359 def _process_numa_parameters_of_flavor(
1360 self, numas: List, extra_specs: Dict
1361 ) -> None:
1362 """Process numa parameters and fill up extra_specs.
1363
1364 Args:
1365 numas (list): List of dictionary which includes numa information
1366 extra_specs (dict): To be filled.
1367
1368 """
1369 numa_nodes = len(numas)
1370 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1371 cpu_cores, cpu_threads = 0, 0
1372
1373 if self.vim_type == "VIO":
1374 self.process_vio_numa_nodes(numa_nodes, extra_specs)
1375
1376 for numa in numas:
1377 if "id" in numa:
1378 node_id = numa["id"]
1379 # overwrite ram and vcpus
1380 # check if key "memory" is present in numa else use ram value at flavor
1381 self.process_numa_memory(numa, node_id, extra_specs)
1382 self.process_numa_vcpu(numa, node_id, extra_specs)
1383
1384 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1385 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1386
1387 if "paired-threads" in numa:
1388 threads = self.process_numa_paired_threads(numa, extra_specs)
1389 cpu_threads += threads
1390
1391 elif "cores" in numa:
1392 cores = self.process_numa_cores(numa, extra_specs)
1393 cpu_cores += cores
1394
1395 elif "threads" in numa:
1396 threads = self.process_numa_threads(numa, extra_specs)
1397 cpu_threads += threads
1398
1399 if cpu_cores:
1400 extra_specs["hw:cpu_cores"] = str(cpu_cores)
1401 if cpu_threads:
1402 extra_specs["hw:cpu_threads"] = str(cpu_threads)
1403
1404 @staticmethod
1405 def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1406 """According to number of numa nodes, updates the extra_specs for VIO.
1407
1408 Args:
1409
1410 numa_nodes (int): List keeps the numa node numbers
1411 extra_specs (dict): Extra specs dict to be updated
1412
1413 """
1414 # If there are several numas, we do not define specific affinity.
1415 extra_specs["vmware:latency_sensitivity_level"] = "high"
1416
1417 def _change_flavor_name(
1418 self, name: str, name_suffix: int, flavor_data: dict
1419 ) -> str:
1420 """Change the flavor name if the name already exists.
1421
1422 Args:
1423 name (str): Flavor name to be checked
1424 name_suffix (int): Suffix to be appended to name
1425 flavor_data (dict): Flavor dict
1426
1427 Returns:
1428 name (str): New flavor name to be used
1429
1430 """
1431 # Get used names
1432 fl = self.nova.flavors.list()
1433 fl_names = [f.name for f in fl]
1434
1435 while name in fl_names:
1436 name_suffix += 1
1437 name = flavor_data["name"] + "-" + str(name_suffix)
1438
1439 return name
1440
1441 def _process_extended_config_of_flavor(
1442 self, extended: dict, extra_specs: dict
1443 ) -> None:
1444 """Process the extended dict to fill up extra_specs.
1445 Args:
1446
1447 extended (dict): Keeping the extra specification of flavor
1448 extra_specs (dict) Dict to be filled to be used during flavor creation
1449
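        Example (illustrative): extended={"mempage-size": "LARGE",
        "cpu-pinning-policy": "DEDICATED"} produces
        extra_specs={"hw:mem_page_size": "large", "hw:cpu_policy": "dedicated"}.
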
1450 """
1451 quotas = {
1452 "cpu-quota": "cpu",
1453 "mem-quota": "memory",
1454 "vif-quota": "vif",
1455 "disk-io-quota": "disk_io",
1456 }
1457
1458 page_sizes = {
1459 "LARGE": "large",
1460 "SMALL": "small",
1461 "SIZE_2MB": "2MB",
1462 "SIZE_1GB": "1GB",
1463 "PREFER_LARGE": "any",
1464 }
1465
1466 policies = {
1467 "cpu-pinning-policy": "hw:cpu_policy",
1468 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1469 "mem-policy": "hw:numa_mempolicy",
1470 }
1471
1472 numas = extended.get("numas")
1473 if numas:
1474 self._process_numa_parameters_of_flavor(numas, extra_specs)
1475
1476 for quota, item in quotas.items():
1477 if quota in extended.keys():
1478 self.process_resource_quota(extended.get(quota), item, extra_specs)
1479
1480 # Set the mempage size as specified in the descriptor
1481 if extended.get("mempage-size"):
1482 if extended["mempage-size"] in page_sizes.keys():
1483 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1484 else:
1485 # Normally, validations in NBI should not allow to this condition.
1486 self.logger.debug(
1487 "Invalid mempage-size %s. Will be ignored",
1488 extended.get("mempage-size"),
1489 )
1490
1491 for policy, hw_policy in policies.items():
1492 if extended.get(policy):
1493 extra_specs[hw_policy] = extended[policy].lower()
1494
1495 @staticmethod
1496 def _get_flavor_details(flavor_data: dict) -> Tuple:
1497 """Returns the details of flavor
1498 Args:
1499 flavor_data (dict): Dictionary that includes required flavor details
1500
1501 Returns:
1502 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1503
1504 """
1505 return (
1506 flavor_data.get("ram", 64),
1507 flavor_data.get("vcpus", 1),
1508 {},
1509 flavor_data.get("extended"),
1510 )
1511
1512 def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
1513 """Adds a tenant flavor to openstack VIM.
1514 if change_name_if_used is True, it will change name in case of conflict,
1515 because it is not supported name repetition.
1516
1517 Args:
1518 flavor_data (dict): Flavor details to be processed
1519 change_name_if_used (bool): Change name in case of conflict
1520
1521 Returns:
1522 flavor_id (str): flavor identifier
1523
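        Example (illustrative):
            flavor_id = vim.new_flavor(
                {"name": "small", "ram": 2048, "vcpus": 2, "disk": 10})
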
1524 """
1525 self.logger.debug("Adding flavor '%s'", str(flavor_data))
1526 retry = 0
1527 max_retries = 3
1528 name_suffix = 0
1529
1530 try:
1531 name = flavor_data["name"]
1532 while retry < max_retries:
1533 retry += 1
1534 try:
1535 self._reload_connection()
1536
1537 if change_name_if_used:
1538 name = self._change_flavor_name(name, name_suffix, flavor_data)
1539
1540 ram, vcpus, extra_specs, extended = self._get_flavor_details(
1541 flavor_data
1542 )
1543 if extended:
1544 self._process_extended_config_of_flavor(extended, extra_specs)
1545
1546 # Create flavor
1547
1548 new_flavor = self.nova.flavors.create(
1549 name=name,
1550 ram=ram,
1551 vcpus=vcpus,
1552 disk=flavor_data.get("disk", 0),
1553 ephemeral=flavor_data.get("ephemeral", 0),
1554 swap=flavor_data.get("swap", 0),
1555 is_public=flavor_data.get("is_public", True),
1556 )
1557
1558 # Add metadata
1559 if extra_specs:
1560 new_flavor.set_keys(extra_specs)
1561
1562 return new_flavor.id
1563
1564 except nvExceptions.Conflict as e:
1565 if change_name_if_used and retry < max_retries:
1566 continue
1567
1568 self._format_exception(e)
1569
1570 except (
1571 ksExceptions.ClientException,
1572 nvExceptions.ClientException,
1573 ConnectionError,
1574 KeyError,
1575 ) as e:
1576 self._format_exception(e)
1577
1578 def delete_flavor(self, flavor_id):
1579 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1580 try:
1581 self._reload_connection()
1582 self.nova.flavors.delete(flavor_id)
1583
1584 return flavor_id
1585 # except nvExceptions.BadRequest as e:
1586 except (
1587 nvExceptions.NotFound,
1588 ksExceptions.ClientException,
1589 nvExceptions.ClientException,
1590 ConnectionError,
1591 ) as e:
1592 self._format_exception(e)
1593
1594 def new_image(self, image_dict):
1595 """
1596 Adds a tenant image to VIM. imge_dict is a dictionary with:
1597 name: name
1598 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1599 location: path or URI
1600 public: "yes" or "no"
1601 metadata: metadata of the image
1602 Returns the image_id
1603 """
1604 retry = 0
1605 max_retries = 3
1606
1607 while retry < max_retries:
1608 retry += 1
1609 try:
1610 self._reload_connection()
1611
1612 # determine format http://docs.openstack.org/developer/glance/formats.html
1613 if "disk_format" in image_dict:
1614 disk_format = image_dict["disk_format"]
1615 else: # autodiscover based on extension
1616 if image_dict["location"].endswith(".qcow2"):
1617 disk_format = "qcow2"
1618 elif image_dict["location"].endswith(".vhd"):
1619 disk_format = "vhd"
1620 elif image_dict["location"].endswith(".vmdk"):
1621 disk_format = "vmdk"
1622 elif image_dict["location"].endswith(".vdi"):
1623 disk_format = "vdi"
1624 elif image_dict["location"].endswith(".iso"):
1625 disk_format = "iso"
1626 elif image_dict["location"].endswith(".aki"):
1627 disk_format = "aki"
1628 elif image_dict["location"].endswith(".ari"):
1629 disk_format = "ari"
1630 elif image_dict["location"].endswith(".ami"):
1631 disk_format = "ami"
1632 else:
1633 disk_format = "raw"
1634
1635 self.logger.debug(
1636 "new_image: '%s' loading from '%s'",
1637 image_dict["name"],
1638 image_dict["location"],
1639 )
1640 if self.vim_type == "VIO":
1641 container_format = "bare"
1642 if "container_format" in image_dict:
1643 container_format = image_dict["container_format"]
1644
1645 new_image = self.glance.images.create(
1646 name=image_dict["name"],
1647 container_format=container_format,
1648 disk_format=disk_format,
1649 )
1650 else:
1651 new_image = self.glance.images.create(name=image_dict["name"])
1652
1653 if image_dict["location"].startswith("http"):
1654 # TODO there is not a method to direct download. It must be downloaded locally with requests
1655 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1656 else: # local path
1657 with open(image_dict["location"]) as fimage:
1658 self.glance.images.upload(new_image.id, fimage)
1659 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1660 # image_dict.get("public","yes")=="yes",
1661 # container_format="bare", data=fimage, disk_format=disk_format)
1662
1663 metadata_to_load = image_dict.get("metadata")
1664
1665 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1666 # for openstack
1667 if self.vim_type == "VIO":
1668 metadata_to_load["upload_location"] = image_dict["location"]
1669 else:
1670 metadata_to_load["location"] = image_dict["location"]
1671
1672 self.glance.images.update(new_image.id, **metadata_to_load)
1673
1674 return new_image.id
1675 except (
1676 nvExceptions.Conflict,
1677 ksExceptions.ClientException,
1678 nvExceptions.ClientException,
1679 ) as e:
1680 self._format_exception(e)
1681 except (
1682 HTTPException,
1683 gl1Exceptions.HTTPException,
1684 gl1Exceptions.CommunicationError,
1685 ConnectionError,
1686 ) as e:
1687 if retry == max_retries:
1688 continue
1689
1690 self._format_exception(e)
1691 except IOError as e: # can not open the file
1692 raise vimconn.VimConnConnectionException(
1693 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1694 http_code=vimconn.HTTP_Bad_Request,
1695 )
1696
1697 def delete_image(self, image_id):
1698 """Deletes a tenant image from openstack VIM. Returns the old id"""
1699 try:
1700 self._reload_connection()
1701 self.glance.images.delete(image_id)
1702
1703 return image_id
1704 except (
1705 nvExceptions.NotFound,
1706 ksExceptions.ClientException,
1707 nvExceptions.ClientException,
1708 gl1Exceptions.CommunicationError,
1709 gl1Exceptions.HTTPNotFound,
1710 ConnectionError,
1711 ) as e: # TODO remove
1712 self._format_exception(e)
1713
1714 def get_image_id_from_path(self, path):
1715 """Get the image id from image path in the VIM database. Returns the image_id"""
1716 try:
1717 self._reload_connection()
1718 images = self.glance.images.list()
1719
1720 for image in images:
1721 if image.metadata.get("location") == path:
1722 return image.id
1723
1724 raise vimconn.VimConnNotFoundException(
1725 "image with location '{}' not found".format(path)
1726 )
1727 except (
1728 ksExceptions.ClientException,
1729 nvExceptions.ClientException,
1730 gl1Exceptions.CommunicationError,
1731 ConnectionError,
1732 ) as e:
1733 self._format_exception(e)
1734
1735 def get_image_list(self, filter_dict={}):
1736 """Obtain tenant images from VIM
1737 Filter_dict can be:
1738 id: image id
1739 name: image name
1740 checksum: image checksum
1741 Returns the image list of dictionaries:
1742 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1743 List can be empty
1744 """
1745 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1746
1747 try:
1748 self._reload_connection()
1749 # filter_dict_os = filter_dict.copy()
1750 # First we filter by the available filter fields: name, id. The others are removed.
1751 image_list = self.glance.images.list()
1752 filtered_list = []
1753
1754 for image in image_list:
1755 try:
1756 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1757 continue
1758
1759 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1760 continue
1761
1762 if (
1763 filter_dict.get("checksum")
1764 and image["checksum"] != filter_dict["checksum"]
1765 ):
1766 continue
1767
1768 filtered_list.append(image.copy())
1769 except gl1Exceptions.HTTPNotFound:
1770 pass
1771
1772 return filtered_list
1773 except (
1774 ksExceptions.ClientException,
1775 nvExceptions.ClientException,
1776 gl1Exceptions.CommunicationError,
1777 ConnectionError,
1778 ) as e:
1779 self._format_exception(e)
1780
1781 def __wait_for_vm(self, vm_id, status):
1782 """wait until vm is in the desired status and return True.
1783 If the VM gets in ERROR status, return false.
1784 If the timeout is reached generate an exception"""
1785 elapsed_time = 0
1786 while elapsed_time < server_timeout:
1787 vm_status = self.nova.servers.get(vm_id).status
1788
1789 if vm_status == status:
1790 return True
1791
1792 if vm_status == "ERROR":
1793 return False
1794
1795 time.sleep(5)
1796 elapsed_time += 5
1797
        # If the timeout was exceeded, raise an exception
1799 if elapsed_time >= server_timeout:
1800 raise vimconn.VimConnException(
1801 "Timeout waiting for instance " + vm_id + " to get " + status,
1802 http_code=vimconn.HTTP_Request_Timeout,
1803 )
1804
1805 def _get_openstack_availablity_zones(self):
1806 """
1807 Get from openstack availability zones available
1808 :return:
1809 """
1810 try:
1811 openstack_availability_zone = self.nova.availability_zones.list()
1812 openstack_availability_zone = [
1813 str(zone.zoneName)
1814 for zone in openstack_availability_zone
1815 if zone.zoneName != "internal"
1816 ]
1817
1818 return openstack_availability_zone
1819 except Exception:
1820 return None
1821
1822 def _set_availablity_zones(self):
1823 """
1824 Set vim availablity zone
1825 :return:
1826 """
1827 if "availability_zone" in self.config:
1828 vim_availability_zones = self.config.get("availability_zone")
1829
1830 if isinstance(vim_availability_zones, str):
1831 self.availability_zone = [vim_availability_zones]
1832 elif isinstance(vim_availability_zones, list):
1833 self.availability_zone = vim_availability_zones
1834 else:
1835 self.availability_zone = self._get_openstack_availablity_zones()
1836
1837 def _get_vm_availability_zone(
1838 self, availability_zone_index, availability_zone_list
1839 ):
1840 """
        Return the availability zone to be used by the created VM.
1842 :return: The VIM availability zone to be used or None
1843 """
1844 if availability_zone_index is None:
1845 if not self.config.get("availability_zone"):
1846 return None
1847 elif isinstance(self.config.get("availability_zone"), str):
1848 return self.config["availability_zone"]
1849 else:
1850 # TODO consider using a different parameter at config for default AV and AV list match
1851 return self.config["availability_zone"][0]
1852
1853 vim_availability_zones = self.availability_zone
        # Check if the VIM offers enough availability zones for those described in the VNFD
1855 if vim_availability_zones and len(availability_zone_list) <= len(
1856 vim_availability_zones
1857 ):
            # Check if all NFV AZ names match VIM AZ names; otherwise fall back to matching by index
1859 match_by_index = False
1860 for av in availability_zone_list:
1861 if av not in vim_availability_zones:
1862 match_by_index = True
1863 break
1864
1865 if match_by_index:
1866 return vim_availability_zones[availability_zone_index]
1867 else:
1868 return availability_zone_list[availability_zone_index]
1869 else:
1870 raise vimconn.VimConnConflictException(
1871 "No enough availability zones at VIM for this deployment"
1872 )
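
    # Illustrative sketch (not executed), assuming a connector configured with
    # two VIM availability zones:
    #   self.availability_zone = ["az-compute-1", "az-compute-2"]
    # then, hypothetically:
    #   self._get_vm_availability_zone(0, ["az-compute-1", "az-compute-2"])
    #   # -> "az-compute-1"  (every VNFD zone name exists at the VIM: match by name)
    #   self._get_vm_availability_zone(1, ["zone-a", "zone-b"])
    #   # -> "az-compute-2"  ("zone-b" is unknown at the VIM: fall back to index)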
1873
1874 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1875 """Fill up the security_groups in the port_dict.
1876
1877 Args:
1878 net (dict): Network details
1879 port_dict (dict): Port details
1880
1881 """
1882 if (
1883 self.config.get("security_groups")
1884 and net.get("port_security") is not False
1885 and not self.config.get("no_port_security_extension")
1886 ):
1887 if not self.security_groups_id:
1888 self._get_ids_from_name()
1889
1890 port_dict["security_groups"] = self.security_groups_id
1891
1892 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1893 """Fill up the network binding depending on network type in the port_dict.
1894
1895 Args:
1896 net (dict): Network details
1897 port_dict (dict): Port details
1898
1899 """
1900 if not net.get("type"):
1901 raise vimconn.VimConnException("Type is missing in the network details.")
1902
1903 if net["type"] == "virtual":
1904 pass
1905
1906 # For VF
1907 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1908 port_dict["binding:vnic_type"] = "direct"
1909
1910 # VIO specific Changes
1911 if self.vim_type == "VIO":
1912 # Need to create port with port_security_enabled = False and no-security-groups
1913 port_dict["port_security_enabled"] = False
1914 port_dict["provider_security_groups"] = []
1915 port_dict["security_groups"] = []
1916
1917 else:
1918 # For PT PCI-PASSTHROUGH
1919 port_dict["binding:vnic_type"] = "direct-physical"
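
    # Illustrative sketch (not executed): for an SR-IOV interface on a VIO VIM,
    # e.g. net = {"net_id": "<net-uuid>", "type": "VF"}, the helper above would
    # hypothetically leave port_dict as:
    #   {
    #       "network_id": "<net-uuid>",
    #       "name": "...",
    #       "admin_state_up": True,
    #       "binding:vnic_type": "direct",
    #       "port_security_enabled": False,
    #       "provider_security_groups": [],
    #       "security_groups": [],
    #   }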
1920
1921 @staticmethod
1922 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1923 """Set the "ip" parameter in net dictionary.
1924
1925 Args:
1926 new_port (dict): New created port
1927 net (dict): Network details
1928
1929 """
1930 fixed_ips = new_port["port"].get("fixed_ips")
1931
1932 if fixed_ips:
1933 net["ip"] = fixed_ips[0].get("ip_address")
1934 else:
1935 net["ip"] = None
1936
1937 @staticmethod
1938 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1939 """Fill up the mac_address and fixed_ips in port_dict.
1940
1941 Args:
1942 net (dict): Network details
1943 port_dict (dict): Port details
1944
1945 """
1946 if net.get("mac_address"):
1947 port_dict["mac_address"] = net["mac_address"]
1948
1949 ip_dual_list = []
1950 if ip_list := net.get("ip_address"):
1951 if not isinstance(ip_list, list):
1952 ip_list = [ip_list]
1953 for ip in ip_list:
1954 ip_dict = {"ip_address": ip}
1955 ip_dual_list.append(ip_dict)
1956 port_dict["fixed_ips"] = ip_dual_list
1957 # TODO add "subnet_id": <subnet_id>
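
    # Illustrative sketch (not executed): a dual-stack request such as
    #   net = {"mac_address": "fa:16:3e:00:00:01",
    #          "ip_address": ["10.0.0.5", "2001:db8::5"]}
    # would hypothetically fill port_dict with:
    #   "mac_address": "fa:16:3e:00:00:01"
    #   "fixed_ips": [{"ip_address": "10.0.0.5"}, {"ip_address": "2001:db8::5"}]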
1958
1959 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1960 """Create new port using neutron.
1961
1962 Args:
1963 port_dict (dict): Port details
1964 created_items (dict): All created items
1965 net (dict): Network details
1966
1967 Returns:
1968 new_port (dict): New created port
1969
1970 """
1971 new_port = self.neutron.create_port({"port": port_dict})
1972 created_items["port:" + str(new_port["port"]["id"])] = True
1973 net["mac_address"] = new_port["port"]["mac_address"]
1974 net["vim_id"] = new_port["port"]["id"]
1975
1976 return new_port
1977
1978 def _create_port(
1979 self, net: dict, name: str, created_items: dict
1980 ) -> Tuple[dict, dict]:
1981 """Create port using net details.
1982
1983 Args:
1984 net (dict): Network details
            name (str): Name to be used as the port name if net dict does not include one
1986 created_items (dict): All created items
1987
1988 Returns:
1989 new_port, port New created port, port dictionary
1990
1991 """
1992
1993 port_dict = {
1994 "network_id": net["net_id"],
1995 "name": net.get("name"),
1996 "admin_state_up": True,
1997 }
1998
1999 if not port_dict["name"]:
2000 port_dict["name"] = name
2001
2002 self._prepare_port_dict_security_groups(net, port_dict)
2003
2004 self._prepare_port_dict_binding(net, port_dict)
2005
2006 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
2007
2008 new_port = self._create_new_port(port_dict, created_items, net)
2009
2010 vimconnector._set_fixed_ip(new_port, net)
2011
2012 port = {"port-id": new_port["port"]["id"]}
2013
        # Compare the microversion as (major, minor) integers; plain float
        # comparison would mis-order versions such as "2.9" vs "2.32"
        api_major, api_minor = (
            int(x) for x in self.nova.api_version.get_string().split(".")
        )
        if (api_major, api_minor) >= (2, 32):
2015 port["tag"] = new_port["port"]["name"]
2016
2017 return new_port, port
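
    # Illustrative sketch (not executed), with hypothetical identifiers: when the
    # Nova API microversion is >= 2.32 the returned port entry also carries a tag:
    #   new_port, port = self._create_port(
    #       {"net_id": "<net-uuid>", "type": "virtual", "use": "data"}, "vm1", {}
    #   )
    #   # port -> {"port-id": "<port-uuid>", "tag": "<port-name>"}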
2018
2019 def _prepare_network_for_vminstance(
2020 self,
2021 name: str,
2022 net_list: list,
2023 created_items: dict,
2024 net_list_vim: list,
2025 external_network: list,
2026 no_secured_ports: list,
2027 ) -> None:
2028 """Create port and fill up net dictionary for new VM instance creation.
2029
2030 Args:
2031 name (str): Name of network
2032 net_list (list): List of networks
            created_items (dict): All created items belonging to a VM
2034 net_list_vim (list): List of ports
2035 external_network (list): List of external-networks
2036 no_secured_ports (list): Port security disabled ports
2037 """
2038
2039 self._reload_connection()
2040
2041 for net in net_list:
2042 # Skip non-connected iface
2043 if not net.get("net_id"):
2044 continue
2045
2046 new_port, port = self._create_port(net, name, created_items)
2047
2048 net_list_vim.append(port)
2049
2050 if net.get("floating_ip", False):
2051 net["exit_on_floating_ip_error"] = True
2052 external_network.append(net)
2053
2054 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2055 net["exit_on_floating_ip_error"] = False
2056 external_network.append(net)
2057 net["floating_ip"] = self.config.get("use_floating_ip")
2058
2059 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2060 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2061 if net.get("port_security") is False and not self.config.get(
2062 "no_port_security_extension"
2063 ):
2064 no_secured_ports.append(
2065 (
2066 new_port["port"]["id"],
2067 net.get("port_security_disable_strategy"),
2068 )
2069 )
2070
2071 def _prepare_persistent_root_volumes(
2072 self,
2073 name: str,
2074 vm_av_zone: list,
2075 disk: dict,
2076 base_disk_index: int,
2077 block_device_mapping: dict,
2078 existing_vim_volumes: list,
2079 created_items: dict,
2080 ) -> Optional[str]:
2081 """Prepare persistent root volumes for new VM instance.
2082
2083 Args:
2084 name (str): Name of VM instance
2085 vm_av_zone (list): List of availability zones
2086 disk (dict): Disk details
2087 base_disk_index (int): Disk index
2088 block_device_mapping (dict): Block device details
2089 existing_vim_volumes (list): Existing disk details
            created_items (dict): All created items belonging to the VM
2091
2092 Returns:
            boot_volume_id (str): ID of the new boot volume, or None if an existing volume is reused
2094
2095 """
        # Disk may include only vim_volume_id or only vim_id
        # Use an existing persistent root volume, looked up via vim_volume_id or vim_id
        key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2099
        if disk.get(key_id):
            block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
            existing_vim_volumes.append({"id": disk[key_id]})
            # An existing volume is reused, so no new boot volume is created
            return None

        # Create persistent root volume
        volume = self.cinder.volumes.create(
            size=disk["size"],
            name=name + "vd" + chr(base_disk_index),
            imageRef=disk["image_id"],
            # Make sure volume is in the same AZ as the VM to be attached to
            availability_zone=vm_av_zone,
        )
        boot_volume_id = volume.id
        self.update_block_device_mapping(
            volume=volume,
            block_device_mapping=block_device_mapping,
            base_disk_index=base_disk_index,
            disk=disk,
            created_items=created_items,
        )

        return boot_volume_id
2123
2124 @staticmethod
2125 def update_block_device_mapping(
2126 volume: object,
2127 block_device_mapping: dict,
2128 base_disk_index: int,
2129 disk: dict,
2130 created_items: dict,
2131 ) -> None:
2132 """Add volume information to block device mapping dict.
2133 Args:
2134 volume (object): Created volume object
2135 block_device_mapping (dict): Block device details
2136 base_disk_index (int): Disk index
2137 disk (dict): Disk details
            created_items (dict): All created items belonging to the VM
2139 """
2140 if not volume:
2141 raise vimconn.VimConnException("Volume is empty.")
2142
2143 if not hasattr(volume, "id"):
2144 raise vimconn.VimConnException(
2145 "Created volume is not valid, does not have id attribute."
2146 )
2147
2148 volume_txt = "volume:" + str(volume.id)
2149 if disk.get("keep"):
2150 volume_txt += ":keep"
2151 created_items[volume_txt] = True
2152 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
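
    # Illustrative sketch (not executed): for a volume created for index ord("b")
    # from a disk marked {"keep": True}, this helper would hypothetically record:
    #   created_items -> {"volume:<volume-uuid>:keep": True}
    #   block_device_mapping -> {"vdb": "<volume-uuid>"}
    # The ":keep" suffix is honored later by remove_keep_tag_from_persistent_volumes()
    # and _extract_items_wth_keep_flag_from_created_items().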
2153
    def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:
        """Create a multiattach (shared) volume and return its (name, id)."""
2155 try:
2156 volume = self.cinder.volumes.create(
2157 size=shared_volume_data["size"],
2158 name=shared_volume_data["name"],
2159 volume_type="multiattach",
2160 )
2161 return (volume.name, volume.id)
2162 except (ConnectionError, KeyError) as e:
2163 self._format_exception(e)
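
    # Illustrative sketch (not executed): creating a shared volume assumes a Cinder
    # volume type named "multiattach" exists at the VIM, e.g. hypothetically:
    #   name, vim_id = self.new_shared_volumes({"name": "shared-data", "size": 20})
    # Disks that set disk["multiattach"] and disk["name"] can then reuse it via
    # _prepare_shared_volumes().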
2164
2165 def _prepare_shared_volumes(
2166 self,
2167 name: str,
2168 disk: dict,
2169 base_disk_index: int,
2170 block_device_mapping: dict,
2171 existing_vim_volumes: list,
2172 created_items: dict,
    ):
        """Reuse an existing multiattach (shared) volume, found by name, in the block device mapping."""
2174 volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
2175 if volumes.get(disk["name"]):
2176 sv_id = volumes[disk["name"]]
2177 volume = self.cinder.volumes.get(sv_id)
2178 self.update_block_device_mapping(
2179 volume=volume,
2180 block_device_mapping=block_device_mapping,
2181 base_disk_index=base_disk_index,
2182 disk=disk,
2183 created_items=created_items,
2184 )
2185
2186 def _prepare_non_root_persistent_volumes(
2187 self,
2188 name: str,
2189 disk: dict,
2190 vm_av_zone: list,
2191 block_device_mapping: dict,
2192 base_disk_index: int,
2193 existing_vim_volumes: list,
2194 created_items: dict,
2195 ) -> None:
2196 """Prepare persistent volumes for new VM instance.
2197
2198 Args:
2199 name (str): Name of VM instance
2200 disk (dict): Disk details
2201 vm_av_zone (list): List of availability zones
2202 block_device_mapping (dict): Block device details
2203 base_disk_index (int): Disk index
2204 existing_vim_volumes (list): Existing disk details
            created_items (dict): All created items belonging to the VM
2206 """
2207 # Non-root persistent volumes
        # Disk may include only vim_volume_id or only vim_id
2209 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2210 if disk.get(key_id):
2211 # Use existing persistent volume
2212 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2213 existing_vim_volumes.append({"id": disk[key_id]})
2214 else:
2215 volume_name = f"{name}vd{chr(base_disk_index)}"
2216 volume = self.cinder.volumes.create(
2217 size=disk["size"],
2218 name=volume_name,
2219 # Make sure volume is in the same AZ as the VM to be attached to
2220 availability_zone=vm_av_zone,
2221 )
2222 self.update_block_device_mapping(
2223 volume=volume,
2224 block_device_mapping=block_device_mapping,
2225 base_disk_index=base_disk_index,
2226 disk=disk,
2227 created_items=created_items,
2228 )
2229
2230 def _wait_for_created_volumes_availability(
2231 self, elapsed_time: int, created_items: dict
2232 ) -> Optional[int]:
2233 """Wait till created volumes become available.
2234
2235 Args:
2236 elapsed_time (int): Passed time while waiting
            created_items (dict): All created items belonging to the VM
2238
2239 Returns:
2240 elapsed_time (int): Time spent while waiting
2241
2242 """
2243 while elapsed_time < volume_timeout:
2244 for created_item in created_items:
2245 v, volume_id = (
2246 created_item.split(":")[0],
2247 created_item.split(":")[1],
2248 )
2249 if v == "volume":
2250 volume = self.cinder.volumes.get(volume_id)
2251 if (
2252 volume.volume_type == "multiattach"
2253 and volume.status == "in-use"
2254 ):
2255 return elapsed_time
2256 elif volume.status != "available":
2257 break
2258 else:
2259 # All ready: break from while
2260 break
2261
2262 time.sleep(5)
2263 elapsed_time += 5
2264
2265 return elapsed_time
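
    # Note (added for clarity): a "multiattach" volume that is already attached to
    # another VM reports status "in-use" instead of "available", which is why the
    # waiters above and below treat that combination as ready.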
2266
2267 def _wait_for_existing_volumes_availability(
2268 self, elapsed_time: int, existing_vim_volumes: list
2269 ) -> Optional[int]:
2270 """Wait till existing volumes become available.
2271
2272 Args:
2273 elapsed_time (int): Passed time while waiting
2274 existing_vim_volumes (list): Existing volume details
2275
2276 Returns:
2277 elapsed_time (int): Time spent while waiting
2278
2279 """
2280
2281 while elapsed_time < volume_timeout:
2282 for volume in existing_vim_volumes:
2283 v = self.cinder.volumes.get(volume["id"])
2284 if v.volume_type == "multiattach" and v.status == "in-use":
2285 return elapsed_time
2286 elif v.status != "available":
2287 break
2288 else: # all ready: break from while
2289 break
2290
2291 time.sleep(5)
2292 elapsed_time += 5
2293
2294 return elapsed_time
2295
2296 def _prepare_disk_for_vminstance(
2297 self,
2298 name: str,
2299 existing_vim_volumes: list,
2300 created_items: dict,
2301 vm_av_zone: list,
2302 block_device_mapping: dict,
2303 disk_list: list = None,
2304 ) -> None:
2305 """Prepare all volumes for new VM instance.
2306
2307 Args:
2308 name (str): Name of Instance
2309 existing_vim_volumes (list): List of existing volumes
            created_items (dict): All created items belonging to the VM
2311 vm_av_zone (list): VM availability zone
2312 block_device_mapping (dict): Block devices to be attached to VM
2313 disk_list (list): List of disks
2314
2315 """
2316 # Create additional volumes in case these are present in disk_list
2317 base_disk_index = ord("b")
2318 boot_volume_id = None
2319 elapsed_time = 0
2320 for disk in disk_list:
2321 if "image_id" in disk:
2322 # Root persistent volume
2323 base_disk_index = ord("a")
2324 boot_volume_id = self._prepare_persistent_root_volumes(
2325 name=name,
2326 vm_av_zone=vm_av_zone,
2327 disk=disk,
2328 base_disk_index=base_disk_index,
2329 block_device_mapping=block_device_mapping,
2330 existing_vim_volumes=existing_vim_volumes,
2331 created_items=created_items,
2332 )
2333 elif disk.get("multiattach"):
2334 self._prepare_shared_volumes(
2335 name=name,
2336 disk=disk,
2337 base_disk_index=base_disk_index,
2338 block_device_mapping=block_device_mapping,
2339 existing_vim_volumes=existing_vim_volumes,
2340 created_items=created_items,
2341 )
2342 else:
2343 # Non-root persistent volume
2344 self._prepare_non_root_persistent_volumes(
2345 name=name,
2346 disk=disk,
2347 vm_av_zone=vm_av_zone,
2348 block_device_mapping=block_device_mapping,
2349 base_disk_index=base_disk_index,
2350 existing_vim_volumes=existing_vim_volumes,
2351 created_items=created_items,
2352 )
2353 base_disk_index += 1
2354
        # Wait until created volumes reach "available" status
2356 elapsed_time = self._wait_for_created_volumes_availability(
2357 elapsed_time, created_items
2358 )
        # Wait until existing VIM volumes reach "available" status
2360 elapsed_time = self._wait_for_existing_volumes_availability(
2361 elapsed_time, existing_vim_volumes
2362 )
        # If the timeout was exceeded, raise an exception
2364 if elapsed_time >= volume_timeout:
2365 raise vimconn.VimConnException(
2366 "Timeout creating volumes for instance " + name,
2367 http_code=vimconn.HTTP_Request_Timeout,
2368 )
2369 if boot_volume_id:
2370 self.cinder.volumes.set_bootable(boot_volume_id, True)
2371
2372 def _find_the_external_network_for_floating_ip(self):
2373 """Get the external network ip in order to create floating IP.
2374
2375 Returns:
2376 pool_id (str): External network pool ID
2377
2378 """
2379
2380 # Find the external network
2381 external_nets = list()
2382
2383 for net in self.neutron.list_networks()["networks"]:
2384 if net["router:external"]:
2385 external_nets.append(net)
2386
2387 if len(external_nets) == 0:
2388 raise vimconn.VimConnException(
2389 "Cannot create floating_ip automatically since "
2390 "no external network is present",
2391 http_code=vimconn.HTTP_Conflict,
2392 )
2393
2394 if len(external_nets) > 1:
2395 raise vimconn.VimConnException(
2396 "Cannot create floating_ip automatically since "
2397 "multiple external networks are present",
2398 http_code=vimconn.HTTP_Conflict,
2399 )
2400
2401 # Pool ID
2402 return external_nets[0].get("id")
2403
2404 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2405 """Trigger neutron to create a new floating IP using external network ID.
2406
2407 Args:
2408 param (dict): Input parameters to create a floating IP
            created_items (dict): All created items belonging to the new VM instance
2410
2411 Raises:
2412
2413 VimConnException
2414 """
2415 try:
2416 self.logger.debug("Creating floating IP")
2417 new_floating_ip = self.neutron.create_floatingip(param)
2418 free_floating_ip = new_floating_ip["floatingip"]["id"]
2419 created_items["floating_ip:" + str(free_floating_ip)] = True
2420
2421 except Exception as e:
2422 raise vimconn.VimConnException(
2423 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2424 http_code=vimconn.HTTP_Conflict,
2425 )
2426
2427 def _create_floating_ip(
2428 self, floating_network: dict, server: object, created_items: dict
2429 ) -> None:
2430 """Get the available Pool ID and create a new floating IP.
2431
2432 Args:
2433 floating_network (dict): Dict including external network ID
2434 server (object): Server object
            created_items (dict): All created items belonging to the new VM instance
2436
2437 """
2438
2439 # Pool_id is available
2440 if (
2441 isinstance(floating_network["floating_ip"], str)
2442 and floating_network["floating_ip"].lower() != "true"
2443 ):
2444 pool_id = floating_network["floating_ip"]
2445
2446 # Find the Pool_id
2447 else:
2448 pool_id = self._find_the_external_network_for_floating_ip()
2449
2450 param = {
2451 "floatingip": {
2452 "floating_network_id": pool_id,
2453 "tenant_id": server.tenant_id,
2454 }
2455 }
2456
2457 self._neutron_create_float_ip(param, created_items)
2458
2459 def _find_floating_ip(
2460 self,
2461 server: object,
2462 floating_ips: list,
2463 floating_network: dict,
2464 ) -> Optional[str]:
2465 """Find the available free floating IPs if there are.
2466
2467 Args:
2468 server (object): Server object
2469 floating_ips (list): List of floating IPs
2470 floating_network (dict): Details of floating network such as ID
2471
2472 Returns:
            free_floating_ip (str): ID of a free floating IP
2474
2475 """
2476 for fip in floating_ips:
2477 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2478 continue
2479
2480 if isinstance(floating_network["floating_ip"], str):
2481 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2482 continue
2483
2484 return fip["id"]
2485
2486 def _assign_floating_ip(
2487 self, free_floating_ip: str, floating_network: dict
2488 ) -> Dict:
2489 """Assign the free floating ip address to port.
2490
2491 Args:
2492 free_floating_ip (str): Floating IP to be assigned
2493 floating_network (dict): ID of floating network
2494
2495 Returns:
            fip (dict): Floating IP details
2497
2498 """
2499 # The vim_id key contains the neutron.port_id
2500 self.neutron.update_floatingip(
2501 free_floating_ip,
2502 {"floatingip": {"port_id": floating_network["vim_id"]}},
2503 )
        # Sleep briefly so a racing assignment can surface, then verify it was not re-assigned to another VM
2505 time.sleep(5)
2506
2507 return self.neutron.show_floatingip(free_floating_ip)
2508
2509 def _get_free_floating_ip(
2510 self, server: object, floating_network: dict
2511 ) -> Optional[str]:
2512 """Get the free floating IP address.
2513
2514 Args:
2515 server (object): Server Object
2516 floating_network (dict): Floating network details
2517
2518 Returns:
            free_floating_ip (str): ID of a free floating IP
2520
2521 """
2522
        floating_ips = self.neutron.list_floatingips().get("floatingips", [])

        # Randomize to reduce contention when several ROs pick a free IP
        random.shuffle(floating_ips)
2527
2528 return self._find_floating_ip(server, floating_ips, floating_network)
2529
2530 def _prepare_external_network_for_vminstance(
2531 self,
2532 external_network: list,
2533 server: object,
2534 created_items: dict,
2535 vm_start_time: float,
2536 ) -> None:
2537 """Assign floating IP address for VM instance.
2538
2539 Args:
2540 external_network (list): ID of External network
2541 server (object): Server Object
            created_items (dict): All created items belonging to the new VM instance
2543 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2544
2545 Raises:
2546 VimConnException
2547
2548 """
2549 for floating_network in external_network:
2550 try:
2551 assigned = False
2552 floating_ip_retries = 3
                # In HA deployments two ROs may try to assign the same floating IP, so retry
                # several times
2555 while not assigned:
2556 free_floating_ip = self._get_free_floating_ip(
2557 server, floating_network
2558 )
2559
                    if not free_floating_ip:
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )
                        # Re-run the loop so the newly created IP is picked up
                        continue
2564
2565 try:
2566 # For race condition ensure not already assigned
2567 fip = self.neutron.show_floatingip(free_floating_ip)
2568
2569 if fip["floatingip"].get("port_id"):
2570 continue
2571
2572 # Assign floating ip
2573 fip = self._assign_floating_ip(
2574 free_floating_ip, floating_network
2575 )
2576
2577 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2578 self.logger.warning(
2579 "floating_ip {} re-assigned to other port".format(
2580 free_floating_ip
2581 )
2582 )
2583 continue
2584
2585 self.logger.debug(
2586 "Assigned floating_ip {} to VM {}".format(
2587 free_floating_ip, server.id
2588 )
2589 )
2590
2591 assigned = True
2592
2593 except Exception as e:
                        # OpenStack needs some time after VM creation to assign an IP, so retry if it fails
2595 vm_status = self.nova.servers.get(server.id).status
2596
2597 if vm_status not in ("ACTIVE", "ERROR"):
2598 if time.time() - vm_start_time < server_timeout:
2599 time.sleep(5)
2600 continue
2601 elif floating_ip_retries > 0:
2602 floating_ip_retries -= 1
2603 continue
2604
2605 raise vimconn.VimConnException(
2606 "Cannot create floating_ip: {} {}".format(
2607 type(e).__name__, e
2608 ),
2609 http_code=vimconn.HTTP_Conflict,
2610 )
2611
2612 except Exception as e:
2613 if not floating_network["exit_on_floating_ip_error"]:
2614 self.logger.error("Cannot create floating_ip. %s", str(e))
2615 continue
2616
2617 raise
2618
2619 def _update_port_security_for_vminstance(
2620 self,
2621 no_secured_ports: list,
2622 server: object,
2623 ) -> None:
2624 """Updates the port security according to no_secured_ports list.
2625
2626 Args:
2627 no_secured_ports (list): List of ports that security will be disabled
2628 server (object): Server Object
2629
2630 Raises:
2631 VimConnException
2632
2633 """
2634 # Wait until the VM is active and then disable the port-security
2635 if no_secured_ports:
2636 self.__wait_for_vm(server.id, "ACTIVE")
2637
2638 for port in no_secured_ports:
2639 port_update = {
2640 "port": {"port_security_enabled": False, "security_groups": None}
2641 }
2642
2643 if port[1] == "allow-address-pairs":
2644 port_update = {
2645 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2646 }
2647
2648 try:
2649 self.neutron.update_port(port[0], port_update)
2650
2651 except Exception:
2652 raise vimconn.VimConnException(
2653 "It was not possible to disable port security for port {}".format(
2654 port[0]
2655 )
2656 )
2657
2658 def new_vminstance(
2659 self,
2660 name: str,
2661 description: str,
2662 start: bool,
2663 image_id: str,
2664 flavor_id: str,
2665 affinity_group_list: list,
2666 net_list: list,
2667 cloud_config=None,
2668 disk_list=None,
2669 availability_zone_index=None,
2670 availability_zone_list=None,
2671 ) -> tuple:
2672 """Adds a VM instance to VIM.
2673
2674 Args:
2675 name (str): name of VM
2676 description (str): description
2677 start (bool): indicates if VM must start or boot in pause mode. Ignored
2678 image_id (str) image uuid
2679 flavor_id (str) flavor uuid
2680 affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
2681 net_list (list): list of interfaces, each one is a dictionary with:
2682 name: name of network
2683 net_id: network uuid to connect
                vpci: virtual PCI address to assign, ignored because OpenStack lacks it #TODO
2685 model: interface model, ignored #TODO
2686 mac_address: used for SR-IOV ifaces #TODO for other types
2687 use: 'data', 'bridge', 'mgmt'
2688 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2689 vim_id: filled/added by this function
2690 floating_ip: True/False (or it can be None)
2691 port_security: True/False
2692 cloud_config (dict): (optional) dictionary with:
2693 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2694 users: (optional) list of users to be inserted, each item is a dict with:
2695 name: (mandatory) user name,
2696 key-pairs: (optional) list of strings with the public key to be inserted to the user
2697 user-data: (optional) string is a text script to be passed directly to cloud-init
2698 config-files: (optional). List of files to be transferred. Each item is a dict with:
2699 dest: (mandatory) string with the destination absolute path
2700 encoding: (optional, by default text). Can be one of:
2701 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2702 content : (mandatory) string with the content of the file
2703 permissions: (optional) string with file permissions, typically octal notation '0644'
2704 owner: (optional) file owner, string with the format 'owner:group'
2705 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2706 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2707 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2708 size: (mandatory) string with the size of the disk in GB
2709 vim_id: (optional) should use this existing volume id
            availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
2711 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2712 availability_zone_index is None
2713 #TODO ip, security groups
2714
2715 Returns:
2716 A tuple with the instance identifier and created_items or raises an exception on error
2717 created_items can be None or a dictionary where this method can include key-values that will be passed to
2718 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2719 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2720 as not present.
2721
2722 """
2723 self.logger.debug(
2724 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2725 image_id,
2726 flavor_id,
2727 str(net_list),
2728 )
2729
2730 try:
2731 server = None
2732 created_items = {}
2733 net_list_vim = []
2734 # list of external networks to be connected to instance, later on used to create floating_ip
2735 external_network = []
2736 # List of ports with port-security disabled
2737 no_secured_ports = []
2738 block_device_mapping = {}
2739 existing_vim_volumes = []
2740 server_group_id = None
            scheduler_hints = {}
2742
2743 # Check the Openstack Connection
2744 self._reload_connection()
2745
2746 # Prepare network list
2747 self._prepare_network_for_vminstance(
2748 name=name,
2749 net_list=net_list,
2750 created_items=created_items,
2751 net_list_vim=net_list_vim,
2752 external_network=external_network,
2753 no_secured_ports=no_secured_ports,
2754 )
2755
2756 # Cloud config
2757 config_drive, userdata = self._create_user_data(cloud_config)
2758
2759 # Get availability Zone
2760 vm_av_zone = self._get_vm_availability_zone(
2761 availability_zone_index, availability_zone_list
2762 )
2763
2764 if disk_list:
2765 # Prepare disks
2766 self._prepare_disk_for_vminstance(
2767 name=name,
2768 existing_vim_volumes=existing_vim_volumes,
2769 created_items=created_items,
2770 vm_av_zone=vm_av_zone,
2771 block_device_mapping=block_device_mapping,
2772 disk_list=disk_list,
2773 )
2774
2775 if affinity_group_list:
                # Only the first id in the list is used (OpenStack restriction)
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduler_hints["group"] = server_group_id
2779
2780 self.logger.debug(
2781 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2782 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2783 "block_device_mapping={}, server_group={})".format(
2784 name,
2785 image_id,
2786 flavor_id,
2787 net_list_vim,
2788 self.config.get("security_groups"),
2789 vm_av_zone,
2790 self.config.get("keypair"),
2791 userdata,
2792 config_drive,
2793 block_device_mapping,
2794 server_group_id,
2795 )
2796 )
2797 # Create VM
2798 server = self.nova.servers.create(
2799 name=name,
2800 image=image_id,
2801 flavor=flavor_id,
2802 nics=net_list_vim,
2803 security_groups=self.config.get("security_groups"),
2804 # TODO remove security_groups in future versions. Already at neutron port
2805 availability_zone=vm_av_zone,
2806 key_name=self.config.get("keypair"),
2807 userdata=userdata,
2808 config_drive=config_drive,
2809 block_device_mapping=block_device_mapping,
                scheduler_hints=scheduler_hints,
2811 )
2812
2813 vm_start_time = time.time()
2814
2815 self._update_port_security_for_vminstance(no_secured_ports, server)
2816
2817 self._prepare_external_network_for_vminstance(
2818 external_network=external_network,
2819 server=server,
2820 created_items=created_items,
2821 vm_start_time=vm_start_time,
2822 )
2823
2824 return server.id, created_items
2825
2826 except Exception as e:
2827 server_id = None
2828 if server:
2829 server_id = server.id
2830
2831 try:
2832 created_items = self.remove_keep_tag_from_persistent_volumes(
2833 created_items
2834 )
2835
2836 self.delete_vminstance(server_id, created_items)
2837
2838 except Exception as e2:
2839 self.logger.error("new_vminstance rollback fail {}".format(e2))
2840
2841 self._format_exception(e)
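
    # Illustrative sketch (not executed): a minimal new_vminstance call with
    # hypothetical identifiers:
    #   vm_id, created_items = conn.new_vminstance(
    #       name="vnf1-vdu1",
    #       description="test VDU",
    #       start=True,
    #       image_id="<glance-image-uuid>",
    #       flavor_id="<nova-flavor-uuid>",
    #       affinity_group_list=[],
    #       net_list=[{"net_id": "<net-uuid>", "use": "mgmt", "type": "virtual"}],
    #   )
    #   # created_items then maps items such as "port:<uuid>" or "volume:<uuid>" to True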
2842
2843 @staticmethod
2844 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2845 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2846
2847 Args:
            created_items (dict): All created items belonging to the VM
2849
2850 Returns:
2851 updated_created_items (dict): Dict which does not include keep flag for volumes.
2852
2853 """
2854 return {
2855 key.replace(":keep", ""): value for (key, value) in created_items.items()
2856 }
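
    # Illustrative sketch (not executed):
    #   remove_keep_tag_from_persistent_volumes(
    #       {"volume:abc:keep": True, "port:def": True}
    #   )
    #   # -> {"volume:abc": True, "port:def": True}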
2857
2858 def get_vminstance(self, vm_id):
2859 """Returns the VM instance information from VIM"""
2860 # self.logger.debug("Getting VM from VIM")
2861 try:
2862 self._reload_connection()
2863 server = self.nova.servers.find(id=vm_id)
2864 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2865
2866 return server.to_dict()
2867 except (
2868 ksExceptions.ClientException,
2869 nvExceptions.ClientException,
2870 nvExceptions.NotFound,
2871 ConnectionError,
2872 ) as e:
2873 self._format_exception(e)
2874
2875 def get_vminstance_console(self, vm_id, console_type="vnc"):
2876 """
2877 Get a console for the virtual machine
2878 Params:
2879 vm_id: uuid of the VM
2880 console_type, can be:
2881 "novnc" (by default), "xvpvnc" for VNC types,
2882 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2883 Returns dict with the console parameters:
2884 protocol: ssh, ftp, http, https, ...
2885 server: usually ip address
2886 port: the http, ssh, ... port
2887 suffix: extra text, e.g. the http path and query string
2888 """
2889 self.logger.debug("Getting VM CONSOLE from VIM")
2890
2891 try:
2892 self._reload_connection()
2893 server = self.nova.servers.find(id=vm_id)
2894
2895 if console_type is None or console_type == "novnc":
2896 console_dict = server.get_vnc_console("novnc")
2897 elif console_type == "xvpvnc":
2898 console_dict = server.get_vnc_console(console_type)
2899 elif console_type == "rdp-html5":
2900 console_dict = server.get_rdp_console(console_type)
2901 elif console_type == "spice-html5":
2902 console_dict = server.get_spice_console(console_type)
2903 else:
2904 raise vimconn.VimConnException(
2905 "console type '{}' not allowed".format(console_type),
2906 http_code=vimconn.HTTP_Bad_Request,
2907 )
2908
2909 console_dict1 = console_dict.get("console")
2910
2911 if console_dict1:
2912 console_url = console_dict1.get("url")
2913
2914 if console_url:
2915 # parse console_url
2916 protocol_index = console_url.find("//")
2917 suffix_index = (
2918 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2919 )
2920 port_index = (
2921 console_url[protocol_index + 2 : suffix_index].find(":")
2922 + protocol_index
2923 + 2
2924 )
2925
2926 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2927 return (
2928 -vimconn.HTTP_Internal_Server_Error,
2929 "Unexpected response from VIM",
2930 )
2931
                    console_dict = {
                        "protocol": console_url[0:protocol_index],
                        "server": console_url[protocol_index + 2 : port_index],
                        "port": int(console_url[port_index + 1 : suffix_index]),
                        "suffix": console_url[suffix_index + 1 :],
                    }
2939
2940 return console_dict
2941 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2942 except (
2943 nvExceptions.NotFound,
2944 ksExceptions.ClientException,
2945 nvExceptions.ClientException,
2946 nvExceptions.BadRequest,
2947 ConnectionError,
2948 ) as e:
2949 self._format_exception(e)
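
    # Illustrative sketch (not executed): parsing a hypothetical console URL
    #   "http://controller:6080/vnc_auto.html?token=xyz"
    # find("//") locates the authority, the next ":" the port and the next "/" the
    # path, yielding:
    #   {"protocol": "http:", "server": "controller", "port": 6080,
    #    "suffix": "vnc_auto.html?token=xyz"}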
2950
2951 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2952 """Neutron delete ports by id.
2953 Args:
2954 k_id (str): Port id in the VIM
2955 """
2956 try:
2957 port_dict = self.neutron.list_ports()
            existing_ports = [port["id"] for port in port_dict.get("ports", [])]
2959
2960 if k_id in existing_ports:
2961 self.neutron.delete_port(k_id)
2962
2963 except Exception as e:
2964 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2965
2966 def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
2967 """Cinder delete volume by id.
2968 Args:
2969 shared_volume_vim_id (str): ID of shared volume in VIM
2970 """
2971 try:
2972 if self.cinder.volumes.get(shared_volume_vim_id).status != "available":
2973 return True
2974
2975 else:
2976 self.cinder.volumes.delete(shared_volume_vim_id)
2977
2978 except Exception as e:
2979 self.logger.error(
2980 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2981 )
2982
2983 def _delete_volumes_by_id_wth_cinder(
2984 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2985 ) -> bool:
2986 """Cinder delete volume by id.
2987 Args:
2988 k (str): Full item name in created_items
2989 k_id (str): ID of floating ip in VIM
2990 volumes_to_hold (list): Volumes not to delete
2991 created_items (dict): All created items belongs to VM
2992 """
2993 try:
2994 if k_id in volumes_to_hold:
2995 return
2996
2997 if self.cinder.volumes.get(k_id).status != "available":
2998 return True
2999
3000 else:
3001 self.cinder.volumes.delete(k_id)
3002 created_items[k] = None
3003
3004 except Exception as e:
3005 self.logger.error(
3006 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3007 )
3008
3009 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
3010 """Neutron delete floating ip by id.
3011 Args:
3012 k (str): Full item name in created_items
3013 k_id (str): ID of floating ip in VIM
            created_items (dict): All created items belonging to the VM
3015 """
3016 try:
3017 self.neutron.delete_floatingip(k_id)
3018 created_items[k] = None
3019
3020 except Exception as e:
3021 self.logger.error(
3022 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3023 )
3024
3025 @staticmethod
3026 def _get_item_name_id(k: str) -> Tuple[str, str]:
3027 k_item, _, k_id = k.partition(":")
3028 return k_item, k_id
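
    # Illustrative sketch (not executed):
    #   _get_item_name_id("floating_ip:1a2b3c")  # -> ("floating_ip", "1a2b3c")
    #   _get_item_name_id("volume:9f8e:keep")    # -> ("volume", "9f8e:keep")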
3029
3030 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
3031 """Delete VM ports attached to the networks before deleting virtual machine.
3032 Args:
            created_items (dict): All created items belonging to the VM
3034 """
3035
3036 for k, v in created_items.items():
3037 if not v: # skip already deleted
3038 continue
3039
3040 try:
3041 k_item, k_id = self._get_item_name_id(k)
3042 if k_item == "port":
3043 self._delete_ports_by_id_wth_neutron(k_id)
3044
3045 except Exception as e:
3046 self.logger.error(
3047 "Error deleting port: {}: {}".format(type(e).__name__, e)
3048 )
3049
3050 def _delete_created_items(
3051 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
3052 ) -> bool:
3053 """Delete Volumes and floating ip if they exist in created_items."""
3054 for k, v in created_items.items():
3055 if not v: # skip already deleted
3056 continue
3057
3058 try:
3059 k_item, k_id = self._get_item_name_id(k)
3060 if k_item == "volume":
3061 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
3062 k, k_id, volumes_to_hold, created_items
3063 )
3064
3065 if unavailable_vol:
3066 keep_waiting = True
3067
3068 elif k_item == "floating_ip":
3069 self._delete_floating_ip_by_id(k, k_id, created_items)
3070
3071 except Exception as e:
3072 self.logger.error("Error deleting {}: {}".format(k, e))
3073
3074 return keep_waiting
3075
3076 @staticmethod
3077 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3078 """Remove the volumes which has key flag from created_items
3079
3080 Args:
3081 created_items (dict): All created items belongs to VM
3082
3083 Returns:
3084 created_items (dict): Persistent volumes eliminated created_items
3085 """
3086 return {
3087 key: value
3088 for (key, value) in created_items.items()
3089 if len(key.split(":")) == 2
3090 }
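
    # Illustrative sketch (not executed): keys that still carry the ":keep" suffix
    # (three ":"-separated parts) are dropped, so those volumes survive deletion:
    #   _extract_items_wth_keep_flag_from_created_items(
    #       {"volume:abc:keep": True, "volume:def": True, "port:123": True}
    #   )
    #   # -> {"volume:def": True, "port:123": True}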
3091
3092 def delete_vminstance(
3093 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3094 ) -> None:
3095 """Removes a VM instance from VIM. Returns the old identifier.
3096 Args:
3097 vm_id (str): Identifier of VM instance
            created_items (dict): All created items belonging to the VM
3099 volumes_to_hold (list): Volumes_to_hold
3100 """
3101 if created_items is None:
3102 created_items = {}
3103 if volumes_to_hold is None:
3104 volumes_to_hold = []
3105
3106 try:
3107 created_items = self._extract_items_wth_keep_flag_from_created_items(
3108 created_items
3109 )
3110
3111 self._reload_connection()
3112
3113 # Delete VM ports attached to the networks before the virtual machine
3114 if created_items:
3115 self._delete_vm_ports_attached_to_network(created_items)
3116
3117 if vm_id:
3118 self.nova.servers.delete(vm_id)
3119
            # Although detached, volumes should be in "available" status before deleting.
            # We ensure that in this loop
3122 keep_waiting = True
3123 elapsed_time = 0
3124
3125 while keep_waiting and elapsed_time < volume_timeout:
3126 keep_waiting = False
3127
3128 # Delete volumes and floating IP.
3129 keep_waiting = self._delete_created_items(
3130 created_items, volumes_to_hold, keep_waiting
3131 )
3132
3133 if keep_waiting:
3134 time.sleep(1)
3135 elapsed_time += 1
3136
3137 except (
3138 nvExceptions.NotFound,
3139 ksExceptions.ClientException,
3140 nvExceptions.ClientException,
3141 ConnectionError,
3142 ) as e:
3143 self._format_exception(e)
3144
3145 def refresh_vms_status(self, vm_list):
3146 """Get the status of the virtual machines and their interfaces/ports
3147 Params: the list of VM identifiers
3148 Returns a dictionary with:
3149 vm_id: #VIM id of this Virtual Machine
3150 status: #Mandatory. Text with one of:
3151 # DELETED (not found at vim)
3152 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3153 # OTHER (Vim reported other status not understood)
3154 # ERROR (VIM indicates an ERROR status)
3155 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3156 # CREATING (on building process), ERROR
                # ACTIVE:NoMgmtIP (active, but none of its interfaces has an IP address)
3158 #
3159 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3160 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3161 interfaces:
3162 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3163 mac_address: #Text format XX:XX:XX:XX:XX:XX
3164 vim_net_id: #network id where this interface is connected
3165 vim_interface_id: #interface/port VIM id
3166 ip_address: #null, or text with IPv4, IPv6 address
3167 compute_node: #identification of compute node where PF,VF interface is allocated
3168 pci: #PCI address of the NIC that hosts the PF,VF
3169 vlan: #physical VLAN used for VF
3170 """
3171 vm_dict = {}
3172 self.logger.debug(
3173 "refresh_vms status: Getting tenant VM instance information from VIM"
3174 )
3175
3176 for vm_id in vm_list:
3177 vm = {}
3178
3179 try:
3180 vm_vim = self.get_vminstance(vm_id)
3181
3182 if vm_vim["status"] in vmStatus2manoFormat:
3183 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3184 else:
3185 vm["status"] = "OTHER"
3186 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3187
3188 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3189 vm_vim.pop("user_data", None)
3190 vm["vim_info"] = self.serialize(vm_vim)
3191
3192 vm["interfaces"] = []
3193 if vm_vim.get("fault"):
3194 vm["error_msg"] = str(vm_vim["fault"])
3195
3196 # get interfaces
3197 try:
3198 self._reload_connection()
3199 port_dict = self.neutron.list_ports(device_id=vm_id)
3200
3201 for port in port_dict["ports"]:
3202 interface = {}
3203 interface["vim_info"] = self.serialize(port)
3204 interface["mac_address"] = port.get("mac_address")
3205 interface["vim_net_id"] = port["network_id"]
3206 interface["vim_interface_id"] = port["id"]
3207 # check if OS-EXT-SRV-ATTR:host is there,
3208 # in case of non-admin credentials, it will be missing
3209
3210 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3211 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3212
3213 interface["pci"] = None
3214
3215 # check if binding:profile is there,
3216 # in case of non-admin credentials, it will be missing
3217 if port.get("binding:profile"):
3218 if port["binding:profile"].get("pci_slot"):
3219 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3220 # the slot to 0x00
                                # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
3222 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3223 pci = port["binding:profile"]["pci_slot"]
3224 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3225 interface["pci"] = pci
3226
3227 interface["vlan"] = None
3228
3229 if port.get("binding:vif_details"):
3230 interface["vlan"] = port["binding:vif_details"].get("vlan")
3231
3232 # Get vlan from network in case not present in port for those old openstacks and cases where
3233 # it is needed vlan at PT
3234 if not interface["vlan"]:
3235 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3236 network = self.neutron.show_network(port["network_id"])
3237
3238 if (
3239 network["network"].get("provider:network_type")
3240 == "vlan"
3241 ):
3242 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3243 interface["vlan"] = network["network"].get(
3244 "provider:segmentation_id"
3245 )
3246
3247 ips = []
3248 # look for floating ip address
3249 try:
3250 floating_ip_dict = self.neutron.list_floatingips(
3251 port_id=port["id"]
3252 )
3253
3254 if floating_ip_dict.get("floatingips"):
3255 ips.append(
3256 floating_ip_dict["floatingips"][0].get(
3257 "floating_ip_address"
3258 )
3259 )
3260 except Exception:
3261 pass
3262
3263 for subnet in port["fixed_ips"]:
3264 ips.append(subnet["ip_address"])
3265
3266 interface["ip_address"] = ";".join(ips)
3267 vm["interfaces"].append(interface)
3268 except Exception as e:
3269 self.logger.error(
3270 "Error getting vm interface information {}: {}".format(
3271 type(e).__name__, e
3272 ),
3273 exc_info=True,
3274 )
3275 except vimconn.VimConnNotFoundException as e:
3276 self.logger.error("Exception getting vm status: %s", str(e))
3277 vm["status"] = "DELETED"
3278 vm["error_msg"] = str(e)
3279 except vimconn.VimConnException as e:
3280 self.logger.error("Exception getting vm status: %s", str(e))
3281 vm["status"] = "VIM_ERROR"
3282 vm["error_msg"] = str(e)
3283
3284 vm_dict[vm_id] = vm
3285
3286 return vm_dict
3287
3288 def action_vminstance(self, vm_id, action_dict, created_items={}):
3289 """Send and action over a VM instance from VIM
3290 Returns None or the console dict if the action was successfully sent to the VIM
3291 """
3292 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3293
3294 try:
3295 self._reload_connection()
3296 server = self.nova.servers.find(id=vm_id)
3297
3298 if "start" in action_dict:
3299 if action_dict["start"] == "rebuild":
3300 server.rebuild()
3301 else:
3302 if server.status == "PAUSED":
3303 server.unpause()
3304 elif server.status == "SUSPENDED":
3305 server.resume()
3306 elif server.status == "SHUTOFF":
3307 server.start()
3308 else:
3309 self.logger.debug(
3310 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3311 )
3312 raise vimconn.VimConnException(
3313 "Cannot 'start' instance while it is in active state",
3314 http_code=vimconn.HTTP_Bad_Request,
3315 )
3316
3317 elif "pause" in action_dict:
3318 server.pause()
3319 elif "resume" in action_dict:
3320 server.resume()
3321 elif "shutoff" in action_dict or "shutdown" in action_dict:
3322 self.logger.debug("server status %s", server.status)
3323 if server.status == "ACTIVE":
3324 server.stop()
3325 else:
3326 self.logger.debug("ERROR: VM is not in Active state")
3327 raise vimconn.VimConnException(
3328 "VM is not in active state, stop operation is not allowed",
3329 http_code=vimconn.HTTP_Bad_Request,
3330 )
3331 elif "forceOff" in action_dict:
3332 server.stop() # TODO
3333 elif "terminate" in action_dict:
3334 server.delete()
3335 elif "createImage" in action_dict:
3336 server.create_image()
3337 # "path":path_schema,
3338 # "description":description_schema,
3339 # "name":name_schema,
3340 # "metadata":metadata_schema,
3341 # "imageRef": id_schema,
3342 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3343 elif "rebuild" in action_dict:
3344 server.rebuild(server.image["id"])
3345 elif "reboot" in action_dict:
3346 server.reboot() # reboot_type="SOFT"
3347 elif "console" in action_dict:
3348 console_type = action_dict["console"]
3349
3350 if console_type is None or console_type == "novnc":
3351 console_dict = server.get_vnc_console("novnc")
3352 elif console_type == "xvpvnc":
3353 console_dict = server.get_vnc_console(console_type)
3354 elif console_type == "rdp-html5":
3355 console_dict = server.get_rdp_console(console_type)
3356 elif console_type == "spice-html5":
3357 console_dict = server.get_spice_console(console_type)
3358 else:
3359 raise vimconn.VimConnException(
3360 "console type '{}' not allowed".format(console_type),
3361 http_code=vimconn.HTTP_Bad_Request,
3362 )
3363
3364 try:
3365 console_url = console_dict["console"]["url"]
3366 # parse console_url
3367 protocol_index = console_url.find("//")
3368 suffix_index = (
3369 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3370 )
3371 port_index = (
3372 console_url[protocol_index + 2 : suffix_index].find(":")
3373 + protocol_index
3374 + 2
3375 )
3376
3377 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3378 raise vimconn.VimConnException(
3379 "Unexpected response from VIM " + str(console_dict)
3380 )
3381
3382 console_dict2 = {
3383 "protocol": console_url[0:protocol_index],
3384 "server": console_url[protocol_index + 2 : port_index],
3385 "port": int(console_url[port_index + 1 : suffix_index]),
3386 "suffix": console_url[suffix_index + 1 :],
3387 }
3388
3389 return console_dict2
3390 except Exception:
3391 raise vimconn.VimConnException(
3392 "Unexpected response from VIM " + str(console_dict)
3393 )
3394
3395 return None
3396 except (
3397 ksExceptions.ClientException,
3398 nvExceptions.ClientException,
3399 nvExceptions.NotFound,
3400 ConnectionError,
3401 ) as e:
3402 self._format_exception(e)
3403 # TODO insert exception vimconn.HTTP_Unauthorized
3404
3405 # ###### VIO Specific Changes #########
3406 def _generate_vlanID(self):
3407 """
3408 Method to get unused vlanID
3409 Args:
3410 None
3411 Returns:
3412 vlanID
3413 """
3414 # Get used VLAN IDs
3415 usedVlanIDs = []
3416 networks = self.get_network_list()
3417
3418 for net in networks:
3419 if net.get("provider:segmentation_id"):
3420 usedVlanIDs.append(net.get("provider:segmentation_id"))
3421
3422 used_vlanIDs = set(usedVlanIDs)
3423
3424 # find unused VLAN ID
3425 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3426 try:
3427 start_vlanid, end_vlanid = map(
3428 int, vlanID_range.replace(" ", "").split("-")
3429 )
3430
3431 for vlanID in range(start_vlanid, end_vlanid + 1):
3432 if vlanID not in used_vlanIDs:
3433 return vlanID
3434 except Exception as exp:
3435 raise vimconn.VimConnException(
3436 "Exception {} occurred while generating VLAN ID.".format(exp)
3437 )
3438 else:
3439 raise vimconn.VimConnConflictException(
3440 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3441 self.config.get("dataplane_net_vlan_range")
3442 )
3443 )
3444
3445 def _generate_multisegment_vlanID(self):
3446 """
3447 Method to get unused vlanID
3448 Args:
3449 None
3450 Returns:
3451 vlanID
3452 """
3453 # Get used VLAN IDs
3454 usedVlanIDs = []
3455 networks = self.get_network_list()
3456 for net in networks:
3457 if net.get("provider:network_type") == "vlan" and net.get(
3458 "provider:segmentation_id"
3459 ):
3460 usedVlanIDs.append(net.get("provider:segmentation_id"))
3461 elif net.get("segments"):
3462 for segment in net.get("segments"):
3463 if segment.get("provider:network_type") == "vlan" and segment.get(
3464 "provider:segmentation_id"
3465 ):
3466 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3467
3468 used_vlanIDs = set(usedVlanIDs)
3469
3470 # find unused VLAN ID
3471 for vlanID_range in self.config.get("multisegment_vlan_range"):
3472 try:
3473 start_vlanid, end_vlanid = map(
3474 int, vlanID_range.replace(" ", "").split("-")
3475 )
3476
3477 for vlanID in range(start_vlanid, end_vlanid + 1):
3478 if vlanID not in used_vlanIDs:
3479 return vlanID
3480 except Exception as exp:
3481 raise vimconn.VimConnException(
3482 "Exception {} occurred while generating VLAN ID.".format(exp)
3483 )
3484 else:
3485 raise vimconn.VimConnConflictException(
3486 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3487 self.config.get("multisegment_vlan_range")
3488 )
3489 )
3490
3491 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3492 """
3493 Method to validate user given vlanID ranges
3494 Args: None
3495 Returns: None
3496 """
3497 for vlanID_range in input_vlan_range:
3498 vlan_range = vlanID_range.replace(" ", "")
3499 # validate format
3500 vlanID_pattern = r"(\d)*-(\d)*$"
3501 match_obj = re.match(vlanID_pattern, vlan_range)
3502 if not match_obj:
3503 raise vimconn.VimConnConflictException(
3504 "Invalid VLAN range for {}: {}.You must provide "
3505 "'{}' in format [start_ID - end_ID].".format(
3506 text_vlan_range, vlanID_range, text_vlan_range
3507 )
3508 )
3509
3510 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3511 if start_vlanid <= 0:
3512 raise vimconn.VimConnConflictException(
3513 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3514 "networks valid IDs are 1 to 4094 ".format(
3515 text_vlan_range, vlanID_range
3516 )
3517 )
3518
3519 if end_vlanid > 4094:
3520 raise vimconn.VimConnConflictException(
3521 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3522 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3523 text_vlan_range, vlanID_range
3524 )
3525 )
3526
3527 if start_vlanid > end_vlanid:
3528 raise vimconn.VimConnConflictException(
3529 "Invalid VLAN range for {}: {}. You must provide '{}'"
3530 " in format start_ID - end_ID and start_ID < end_ID ".format(
3531 text_vlan_range, vlanID_range, text_vlan_range
3532 )
3533 )
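
    # Illustrative sketch (not executed), with hypothetical config values:
    #   self._validate_vlan_ranges(["2000-2100"], "dataplane_net_vlan_range")  # passes
    #   self._validate_vlan_ranges(["0-100"], "dataplane_net_vlan_range")      # raises: start must be > 0
    #   self._validate_vlan_ranges(["300-200"], "dataplane_net_vlan_range")    # raises: start_ID < end_ID required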
3534
3535 def get_hosts_info(self):
3536 """Get the information of deployed hosts
3537 Returns the hosts content"""
3538 if self.debug:
3539 print("osconnector: Getting Host info from VIM")
3540
3541 try:
3542 h_list = []
3543 self._reload_connection()
3544 hypervisors = self.nova.hypervisors.list()
3545
3546 for hype in hypervisors:
3547 h_list.append(hype.to_dict())
3548
3549 return 1, {"hosts": h_list}
3550 except nvExceptions.NotFound as e:
3551 error_value = -vimconn.HTTP_Not_Found
3552 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3553 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3554 error_value = -vimconn.HTTP_Bad_Request
3555 error_text = (
3556 type(e).__name__
3557 + ": "
3558 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3559 )
3560
3561 # TODO insert exception vimconn.HTTP_Unauthorized
3562 # if reaching here is because an exception
3563 self.logger.debug("get_hosts_info " + error_text)
3564
3565 return error_value, error_text
3566
    def get_hosts(self, vim_tenant):
        """Get the hosts and deployed instances
        Returns the hosts content, with the IDs of the VMs deployed on each host"""
        r, hype_dict = self.get_hosts_info()

        if r < 0:
            return r, hype_dict

        hypervisors = hype_dict["hosts"]

        try:
            servers = self.nova.servers.list()
            for hype in hypervisors:
                for server in servers:
                    if (
                        server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
                        == hype["hypervisor_hostname"]
                    ):
                        if "vm" in hype:
                            hype["vm"].append(server.id)
                        else:
                            hype["vm"] = [server.id]

            return 1, hype_dict
        except nvExceptions.NotFound as e:
            error_value = -vimconn.HTTP_Not_Found
            error_text = str(e) if len(e.args) == 0 else str(e.args[0])
        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
            error_value = -vimconn.HTTP_Bad_Request
            error_text = (
                type(e).__name__
                + ": "
                + (str(e) if len(e.args) == 0 else str(e.args[0]))
            )

        # TODO insert exception vimconn.HTTP_Unauthorized
        # if we reach this point, it is because an exception was raised
        self.logger.debug("get_hosts " + error_text)

        return error_value, error_text

    def new_affinity_group(self, affinity_group_data):
        """Adds a server group to VIM
        affinity_group_data contains a dictionary with information, keys:
            name: name in VIM for the server group
            type: affinity or anti-affinity
            scope: Only nfvi-node allowed
        Returns the server group identifier"""
        self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))

        try:
            name = affinity_group_data["name"]
            policy = affinity_group_data["type"]

            self._reload_connection()
            new_server_group = self.nova.server_groups.create(name, policy)

            return new_server_group.id
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)

    def get_affinity_group(self, affinity_group_id):
        """Obtain server group details from the VIM. Returns the server group details as a dict"""
        self.logger.debug("Getting server group '%s'", affinity_group_id)
        try:
            self._reload_connection()
            server_group = self.nova.server_groups.find(id=affinity_group_id)

            return server_group.to_dict()
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_affinity_group(self, affinity_group_id):
        """Deletes a server group from the VIM. Returns the old affinity_group_id"""
        self.logger.debug("Deleting server group '%s'", affinity_group_id)
        try:
            self._reload_connection()
            self.nova.server_groups.delete(affinity_group_id)

            return affinity_group_id
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

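    # Lifecycle sketch for the three server-group methods above (hypothetical
    # values, not executed): the id returned by new_affinity_group() is what
    # get_affinity_group() and delete_affinity_group() expect.
    #     sg_id = self.new_affinity_group(
    #         {"name": "vnf-anti-aff", "type": "anti-affinity", "scope": "nfvi-node"}
    #     )
    #     details = self.get_affinity_group(sg_id)  # dict with name, policies, members, ...
    #     self.delete_affinity_group(sg_id)         # returns sg_id back
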
    def get_vdu_state(self, vm_id):
        """
        Get the state of a vdu
        param:
            vm_id: ID of an instance
        Returns a list: [status, flavor_id, hypervisor_host, availability_zone]
        """
        self.logger.debug("Getting the status of VM")
        self.logger.debug("VIM VM ID %s", vm_id)
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)
        server_dict = server.to_dict()
        vdu_data = [
            server_dict["status"],
            server_dict["flavor"]["id"],
            server_dict["OS-EXT-SRV-ATTR:host"],
            server_dict["OS-EXT-AZ:availability_zone"],
        ]
        self.logger.debug("vdu_data %s", vdu_data)
        return vdu_data

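    # Callers below (migrate_instance, resize_instance) unpack this list
    # positionally, e.g. (made-up values):
    #     status, flavor_id, host, az = self.get_vdu_state(vm_id)
    #     # -> ["ACTIVE", "1f2d...", "compute-0", "nova"]
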
    def check_compute_availability(self, host, server_flavor_details):
        """Return the host if it has enough free RAM, disk and vCPUs for the
        given flavor details [ram, disk, vcpus]; otherwise return None"""
        self._reload_connection()
        hypervisor_search = self.nova.hypervisors.search(
            hypervisor_match=host, servers=True
        )
        for hypervisor in hypervisor_search:
            hypervisor_id = hypervisor.to_dict()["id"]
            hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
            hypervisor_dict = hypervisor_details.to_dict()
            # resources in the same order as server_flavor_details: ram, disk, vcpus
            resources_available = [
                hypervisor_dict["free_ram_mb"],
                hypervisor_dict["disk_available_least"],
                hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
            ]
            compute_available = all(
                x > y for x, y in zip(resources_available, server_flavor_details)
            )
            if compute_available:
                return host

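    # Sketch of the capacity check above (hypothetical numbers): both lists are
    # ordered as [RAM (MB), disk (GB), vCPUs], matching server_flavor_details
    # as built in migrate_instance().
    #     resources_available = [16384, 200, 12]  # free_ram_mb, disk_available_least, free vcpus
    #     server_flavor_details = [4096, 40, 4]   # flavor ram, disk, vcpus
    #     all(x > y for x, y in zip(resources_available, server_flavor_details))  # -> True
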
    def check_availability_zone(
        self, old_az, server_flavor_details, old_host, host=None
    ):
        """Check whether the availability zone 'old_az' can host the instance.
        Returns a dict with 'zone_check' (bool) and 'compute_availability'
        (a suitable host name, or None)"""
        self._reload_connection()
        az_check = {"zone_check": False, "compute_availability": None}
        aggregates_list = self.nova.aggregates.list()
        for aggregate in aggregates_list:
            aggregate_details = aggregate.to_dict()
            if aggregate_details["availability_zone"] == old_az:
                hosts_list = aggregate_details["hosts"]
                if host is not None:
                    # explicit target host: it must belong to the same zone
                    if host in hosts_list:
                        az_check["zone_check"] = True
                        available_compute_id = self.check_compute_availability(
                            host, server_flavor_details
                        )
                        if available_compute_id is not None:
                            az_check["compute_availability"] = available_compute_id
                else:
                    # no target host given: pick any host in the zone but the current one
                    for check_host in hosts_list:
                        if check_host != old_host:
                            available_compute_id = self.check_compute_availability(
                                check_host, server_flavor_details
                            )
                            if available_compute_id is not None:
                                az_check["zone_check"] = True
                                az_check["compute_availability"] = available_compute_id
                                break
                    else:
                        az_check["zone_check"] = True
        return az_check

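    # Shape of the dict returned above (illustrative values):
    #     {"zone_check": True, "compute_availability": "compute-3"}  # migration possible
    #     {"zone_check": True, "compute_availability": None}         # zone ok, no capacity
    #     {"zone_check": False, "compute_availability": None}        # target not in the zone
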
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu
        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to
        """
        self._reload_connection()
        vm_state = False
        instance_state = self.get_vdu_state(vm_id)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]
        try:
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]
            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            if available_compute_id is not None:
                # nova live-migration is asynchronous: wait until the instance is
                # ACTIVE again and verify it landed on the expected host
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )
                state = "MIGRATING"
                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                changed_compute_host = self.get_vdu_state(vm_id)[2]
                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )
                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources"
                    " to migrate the instance".format(compute_host),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)

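    # Usage sketch (hypothetical IDs, not executed):
    #     state, target = self.migrate_instance(vm_id="a1b2...", compute_host="compute-3")
    #     # on success: state == "MIGRATING", target == "compute-3"
    # Passing compute_host=None lets check_availability_zone() pick any host in
    # the same availability zone with enough free resources.
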
    def resize_instance(self, vm_id, new_flavor_id):
        """
        Resize a vm based on the given flavor details
        param:
            vm_id : ID of an instance
            new_flavor_id : ID of the flavor to resize the instance to
        Return the status of the resized instance
        """
        self._reload_connection()
        self.logger.debug("Resizing the flavor of an instance")
        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
        try:
            if instance_status in ("ACTIVE", "SHUTOFF"):
                if old_flavor_disk > new_flavor_disk:
                    raise nvExceptions.BadRequest(
                        400,
                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                    )
                else:
                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
                    if vm_state:
                        instance_resized_status = self.confirm_resize(vm_id)
                        return instance_resized_status
                    else:
                        raise nvExceptions.Conflict(
                            409,
                            message="Cannot 'resize': the VM did not reach VERIFY_RESIZE state",
                        )
            else:
                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.Conflict(
                    409,
                    message="Cannot 'resize' an instance that is not in ACTIVE or SHUTOFF state",
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)

    def confirm_resize(self, vm_id):
        """
        Confirm the resize of an instance
        param:
            vm_id: ID of an instance
        """
        self._reload_connection()
        self.nova.servers.confirm_resize(server=vm_id)
        if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
            self.__wait_for_vm(vm_id, "ACTIVE")
        instance_status = self.get_vdu_state(vm_id)[0]
        return instance_status

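    # resize_instance()/confirm_resize() above follow the standard two-step
    # OpenStack resize pattern (a sketch, not executed):
    #     self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
    #     # instance goes RESIZE -> VERIFY_RESIZE; nothing is final yet
    #     self.nova.servers.confirm_resize(server=vm_id)
    #     # instance returns to ACTIVE with the new flavor
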
    def get_monitoring_data(self):
        """Get all servers and all ports from the VIM, for the monitoring subsystem"""
        try:
            self.logger.debug("Getting servers and ports data from Openstack VIMs.")
            self._reload_connection()
            all_servers = self.nova.servers.list(detailed=True)
            all_ports = self.neutron.list_ports()
            return all_servers, all_ports
        except (
            vimconn.VimConnException,
            vimconn.VimConnNotFoundException,
            vimconn.VimConnConnectionException,
        ) as e:
            raise vimconn.VimConnException(
                f"Exception in monitoring while getting VMs and ports status: {str(e)}"
            )
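
    # Consumer-side sketch (not executed): the monitoring code is assumed to
    # unpack the raw novaclient/neutronclient results like this:
    #     all_servers, all_ports = self.get_monitoring_data()
    #     for server in all_servers:       # novaclient Server objects
    #         status = server.to_dict()["status"]
    #     for port in all_ports["ports"]:  # neutron returns {"ports": [...]}
    #         port_status = port["status"]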