# -*- coding: utf-8 -*-

##
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##

"""
osconnector implements all the methods to interact with OpenStack using the
OpenStack python clients (keystone, nova, neutron, cinder and glance).

For the VNF forwarding graph, the OpenStack VIM connector calls the
networking-sfc Neutron extension methods, whose resources are mapped
to the VIM connector's SFC resources as follows:
- Classification (OSM) -> Flow Classifier (Neutron)
- Service Function Instance (OSM) -> Port Pair (Neutron)
- Service Function (OSM) -> Port Pair Group (Neutron)
- Service Function Path (OSM) -> Port Chain (Neutron)
"""

import copy
from http.client import HTTPException
import json
import logging
from pprint import pformat
import random
import re
import time
from typing import Dict, List, Optional, Tuple

from cinderclient import client as cClient
from glanceclient import client as glClient
import glanceclient.exc as gl1Exceptions
from keystoneauth1 import session
from keystoneauth1.identity import v2, v3
import keystoneclient.exceptions as ksExceptions
import keystoneclient.v2_0.client as ksClient_v2
import keystoneclient.v3.client as ksClient_v3
import netaddr
from neutronclient.common import exceptions as neExceptions
from neutronclient.neutron import client as neClient
from novaclient import client as nClient, exceptions as nvExceptions
from osm_ro_plugin import vimconn
from requests.exceptions import ConnectionError
import yaml

__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

62 """contain the openstack virtual machine status to openmano status"""
63 vmStatus2manoFormat = {
64 "ACTIVE": "ACTIVE",
65 "PAUSED": "PAUSED",
66 "SUSPENDED": "SUSPENDED",
67 "SHUTOFF": "INACTIVE",
68 "BUILD": "BUILD",
69 "ERROR": "ERROR",
70 "DELETED": "DELETED",
71 }
72 netStatus2manoFormat = {
73 "ACTIVE": "ACTIVE",
74 "PAUSED": "PAUSED",
75 "INACTIVE": "INACTIVE",
76 "BUILD": "BUILD",
77 "ERROR": "ERROR",
78 "DELETED": "DELETED",
79 }
80
81 supportedClassificationTypes = ["legacy_flow_classifier"]
82
83 # global var to have a timeout creating and deleting volumes
84 volume_timeout = 1800
85 server_timeout = 1800
86
87
88 class SafeDumper(yaml.SafeDumper):
89 def represent_data(self, data):
90 # Openstack APIs use custom subclasses of dict and YAML safe dumper
91 # is designed to not handle that (reference issue 142 of pyyaml)
92 if isinstance(data, dict) and data.__class__ != dict:
93 # A simple solution is to convert those items back to dicts
94 data = dict(data.items())
95
96 return super(SafeDumper, self).represent_data(data)
97
98
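# Illustrative note (not from the original source): SafeDumper above is what
# lets serialize() below turn the dict subclasses returned by the OpenStack
# clients into plain YAML. For instance, with hypothetical values:
#
#     yaml.dump({"status": "ACTIVE"}, Dumper=SafeDumper, default_flow_style=True)
#     # -> '{status: ACTIVE}\n'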
class vimconnector(vimconn.VimConnector):
    def __init__(
        self,
        uuid,
        name,
        tenant_id,
        tenant_name,
        url,
        url_admin=None,
        user=None,
        passwd=None,
        log_level=None,
        config={},
        persistent_info={},
    ):
        """using common constructor parameters. In this case
        'url' is the keystone authorization url,
        'url_admin' is not used
        """
        api_version = config.get("APIversion")

        if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:APIversion. "
                "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
            )

        vim_type = config.get("vim_type")

        if vim_type and vim_type not in ("vio", "VIO"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:vim_type. "
                "Allowed values are 'vio' or 'VIO'".format(vim_type)
            )

        if config.get("dataplane_net_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
            )

        if config.get("multisegment_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("multisegment_vlan_range"), "multisegment_vlan_range"
            )

        vimconn.VimConnector.__init__(
            self,
            uuid,
            name,
            tenant_id,
            tenant_name,
            url,
            url_admin,
            user,
            passwd,
            log_level,
            config,
        )

        if self.config.get("insecure") and self.config.get("ca_cert"):
            raise vimconn.VimConnException(
                "options insecure and ca_cert are mutually exclusive"
            )

        self.verify = True

        if self.config.get("insecure"):
            self.verify = False

        if self.config.get("ca_cert"):
            self.verify = self.config.get("ca_cert")

        if not url:
            raise TypeError("url param can not be NoneType")

        self.persistent_info = persistent_info
        self.availability_zone = persistent_info.get("availability_zone", None)
        self.session = persistent_info.get("session", {"reload_client": True})
        self.my_tenant_id = self.session.get("my_tenant_id")
        self.nova = self.session.get("nova")
        self.neutron = self.session.get("neutron")
        self.cinder = self.session.get("cinder")
        self.glance = self.session.get("glance")
        # self.glancev1 = self.session.get("glancev1")
        self.keystone = self.session.get("keystone")
        self.api_version3 = self.session.get("api_version3")
        self.vim_type = self.config.get("vim_type")

        if self.vim_type:
            self.vim_type = self.vim_type.upper()

        if self.config.get("use_internal_endpoint"):
            self.endpoint_type = "internalURL"
        else:
            self.endpoint_type = None

        logging.getLogger("urllib3").setLevel(logging.WARNING)
        logging.getLogger("keystoneauth").setLevel(logging.WARNING)
        logging.getLogger("novaclient").setLevel(logging.WARNING)
        self.logger = logging.getLogger("ro.vim.openstack")

        # allow security_groups to be a list or a single string
        if isinstance(self.config.get("security_groups"), str):
            self.config["security_groups"] = [self.config["security_groups"]]

        self.security_groups_id = None

        # ###### VIO Specific Changes #########
        if self.vim_type == "VIO":
            self.logger = logging.getLogger("ro.vim.vio")

        if log_level:
            self.logger.setLevel(getattr(logging, log_level))

    def __getitem__(self, index):
        """Get individual parameters.
        Throws KeyError"""
        if index == "project_domain_id":
            return self.config.get("project_domain_id")
        elif index == "user_domain_id":
            return self.config.get("user_domain_id")
        else:
            return vimconn.VimConnector.__getitem__(self, index)

    def __setitem__(self, index, value):
        """Set individual parameters and mark the session as dirty to force a connection reload.
        Throws KeyError"""
        if index == "project_domain_id":
            self.config["project_domain_id"] = value
        elif index == "user_domain_id":
            self.config["user_domain_id"] = value
        else:
            vimconn.VimConnector.__setitem__(self, index, value)

        self.session["reload_client"] = True

    def serialize(self, value):
        """Serialization of python basic types.

        In the case the value is not serializable, a message will be logged
        and a simple representation of the data that cannot be converted back
        to python is returned.
        """
        if isinstance(value, str):
            return value

        try:
            return yaml.dump(
                value, Dumper=SafeDumper, default_flow_style=True, width=256
            )
        except yaml.representer.RepresenterError:
            self.logger.debug(
                "The following entity cannot be serialized in YAML:\n\n%s\n\n",
                pformat(value),
                exc_info=True,
            )

            return str(value)

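    # Example for serialize() above (illustrative, not executed): a dict such
    # as {"name": "net1", "admin_state_up": True} comes back as the one-line
    # YAML flow string "{admin_state_up: true, name: net1}\n", while an object
    # that SafeDumper cannot represent falls back to str(value).
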
    def _reload_connection(self):
        """Called before any operation; it checks whether credentials have changed.
        Throws keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-)
        if self.session["reload_client"]:
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # infer from the auth_url ending with v3 or v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require a specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            if sess.get_all_version_data(service_type="volumev2"):
                self.cinder = self.session["cinder"] = cClient.Client(
                    2,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.cinder = self.session["cinder"] = cClient.Client(
                    3,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # version 1 of the glance client was formerly used in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availability zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None

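    # Illustrative config (hypothetical values) consumed by _reload_connection()
    # above: "region_name" pins keystone/nova/neutron/cinder to one region and
    # "microversion" opts in to e.g. device role tagging:
    #
    #     config = {"region_name": "RegionOne", "microversion": "2.32",
    #               "project_domain_name": "Default", "user_domain_name": "Default"}
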
    def __net_os2mano(self, net_list_dict):
        """Transform the net openstack format to mano format
        net_list_dict can be a list of dict or a single dict"""
        if type(net_list_dict) is dict:
            net_list_ = (net_list_dict,)
        elif type(net_list_dict) is list:
            net_list_ = net_list_dict
        else:
            raise TypeError("param net_list_dict must be a list or a dictionary")
        for net in net_list_:
            if net.get("provider:network_type") == "vlan":
                net["type"] = "data"
            else:
                net["type"] = "bridge"

    def __classification_os2mano(self, class_list_dict):
        """Transform the openstack format (Flow Classifier) to mano format
        (Classification) class_list_dict can be a list of dict or a single dict
        """
        if isinstance(class_list_dict, dict):
            class_list_ = [class_list_dict]
        elif isinstance(class_list_dict, list):
            class_list_ = class_list_dict
        else:
            raise TypeError("param class_list_dict must be a list or a dictionary")
        for classification in class_list_:
            id = classification.pop("id")
            name = classification.pop("name")
            description = classification.pop("description")
            project_id = classification.pop("project_id")
            tenant_id = classification.pop("tenant_id")
            original_classification = copy.deepcopy(classification)
            classification.clear()
            classification["ctype"] = "legacy_flow_classifier"
            classification["definition"] = original_classification
            classification["id"] = id
            classification["name"] = name
            classification["description"] = description
            classification["project_id"] = project_id
            classification["tenant_id"] = tenant_id

    def __sfi_os2mano(self, sfi_list_dict):
        """Transform the openstack format (Port Pair) to mano format (SFI)
        sfi_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfi_list_dict, dict):
            sfi_list_ = [sfi_list_dict]
        elif isinstance(sfi_list_dict, list):
            sfi_list_ = sfi_list_dict
        else:
            raise TypeError("param sfi_list_dict must be a list or a dictionary")

        for sfi in sfi_list_:
            sfi["ingress_ports"] = []
            sfi["egress_ports"] = []

            if sfi.get("ingress"):
                sfi["ingress_ports"].append(sfi["ingress"])

            if sfi.get("egress"):
                sfi["egress_ports"].append(sfi["egress"])

            del sfi["ingress"]
            del sfi["egress"]
            params = sfi.get("service_function_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfi["sfc_encap"] = sfc_encap
            del sfi["service_function_parameters"]

    def __sf_os2mano(self, sf_list_dict):
        """Transform the openstack format (Port Pair Group) to mano format (SF)
        sf_list_dict can be a list of dict or a single dict
        """
        if isinstance(sf_list_dict, dict):
            sf_list_ = [sf_list_dict]
        elif isinstance(sf_list_dict, list):
            sf_list_ = sf_list_dict
        else:
            raise TypeError("param sf_list_dict must be a list or a dictionary")

        for sf in sf_list_:
            del sf["port_pair_group_parameters"]
            sf["sfis"] = sf["port_pairs"]
            del sf["port_pairs"]

    def __sfp_os2mano(self, sfp_list_dict):
        """Transform the openstack format (Port Chain) to mano format (SFP)
        sfp_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfp_list_dict, dict):
            sfp_list_ = [sfp_list_dict]
        elif isinstance(sfp_list_dict, list):
            sfp_list_ = sfp_list_dict
        else:
            raise TypeError("param sfp_list_dict must be a list or a dictionary")

        for sfp in sfp_list_:
            params = sfp.pop("chain_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfp["sfc_encap"] = sfc_encap
            sfp["spi"] = sfp.pop("chain_id")
            sfp["classifications"] = sfp.pop("flow_classifiers")
            sfp["service_functions"] = sfp.pop("port_pair_groups")

    # placeholder for now; read the TODO note below
    def _validate_classification(self, type, definition):
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.

    def _format_exception(self, exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
        message_error = str(exception)
        tip = ""

        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
            ),
        ):
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
            ),
        ):
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
            ),
        ):
            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, vimconn.VimConnException):
            raise exception
        else:
            self.logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnConnectionException(
                type(exception).__name__ + ": " + message_error
            )

    def _get_ids_from_name(self):
        """
        Obtain ids from the names of tenant and security_groups. Store them at self.security_groups_id.
        :return: None
        """
        # get tenant_id if only tenant_name is supplied
        self._reload_connection()

        if not self.my_tenant_id:
            raise vimconn.VimConnConnectionException(
                "Error getting tenant information from name={} id={}".format(
                    self.tenant_name, self.tenant_id
                )
            )

        if self.config.get("security_groups") and not self.security_groups_id:
            # convert from name to id
            neutron_sg_list = self.neutron.list_security_groups(
                tenant_id=self.my_tenant_id
            )["security_groups"]

            self.security_groups_id = []
            for sg in self.config.get("security_groups"):
                for neutron_sg in neutron_sg_list:
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )

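    # Illustrative note on _get_ids_from_name() above (hypothetical values):
    # with config["security_groups"] = ["default", "osm-sg"], each entry is
    # matched against the ids/names returned by neutron.list_security_groups()
    # and self.security_groups_id ends up as a list of the matching ids.
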
    def check_vim_connectivity(self):
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})

    def get_tenant_list(self, filter_dict={}):
        """Obtain tenants of VIM
        filter_dict can contain the following keys:
            name: filter by tenant name
            id: filter by tenant uuid/id
            <other VIM specific>
        Returns the tenant list of dictionaries: [{'name': '<name>', 'id': '<id>', ...}, ...]
        """
        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()

            if self.api_version3:
                project_class_list = self.keystone.projects.list(
                    name=filter_dict.get("name")
                )
            else:
                project_class_list = self.keystone.tenants.findall(**filter_dict)

            project_list = []

            for project in project_class_list:
                if filter_dict.get("id") and filter_dict["id"] != project.id:
                    continue

                project_list.append(project.to_dict())

            return project_list
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_tenant(self, tenant_name, tenant_description):
        """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
        self.logger.debug("Adding a new tenant name: %s", tenant_name)

        try:
            self._reload_connection()

            if self.api_version3:
                project = self.keystone.projects.create(
                    tenant_name,
                    self.config.get("project_domain_id", "default"),
                    description=tenant_description,
                    is_domain=False,
                )
            else:
                project = self.keystone.tenants.create(tenant_name, tenant_description)

            return project.id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_tenant(self, tenant_id):
        """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
        self.logger.debug("Deleting tenant %s from VIM", tenant_id)

        try:
            self._reload_connection()

            if self.api_version3:
                self.keystone.projects.delete(tenant_id)
            else:
                self.keystone.tenants.delete(tenant_id)

            return tenant_id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_network(
        self,
        net_name,
        net_type,
        ip_profile=None,
        shared=False,
        provider_network_profile=None,
    ):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data': underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) semicolon separated list of ip_schema, e.g. X.X.X.X[;X.X.X.X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/used by other tenants/organizations
            'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
                                                             physical-network: physnet-label}
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
        )
        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))

        try:
            vlan = None

            if provider_network_profile:
                vlan = provider_network_profile.get("segmentation-id")

            new_net = None
            created_items = {}
            self._reload_connection()
            network_dict = {"name": net_name, "admin_state_up": True}

            if net_type in ("data", "ptp") or provider_network_profile:
                provider_physical_network = None

                if provider_network_profile and provider_network_profile.get(
                    "physical-network"
                ):
                    provider_physical_network = provider_network_profile.get(
                        "physical-network"
                    )

                # provider-network must be one of the dataplane_physical_net if this is a list. If it is a string
                # or not declared, just ignore the checking
                if (
                    isinstance(
                        self.config.get("dataplane_physical_net"), (tuple, list)
                    )
                    and provider_physical_network
                    not in self.config["dataplane_physical_net"]
                ):
                    raise vimconn.VimConnConflictException(
                        "Invalid parameter 'provider-network:physical-network' "
                        "for network creation. '{}' is not one of the declared "
                        "list at VIM_config:dataplane_physical_net".format(
                            provider_physical_network
                        )
                    )

                # use the default dataplane_physical_net
                if not provider_physical_network:
                    provider_physical_network = self.config.get(
                        "dataplane_physical_net"
                    )

                # if it is a non-empty list, use the first value. If it is a string use the value directly
                if (
                    isinstance(provider_physical_network, (tuple, list))
                    and provider_physical_network
                ):
                    provider_physical_network = provider_physical_network[0]

                if not provider_physical_network:
                    raise vimconn.VimConnConflictException(
                        "missing information needed for underlay networks. Provide "
                        "'dataplane_physical_net' configuration at VIM or use the NS "
                        "instantiation parameter 'provider-network.physical-network'"
                        " for the VLD"
                    )

                if not self.config.get("multisegment_support"):
                    network_dict[
                        "provider:physical_network"
                    ] = provider_physical_network

                    if (
                        provider_network_profile
                        and "network-type" in provider_network_profile
                    ):
                        network_dict[
                            "provider:network_type"
                        ] = provider_network_profile["network-type"]
                    else:
                        network_dict["provider:network_type"] = self.config.get(
                            "dataplane_network_type", "vlan"
                        )

                    if vlan:
                        network_dict["provider:segmentation_id"] = vlan
                else:
                    # Multi-segment case
                    segment_list = []
                    segment1_dict = {
                        "provider:physical_network": "",
                        "provider:network_type": "vxlan",
                    }
                    segment_list.append(segment1_dict)
                    segment2_dict = {
                        "provider:physical_network": provider_physical_network,
                        "provider:network_type": "vlan",
                    }
                    # keep the VLAN id at hand; it is also needed later for the l2gw connections
                    vlanID = vlan

                    if vlan:
                        segment2_dict["provider:segmentation_id"] = vlan
                    elif self.config.get("multisegment_vlan_range"):
                        vlanID = self._generate_multisegment_vlanID()
                        segment2_dict["provider:segmentation_id"] = vlanID

                    # else
                    #     raise vimconn.VimConnConflictException(
                    #         "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment
                    #         network")
                    segment_list.append(segment2_dict)
                    network_dict["segments"] = segment_list

                # VIO Specific Changes. It needs a concrete VLAN
                if self.vim_type == "VIO" and vlan is None:
                    if self.config.get("dataplane_net_vlan_range") is None:
                        raise vimconn.VimConnConflictException(
                            "You must provide 'dataplane_net_vlan_range' in format "
                            "[start_ID - end_ID] at VIM_config for creating underlay "
                            "networks"
                        )

                    network_dict["provider:segmentation_id"] = self._generate_vlanID()

            network_dict["shared"] = shared

            if self.config.get("disable_network_port_security"):
                network_dict["port_security_enabled"] = False

            if self.config.get("neutron_availability_zone_hints"):
                hints = self.config.get("neutron_availability_zone_hints")

                if isinstance(hints, str):
                    hints = [hints]

                network_dict["availability_zone_hints"] = hints

            new_net = self.neutron.create_network({"network": network_dict})
            # print new_net
            # create subnetwork, even if there is no profile

            if not ip_profile:
                ip_profile = {}

            if not ip_profile.get("subnet_address"):
                # Fake subnet is required
                subnet_rand = random.randint(0, 255)
                ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)

            if "ip_version" not in ip_profile:
                ip_profile["ip_version"] = "IPv4"

            subnet = {
                "name": net_name + "-subnet",
                "network_id": new_net["network"]["id"],
                "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
                "cidr": ip_profile["subnet_address"],
            }

            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
            if ip_profile.get("gateway_address"):
                subnet["gateway_ip"] = ip_profile["gateway_address"]
            else:
                subnet["gateway_ip"] = None

            if ip_profile.get("dns_address"):
                subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")

            if "dhcp_enabled" in ip_profile:
                subnet["enable_dhcp"] = (
                    False
                    if ip_profile["dhcp_enabled"] == "false"
                    or ip_profile["dhcp_enabled"] is False
                    else True
                )

            if ip_profile.get("dhcp_start_address"):
                subnet["allocation_pools"] = []
                subnet["allocation_pools"].append(dict())
                subnet["allocation_pools"][0]["start"] = ip_profile[
                    "dhcp_start_address"
                ]

            if ip_profile.get("dhcp_count"):
                # parts = ip_profile["dhcp_start_address"].split(".")
                # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
                ip_int += ip_profile["dhcp_count"] - 1
                ip_str = str(netaddr.IPAddress(ip_int))
                subnet["allocation_pools"][0]["end"] = ip_str
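                # Worked example (illustrative): dhcp_start_address "10.0.0.2"
                # with dhcp_count 100 yields end "10.0.0.101", so the pool is
                # {"start": "10.0.0.2", "end": "10.0.0.101"}.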

            if (
                ip_profile.get("ipv6_address_mode")
                and ip_profile["ip_version"] != "IPv4"
            ):
                subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
                # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
                # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
                subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]

            # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
            self.neutron.create_subnet({"subnet": subnet})

            if net_type == "data" and self.config.get("multisegment_support"):
                if self.config.get("l2gw_support"):
                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
                    for l2gw in l2gw_list:
                        l2gw_conn = {
                            "l2_gateway_id": l2gw["id"],
                            "network_id": new_net["network"]["id"],
                            "segmentation_id": str(vlanID),
                        }
                        new_l2gw_conn = self.neutron.create_l2_gateway_connection(
                            {"l2_gateway_connection": l2gw_conn}
                        )
                        created_items[
                            "l2gwconn:"
                            + str(new_l2gw_conn["l2_gateway_connection"]["id"])
                        ] = True

            return new_net["network"]["id"], created_items
        except Exception as e:
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )

            if new_net:
                self.neutron.delete_network(new_net["network"]["id"])

            self._format_exception(e)

    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM
        Filter_dict can be:
            name: network name
            id: network uuid
            shared: boolean
            tenant_id: tenant
            admin_state_up: boolean
            status: 'ACTIVE'
        Returns the network list of dictionaries
        """
        self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()
            filter_dict_os = filter_dict.copy()

            if self.api_version3 and "tenant_id" in filter_dict_os:
                # TODO check
                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

            net_dict = self.neutron.list_networks(**filter_dict_os)
            net_list = net_dict["networks"]
            self.__net_os2mano(net_list)

            return net_list
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_network(self, net_id):
        """Obtain details of network from VIM
        Returns the network information from a network id"""
        self.logger.debug("Getting tenant network %s from VIM", net_id)
        filter_dict = {"id": net_id}
        net_list = self.get_network_list(filter_dict)

        if len(net_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Network '{}' not found".format(net_id)
            )
        elif len(net_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one network with this criteria"
            )

        net = net_list[0]
        subnets = []
        for subnet_id in net.get("subnets", ()):
            try:
                subnet = self.neutron.show_subnet(subnet_id)
            except Exception as e:
                self.logger.error(
                    "osconnector.get_network(): Error getting subnet %s %s"
                    % (net_id, str(e))
                )
                subnet = {"id": subnet_id, "fault": str(e)}

            subnets.append(subnet)

        net["subnets"] = subnets
        net["encapsulation"] = net.get("provider:network_type")
        net["encapsulation_type"] = net.get("provider:network_type")
        net["segmentation_id"] = net.get("provider:segmentation_id")
        net["encapsulation_id"] = net.get("provider:segmentation_id")

        return net

    def delete_network(self, net_id, created_items=None):
        """
        Removes a tenant network from VIM and its associated elements
        :param net_id: VIM identifier of the network, provided by method new_network
        :param created_items: dictionary with extra items to be deleted. provided by method new_network
        Returns the network identifier or raises an exception upon error or when network is not found
        """
        self.logger.debug("Deleting network '%s' from VIM", net_id)

        if created_items is None:
            created_items = {}

        try:
            self._reload_connection()
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")
                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e).__name__, e
                        )
                    )

            # delete VM ports attached to this network before the network
            ports = self.neutron.list_ports(network_id=net_id)
            for p in ports["ports"]:
                try:
                    self.neutron.delete_port(p["id"])
                except Exception as e:
                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))

            self.neutron.delete_network(net_id)

            return net_id
        except (
            neExceptions.ConnectionFailed,
            neExceptions.NetworkNotFoundClient,
            neExceptions.NeutronException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def refresh_nets_status(self, net_list):
        """Get the status of the networks
        Params: the list of network identifiers
        Returns a dictionary with:
            net_id:         #VIM id of this network
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, INACTIVE, DOWN (admin down),
                            #  BUILD (on building process)
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
        """
        net_dict = {}

        for net_id in net_list:
            net = {}

            try:
                net_vim = self.get_network(net_id)

                if net_vim["status"] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[net_vim["status"]]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim["status"]

                if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                    net["status"] = "DOWN"

                net["vim_info"] = self.serialize(net_vim)

                if net_vim.get("fault"):  # TODO
                    net["error_msg"] = str(net_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "DELETED"
                net["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "VIM_ERROR"
                net["error_msg"] = str(e)
            net_dict[net_id] = net
        return net_dict

    def get_flavor(self, flavor_id):
        """Obtain flavor details from the VIM. Returns the flavor dict details"""
        self.logger.debug("Getting flavor '%s'", flavor_id)

        try:
            self._reload_connection()
            flavor = self.nova.flavors.find(id=flavor_id)
            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)

            return flavor.to_dict()
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that matches the flavor description
        Returns the flavor_id or raises a VimConnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closest flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a
        VimConnNotFoundException is raised
        """
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
                # if len(numas) > 1:
                #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
                # numa=numas[0]
                # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                if epa:
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
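                # Note (illustrative): the tuple comparison in the elif below
                # is lexicographic, ram first; e.g. target (2048, 2, 10, 0, 0)
                # < (4096, 1, 10, 0, 0) is True because 4096 > 2048 decides
                # it, even though that flavor has fewer vcpus.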
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    @staticmethod
    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
        """Process resource quota and fill up extra_specs.
        Args:
            quota       (dict):         Keeping the quota of resources
            prefix      (str):          Prefix
            extra_specs (dict):         Dict to be filled to be used during flavor creation

        """
        if "limit" in quota:
            extra_specs["quota:" + prefix + "_limit"] = quota["limit"]

        if "reserve" in quota:
            extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]

        if "shares" in quota:
            extra_specs["quota:" + prefix + "_shares_level"] = "custom"
            extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]

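    # Illustrative example for process_resource_quota() above: quota
    # {"limit": 1000, "reserve": 500, "shares": 2000} with prefix "cpu" yields
    # {"quota:cpu_limit": 1000, "quota:cpu_reservation": 500,
    #  "quota:cpu_shares_level": "custom", "quota:cpu_shares_share": 2000}.
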
    @staticmethod
    def process_numa_memory(
        numa: dict, node_id: Optional[int], extra_specs: dict
    ) -> None:
        """Set the memory in extra_specs.
        Args:
            numa        (dict):         A dictionary which includes numa information
            node_id     (int):          ID of numa node
            extra_specs (dict):         To be filled.

        """
        if not numa.get("memory"):
            return
        memory_mb = numa["memory"] * 1024
        memory = "hw:numa_mem.{}".format(node_id)
        extra_specs[memory] = int(memory_mb)

    @staticmethod
    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
        """Set the cpu in extra_specs.
        Args:
            numa        (dict):         A dictionary which includes numa information
            node_id     (int):          ID of numa node
            extra_specs (dict):         To be filled.

        """
        if not numa.get("vcpu"):
            return
        vcpu = numa["vcpu"]
        cpu = "hw:numa_cpus.{}".format(node_id)
        vcpu = ",".join(map(str, vcpu))
        extra_specs[cpu] = vcpu

    @staticmethod
    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has paired-threads.
        Args:
            numa        (dict):         A dictionary which includes numa information
            extra_specs (dict):         To be filled.

        Returns:
            threads     (int):          Number of virtual cpus

        """
        if not numa.get("paired-threads"):
            return

        # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
        threads = numa["paired-threads"] * 2
        extra_specs["hw:cpu_thread_policy"] = "require"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return threads

    @staticmethod
    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has cores.
        Args:
            numa        (dict):         A dictionary which includes numa information
            extra_specs (dict):         To be filled.

        Returns:
            cores       (int):          Number of virtual cpus

        """
        # cpu_thread_policy "isolate" implies that the host must not have an SMT
        # architecture, or a non-SMT architecture will be emulated
        if not numa.get("cores"):
            return
        cores = numa["cores"]
        extra_specs["hw:cpu_thread_policy"] = "isolate"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return cores

    @staticmethod
    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has threads.
        Args:
            numa        (dict):         A dictionary which includes numa information
            extra_specs (dict):         To be filled.

        Returns:
            threads     (int):          Number of virtual cpus

        """
        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
        if not numa.get("threads"):
            return
        threads = numa["threads"]
        extra_specs["hw:cpu_thread_policy"] = "prefer"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return threads

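    # Summary (illustrative) of the three helpers above: "paired-threads" maps
    # to hw:cpu_thread_policy=require (SMT host needed), "cores" to isolate
    # (no SMT, or it is emulated away) and "threads" to prefer (SMT used if
    # present); all three set hw:cpu_policy=dedicated.
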
    def _process_numa_parameters_of_flavor(
        self, numas: List, extra_specs: Dict
    ) -> None:
        """Process numa parameters and fill up extra_specs.

        Args:
            numas       (list):         List of dictionaries which include numa information
            extra_specs (dict):         To be filled.

        """
        numa_nodes = len(numas)
        extra_specs["hw:numa_nodes"] = str(numa_nodes)
        cpu_cores, cpu_threads = 0, 0

        if self.vim_type == "VIO":
            self.process_vio_numa_nodes(numa_nodes, extra_specs)

        for numa in numas:
            if "id" in numa:
                node_id = numa["id"]
                # overwrite ram and vcpus
                # check if key "memory" is present in numa else use ram value at flavor
                self.process_numa_memory(numa, node_id, extra_specs)
                self.process_numa_vcpu(numa, node_id, extra_specs)

            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
            extra_specs["hw:cpu_sockets"] = str(numa_nodes)

            if "paired-threads" in numa:
                threads = self.process_numa_paired_threads(numa, extra_specs)
                cpu_threads += threads

            elif "cores" in numa:
                cores = self.process_numa_cores(numa, extra_specs)
                cpu_cores += cores

            elif "threads" in numa:
                threads = self.process_numa_threads(numa, extra_specs)
                cpu_threads += threads

        if cpu_cores:
            extra_specs["hw:cpu_cores"] = str(cpu_cores)
        if cpu_threads:
            extra_specs["hw:cpu_threads"] = str(cpu_threads)

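    # Illustrative example for _process_numa_parameters_of_flavor() above:
    # numas = [{"id": 0, "memory": 2, "vcpu": [0, 1], "paired-threads": 2}]
    # produces extra_specs {"hw:numa_nodes": "1", "hw:numa_mem.0": 2048,
    # "hw:numa_cpus.0": "0,1", "hw:cpu_sockets": "1", "hw:cpu_policy":
    # "dedicated", "hw:cpu_thread_policy": "require", "hw:cpu_threads": "4"}.
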
    @staticmethod
    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
        """According to the number of numa nodes, update the extra_specs for VIO.

        Args:

            numa_nodes  (int):          Number of numa nodes
            extra_specs (dict):         Extra specs dict to be updated

        """
        # If there are several numas, we do not define specific affinity.
        extra_specs["vmware:latency_sensitivity_level"] = "high"

    def _change_flavor_name(
        self, name: str, name_suffix: int, flavor_data: dict
    ) -> str:
        """Change the flavor name if the name already exists.

        Args:
            name        (str):          Flavor name to be checked
            name_suffix (int):          Suffix to be appended to name
            flavor_data (dict):         Flavor dict

        Returns:
            name        (str):          New flavor name to be used

        """
        # Get used names
        fl = self.nova.flavors.list()
        fl_names = [f.name for f in fl]

        while name in fl_names:
            name_suffix += 1
            name = flavor_data["name"] + "-" + str(name_suffix)

        return name

    def _process_extended_config_of_flavor(
        self, extended: dict, extra_specs: dict
    ) -> None:
        """Process the extended dict to fill up extra_specs.
        Args:

            extended    (dict):         Keeping the extra specification of flavor
            extra_specs (dict):         Dict to be filled to be used during flavor creation

        """
        quotas = {
            "cpu-quota": "cpu",
            "mem-quota": "memory",
            "vif-quota": "vif",
            "disk-io-quota": "disk_io",
        }

        page_sizes = {
            "LARGE": "large",
            "SMALL": "small",
            "SIZE_2MB": "2MB",
            "SIZE_1GB": "1GB",
            "PREFER_LARGE": "any",
        }

        policies = {
            "cpu-pinning-policy": "hw:cpu_policy",
            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
            "mem-policy": "hw:numa_mempolicy",
        }

        numas = extended.get("numas")
        if numas:
            self._process_numa_parameters_of_flavor(numas, extra_specs)

        for quota, item in quotas.items():
            if quota in extended.keys():
                self.process_resource_quota(extended.get(quota), item, extra_specs)

        # Set the mempage size as specified in the descriptor
        if extended.get("mempage-size"):
            if extended["mempage-size"] in page_sizes.keys():
                extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
            else:
                # Normally, validations in NBI should not allow to reach this condition.
                self.logger.debug(
                    "Invalid mempage-size %s. Will be ignored",
                    extended.get("mempage-size"),
                )

        for policy, hw_policy in policies.items():
            if extended.get(policy):
                extra_specs[hw_policy] = extended[policy].lower()

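    # Illustrative example for _process_extended_config_of_flavor() above:
    # extended = {"mempage-size": "LARGE", "cpu-quota": {"limit": 10000}}
    # adds {"hw:mem_page_size": "large", "quota:cpu_limit": 10000} to
    # extra_specs.
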
    @staticmethod
    def _get_flavor_details(flavor_data: dict) -> Tuple:
        """Returns the details of flavor
        Args:
            flavor_data (dict):         Dictionary that includes required flavor details

        Returns:
            ram, vcpus, extra_specs, extended (tuple): Main items of required flavor

        """
        return (
            flavor_data.get("ram", 64),
            flavor_data.get("vcpus", 1),
            {},
            flavor_data.get("extended"),
        )

    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        If change_name_if_used is True, it will change the name in case of
        conflict, because name repetition is not supported.

        Args:
            flavor_data         (dict):     Flavor details to be processed
            change_name_if_used (bool):     Change name in case of conflict

        Returns:
            flavor_id           (str):      flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )
                    if extended:
                        self._process_extended_config_of_flavor(extended, extra_specs)

                    # Create flavor

                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)

        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)

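    # Illustrative flavor_data (hypothetical values) accepted by new_flavor()
    # above:
    #
    #     {"name": "vnf-small", "ram": 2048, "vcpus": 2, "disk": 10,
    #      "extended": {"mempage-size": "LARGE",
    #                   "numas": [{"id": 0, "threads": 2}]}}
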
    def delete_flavor(self, flavor_id):
        """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
        try:
            self._reload_connection()
            self.nova.flavors.delete(flavor_id)

            return flavor_id
        # except nvExceptions.BadRequest as e:
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_image(self, image_dict):
        """
        Adds a tenant image to VIM. image_dict is a dictionary with:
            name: name
            disk_format: qcow2, vhd, vmdk, raw (by default), ...
            location: path or URI
            public: "yes" or "no"
            metadata: metadata of the image
        Returns the image_id
        """
        retry = 0
        max_retries = 3

        while retry < max_retries:
            retry += 1
            try:
                self._reload_connection()

                # determine format http://docs.openstack.org/developer/glance/formats.html
                if "disk_format" in image_dict:
                    disk_format = image_dict["disk_format"]
                else:  # autodiscover based on extension
                    if image_dict["location"].endswith(".qcow2"):
                        disk_format = "qcow2"
                    elif image_dict["location"].endswith(".vhd"):
                        disk_format = "vhd"
                    elif image_dict["location"].endswith(".vmdk"):
                        disk_format = "vmdk"
                    elif image_dict["location"].endswith(".vdi"):
                        disk_format = "vdi"
                    elif image_dict["location"].endswith(".iso"):
                        disk_format = "iso"
                    elif image_dict["location"].endswith(".aki"):
                        disk_format = "aki"
                    elif image_dict["location"].endswith(".ari"):
                        disk_format = "ari"
                    elif image_dict["location"].endswith(".ami"):
                        disk_format = "ami"
                    else:
                        disk_format = "raw"

                self.logger.debug(
                    "new_image: '%s' loading from '%s'",
                    image_dict["name"],
                    image_dict["location"],
                )
                if self.vim_type == "VIO":
                    container_format = "bare"
                    if "container_format" in image_dict:
                        container_format = image_dict["container_format"]

                    new_image = self.glance.images.create(
                        name=image_dict["name"],
                        container_format=container_format,
                        disk_format=disk_format,
                    )
                else:
                    new_image = self.glance.images.create(name=image_dict["name"])

                if image_dict["location"].startswith("http"):
                    # TODO there is not a method to direct download. It must be downloaded locally with requests
                    raise vimconn.VimConnNotImplemented("Cannot create image from URL")
                else:  # local path; open in binary mode, as glance uploads a byte stream
                    with open(image_dict["location"], "rb") as fimage:
                        self.glance.images.upload(new_image.id, fimage)
                        # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                        # image_dict.get("public","yes")=="yes",
                        # container_format="bare", data=fimage, disk_format=disk_format)

                # default to an empty dict so the location keys below can always be set
                metadata_to_load = image_dict.get("metadata") or {}

                # TODO location is a reserved word for current openstack versions. fixed for VIO please check
                # for openstack
                if self.vim_type == "VIO":
                    metadata_to_load["upload_location"] = image_dict["location"]
                else:
                    metadata_to_load["location"] = image_dict["location"]

                self.glance.images.update(new_image.id, **metadata_to_load)

                return new_image.id
            except (
                nvExceptions.Conflict,
                ksExceptions.ClientException,
                nvExceptions.ClientException,
            ) as e:
                self._format_exception(e)
            except (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
            ) as e:
                # retry on transient errors until max_retries is reached
                if retry < max_retries:
                    continue

                self._format_exception(e)
            except IOError as e:  # can not open the file
                raise vimconn.VimConnConnectionException(
                    "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                    http_code=vimconn.HTTP_Bad_Request,
                )

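    # Illustrative image_dict (hypothetical values) accepted by new_image()
    # above; "location" must be a local path, since direct HTTP download is
    # not implemented:
    #
    #     {"name": "ubuntu20.04", "disk_format": "qcow2",
    #      "location": "/tmp/ubuntu20.04.qcow2", "metadata": {}}
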
    def delete_image(self, image_id):
        """Deletes a tenant image from openstack VIM. Returns the old id"""
        try:
            self._reload_connection()
            self.glance.images.delete(image_id)

            return image_id
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            gl1Exceptions.HTTPNotFound,
            ConnectionError,
        ) as e:  # TODO remove
            self._format_exception(e)

    def get_image_id_from_path(self, path):
        """Get the image id from image path in the VIM database. Returns the image_id"""
        try:
            self._reload_connection()
            images = self.glance.images.list()

            for image in images:
                if image.metadata.get("location") == path:
                    return image.id

            raise vimconn.VimConnNotFoundException(
                "image with location '{}' not found".format(path)
            )
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_image_list(self, filter_dict={}):
        """Obtain tenant images from VIM
        Filter_dict can be:
            id: image id
            name: image name
            checksum: image checksum
        Returns the image list of dictionaries:
            [{<the fields at Filter_dict plus some VIM specific>}, ...]
            List can be empty
        """
        self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()
            # filter_dict_os = filter_dict.copy()
            # First we filter by the available filter fields: name, id. The others are removed.
            image_list = self.glance.images.list()
            filtered_list = []

            for image in image_list:
                try:
                    if filter_dict.get("name") and image["name"] != filter_dict["name"]:
                        continue

                    if filter_dict.get("id") and image["id"] != filter_dict["id"]:
                        continue

                    if (
                        filter_dict.get("checksum")
                        and image["checksum"] != filter_dict["checksum"]
                    ):
                        continue

                    filtered_list.append(image.copy())
                except gl1Exceptions.HTTPNotFound:
                    pass

            return filtered_list
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            self._format_exception(e)

1781 def __wait_for_vm(self, vm_id, status):
1782 """wait until vm is in the desired status and return True.
1783 If the VM gets in ERROR status, return false.
1784 If the timeout is reached generate an exception"""
1785 elapsed_time = 0
1786 while elapsed_time < server_timeout:
1787 vm_status = self.nova.servers.get(vm_id).status
1788
1789 if vm_status == status:
1790 return True
1791
1792 if vm_status == "ERROR":
1793 return False
1794
1795 time.sleep(5)
1796 elapsed_time += 5
1797
1798 # if we exceeded the timeout, raise
1799 if elapsed_time >= server_timeout:
1800 raise vimconn.VimConnException(
1801 "Timeout waiting for instance " + vm_id + " to get " + status,
1802 http_code=vimconn.HTTP_Request_Timeout,
1803 )
1804
1805 def _get_openstack_availablity_zones(self):
1806 """
1807 Get the availability zones available from OpenStack, excluding the "internal" zone.
1808 :return: list of zone names, or None on error
1809 """
1810 try:
1811 openstack_availability_zone = self.nova.availability_zones.list()
1812 openstack_availability_zone = [
1813 str(zone.zoneName)
1814 for zone in openstack_availability_zone
1815 if zone.zoneName != "internal"
1816 ]
1817
1818 return openstack_availability_zone
1819 except Exception:
1820 return None
1821
1822 def _set_availablity_zones(self):
1823 """
1824 Set the VIM availability zone(s) from config, or query them from OpenStack.
1825 :return:
1826 """
1827 if "availability_zone" in self.config:
1828 vim_availability_zones = self.config.get("availability_zone")
1829
1830 if isinstance(vim_availability_zones, str):
1831 self.availability_zone = [vim_availability_zones]
1832 elif isinstance(vim_availability_zones, list):
1833 self.availability_zone = vim_availability_zones
1834 else:
1835 self.availability_zone = self._get_openstack_availablity_zones()
1836
1837 def _get_vm_availability_zone(
1838 self, availability_zone_index, availability_zone_list
1839 ):
1840 """
1841 Return the availability zone to be used by the created VM.
1842 :return: The VIM availability zone to be used or None
1843 """
1844 if availability_zone_index is None:
1845 if not self.config.get("availability_zone"):
1846 return None
1847 elif isinstance(self.config.get("availability_zone"), str):
1848 return self.config["availability_zone"]
1849 else:
1850 # TODO consider using a different parameter at config for default AV and AV list match
1851 return self.config["availability_zone"][0]
1852
1853 vim_availability_zones = self.availability_zone
1854 # check that the VIM offers enough availability zones for those described in the VNFD
1855 if vim_availability_zones and len(availability_zone_list) <= len(
1856 vim_availability_zones
1857 ):
1858 # if any NFV AZ name does not match a VIM AZ name, fall back to matching by index
1859 match_by_index = False
1860 for av in availability_zone_list:
1861 if av not in vim_availability_zones:
1862 match_by_index = True
1863 break
1864
1865 if match_by_index:
1866 return vim_availability_zones[availability_zone_index]
1867 else:
1868 return availability_zone_list[availability_zone_index]
1869 else:
1870 raise vimconn.VimConnConflictException(
1871 "No enough availability zones at VIM for this deployment"
1872 )
1873
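# Illustrative example (hypothetical values): with config["availability_zone"] =
# ["az-a", "az-b"] and a VNFD that requests availability_zone_list = ["nova-1", "nova-2"],
# no AZ name matches, so _get_vm_availability_zone(1, ["nova-1", "nova-2"]) falls back
# to matching by index and returns "az-b".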
1874 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1875 """Fill up the security_groups in the port_dict.
1876
1877 Args:
1878 net (dict): Network details
1879 port_dict (dict): Port details
1880
1881 """
1882 if (
1883 self.config.get("security_groups")
1884 and net.get("port_security") is not False
1885 and not self.config.get("no_port_security_extension")
1886 ):
1887 if not self.security_groups_id:
1888 self._get_ids_from_name()
1889
1890 port_dict["security_groups"] = self.security_groups_id
1891
1892 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1893 """Fill up the network binding depending on network type in the port_dict.
1894
1895 Args:
1896 net (dict): Network details
1897 port_dict (dict): Port details
1898
1899 """
1900 if not net.get("type"):
1901 raise vimconn.VimConnException("Type is missing in the network details.")
1902
1903 if net["type"] == "virtual":
1904 pass
1905
1906 # For VF
1907 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1908 port_dict["binding:vnic_type"] = "direct"
1909
1910 # VIO specific Changes
1911 if self.vim_type == "VIO":
1912 # Need to create port with port_security_enabled = False and no-security-groups
1913 port_dict["port_security_enabled"] = False
1914 port_dict["provider_security_groups"] = []
1915 port_dict["security_groups"] = []
1916
1917 else:
1918 # For PT PCI-PASSTHROUGH
1919 port_dict["binding:vnic_type"] = "direct-physical"
1920
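# Sketch of a port_dict produced by _create_port plus this method for an SR-IOV
# interface on a VIO VIM (all values are illustrative):
# {"network_id": "<net-uuid>", "name": "eth0", "admin_state_up": True,
#  "binding:vnic_type": "direct", "port_security_enabled": False,
#  "provider_security_groups": [], "security_groups": []}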
1921 @staticmethod
1922 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1923 """Set the "ip" parameter in net dictionary.
1924
1925 Args:
1926 new_port (dict): New created port
1927 net (dict): Network details
1928
1929 """
1930 fixed_ips = new_port["port"].get("fixed_ips")
1931
1932 if fixed_ips:
1933 net["ip"] = fixed_ips[0].get("ip_address")
1934 else:
1935 net["ip"] = None
1936
1937 @staticmethod
1938 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1939 """Fill up the mac_address and fixed_ips in port_dict.
1940
1941 Args:
1942 net (dict): Network details
1943 port_dict (dict): Port details
1944
1945 """
1946 if net.get("mac_address"):
1947 port_dict["mac_address"] = net["mac_address"]
1948
1949 if net.get("ip_address"):
1950 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1951 # TODO add "subnet_id": <subnet_id>
1952
1953 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1954 """Create new port using neutron.
1955
1956 Args:
1957 port_dict (dict): Port details
1958 created_items (dict): All created items
1959 net (dict): Network details
1960
1961 Returns:
1962 new_port (dict): New created port
1963
1964 """
1965 new_port = self.neutron.create_port({"port": port_dict})
1966 created_items["port:" + str(new_port["port"]["id"])] = True
1967 net["mac_adress"] = new_port["port"]["mac_address"]
1968 net["vim_id"] = new_port["port"]["id"]
1969
1970 return new_port
1971
1972 def _create_port(
1973 self, net: dict, name: str, created_items: dict
1974 ) -> Tuple[dict, dict]:
1975 """Create port using net details.
1976
1977 Args:
1978 net (dict): Network details
1979 name (str): Name to be used as network name if net dict does not include name
1980 created_items (dict): All created items
1981
1982 Returns:
1983 new_port, port: the newly created port (neutron response) and the port dict for the nics list
1984
1985 """
1986
1987 port_dict = {
1988 "network_id": net["net_id"],
1989 "name": net.get("name"),
1990 "admin_state_up": True,
1991 }
1992
1993 if not port_dict["name"]:
1994 port_dict["name"] = name
1995
1996 self._prepare_port_dict_security_groups(net, port_dict)
1997
1998 self._prepare_port_dict_binding(net, port_dict)
1999
2000 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
2001
2002 new_port = self._create_new_port(port_dict, created_items, net)
2003
2004 vimconnector._set_fixed_ip(new_port, net)
2005
2006 port = {"port-id": new_port["port"]["id"]}
2007
2008 if float(self.nova.api_version.get_string()) >= 2.32:
2009 port["tag"] = new_port["port"]["name"]
2010
2011 return new_port, port
2012
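# Usage sketch (identifiers are illustrative): for net = {"net_id": "<net-uuid>",
# "type": "virtual"}, this returns the full neutron response plus the entry appended
# to the nics list, e.g. port = {"port-id": "<port-uuid>", "tag": "eth0"}; the "tag"
# key is only added from Nova API microversion 2.32 on, where device tags are supported.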
2013 def _prepare_network_for_vminstance(
2014 self,
2015 name: str,
2016 net_list: list,
2017 created_items: dict,
2018 net_list_vim: list,
2019 external_network: list,
2020 no_secured_ports: list,
2021 ) -> None:
2022 """Create port and fill up net dictionary for new VM instance creation.
2023
2024 Args:
2025 name (str): Name of network
2026 net_list (list): List of networks
2027 created_items (dict): All created items belongs to a VM
2028 net_list_vim (list): List of ports
2029 external_network (list): List of external-networks
2030 no_secured_ports (list): Port security disabled ports
2031 """
2032
2033 self._reload_connection()
2034
2035 for net in net_list:
2036 # Skip non-connected iface
2037 if not net.get("net_id"):
2038 continue
2039
2040 new_port, port = self._create_port(net, name, created_items)
2041
2042 net_list_vim.append(port)
2043
2044 if net.get("floating_ip", False):
2045 net["exit_on_floating_ip_error"] = True
2046 external_network.append(net)
2047
2048 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2049 net["exit_on_floating_ip_error"] = False
2050 external_network.append(net)
2051 net["floating_ip"] = self.config.get("use_floating_ip")
2052
2053 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2054 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2055 if net.get("port_security") is False and not self.config.get(
2056 "no_port_security_extension"
2057 ):
2058 no_secured_ports.append(
2059 (
2060 new_port["port"]["id"],
2061 net.get("port_security_disable_strategy"),
2062 )
2063 )
2064
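# Example net_list entry and its side effects (illustrative values): for
# net = {"net_id": "<net-uuid>", "use": "mgmt", "port_security": False} with config
# "use_floating_ip" set, a port is created and appended to net_list_vim, the net is
# queued in external_network for floating IP assignment, and ("<port-uuid>", None) is
# queued in no_secured_ports so port security is disabled once the VM is ACTIVE.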
2065 def _prepare_persistent_root_volumes(
2066 self,
2067 name: str,
2068 vm_av_zone: list,
2069 disk: dict,
2070 base_disk_index: int,
2071 block_device_mapping: dict,
2072 existing_vim_volumes: list,
2073 created_items: dict,
2074 ) -> Optional[str]:
2075 """Prepare persistent root volumes for new VM instance.
2076
2077 Args:
2078 name (str): Name of VM instance
2079 vm_av_zone (list): List of availability zones
2080 disk (dict): Disk details
2081 base_disk_index (int): Disk index
2082 block_device_mapping (dict): Block device details
2083 existing_vim_volumes (list): Existing disk details
2084 created_items (dict): All created items belongs to VM
2085
2086 Returns:
2087 boot_volume_id (str): ID of boot volume
2088
2089 """
2090 # Disk may include only vim_volume_id or only vim_id.
2091 # Use an existing persistent root volume, found via vim_volume_id or vim_id
2092 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2093
2094 if disk.get(key_id):
2095 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2096 existing_vim_volumes.append({"id": disk[key_id]})
2097
2098 else:
2099 # Create persistent root volume
2100 volume = self.cinder.volumes.create(
2101 size=disk["size"],
2102 name=name + "vd" + chr(base_disk_index),
2103 imageRef=disk["image_id"],
2104 # Make sure volume is in the same AZ as the VM to be attached to
2105 availability_zone=vm_av_zone,
2106 )
2107 boot_volume_id = volume.id
2108 self.update_block_device_mapping(
2109 volume=volume,
2110 block_device_mapping=block_device_mapping,
2111 base_disk_index=base_disk_index,
2112 disk=disk,
2113 created_items=created_items,
2114 )
2115
2116 return boot_volume_id
2117
2118 @staticmethod
2119 def update_block_device_mapping(
2120 volume: object,
2121 block_device_mapping: dict,
2122 base_disk_index: int,
2123 disk: dict,
2124 created_items: dict,
2125 ) -> None:
2126 """Add volume information to block device mapping dict.
2127 Args:
2128 volume (object): Created volume object
2129 block_device_mapping (dict): Block device details
2130 base_disk_index (int): Disk index
2131 disk (dict): Disk details
2132 created_items (dict): All created items belongs to VM
2133 """
2134 if not volume:
2135 raise vimconn.VimConnException("Volume is empty.")
2136
2137 if not hasattr(volume, "id"):
2138 raise vimconn.VimConnException(
2139 "Created volume is not valid, does not have id attribute."
2140 )
2141
2142 volume_txt = "volume:" + str(volume.id)
2143 if disk.get("keep"):
2144 volume_txt += ":keep"
2145 created_items[volume_txt] = True
2146 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2147
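# Sketch of the bookkeeping done here (illustrative ids): after creating a volume for
# disk index ord("b") with disk = {"size": 10, "keep": True}, created_items gains
# {"volume:<vol-uuid>:keep": True} and block_device_mapping gains {"vdb": "<vol-uuid>"};
# the ":keep" suffix later protects the volume from deletion on rollback.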
2148 def _prepare_non_root_persistent_volumes(
2149 self,
2150 name: str,
2151 disk: dict,
2152 vm_av_zone: list,
2153 block_device_mapping: dict,
2154 base_disk_index: int,
2155 existing_vim_volumes: list,
2156 created_items: dict,
2157 ) -> None:
2158 """Prepare persistent volumes for new VM instance.
2159
2160 Args:
2161 name (str): Name of VM instance
2162 disk (dict): Disk details
2163 vm_av_zone (list): List of availability zones
2164 block_device_mapping (dict): Block device details
2165 base_disk_index (int): Disk index
2166 existing_vim_volumes (list): Existing disk details
2167 created_items (dict): All created items belongs to VM
2168 """
2169 # Non-root persistent volumes
2170 # Disk may include only vim_volume_id or only vim_id.
2171 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2172
2173 if disk.get(key_id):
2174 # Use existing persistent volume
2175 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2176 existing_vim_volumes.append({"id": disk[key_id]})
2177
2178 else:
2179 # Create persistent volume
2180 volume = self.cinder.volumes.create(
2181 size=disk["size"],
2182 name=name + "vd" + chr(base_disk_index),
2183 # Make sure volume is in the same AZ as the VM to be attached to
2184 availability_zone=vm_av_zone,
2185 )
2186 self.update_block_device_mapping(
2187 volume=volume,
2188 block_device_mapping=block_device_mapping,
2189 base_disk_index=base_disk_index,
2190 disk=disk,
2191 created_items=created_items,
2192 )
2193
2194 def _wait_for_created_volumes_availability(
2195 self, elapsed_time: int, created_items: dict
2196 ) -> Optional[int]:
2197 """Wait till created volumes become available.
2198
2199 Args:
2200 elapsed_time (int): Passed time while waiting
2201 created_items (dict): All created items belongs to VM
2202
2203 Returns:
2204 elapsed_time (int): Time spent while waiting
2205
2206 """
2207
2208 while elapsed_time < volume_timeout:
2209 for created_item in created_items:
2210 v, volume_id = (
2211 created_item.split(":")[0],
2212 created_item.split(":")[1],
2213 )
2214 if v == "volume":
2215 if self.cinder.volumes.get(volume_id).status != "available":
2216 break
2217 else:
2218 # All ready: break from while
2219 break
2220
2221 time.sleep(5)
2222 elapsed_time += 5
2223
2224 return elapsed_time
2225
2226 def _wait_for_existing_volumes_availability(
2227 self, elapsed_time: int, existing_vim_volumes: list
2228 ) -> Optional[int]:
2229 """Wait till existing volumes become available.
2230
2231 Args:
2232 elapsed_time (int): Passed time while waiting
2233 existing_vim_volumes (list): Existing volume details
2234
2235 Returns:
2236 elapsed_time (int): Time spent while waiting
2237
2238 """
2239
2240 while elapsed_time < volume_timeout:
2241 for volume in existing_vim_volumes:
2242 if self.cinder.volumes.get(volume["id"]).status != "available":
2243 break
2244 else: # all ready: break from while
2245 break
2246
2247 time.sleep(5)
2248 elapsed_time += 5
2249
2250 return elapsed_time
2251
2252 def _prepare_disk_for_vminstance(
2253 self,
2254 name: str,
2255 existing_vim_volumes: list,
2256 created_items: dict,
2257 vm_av_zone: list,
2258 block_device_mapping: dict,
2259 disk_list: list = None,
2260 ) -> None:
2261 """Prepare all volumes for new VM instance.
2262
2263 Args:
2264 name (str): Name of Instance
2265 existing_vim_volumes (list): List of existing volumes
2266 created_items (dict): All created items belongs to VM
2267 vm_av_zone (list): VM availability zone
2268 block_device_mapping (dict): Block devices to be attached to VM
2269 disk_list (list): List of disks
2270
2271 """
2272 # Create additional volumes in case these are present in disk_list
2273 base_disk_index = ord("b")
2274 boot_volume_id = None
2275 elapsed_time = 0
2276
2277 for disk in disk_list:
2278 if "image_id" in disk:
2279 # Root persistent volume
2280 base_disk_index = ord("a")
2281 boot_volume_id = self._prepare_persistent_root_volumes(
2282 name=name,
2283 vm_av_zone=vm_av_zone,
2284 disk=disk,
2285 base_disk_index=base_disk_index,
2286 block_device_mapping=block_device_mapping,
2287 existing_vim_volumes=existing_vim_volumes,
2288 created_items=created_items,
2289 )
2290 else:
2291 # Non-root persistent volume
2292 self._prepare_non_root_persistent_volumes(
2293 name=name,
2294 disk=disk,
2295 vm_av_zone=vm_av_zone,
2296 block_device_mapping=block_device_mapping,
2297 base_disk_index=base_disk_index,
2298 existing_vim_volumes=existing_vim_volumes,
2299 created_items=created_items,
2300 )
2301 base_disk_index += 1
2302
2303 # Wait until created volumes reach "available" status
2304 elapsed_time = self._wait_for_created_volumes_availability(
2305 elapsed_time, created_items
2306 )
2307 # Wait until existing VIM volumes reach "available" status
2308 elapsed_time = self._wait_for_existing_volumes_availability(
2309 elapsed_time, existing_vim_volumes
2310 )
2311 # If we exceeded the timeout, raise (the caller will roll back)
2312 if elapsed_time >= volume_timeout:
2313 raise vimconn.VimConnException(
2314 "Timeout creating volumes for instance " + name,
2315 http_code=vimconn.HTTP_Request_Timeout,
2316 )
2317 if boot_volume_id:
2318 self.cinder.volumes.set_bootable(boot_volume_id, True)
2319
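# Example disk_list (illustrative): a persistent root disk built from an image plus an
# empty data disk:
# disk_list = [{"image_id": "<image-uuid>", "size": 20}, {"size": 5}]
# The first entry becomes "vda" (marked bootable), the second "vdb"; both must reach
# "available" status within volume_timeout or a timeout exception is raised.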
2320 def _find_the_external_network_for_floating_ip(self):
2321 """Get the external network ip in order to create floating IP.
2322
2323 Returns:
2324 pool_id (str): External network pool ID
2325
2326 """
2327
2328 # Find the external network
2329 external_nets = list()
2330
2331 for net in self.neutron.list_networks()["networks"]:
2332 if net["router:external"]:
2333 external_nets.append(net)
2334
2335 if len(external_nets) == 0:
2336 raise vimconn.VimConnException(
2337 "Cannot create floating_ip automatically since "
2338 "no external network is present",
2339 http_code=vimconn.HTTP_Conflict,
2340 )
2341
2342 if len(external_nets) > 1:
2343 raise vimconn.VimConnException(
2344 "Cannot create floating_ip automatically since "
2345 "multiple external networks are present",
2346 http_code=vimconn.HTTP_Conflict,
2347 )
2348
2349 # Pool ID
2350 return external_nets[0].get("id")
2351
2352 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2353 """Trigger neutron to create a new floating IP using external network ID.
2354
2355 Args:
2356 param (dict): Input parameters to create a floating IP
2357 created_items (dict): All created items belongs to new VM instance
2358
2359 Raises:
2360
2361 VimConnException
2362 """
2363 try:
2364 self.logger.debug("Creating floating IP")
2365 new_floating_ip = self.neutron.create_floatingip(param)
2366 free_floating_ip = new_floating_ip["floatingip"]["id"]
2367 created_items["floating_ip:" + str(free_floating_ip)] = True
2368
2369 except Exception as e:
2370 raise vimconn.VimConnException(
2371 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2372 http_code=vimconn.HTTP_Conflict,
2373 )
2374
2375 def _create_floating_ip(
2376 self, floating_network: dict, server: object, created_items: dict
2377 ) -> None:
2378 """Get the available Pool ID and create a new floating IP.
2379
2380 Args:
2381 floating_network (dict): Dict including external network ID
2382 server (object): Server object
2383 created_items (dict): All created items belongs to new VM instance
2384
2385 """
2386
2387 # Pool_id is available
2388 if (
2389 isinstance(floating_network["floating_ip"], str)
2390 and floating_network["floating_ip"].lower() != "true"
2391 ):
2392 pool_id = floating_network["floating_ip"]
2393
2394 # Find the Pool_id
2395 else:
2396 pool_id = self._find_the_external_network_for_floating_ip()
2397
2398 param = {
2399 "floatingip": {
2400 "floating_network_id": pool_id,
2401 "tenant_id": server.tenant_id,
2402 }
2403 }
2404
2405 self._neutron_create_float_ip(param, created_items)
2406
2407 def _find_floating_ip(
2408 self,
2409 server: object,
2410 floating_ips: list,
2411 floating_network: dict,
2412 ) -> Optional[str]:
2413 """Find the available free floating IPs if there are.
2414
2415 Args:
2416 server (object): Server object
2417 floating_ips (list): List of floating IPs
2418 floating_network (dict): Details of floating network such as ID
2419
2420 Returns:
2421 free_floating_ip (str): Free floating ip address
2422
2423 """
2424 for fip in floating_ips:
2425 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2426 continue
2427
2428 if isinstance(floating_network["floating_ip"], str):
2429 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2430 continue
2431
2432 return fip["id"]
2433
2434 def _assign_floating_ip(
2435 self, free_floating_ip: str, floating_network: dict
2436 ) -> Dict:
2437 """Assign the free floating ip address to port.
2438
2439 Args:
2440 free_floating_ip (str): Floating IP to be assigned
2441 floating_network (dict): ID of floating network
2442
2443 Returns:
2444 fip (dict): Floating IP details
2445
2446 """
2447 # The vim_id key contains the neutron.port_id
2448 self.neutron.update_floatingip(
2449 free_floating_ip,
2450 {"floatingip": {"port_id": floating_network["vim_id"]}},
2451 )
2452 # Guard against races: wait 5 seconds, then re-read to confirm the assignment
2453 time.sleep(5)
2454
2455 return self.neutron.show_floatingip(free_floating_ip)
2456
2457 def _get_free_floating_ip(
2458 self, server: object, floating_network: dict
2459 ) -> Optional[str]:
2460 """Get the free floating IP address.
2461
2462 Args:
2463 server (object): Server Object
2464 floating_network (dict): Floating network details
2465
2466 Returns:
2467 free_floating_ip (str): Free floating ip addr
2468
2469 """
2470
2471 floating_ips = list(self.neutron.list_floatingips().get("floatingips", ()))  # random.shuffle needs a mutable sequence
2472
2473 # Randomize
2474 random.shuffle(floating_ips)
2475
2476 return self._find_floating_ip(server, floating_ips, floating_network)
2477
2478 def _prepare_external_network_for_vminstance(
2479 self,
2480 external_network: list,
2481 server: object,
2482 created_items: dict,
2483 vm_start_time: float,
2484 ) -> None:
2485 """Assign floating IP address for VM instance.
2486
2487 Args:
2488 external_network (list): ID of External network
2489 server (object): Server Object
2490 created_items (dict): All created items belongs to new VM instance
2491 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2492
2493 Raises:
2494 VimConnException
2495
2496 """
2497 for floating_network in external_network:
2498 try:
2499 assigned = False
2500 floating_ip_retries = 3
2501 # In case of RO in HA there can be conflicts: two ROs trying to assign the same
2502 # floating IP. So retry several times
2503 while not assigned:
2504 free_floating_ip = self._get_free_floating_ip(
2505 server, floating_network
2506 )
2507
2508 if not free_floating_ip:
2509 self._create_floating_ip(floating_network, server, created_items)
2510 # retry the loop to pick up the newly created floating IP
2511 continue
2512
2513 try:
2514 # For race condition ensure not already assigned
2515 fip = self.neutron.show_floatingip(free_floating_ip)
2516
2517 if fip["floatingip"].get("port_id"):
2518 continue
2519
2520 # Assign floating ip
2521 fip = self._assign_floating_ip(
2522 free_floating_ip, floating_network
2523 )
2524
2525 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2526 self.logger.warning(
2527 "floating_ip {} re-assigned to other port".format(
2528 free_floating_ip
2529 )
2530 )
2531 continue
2532
2533 self.logger.debug(
2534 "Assigned floating_ip {} to VM {}".format(
2535 free_floating_ip, server.id
2536 )
2537 )
2538
2539 assigned = True
2540
2541 except Exception as e:
2542 # OpenStack needs some time after VM creation to assign an IP, so retry on failure
2543 vm_status = self.nova.servers.get(server.id).status
2544
2545 if vm_status not in ("ACTIVE", "ERROR"):
2546 if time.time() - vm_start_time < server_timeout:
2547 time.sleep(5)
2548 continue
2549 elif floating_ip_retries > 0:
2550 floating_ip_retries -= 1
2551 continue
2552
2553 raise vimconn.VimConnException(
2554 "Cannot create floating_ip: {} {}".format(
2555 type(e).__name__, e
2556 ),
2557 http_code=vimconn.HTTP_Conflict,
2558 )
2559
2560 except Exception as e:
2561 if not floating_network["exit_on_floating_ip_error"]:
2562 self.logger.error("Cannot create floating_ip. %s", str(e))
2563 continue
2564
2565 raise
2566
2567 def _update_port_security_for_vminstance(
2568 self,
2569 no_secured_ports: list,
2570 server: object,
2571 ) -> None:
2572 """Updates the port security according to no_secured_ports list.
2573
2574 Args:
2575 no_secured_ports (list): List of ports that security will be disabled
2576 server (object): Server Object
2577
2578 Raises:
2579 VimConnException
2580
2581 """
2582 # Wait until the VM is active and then disable the port-security
2583 if no_secured_ports:
2584 self.__wait_for_vm(server.id, "ACTIVE")
2585
2586 for port in no_secured_ports:
2587 port_update = {
2588 "port": {"port_security_enabled": False, "security_groups": None}
2589 }
2590
2591 if port[1] == "allow-address-pairs":
2592 port_update = {
2593 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2594 }
2595
2596 try:
2597 self.neutron.update_port(port[0], port_update)
2598
2599 except Exception:
2600 raise vimconn.VimConnException(
2601 "It was not possible to disable port security for port {}".format(
2602 port[0]
2603 )
2604 )
2605
2606 def new_vminstance(
2607 self,
2608 name: str,
2609 description: str,
2610 start: bool,
2611 image_id: str,
2612 flavor_id: str,
2613 affinity_group_list: list,
2614 net_list: list,
2615 cloud_config=None,
2616 disk_list=None,
2617 availability_zone_index=None,
2618 availability_zone_list=None,
2619 ) -> tuple:
2620 """Adds a VM instance to VIM.
2621
2622 Args:
2623 name (str): name of VM
2624 description (str): description
2625 start (bool): indicates if VM must start or boot in pause mode. Ignored
2626 image_id (str): image uuid
2627 flavor_id (str): flavor uuid
2628 affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignored if empty.
2629 net_list (list): list of interfaces, each one is a dictionary with:
2630 name: name of network
2631 net_id: network uuid to connect
2632 vpci: virtual PCI address to assign; ignored because OpenStack lacks support #TODO
2633 model: interface model, ignored #TODO
2634 mac_address: used for SR-IOV ifaces #TODO for other types
2635 use: 'data', 'bridge', 'mgmt'
2636 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2637 vim_id: filled/added by this function
2638 floating_ip: True/False (or it can be None)
2639 port_security: True/False
2640 cloud_config (dict): (optional) dictionary with:
2641 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2642 users: (optional) list of users to be inserted, each item is a dict with:
2643 name: (mandatory) user name,
2644 key-pairs: (optional) list of strings with the public key to be inserted to the user
2645 user-data: (optional) string is a text script to be passed directly to cloud-init
2646 config-files: (optional). List of files to be transferred. Each item is a dict with:
2647 dest: (mandatory) string with the destination absolute path
2648 encoding: (optional, by default text). Can be one of:
2649 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2650 content : (mandatory) string with the content of the file
2651 permissions: (optional) string with file permissions, typically octal notation '0644'
2652 owner: (optional) file owner, string with the format 'owner:group'
2653 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2654 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2655 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2656 size: (mandatory) string with the size of the disk in GB
2657 vim_id: (optional) should use this existing volume id
2658 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
2659 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2660 availability_zone_index is None
2661 #TODO ip, security groups
2662
2663 Returns:
2664 A tuple with the instance identifier and created_items or raises an exception on error
2665 created_items can be None or a dictionary where this method can include key-values that will be passed to
2666 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2667 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2668 as not present.
2669
2670 """
2671 self.logger.debug(
2672 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2673 image_id,
2674 flavor_id,
2675 str(net_list),
2676 )
2677
2678 try:
2679 server = None
2680 created_items = {}
2681 net_list_vim = []
2682 # list of external networks to be connected to instance, later on used to create floating_ip
2683 external_network = []
2684 # List of ports with port-security disabled
2685 no_secured_ports = []
2686 block_device_mapping = {}
2687 existing_vim_volumes = []
2688 server_group_id = None
2689 scheduler_hints = {}
2690
2691 # Check the Openstack Connection
2692 self._reload_connection()
2693
2694 # Prepare network list
2695 self._prepare_network_for_vminstance(
2696 name=name,
2697 net_list=net_list,
2698 created_items=created_items,
2699 net_list_vim=net_list_vim,
2700 external_network=external_network,
2701 no_secured_ports=no_secured_ports,
2702 )
2703
2704 # Cloud config
2705 config_drive, userdata = self._create_user_data(cloud_config)
2706
2707 # Get availability Zone
2708 vm_av_zone = self._get_vm_availability_zone(
2709 availability_zone_index, availability_zone_list
2710 )
2711
2712 if disk_list:
2713 # Prepare disks
2714 self._prepare_disk_for_vminstance(
2715 name=name,
2716 existing_vim_volumes=existing_vim_volumes,
2717 created_items=created_items,
2718 vm_av_zone=vm_av_zone,
2719 block_device_mapping=block_device_mapping,
2720 disk_list=disk_list,
2721 )
2722
2723 if affinity_group_list:
2724 # Only the first id in the list is used (OpenStack restriction)
2725 server_group_id = affinity_group_list[0]["affinity_group_id"]
2726 scheduler_hints["group"] = server_group_id
2727
2728 self.logger.debug(
2729 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2730 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2731 "block_device_mapping={}, server_group={})".format(
2732 name,
2733 image_id,
2734 flavor_id,
2735 net_list_vim,
2736 self.config.get("security_groups"),
2737 vm_av_zone,
2738 self.config.get("keypair"),
2739 userdata,
2740 config_drive,
2741 block_device_mapping,
2742 server_group_id,
2743 )
2744 )
2745
2746 # Create VM
2747 server = self.nova.servers.create(
2748 name=name,
2749 image=image_id,
2750 flavor=flavor_id,
2751 nics=net_list_vim,
2752 security_groups=self.config.get("security_groups"),
2753 # TODO remove security_groups in future versions. Already at neutron port
2754 availability_zone=vm_av_zone,
2755 key_name=self.config.get("keypair"),
2756 userdata=userdata,
2757 config_drive=config_drive,
2758 block_device_mapping=block_device_mapping,
2759 scheduler_hints=scheduler_hints,
2760 )
2761
2762 vm_start_time = time.time()
2763
2764 self._update_port_security_for_vminstance(no_secured_ports, server)
2765
2766 self._prepare_external_network_for_vminstance(
2767 external_network=external_network,
2768 server=server,
2769 created_items=created_items,
2770 vm_start_time=vm_start_time,
2771 )
2772
2773 return server.id, created_items
2774
2775 except Exception as e:
2776 server_id = None
2777 if server:
2778 server_id = server.id
2779
2780 try:
2781 created_items = self.remove_keep_tag_from_persistent_volumes(
2782 created_items
2783 )
2784
2785 self.delete_vminstance(server_id, created_items)
2786
2787 except Exception as e2:
2788 self.logger.error("new_vminstance rollback fail {}".format(e2))
2789
2790 self._format_exception(e)
2791
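# Minimal usage sketch of new_vminstance (all identifiers are hypothetical):
# vm_id, created_items = vim.new_vminstance(
#     name="vnf-vdu1", description="test", start=True,
#     image_id="<image-uuid>", flavor_id="<flavor-uuid>", affinity_group_list=[],
#     net_list=[{"name": "mgmt", "net_id": "<net-uuid>", "use": "mgmt", "type": "virtual"}],
# )
# created_items then holds keys such as "port:<uuid>", "volume:<uuid>" or
# "floating_ip:<uuid>" that delete_vminstance uses for cleanup and rollback.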
2792 @staticmethod
2793 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2794 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2795
2796 Args:
2797 created_items (dict): All created items belongs to VM
2798
2799 Returns:
2800 updated_created_items (dict): Dict which does not include keep flag for volumes.
2801
2802 """
2803 return {
2804 key.replace(":keep", ""): value for (key, value) in created_items.items()
2805 }
2806
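# Example (illustrative ids): {"volume:abc:keep": True, "port:xyz": True} becomes
# {"volume:abc": True, "port:xyz": True}, so the volume is no longer protected on delete.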
2807 def get_vminstance(self, vm_id):
2808 """Returns the VM instance information from VIM"""
2809 # self.logger.debug("Getting VM from VIM")
2810 try:
2811 self._reload_connection()
2812 server = self.nova.servers.find(id=vm_id)
2813 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2814
2815 return server.to_dict()
2816 except (
2817 ksExceptions.ClientException,
2818 nvExceptions.ClientException,
2819 nvExceptions.NotFound,
2820 ConnectionError,
2821 ) as e:
2822 self._format_exception(e)
2823
2824 def get_vminstance_console(self, vm_id, console_type="vnc"):
2825 """
2826 Get a console for the virtual machine
2827 Params:
2828 vm_id: uuid of the VM
2829 console_type, can be:
2830 "novnc" (by default), "xvpvnc" for VNC types,
2831 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2832 Returns dict with the console parameters:
2833 protocol: ssh, ftp, http, https, ...
2834 server: usually ip address
2835 port: the http, ssh, ... port
2836 suffix: extra text, e.g. the http path and query string
2837 """
2838 self.logger.debug("Getting VM CONSOLE from VIM")
2839
2840 try:
2841 self._reload_connection()
2842 server = self.nova.servers.find(id=vm_id)
2843
2844 if console_type is None or console_type == "novnc":
2845 console_dict = server.get_vnc_console("novnc")
2846 elif console_type == "xvpvnc":
2847 console_dict = server.get_vnc_console(console_type)
2848 elif console_type == "rdp-html5":
2849 console_dict = server.get_rdp_console(console_type)
2850 elif console_type == "spice-html5":
2851 console_dict = server.get_spice_console(console_type)
2852 else:
2853 raise vimconn.VimConnException(
2854 "console type '{}' not allowed".format(console_type),
2855 http_code=vimconn.HTTP_Bad_Request,
2856 )
2857
2858 console_dict1 = console_dict.get("console")
2859
2860 if console_dict1:
2861 console_url = console_dict1.get("url")
2862
2863 if console_url:
2864 # parse console_url
2865 protocol_index = console_url.find("//")
2866 suffix_index = (
2867 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2868 )
2869 port_index = (
2870 console_url[protocol_index + 2 : suffix_index].find(":")
2871 + protocol_index
2872 + 2
2873 )
2874
2875 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2876 return (
2877 -vimconn.HTTP_Internal_Server_Error,
2878 "Unexpected response from VIM",
2879 )
2880
2881 console_dict = {
2882 "protocol": console_url[0:protocol_index],
2883 "server": console_url[protocol_index + 2 : port_index],
2884 # skip the ":" and cast to int, as done in action_vminstance
2885 "port": int(console_url[port_index + 1 : suffix_index]),
2886 "suffix": console_url[suffix_index + 1 :],
2887 }
2888
2889 return console_dict
2890 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2891 except (
2892 nvExceptions.NotFound,
2893 ksExceptions.ClientException,
2894 nvExceptions.ClientException,
2895 nvExceptions.BadRequest,
2896 ConnectionError,
2897 ) as e:
2898 self._format_exception(e)
2899
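# Example of the URL parsing above (hypothetical console URL):
# "http://10.0.0.5:6080/vnc_auto.html?token=abc" yields
# {"protocol": "http", "server": "10.0.0.5", "port": 6080,
#  "suffix": "vnc_auto.html?token=abc"}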
2900 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2901 """Neutron delete ports by id.
2902 Args:
2903 k_id (str): Port id in the VIM
2904 """
2905 try:
2906 port_dict = self.neutron.list_ports()
2907 existing_ports = [port["id"] for port in port_dict.get("ports", ())]
2908
2909 if k_id in existing_ports:
2910 self.neutron.delete_port(k_id)
2911
2912 except Exception as e:
2913 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2914
2915 def _delete_volumes_by_id_wth_cinder(
2916 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2917 ) -> bool:
2918 """Cinder delete volume by id.
2919 Args:
2920 k (str): Full item name in created_items
2921 k_id (str): ID of the volume in VIM
2922 volumes_to_hold (list): Volumes not to delete
2923 created_items (dict): All created items belongs to VM
2924 """
2925 try:
2926 if k_id in volumes_to_hold:
2927 return
2928
2929 if self.cinder.volumes.get(k_id).status != "available":
2930 return True
2931
2932 else:
2933 self.cinder.volumes.delete(k_id)
2934 created_items[k] = None
2935
2936 except Exception as e:
2937 self.logger.error(
2938 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2939 )
2940
2941 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2942 """Neutron delete floating ip by id.
2943 Args:
2944 k (str): Full item name in created_items
2945 k_id (str): ID of floating ip in VIM
2946 created_items (dict): All created items belongs to VM
2947 """
2948 try:
2949 self.neutron.delete_floatingip(k_id)
2950 created_items[k] = None
2951
2952 except Exception as e:
2953 self.logger.error(
2954 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2955 )
2956
2957 @staticmethod
2958 def _get_item_name_id(k: str) -> Tuple[str, str]:
2959 k_item, _, k_id = k.partition(":")
2960 return k_item, k_id
2961
2962 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2963 """Delete VM ports attached to the networks before deleting virtual machine.
2964 Args:
2965 created_items (dict): All created items belongs to VM
2966 """
2967
2968 for k, v in created_items.items():
2969 if not v: # skip already deleted
2970 continue
2971
2972 try:
2973 k_item, k_id = self._get_item_name_id(k)
2974 if k_item == "port":
2975 self._delete_ports_by_id_wth_neutron(k_id)
2976
2977 except Exception as e:
2978 self.logger.error(
2979 "Error deleting port: {}: {}".format(type(e).__name__, e)
2980 )
2981
2982 def _delete_created_items(
2983 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2984 ) -> bool:
2985 """Delete Volumes and floating ip if they exist in created_items."""
2986 for k, v in created_items.items():
2987 if not v: # skip already deleted
2988 continue
2989
2990 try:
2991 k_item, k_id = self._get_item_name_id(k)
2992
2993 if k_item == "volume":
2994 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2995 k, k_id, volumes_to_hold, created_items
2996 )
2997
2998 if unavailable_vol:
2999 keep_waiting = True
3000
3001 elif k_item == "floating_ip":
3002 self._delete_floating_ip_by_id(k, k_id, created_items)
3003
3004 except Exception as e:
3005 self.logger.error("Error deleting {}: {}".format(k, e))
3006
3007 return keep_waiting
3008
3009 @staticmethod
3010 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3011 """Remove the volumes which has key flag from created_items
3012
3013 Args:
3014 created_items (dict): All created items belongs to VM
3015
3016 Returns:
3017 created_items (dict): created_items without the keep-flagged volumes
3018 """
3019 return {
3020 key: value
3021 for (key, value) in created_items.items()
3022 if len(key.split(":")) == 2
3023 }
3024
3025 def delete_vminstance(
3026 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3027 ) -> None:
3028 """Removes a VM instance from VIM. Returns the old identifier.
3029 Args:
3030 vm_id (str): Identifier of VM instance
3031 created_items (dict): All created items belongs to VM
3032 volumes_to_hold (list): Volumes_to_hold
3033 """
3034 if created_items is None:
3035 created_items = {}
3036 if volumes_to_hold is None:
3037 volumes_to_hold = []
3038
3039 try:
3040 created_items = self._extract_items_wth_keep_flag_from_created_items(
3041 created_items
3042 )
3043
3044 self._reload_connection()
3045
3046 # Delete VM ports attached to the networks before the virtual machine
3047 if created_items:
3048 self._delete_vm_ports_attached_to_network(created_items)
3049
3050 if vm_id:
3051 self.nova.servers.delete(vm_id)
3052
3053 # Even after being detached, volumes must reach "available" status before deletion.
3054 # This loop ensures it
3055 keep_waiting = True
3056 elapsed_time = 0
3057
3058 while keep_waiting and elapsed_time < volume_timeout:
3059 keep_waiting = False
3060
3061 # Delete volumes and floating IP.
3062 keep_waiting = self._delete_created_items(
3063 created_items, volumes_to_hold, keep_waiting
3064 )
3065
3066 if keep_waiting:
3067 time.sleep(1)
3068 elapsed_time += 1
3069
3070 except (
3071 nvExceptions.NotFound,
3072 ksExceptions.ClientException,
3073 nvExceptions.ClientException,
3074 ConnectionError,
3075 ) as e:
3076 self._format_exception(e)
3077
3078 def refresh_vms_status(self, vm_list):
3079 """Get the status of the virtual machines and their interfaces/ports
3080 Params: the list of VM identifiers
3081 Returns a dictionary with:
3082 vm_id: #VIM id of this Virtual Machine
3083 status: #Mandatory. Text with one of:
3084 # DELETED (not found at vim)
3085 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3086 # OTHER (Vim reported other status not understood)
3087 # ERROR (VIM indicates an ERROR status)
3088 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3089 # CREATING (on building process), ERROR
3090 # ACTIVE:NoMgmtIP (active, but none of its interfaces has an IP address)
3091 #
3092 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3093 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3094 interfaces:
3095 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3096 mac_address: #Text format XX:XX:XX:XX:XX:XX
3097 vim_net_id: #network id where this interface is connected
3098 vim_interface_id: #interface/port VIM id
3099 ip_address: #null, or text with IPv4, IPv6 address
3100 compute_node: #identification of compute node where PF,VF interface is allocated
3101 pci: #PCI address of the NIC that hosts the PF,VF
3102 vlan: #physical VLAN used for VF
3103 """
3104 vm_dict = {}
3105 self.logger.debug(
3106 "refresh_vms status: Getting tenant VM instance information from VIM"
3107 )
3108
3109 for vm_id in vm_list:
3110 vm = {}
3111
3112 try:
3113 vm_vim = self.get_vminstance(vm_id)
3114
3115 if vm_vim["status"] in vmStatus2manoFormat:
3116 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3117 else:
3118 vm["status"] = "OTHER"
3119 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3120
3121 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3122 vm_vim.pop("user_data", None)
3123 vm["vim_info"] = self.serialize(vm_vim)
3124
3125 vm["interfaces"] = []
3126 if vm_vim.get("fault"):
3127 vm["error_msg"] = str(vm_vim["fault"])
3128
3129 # get interfaces
3130 try:
3131 self._reload_connection()
3132 port_dict = self.neutron.list_ports(device_id=vm_id)
3133
3134 for port in port_dict["ports"]:
3135 interface = {}
3136 interface["vim_info"] = self.serialize(port)
3137 interface["mac_address"] = port.get("mac_address")
3138 interface["vim_net_id"] = port["network_id"]
3139 interface["vim_interface_id"] = port["id"]
3140 # check if OS-EXT-SRV-ATTR:host is there,
3141 # in case of non-admin credentials, it will be missing
3142
3143 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3144 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3145
3146 interface["pci"] = None
3147
3148 # check if binding:profile is there,
3149 # in case of non-admin credentials, it will be missing
3150 if port.get("binding:profile"):
3151 if port["binding:profile"].get("pci_slot"):
3152 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3153 # the slot to 0x00
3154 # TODO: This is just a workaround valid for niantinc. Find a better way to do so
3155 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3156 pci = port["binding:profile"]["pci_slot"]
3157 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3158 interface["pci"] = pci
3159
3160 interface["vlan"] = None
3161
3162 if port.get("binding:vif_details"):
3163 interface["vlan"] = port["binding:vif_details"].get("vlan")
3164
3165 # Get vlan from network in case not present in port for those old openstacks and cases where
3166 # it is needed vlan at PT
3167 if not interface["vlan"]:
3168 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3169 network = self.neutron.show_network(port["network_id"])
3170
3171 if (
3172 network["network"].get("provider:network_type")
3173 == "vlan"
3174 ):
3175 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3176 interface["vlan"] = network["network"].get(
3177 "provider:segmentation_id"
3178 )
3179
3180 ips = []
3181 # look for floating ip address
3182 try:
3183 floating_ip_dict = self.neutron.list_floatingips(
3184 port_id=port["id"]
3185 )
3186
3187 if floating_ip_dict.get("floatingips"):
3188 ips.append(
3189 floating_ip_dict["floatingips"][0].get(
3190 "floating_ip_address"
3191 )
3192 )
3193 except Exception:
3194 pass
3195
3196 for subnet in port["fixed_ips"]:
3197 ips.append(subnet["ip_address"])
3198
3199 interface["ip_address"] = ";".join(ips)
3200 vm["interfaces"].append(interface)
3201 except Exception as e:
3202 self.logger.error(
3203 "Error getting vm interface information {}: {}".format(
3204 type(e).__name__, e
3205 ),
3206 exc_info=True,
3207 )
3208 except vimconn.VimConnNotFoundException as e:
3209 self.logger.error("Exception getting vm status: %s", str(e))
3210 vm["status"] = "DELETED"
3211 vm["error_msg"] = str(e)
3212 except vimconn.VimConnException as e:
3213 self.logger.error("Exception getting vm status: %s", str(e))
3214 vm["status"] = "VIM_ERROR"
3215 vm["error_msg"] = str(e)
3216
3217 vm_dict[vm_id] = vm
3218
3219 return vm_dict
3220
3221 def action_vminstance(self, vm_id, action_dict, created_items={}):
3222 """Send and action over a VM instance from VIM
3223 Returns None or the console dict if the action was successfully sent to the VIM
3224 """
3225 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3226
3227 try:
3228 self._reload_connection()
3229 server = self.nova.servers.find(id=vm_id)
3230
3231 if "start" in action_dict:
3232 if action_dict["start"] == "rebuild":
3233 server.rebuild()
3234 else:
3235 if server.status == "PAUSED":
3236 server.unpause()
3237 elif server.status == "SUSPENDED":
3238 server.resume()
3239 elif server.status == "SHUTOFF":
3240 server.start()
3241 else:
3242 self.logger.debug(
3243 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3244 )
3245 raise vimconn.VimConnException(
3246 "Cannot 'start' instance while it is in active state",
3247 http_code=vimconn.HTTP_Bad_Request,
3248 )
3249
3250 elif "pause" in action_dict:
3251 server.pause()
3252 elif "resume" in action_dict:
3253 server.resume()
3254 elif "shutoff" in action_dict or "shutdown" in action_dict:
3255 self.logger.debug("server status %s", server.status)
3256 if server.status == "ACTIVE":
3257 server.stop()
3258 else:
3259 self.logger.debug("ERROR: VM is not in Active state")
3260 raise vimconn.VimConnException(
3261 "VM is not in active state, stop operation is not allowed",
3262 http_code=vimconn.HTTP_Bad_Request,
3263 )
3264 elif "forceOff" in action_dict:
3265 server.stop() # TODO
3266 elif "terminate" in action_dict:
3267 server.delete()
3268 elif "createImage" in action_dict:
3269 server.create_image()
3270 # "path":path_schema,
3271 # "description":description_schema,
3272 # "name":name_schema,
3273 # "metadata":metadata_schema,
3274 # "imageRef": id_schema,
3275 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3276 elif "rebuild" in action_dict:
3277 server.rebuild(server.image["id"])
3278 elif "reboot" in action_dict:
3279 server.reboot() # reboot_type="SOFT"
3280 elif "console" in action_dict:
3281 console_type = action_dict["console"]
3282
3283 if console_type is None or console_type == "novnc":
3284 console_dict = server.get_vnc_console("novnc")
3285 elif console_type == "xvpvnc":
3286 console_dict = server.get_vnc_console(console_type)
3287 elif console_type == "rdp-html5":
3288 console_dict = server.get_rdp_console(console_type)
3289 elif console_type == "spice-html5":
3290 console_dict = server.get_spice_console(console_type)
3291 else:
3292 raise vimconn.VimConnException(
3293 "console type '{}' not allowed".format(console_type),
3294 http_code=vimconn.HTTP_Bad_Request,
3295 )
3296
3297 try:
3298 console_url = console_dict["console"]["url"]
3299 # parse console_url
3300 protocol_index = console_url.find("//")
3301 suffix_index = (
3302 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3303 )
3304 port_index = (
3305 console_url[protocol_index + 2 : suffix_index].find(":")
3306 + protocol_index
3307 + 2
3308 )
3309
3310 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3311 raise vimconn.VimConnException(
3312 "Unexpected response from VIM " + str(console_dict)
3313 )
3314
3315 console_dict2 = {
3316 "protocol": console_url[0:protocol_index],
3317 "server": console_url[protocol_index + 2 : port_index],
3318 "port": int(console_url[port_index + 1 : suffix_index]),
3319 "suffix": console_url[suffix_index + 1 :],
3320 }
3321
3322 return console_dict2
3323 except Exception:
3324 raise vimconn.VimConnException(
3325 "Unexpected response from VIM " + str(console_dict)
3326 )
3327
3328 return None
3329 except (
3330 ksExceptions.ClientException,
3331 nvExceptions.ClientException,
3332 nvExceptions.NotFound,
3333 ConnectionError,
3334 ) as e:
3335 self._format_exception(e)
3336 # TODO insert exception vimconn.HTTP_Unauthorized
3337
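# Example action_dict values accepted above (illustrative):
# {"start": None} unpause/resume/start depending on the current state
# {"shutoff": None} stop an ACTIVE server
# {"rebuild": None} rebuild the server from its current image
# {"console": "novnc"} returns the parsed console dict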
3338 # ###### VIO Specific Changes #########
3339 def _generate_vlanID(self):
3340 """
3341 Method to get unused vlanID
3342 Args:
3343 None
3344 Returns:
3345 vlanID
3346 """
3347 # Get used VLAN IDs
3348 usedVlanIDs = []
3349 networks = self.get_network_list()
3350
3351 for net in networks:
3352 if net.get("provider:segmentation_id"):
3353 usedVlanIDs.append(net.get("provider:segmentation_id"))
3354
3355 used_vlanIDs = set(usedVlanIDs)
3356
3357 # find unused VLAN ID
3358 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3359 try:
3360 start_vlanid, end_vlanid = map(
3361 int, vlanID_range.replace(" ", "").split("-")
3362 )
3363
3364 for vlanID in range(start_vlanid, end_vlanid + 1):
3365 if vlanID not in used_vlanIDs:
3366 return vlanID
3367 except Exception as exp:
3368 raise vimconn.VimConnException(
3369 "Exception {} occurred while generating VLAN ID.".format(exp)
3370 )
3371 else:
3372 raise vimconn.VimConnConflictException(
3373 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3374 self.config.get("dataplane_net_vlan_range")
3375 )
3376 )
3377
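# Example (hypothetical config): with config["dataplane_net_vlan_range"] = ["3000-3005"]
# and segmentation ids 3000 and 3001 already in use, this returns 3002; if every id in
# all given ranges is taken, a VimConnConflictException is raised instead.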
3378 def _generate_multisegment_vlanID(self):
3379 """
3380 Method to get unused vlanID
3381 Args:
3382 None
3383 Returns:
3384 vlanID
3385 """
3386 # Get used VLAN IDs
3387 usedVlanIDs = []
3388 networks = self.get_network_list()
3389 for net in networks:
3390 if net.get("provider:network_type") == "vlan" and net.get(
3391 "provider:segmentation_id"
3392 ):
3393 usedVlanIDs.append(net.get("provider:segmentation_id"))
3394 elif net.get("segments"):
3395 for segment in net.get("segments"):
3396 if segment.get("provider:network_type") == "vlan" and segment.get(
3397 "provider:segmentation_id"
3398 ):
3399 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3400
3401 used_vlanIDs = set(usedVlanIDs)
3402
3403 # find unused VLAN ID
3404 for vlanID_range in self.config.get("multisegment_vlan_range"):
3405 try:
3406 start_vlanid, end_vlanid = map(
3407 int, vlanID_range.replace(" ", "").split("-")
3408 )
3409
3410 for vlanID in range(start_vlanid, end_vlanid + 1):
3411 if vlanID not in used_vlanIDs:
3412 return vlanID
3413 except Exception as exp:
3414 raise vimconn.VimConnException(
3415 "Exception {} occurred while generating VLAN ID.".format(exp)
3416 )
3417 else:
3418 raise vimconn.VimConnConflictException(
3419 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3420 self.config.get("multisegment_vlan_range")
3421 )
3422 )
3423
3424 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3425 """
3426 Method to validate user given vlanID ranges
3427 Args: None
3428 Returns: None
3429 """
3430 for vlanID_range in input_vlan_range:
3431 vlan_range = vlanID_range.replace(" ", "")
3432 # validate format
3433 vlanID_pattern = r"(\d)*-(\d)*$"
3434 match_obj = re.match(vlanID_pattern, vlan_range)
3435 if not match_obj:
3436 raise vimconn.VimConnConflictException(
3437 "Invalid VLAN range for {}: {}.You must provide "
3438 "'{}' in format [start_ID - end_ID].".format(
3439 text_vlan_range, vlanID_range, text_vlan_range
3440 )
3441 )
3442
3443 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3444 if start_vlanid <= 0:
3445 raise vimconn.VimConnConflictException(
3446 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3447 "networks valid IDs are 1 to 4094 ".format(
3448 text_vlan_range, vlanID_range
3449 )
3450 )
3451
3452 if end_vlanid > 4094:
3453 raise vimconn.VimConnConflictException(
3454 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3455 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3456 text_vlan_range, vlanID_range
3457 )
3458 )
3459
3460 if start_vlanid > end_vlanid:
3461 raise vimconn.VimConnConflictException(
3462 "Invalid VLAN range for {}: {}. You must provide '{}'"
3463 " in format start_ID - end_ID and start_ID < end_ID ".format(
3464 text_vlan_range, vlanID_range, text_vlan_range
3465 )
3466 )
3467
3468 def get_hosts_info(self):
3469 """Get the information of deployed hosts
3470 Returns the hosts content"""
3471 if self.debug:
3472 print("osconnector: Getting Host info from VIM")
3473
3474 try:
3475 h_list = []
3476 self._reload_connection()
3477 hypervisors = self.nova.hypervisors.list()
3478
3479 for hype in hypervisors:
3480 h_list.append(hype.to_dict())
3481
3482 return 1, {"hosts": h_list}
3483 except nvExceptions.NotFound as e:
3484 error_value = -vimconn.HTTP_Not_Found
3485 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3486 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3487 error_value = -vimconn.HTTP_Bad_Request
3488 error_text = (
3489 type(e).__name__
3490 + ": "
3491 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3492 )
3493
3494 # TODO insert exception vimconn.HTTP_Unauthorized
3495 # if reaching here is because an exception
3496 self.logger.debug("get_hosts_info " + error_text)
3497
3498 return error_value, error_text
3499
3500 def get_hosts(self, vim_tenant):
3501 """Get the hosts and deployed instances
3502 Returns the hosts content"""
3503 r, hype_dict = self.get_hosts_info()
3504
3505 if r < 0:
3506 return r, hype_dict
3507
3508 hypervisors = hype_dict["hosts"]
3509
3510 try:
3511 servers = self.nova.servers.list()
3512 for hype in hypervisors:
3513 for server in servers:
3514 if (
3515 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3516 == hype["hypervisor_hostname"]
3517 ):
3518 if "vm" in hype:
3519 hype["vm"].append(server.id)
3520 else:
3521 hype["vm"] = [server.id]
3522
3523 return 1, hype_dict
3524 except nvExceptions.NotFound as e:
3525 error_value = -vimconn.HTTP_Not_Found
3526 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3527 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3528 error_value = -vimconn.HTTP_Bad_Request
3529 error_text = (
3530 type(e).__name__
3531 + ": "
3532 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3533 )
3534
3535 # TODO insert exception vimconn.HTTP_Unauthorized
3536 # if reaching here is because an exception
3537 self.logger.debug("get_hosts " + error_text)
3538
3539 return error_value, error_text
3540
3541 def new_affinity_group(self, affinity_group_data):
3542 """Adds a server group to VIM
3543 affinity_group_data contains a dictionary with information, keys:
3544 name: name in VIM for the server group
3545 type: affinity or anti-affinity
3546 scope: Only nfvi-node allowed
3547 Returns the server group identifier"""
3548 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3549
3550 try:
3551 name = affinity_group_data["name"]
3552 policy = affinity_group_data["type"]
3553
3554 self._reload_connection()
3555 new_server_group = self.nova.server_groups.create(name, policy)
3556
3557 return new_server_group.id
3558 except (
3559 ksExceptions.ClientException,
3560 nvExceptions.ClientException,
3561 ConnectionError,
3562 KeyError,
3563 ) as e:
3564 self._format_exception(e)
3565
3566 def get_affinity_group(self, affinity_group_id):
3567 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3568 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3569 try:
3570 self._reload_connection()
3571 server_group = self.nova.server_groups.find(id=affinity_group_id)
3572
3573 return server_group.to_dict()
3574 except (
3575 nvExceptions.NotFound,
3576 nvExceptions.ClientException,
3577 ksExceptions.ClientException,
3578 ConnectionError,
3579 ) as e:
3580 self._format_exception(e)
3581
3582 def delete_affinity_group(self, affinity_group_id):
3583 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
        self.logger.debug("Deleting server group '%s'", affinity_group_id)
3585 try:
3586 self._reload_connection()
3587 self.nova.server_groups.delete(affinity_group_id)
3588
3589 return affinity_group_id
3590 except (
3591 nvExceptions.NotFound,
3592 ksExceptions.ClientException,
3593 nvExceptions.ClientException,
3594 ConnectionError,
3595 ) as e:
3596 self._format_exception(e)
3597
    def get_vdu_state(self, vm_id):
        """
        Get the state of a vdu.
        param:
            vm_id: ID of an instance
        Returns a list: [server status, flavor id, hypervisor host, availability zone]
        """
3604 self.logger.debug("Getting the status of VM")
3605 self.logger.debug("VIM VM ID %s", vm_id)
3606 self._reload_connection()
3607 server = self.nova.servers.find(id=vm_id)
3608 server_dict = server.to_dict()
3609 vdu_data = [
3610 server_dict["status"],
3611 server_dict["flavor"]["id"],
3612 server_dict["OS-EXT-SRV-ATTR:host"],
3613 server_dict["OS-EXT-AZ:availability_zone"],
3614 ]
3615 self.logger.debug("vdu_data %s", vdu_data)
3616 return vdu_data
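
    # Hedged sketch of consuming the returned list ("vim" and the id are
    # hypothetical):
    #
    #     status, flavor_id, host, az = vim.get_vdu_state(vm_id)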
3617
3618 def check_compute_availability(self, host, server_flavor_details):
3619 self._reload_connection()
3620 hypervisor_search = self.nova.hypervisors.search(
3621 hypervisor_match=host, servers=True
3622 )
3623 for hypervisor in hypervisor_search:
3624 hypervisor_id = hypervisor.to_dict()["id"]
3625 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
            # to_dict() already returns a plain dict; the json dumps/loads
            # round-trip adds nothing
            hypervisor_json = hypervisor_details.to_dict()
3629 resources_available = [
3630 hypervisor_json["free_ram_mb"],
3631 hypervisor_json["disk_available_least"],
3632 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
3633 ]
3634 compute_available = all(
3635 x > y for x, y in zip(resources_available, server_flavor_details)
3636 )
3637 if compute_available:
3638 return host
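
    # Hedged usage sketch: server_flavor_details must follow the same order as
    # the resources checked above, i.e. [ram (MB), disk (GB), vcpus]. "vim",
    # "compute-01" and the sample values are hypothetical:
    #
    #     free_host = vim.check_compute_availability("compute-01", [4096, 40, 2])
    #     # returns "compute-01" when it has enough spare capacity, else None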
3639
3640 def check_availability_zone(
3641 self, old_az, server_flavor_details, old_host, host=None
3642 ):
3643 self._reload_connection()
3644 az_check = {"zone_check": False, "compute_availability": None}
3645 aggregates_list = self.nova.aggregates.list()
3646 for aggregate in aggregates_list:
            # to_dict() already returns a plain dict; use it directly
            aggregate_json = aggregate.to_dict()
3650 if aggregate_json["availability_zone"] == old_az:
3651 hosts_list = aggregate_json["hosts"]
3652 if host is not None:
3653 if host in hosts_list:
3654 az_check["zone_check"] = True
3655 available_compute_id = self.check_compute_availability(
3656 host, server_flavor_details
3657 )
3658 if available_compute_id is not None:
3659 az_check["compute_availability"] = available_compute_id
3660 else:
3661 for check_host in hosts_list:
3662 if check_host != old_host:
3663 available_compute_id = self.check_compute_availability(
3664 check_host, server_flavor_details
3665 )
3666 if available_compute_id is not None:
3667 az_check["zone_check"] = True
3668 az_check["compute_availability"] = available_compute_id
3669 break
3670 else:
3671 az_check["zone_check"] = True
3672 return az_check
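
    # Shape of the returned dict, a hedged example with hypothetical values:
    #
    #     {"zone_check": True, "compute_availability": "compute-02"}
    #
    # zone_check is True when the requested host (or some alternative host)
    # belongs to the instance's availability zone; compute_availability holds
    # the selected host, or None when no host has enough free resources.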
3673
3674 def migrate_instance(self, vm_id, compute_host=None):
3675 """
3676 Migrate a vdu
3677 param:
3678 vm_id: ID of an instance
3679 compute_host: Host to migrate the vdu to
3680 """
3681 self._reload_connection()
3682 vm_state = False
3683 instance_state = self.get_vdu_state(vm_id)
3684 server_flavor_id = instance_state[1]
3685 server_hypervisor_name = instance_state[2]
3686 server_availability_zone = instance_state[3]
3687 try:
3688 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3689 server_flavor_details = [
3690 server_flavor["ram"],
3691 server_flavor["disk"],
3692 server_flavor["vcpus"],
3693 ]
3694 if compute_host == server_hypervisor_name:
3695 raise vimconn.VimConnException(
3696 "Unable to migrate instance '{}' to the same host '{}'".format(
3697 vm_id, compute_host
3698 ),
3699 http_code=vimconn.HTTP_Bad_Request,
3700 )
3701 az_status = self.check_availability_zone(
3702 server_availability_zone,
3703 server_flavor_details,
3704 server_hypervisor_name,
3705 compute_host,
3706 )
3707 availability_zone_check = az_status["zone_check"]
3708 available_compute_id = az_status.get("compute_availability")
3709
3710 if availability_zone_check is False:
3711 raise vimconn.VimConnException(
3712 "Unable to migrate instance '{}' to a different availability zone".format(
3713 vm_id
3714 ),
3715 http_code=vimconn.HTTP_Bad_Request,
3716 )
3717 if available_compute_id is not None:
3718 self.nova.servers.live_migrate(
3719 server=vm_id,
3720 host=available_compute_id,
3721 block_migration=True,
3722 disk_over_commit=False,
3723 )
                # live_migrate is asynchronous: wait for the server to become
                # ACTIVE again, then verify on which host it landed
                state = "MIGRATING"
                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                changed_compute_host = self.get_vdu_state(vm_id)[2]
                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )
                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration failed: instance '{}' was not moved to the new host '{}'".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                raise vimconn.VimConnException(
                    "No compute host with enough resources available in availability zone '{}' to migrate instance '{}'".format(
                        server_availability_zone, vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
3750 except (
3751 nvExceptions.BadRequest,
3752 nvExceptions.ClientException,
3753 nvExceptions.NotFound,
3754 ) as e:
3755 self._format_exception(e)
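
    # Hedged usage sketch (names hypothetical): pass a target host, or omit it
    # to let the connector pick any host with capacity in the same zone:
    #
    #     state, new_host = vim.migrate_instance(vm_id, compute_host="compute-02")
    #     state, new_host = vim.migrate_instance(vm_id)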
3756
3757 def resize_instance(self, vm_id, new_flavor_id):
3758 """
        Resize an instance to the given flavor.
        param:
            vm_id: ID of an instance
            new_flavor_id: ID of the flavor to resize the instance to
        Returns the status of the resized instance
3765 """
3766 self._reload_connection()
        self.logger.debug("Resizing instance %s to flavor %s", vm_id, new_flavor_id)
3768 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
3769 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
3770 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
3771 try:
3772 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
3773 if old_flavor_disk > new_flavor_disk:
3774 raise nvExceptions.BadRequest(
3775 400,
3776 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
3777 )
3778 else:
3779 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
3780 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
3781 if vm_state:
3782 instance_resized_status = self.confirm_resize(vm_id)
3783 return instance_resized_status
                    else:
                        raise nvExceptions.BadRequest(
                            409,
                            message="Cannot 'resize': server did not reach VERIFY_RESIZE state",
                        )
3789
            else:
                self.logger.debug("ERROR: Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.BadRequest(
                    409,
                    message="Cannot 'resize' instance unless it is in ACTIVE or SHUTOFF state",
                )
3796 except (
3797 nvExceptions.BadRequest,
3798 nvExceptions.ClientException,
3799 nvExceptions.NotFound,
3800 ) as e:
3801 self._format_exception(e)
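
    # Hedged usage sketch (ids hypothetical): the target flavor must not have a
    # smaller disk, and the server must be ACTIVE or SHUTOFF:
    #
    #     status = vim.resize_instance(vm_id, new_flavor_id)
    #     # returns the final server status (e.g. "ACTIVE") once the resize has
    #     # been confirmed through confirm_resize()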
3802
3803 def confirm_resize(self, vm_id):
3804 """
3805 Confirm the resize of an instance
3806 param:
3807 vm_id: ID of an instance
3808 """
3809 self._reload_connection()
3810 self.nova.servers.confirm_resize(server=vm_id)
3811 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3812 self.__wait_for_vm(vm_id, "ACTIVE")
3813 instance_status = self.get_vdu_state(vm_id)[0]
3814 return instance_status
3815
    def get_monitoring_data(self):
        """Get all servers (detailed) from Nova and all ports from Neutron,
        for monitoring purposes. Returns a (servers, ports) tuple."""
        try:
3818 self.logger.debug("Getting servers and ports data from Openstack VIMs.")
3819 self._reload_connection()
3820 all_servers = self.nova.servers.list(detailed=True)
3821 all_ports = self.neutron.list_ports()
3822 return all_servers, all_ports
3823 except (
3824 vimconn.VimConnException,
3825 vimconn.VimConnNotFoundException,
3826 vimconn.VimConnConnectionException,
3827 ) as e:
3828 raise vimconn.VimConnException(
3829 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
3830 )
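
    # Hedged usage sketch ("vim" is hypothetical):
    #
    #     servers, ports = vim.get_monitoring_data()
    #     # servers: list of novaclient Server objects (detailed view)
    #     # ports: dict with a "ports" key, as returned by Neutron's list_ports()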