Bug 2109. Fix VIM info DB update after vertical scaling, migrate and operate
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
# -*- coding: utf-8 -*-

##
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##

"""
osconnector implements all the methods to interact with openstack using the
OpenStack python clients (keystone, nova, neutron, cinder, glance).

For the VNF forwarding graph, the OpenStack VIM connector calls the
networking-sfc Neutron extension methods, whose resources are mapped
to the VIM connector's SFC resources as follows:
- Classification (OSM) -> Flow Classifier (Neutron)
- Service Function Instance (OSM) -> Port Pair (Neutron)
- Service Function (OSM) -> Port Pair Group (Neutron)
- Service Function Path (OSM) -> Port Chain (Neutron)
"""
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
59 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 __date__ = "$22-sep-2017 23:59:59$"
61
# mapping of openstack virtual machine status to openmano status
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

supportedClassificationTypes = ["legacy_flow_classifier"]

# global timeouts (in seconds) for creating and deleting volumes and servers
volume_timeout = 1800
server_timeout = 1800


class SafeDumper(yaml.SafeDumper):
    def represent_data(self, data):
        # Openstack APIs use custom subclasses of dict and the YAML safe dumper
        # is designed to not handle that (reference issue 142 of pyyaml)
        if isinstance(data, dict) and data.__class__ != dict:
            # A simple solution is to convert those items back to dicts
            data = dict(data.items())

        return super(SafeDumper, self).represent_data(data)

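# A minimal illustration of why SafeDumper exists (hypothetical data): dict
# subclasses returned by the OpenStack clients are converted back to plain
# dicts so that yaml.dump() does not emit python-specific tags:
#
#     class FakeOSDict(dict):
#         pass
#
#     yaml.dump(FakeOSDict(status="ACTIVE"), Dumper=SafeDumper, default_flow_style=True)
#     # -> "{status: ACTIVE}\n"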

class vimconnector(vimconn.VimConnector):
    def __init__(
        self,
        uuid,
        name,
        tenant_id,
        tenant_name,
        url,
        url_admin=None,
        user=None,
        passwd=None,
        log_level=None,
        config={},
        persistent_info={},
    ):
        """using common constructor parameters. In this case
        'url' is the keystone authorization url,
        'url_admin' is not used
        """
        api_version = config.get("APIversion")

        if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:APIversion. "
                "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
            )

        vim_type = config.get("vim_type")

        if vim_type and vim_type not in ("vio", "VIO"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:vim_type."
                "Allowed values are 'vio' or 'VIO'".format(vim_type)
            )

        if config.get("dataplane_net_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
            )

        if config.get("multisegment_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("multisegment_vlan_range"), "multisegment_vlan_range"
            )

        vimconn.VimConnector.__init__(
            self,
            uuid,
            name,
            tenant_id,
            tenant_name,
            url,
            url_admin,
            user,
            passwd,
            log_level,
            config,
        )

        if self.config.get("insecure") and self.config.get("ca_cert"):
            raise vimconn.VimConnException(
                "options insecure and ca_cert are mutually exclusive"
            )

        self.verify = True

        if self.config.get("insecure"):
            self.verify = False

        if self.config.get("ca_cert"):
            self.verify = self.config.get("ca_cert")

        if not url:
            raise TypeError("url param can not be NoneType")

        self.persistent_info = persistent_info
        self.availability_zone = persistent_info.get("availability_zone", None)
        self.session = persistent_info.get("session", {"reload_client": True})
        self.my_tenant_id = self.session.get("my_tenant_id")
        self.nova = self.session.get("nova")
        self.neutron = self.session.get("neutron")
        self.cinder = self.session.get("cinder")
        self.glance = self.session.get("glance")
        # self.glancev1 = self.session.get("glancev1")
        self.keystone = self.session.get("keystone")
        self.api_version3 = self.session.get("api_version3")
        self.vim_type = self.config.get("vim_type")

        if self.vim_type:
            self.vim_type = self.vim_type.upper()

        if self.config.get("use_internal_endpoint"):
            self.endpoint_type = "internalURL"
        else:
            self.endpoint_type = None

        logging.getLogger("urllib3").setLevel(logging.WARNING)
        logging.getLogger("keystoneauth").setLevel(logging.WARNING)
        logging.getLogger("novaclient").setLevel(logging.WARNING)
        self.logger = logging.getLogger("ro.vim.openstack")

        # allow security_groups to be a list or a single string
        if isinstance(self.config.get("security_groups"), str):
            self.config["security_groups"] = [self.config["security_groups"]]

        self.security_groups_id = None

        # ###### VIO Specific Changes #########
        if self.vim_type == "VIO":
            self.logger = logging.getLogger("ro.vim.vio")

        if log_level:
            self.logger.setLevel(getattr(logging, log_level))

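    # Illustrative instantiation (not executed here; all values are hypothetical
    # placeholders). The caller keeps "persistent_info" between calls so that the
    # authenticated clients cached there by _reload_connection() can be reused:
    #
    #     persistent_info = {}
    #     vim = vimconnector(
    #         uuid="00000000-0000-0000-0000-000000000000",
    #         name="my-openstack",
    #         tenant_id=None,
    #         tenant_name="demo",
    #         url="https://keystone.example.com:5000/v3",
    #         user="admin",
    #         passwd="secret",
    #         config={"insecure": True, "security_groups": "default"},
    #         persistent_info=persistent_info,
    #     )
    #     vim.check_vim_connectivity()
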
    def __getitem__(self, index):
        """Get individual parameters.
        Throws KeyError"""
        if index == "project_domain_id":
            return self.config.get("project_domain_id")
        elif index == "user_domain_id":
            return self.config.get("user_domain_id")
        else:
            return vimconn.VimConnector.__getitem__(self, index)

    def __setitem__(self, index, value):
        """Set individual parameters and mark the session as dirty to force a connection reload.
        Throws KeyError"""
        if index == "project_domain_id":
            self.config["project_domain_id"] = value
        elif index == "user_domain_id":
            self.config["user_domain_id"] = value
        else:
            vimconn.VimConnector.__setitem__(self, index, value)

        self.session["reload_client"] = True

    def serialize(self, value):
        """Serialization of python basic types.

        In the case value is not serializable a message will be logged and a
        simple representation of the data that cannot be converted back to
        python is returned.
        """
        if isinstance(value, str):
            return value

        try:
            return yaml.dump(
                value, Dumper=SafeDumper, default_flow_style=True, width=256
            )
        except yaml.representer.RepresenterError:
            self.logger.debug(
                "The following entity cannot be serialized in YAML:\n\n%s\n\n",
                pformat(value),
                exc_info=True,
            )

            return str(value)

    def _reload_connection(self):
        """Called before any operation, it checks if credentials have changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seems that python client does this task for us :-)
        if self.session["reload_client"]:
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require a specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.60"

            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            if sess.get_all_version_data(service_type="volumev2"):
                self.cinder = self.session["cinder"] = cClient.Client(
                    2,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.cinder = self.session["cinder"] = cClient.Client(
                    3,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availability zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None

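    # Standalone sketch of the v3 authentication flow implemented above, handy
    # when debugging credentials outside the connector (endpoint and credential
    # values are hypothetical):
    #
    #     from keystoneauth1 import session
    #     from keystoneauth1.identity import v3
    #
    #     auth = v3.Password(
    #         auth_url="https://keystone.example.com:5000/v3",
    #         username="admin",
    #         password="secret",
    #         project_name="demo",
    #         project_domain_id="default",
    #         user_domain_id="default",
    #     )
    #     sess = session.Session(auth=auth, verify=False)
    #     print(sess.get_project_id())
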
    def __net_os2mano(self, net_list_dict):
        """Transform the net openstack format to mano format
        net_list_dict can be a list of dict or a single dict"""
        if type(net_list_dict) is dict:
            net_list_ = (net_list_dict,)
        elif type(net_list_dict) is list:
            net_list_ = net_list_dict
        else:
            raise TypeError("param net_list_dict must be a list or a dictionary")
        for net in net_list_:
            if net.get("provider:network_type") == "vlan":
                net["type"] = "data"
            else:
                net["type"] = "bridge"

    def __classification_os2mano(self, class_list_dict):
        """Transform the openstack format (Flow Classifier) to mano format
        (Classification) class_list_dict can be a list of dict or a single dict
        """
        if isinstance(class_list_dict, dict):
            class_list_ = [class_list_dict]
        elif isinstance(class_list_dict, list):
            class_list_ = class_list_dict
        else:
            raise TypeError("param class_list_dict must be a list or a dictionary")
        for classification in class_list_:
            id = classification.pop("id")
            name = classification.pop("name")
            description = classification.pop("description")
            project_id = classification.pop("project_id")
            tenant_id = classification.pop("tenant_id")
            original_classification = copy.deepcopy(classification)
            classification.clear()
            classification["ctype"] = "legacy_flow_classifier"
            classification["definition"] = original_classification
            classification["id"] = id
            classification["name"] = name
            classification["description"] = description
            classification["project_id"] = project_id
            classification["tenant_id"] = tenant_id

    def __sfi_os2mano(self, sfi_list_dict):
        """Transform the openstack format (Port Pair) to mano format (SFI)
        sfi_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfi_list_dict, dict):
            sfi_list_ = [sfi_list_dict]
        elif isinstance(sfi_list_dict, list):
            sfi_list_ = sfi_list_dict
        else:
            raise TypeError("param sfi_list_dict must be a list or a dictionary")

        for sfi in sfi_list_:
            sfi["ingress_ports"] = []
            sfi["egress_ports"] = []

            if sfi.get("ingress"):
                sfi["ingress_ports"].append(sfi["ingress"])

            if sfi.get("egress"):
                sfi["egress_ports"].append(sfi["egress"])

            del sfi["ingress"]
            del sfi["egress"]
            params = sfi.get("service_function_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfi["sfc_encap"] = sfc_encap
            del sfi["service_function_parameters"]

    def __sf_os2mano(self, sf_list_dict):
        """Transform the openstack format (Port Pair Group) to mano format (SF)
        sf_list_dict can be a list of dict or a single dict
        """
        if isinstance(sf_list_dict, dict):
            sf_list_ = [sf_list_dict]
        elif isinstance(sf_list_dict, list):
            sf_list_ = sf_list_dict
        else:
            raise TypeError("param sf_list_dict must be a list or a dictionary")

        for sf in sf_list_:
            del sf["port_pair_group_parameters"]
            sf["sfis"] = sf["port_pairs"]
            del sf["port_pairs"]

    def __sfp_os2mano(self, sfp_list_dict):
        """Transform the openstack format (Port Chain) to mano format (SFP)
        sfp_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfp_list_dict, dict):
            sfp_list_ = [sfp_list_dict]
        elif isinstance(sfp_list_dict, list):
            sfp_list_ = sfp_list_dict
        else:
            raise TypeError("param sfp_list_dict must be a list or a dictionary")

        for sfp in sfp_list_:
            params = sfp.pop("chain_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfp["sfc_encap"] = sfc_encap
            sfp["spi"] = sfp.pop("chain_id")
            sfp["classifications"] = sfp.pop("flow_classifiers")
            sfp["service_functions"] = sfp.pop("port_pair_groups")

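    # Illustrative effect of __sfp_os2mano() (hypothetical values). A Neutron
    # port chain dict such as:
    #
    #     sfp = {"id": "pc-1", "chain_id": 42,
    #            "chain_parameters": {"correlation": "nsh"},
    #            "flow_classifiers": ["fc-1"], "port_pair_groups": ["ppg-1"]}
    #
    # is rewritten in place to the mano SFP format:
    #
    #     {"id": "pc-1", "sfc_encap": True, "spi": 42,
    #      "classifications": ["fc-1"], "service_functions": ["ppg-1"]}
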
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.

    def _format_exception(self, exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
        message_error = str(exception)
        tip = ""

        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
            ),
        ):
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
            ),
        ):
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
            ),
        ):
            if message_error == "OS-EXT-SRV-ATTR:host":
                tip = " (If the user does not have non-admin credentials, this attribute will be missing)"
                raise vimconn.VimConnInsufficientCredentials(
                    type(exception).__name__ + ": " + message_error + tip
                )
            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )

        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, vimconn.VimConnException):
            raise exception
        else:
            self.logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnConnectionException(
                type(exception).__name__ + ": " + message_error
            )

    def _get_ids_from_name(self):
        """
        Obtain ids from name of tenant and security_groups. Store at self.security_groups_id
        :return: None
        """
        # get tenant_id if only tenant_name is supplied
        self._reload_connection()

        if not self.my_tenant_id:
            raise vimconn.VimConnConnectionException(
                "Error getting tenant information from name={} id={}".format(
                    self.tenant_name, self.tenant_id
                )
            )

        if self.config.get("security_groups") and not self.security_groups_id:
            # convert from name to id
            neutron_sg_list = self.neutron.list_security_groups(
                tenant_id=self.my_tenant_id
            )["security_groups"]

            self.security_groups_id = []
            for sg in self.config.get("security_groups"):
                for neutron_sg in neutron_sg_list:
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )

    def _find_nova_server(self, vm_id):
        """
        Returns the VM instance from Openstack and completes it with flavor ID
        Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
        """
        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)
            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
            server_dict = server.to_dict()
            try:
                server_dict["flavor"]["id"] = self.nova.flavors.find(
                    name=server_dict["flavor"]["original_name"]
                ).id
            except nvExceptions.NotFound as e:
                self.logger.warning(str(e))
            return server_dict
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def check_vim_connectivity(self):
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})

    def get_tenant_list(self, filter_dict={}):
        """Obtain tenants of VIM
        filter_dict can contain the following keys:
            name: filter by tenant name
            id: filter by tenant uuid/id
            <other VIM specific>
        Returns the tenant list of dictionaries: [{'name':'<name>', 'id':'<id>', ...}, ...]
        """
        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()

            if self.api_version3:
                project_class_list = self.keystone.projects.list(
                    name=filter_dict.get("name")
                )
            else:
                project_class_list = self.keystone.tenants.findall(**filter_dict)

            project_list = []

            for project in project_class_list:
                if filter_dict.get("id") and filter_dict["id"] != project.id:
                    continue

                project_list.append(project.to_dict())

            return project_list
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_tenant(self, tenant_name, tenant_description):
        """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
        self.logger.debug("Adding a new tenant name: %s", tenant_name)

        try:
            self._reload_connection()

            if self.api_version3:
                project = self.keystone.projects.create(
                    tenant_name,
                    self.config.get("project_domain_id", "default"),
                    description=tenant_description,
                    is_domain=False,
                )
            else:
                project = self.keystone.tenants.create(tenant_name, tenant_description)

            return project.id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_tenant(self, tenant_id):
        """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
        self.logger.debug("Deleting tenant %s from VIM", tenant_id)

        try:
            self._reload_connection()

            if self.api_version3:
                self.keystone.projects.delete(tenant_id)
            else:
                self.keystone.tenants.delete(tenant_id)

            return tenant_id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_network(
        self,
        net_name,
        net_type,
        ip_profile=None,
        shared=False,
        provider_network_profile=None,
    ):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data': underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/use by other tenants/organization
            'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
                physical-network: physnet-label}
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
        )
        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))

        try:
            vlan = None

            if provider_network_profile:
                vlan = provider_network_profile.get("segmentation-id")

            new_net = None
            created_items = {}
            self._reload_connection()
            network_dict = {"name": net_name, "admin_state_up": True}

            if net_type in ("data", "ptp") or provider_network_profile:
                provider_physical_network = None

                if provider_network_profile and provider_network_profile.get(
                    "physical-network"
                ):
                    provider_physical_network = provider_network_profile.get(
                        "physical-network"
                    )

                    # provider-network must be one of the dataplane_physical_net if this is a list. If it is string
                    # or not declared, just ignore the checking
                    if (
                        isinstance(
                            self.config.get("dataplane_physical_net"), (tuple, list)
                        )
                        and provider_physical_network
                        not in self.config["dataplane_physical_net"]
                    ):
                        raise vimconn.VimConnConflictException(
                            "Invalid parameter 'provider-network:physical-network' "
                            "for network creation. '{}' is not one of the declared "
                            "list at VIM_config:dataplane_physical_net".format(
                                provider_physical_network
                            )
                        )

                # use the default dataplane_physical_net
                if not provider_physical_network:
                    provider_physical_network = self.config.get(
                        "dataplane_physical_net"
                    )

                    # if it is a non-empty list, use the first value. If it is a string use the value directly
                    if (
                        isinstance(provider_physical_network, (tuple, list))
                        and provider_physical_network
                    ):
                        provider_physical_network = provider_physical_network[0]

                if not provider_physical_network:
                    raise vimconn.VimConnConflictException(
                        "missing information needed for underlay networks. Provide "
                        "'dataplane_physical_net' configuration at VIM or use the NS "
                        "instantiation parameter 'provider-network.physical-network'"
                        " for the VLD"
                    )

                if not self.config.get("multisegment_support"):
                    network_dict[
                        "provider:physical_network"
                    ] = provider_physical_network

                    if (
                        provider_network_profile
                        and "network-type" in provider_network_profile
                    ):
                        network_dict[
                            "provider:network_type"
                        ] = provider_network_profile["network-type"]
                    else:
                        network_dict["provider:network_type"] = self.config.get(
                            "dataplane_network_type", "vlan"
                        )

                    if vlan:
                        network_dict["provider:segmentation_id"] = vlan
                else:
                    # Multi-segment case
                    segment_list = []
                    segment1_dict = {
                        "provider:physical_network": "",
                        "provider:network_type": "vxlan",
                    }
                    segment_list.append(segment1_dict)
                    segment2_dict = {
                        "provider:physical_network": provider_physical_network,
                        "provider:network_type": "vlan",
                    }

                    if vlan:
                        segment2_dict["provider:segmentation_id"] = vlan
                    elif self.config.get("multisegment_vlan_range"):
                        vlanID = self._generate_multisegment_vlanID()
                        segment2_dict["provider:segmentation_id"] = vlanID

                    # else
                    #     raise vimconn.VimConnConflictException(
                    #         "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
                    #         network")
                    segment_list.append(segment2_dict)
                    network_dict["segments"] = segment_list

                # VIO Specific Changes. It needs a concrete VLAN
                if self.vim_type == "VIO" and vlan is None:
                    if self.config.get("dataplane_net_vlan_range") is None:
                        raise vimconn.VimConnConflictException(
                            "You must provide 'dataplane_net_vlan_range' in format "
                            "[start_ID - end_ID] at VIM_config for creating underlay "
                            "networks"
                        )

                    network_dict["provider:segmentation_id"] = self._generate_vlanID()

            network_dict["shared"] = shared

            if self.config.get("disable_network_port_security"):
                network_dict["port_security_enabled"] = False

            if self.config.get("neutron_availability_zone_hints"):
                hints = self.config.get("neutron_availability_zone_hints")

                if isinstance(hints, str):
                    hints = [hints]

                network_dict["availability_zone_hints"] = hints

            new_net = self.neutron.create_network({"network": network_dict})
            # print new_net
            # create subnetwork, even if there is no profile

            if not ip_profile:
                ip_profile = {}

            if not ip_profile.get("subnet_address"):
                # Fake subnet is required
                subnet_rand = random.SystemRandom().randint(0, 255)
                ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)

            if "ip_version" not in ip_profile:
                ip_profile["ip_version"] = "IPv4"

            subnet = {
                "name": net_name + "-subnet",
                "network_id": new_net["network"]["id"],
                "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
                "cidr": ip_profile["subnet_address"],
            }

            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
            if ip_profile.get("gateway_address"):
                subnet["gateway_ip"] = ip_profile["gateway_address"]
            else:
                subnet["gateway_ip"] = None

            if ip_profile.get("dns_address"):
                subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")

            if "dhcp_enabled" in ip_profile:
                subnet["enable_dhcp"] = (
                    False
                    if ip_profile["dhcp_enabled"] == "false"
                    or ip_profile["dhcp_enabled"] is False
                    else True
                )

            if ip_profile.get("dhcp_start_address"):
                subnet["allocation_pools"] = []
                subnet["allocation_pools"].append(dict())
                subnet["allocation_pools"][0]["start"] = ip_profile[
                    "dhcp_start_address"
                ]

            if ip_profile.get("dhcp_count"):
                # parts = ip_profile["dhcp_start_address"].split(".")
                # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
                ip_int += ip_profile["dhcp_count"] - 1
                ip_str = str(netaddr.IPAddress(ip_int))
                subnet["allocation_pools"][0]["end"] = ip_str

            if (
                ip_profile.get("ipv6_address_mode")
                and ip_profile["ip_version"] != "IPv4"
            ):
                subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
                # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
                # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
                subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]

            # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
            self.neutron.create_subnet({"subnet": subnet})

            if net_type == "data" and self.config.get("multisegment_support"):
                if self.config.get("l2gw_support"):
                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
                    for l2gw in l2gw_list:
                        l2gw_conn = {
                            "l2_gateway_id": l2gw["id"],
                            "network_id": new_net["network"]["id"],
                            "segmentation_id": str(vlanID),
                        }
                        new_l2gw_conn = self.neutron.create_l2_gateway_connection(
                            {"l2_gateway_connection": l2gw_conn}
                        )
                        created_items[
                            "l2gwconn:"
                            + str(new_l2gw_conn["l2_gateway_connection"]["id"])
                        ] = True

            return new_net["network"]["id"], created_items
        except Exception as e:
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )

            if new_net:
                self.neutron.delete_network(new_net["network"]["id"])

            self._format_exception(e)

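    # Illustrative call (all values are hypothetical): create an underlay network
    # with a fixed VLAN and a DHCP-enabled subnet. It returns the Neutron network
    # id plus the created_items dict that delete_network() expects later:
    #
    #     net_id, created_items = vim.new_network(
    #         net_name="datanet",
    #         net_type="data",
    #         ip_profile={
    #             "ip_version": "IPv4",
    #             "subnet_address": "10.0.0.0/24",
    #             "gateway_address": "10.0.0.1",
    #             "dhcp_enabled": True,
    #             "dhcp_start_address": "10.0.0.10",
    #             "dhcp_count": 100,
    #         },
    #         provider_network_profile={"segmentation-id": 400, "network-type": "vlan"},
    #     )
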
    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM
        Filter_dict can be:
            name: network name
            id: network uuid
            shared: boolean
            tenant_id: tenant
            admin_state_up: boolean
            status: 'ACTIVE'
        Returns the network list of dictionaries
        """
        self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()
            filter_dict_os = filter_dict.copy()

            if self.api_version3 and "tenant_id" in filter_dict_os:
                # TODO check
                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

            net_dict = self.neutron.list_networks(**filter_dict_os)
            net_list = net_dict["networks"]
            self.__net_os2mano(net_list)

            return net_list
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_network(self, net_id):
        """Obtain details of network from VIM
        Returns the network information from a network id"""
        self.logger.debug(" Getting tenant network %s from VIM", net_id)
        filter_dict = {"id": net_id}
        net_list = self.get_network_list(filter_dict)

        if len(net_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Network '{}' not found".format(net_id)
            )
        elif len(net_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one network with this criteria"
            )

        net = net_list[0]
        subnets = []
        for subnet_id in net.get("subnets", ()):
            try:
                subnet = self.neutron.show_subnet(subnet_id)
            except Exception as e:
                self.logger.error(
                    "osconnector.get_network(): Error getting subnet %s %s"
                    % (net_id, str(e))
                )
                subnet = {"id": subnet_id, "fault": str(e)}

            subnets.append(subnet)

        net["subnets"] = subnets
        net["encapsulation"] = net.get("provider:network_type")
        net["encapsulation_type"] = net.get("provider:network_type")
        net["segmentation_id"] = net.get("provider:segmentation_id")
        net["encapsulation_id"] = net.get("provider:segmentation_id")

        return net

    def delete_network(self, net_id, created_items=None):
        """
        Removes a tenant network from VIM and its associated elements
        :param net_id: VIM identifier of the network, provided by method new_network
        :param created_items: dictionary with extra items to be deleted. provided by method new_network
        Returns the network identifier or raises an exception upon error or when network is not found
        """
        self.logger.debug("Deleting network '%s' from VIM", net_id)

        if created_items is None:
            created_items = {}

        try:
            self._reload_connection()
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")
                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e).__name__, e
                        )
                    )

            # delete VM ports attached to this network before the network
            ports = self.neutron.list_ports(network_id=net_id)
            for p in ports["ports"]:
                try:
                    self.neutron.delete_port(p["id"])
                except Exception as e:
                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))

            self.neutron.delete_network(net_id)

            return net_id
        except (
            neExceptions.ConnectionFailed,
            neExceptions.NetworkNotFoundClient,
            neExceptions.NeutronException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def refresh_nets_status(self, net_list):
        """Get the status of the networks
        Params: the list of network identifiers
        Returns a dictionary with:
            net_id:         #VIM id of this network
                status:     #Mandatory. Text with one of:
                            #   DELETED (not found at vim)
                            #   VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #   OTHER (Vim reported other status not understood)
                            #   ERROR (VIM indicates an ERROR status)
                            #   ACTIVE, INACTIVE, DOWN (admin down),
                            #   BUILD (on building process)
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
        """
        net_dict = {}

        for net_id in net_list:
            net = {}

            try:
                net_vim = self.get_network(net_id)

                if net_vim["status"] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[net_vim["status"]]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim["status"]

                if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                    net["status"] = "DOWN"

                net["vim_info"] = self.serialize(net_vim)

                if net_vim.get("fault"):  # TODO
                    net["error_msg"] = str(net_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "DELETED"
                net["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "VIM_ERROR"
                net["error_msg"] = str(e)

            net_dict[net_id] = net

        return net_dict

    def get_flavor(self, flavor_id):
        """Obtain flavor details from the VIM. Returns the flavor dict details"""
        self.logger.debug("Getting flavor '%s'", flavor_id)

        try:
            self._reload_connection()
            flavor = self.nova.flavors.find(id=flavor_id)
            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)

            return flavor.to_dict()
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that matches the flavor description
        Returns the flavor_id or raises a vimconnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closest flavor that provides the same or more ram,
            vcpus and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a
            vimconnNotFoundException is raised
        """
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
                # if len(numas) > 1:
                #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
                # numa=numas[0]
                # numas = extended.get("numas")

            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                if epa:
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    @staticmethod
    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
        """Process resource quota and fill up extra_specs.
        Args:
            quota       (dict):         Keeping the quota of resources
            prefix      (str):          Prefix
            extra_specs (dict):         Dict to be filled to be used during flavor creation

        """
        if "limit" in quota:
            extra_specs["quota:" + prefix + "_limit"] = quota["limit"]

        if "reserve" in quota:
            extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]

        if "shares" in quota:
            extra_specs["quota:" + prefix + "_shares_level"] = "custom"
            extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]

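    # Illustrative input/output for process_resource_quota() (hypothetical values):
    #
    #     extra_specs = {}
    #     vimconnector.process_resource_quota(
    #         {"limit": 1000, "reserve": 500, "shares": 100}, "cpu", extra_specs
    #     )
    #     # extra_specs == {"quota:cpu_limit": 1000,
    #     #                 "quota:cpu_reservation": 500,
    #     #                 "quota:cpu_shares_level": "custom",
    #     #                 "quota:cpu_shares_share": 100}
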
    @staticmethod
    def process_numa_memory(
        numa: dict, node_id: Optional[int], extra_specs: dict
    ) -> None:
        """Set the memory in extra_specs.
        Args:
            numa        (dict):         A dictionary which includes numa information
            node_id     (int):          ID of numa node
            extra_specs (dict):         To be filled.

        """
        if not numa.get("memory"):
            return
        memory_mb = numa["memory"] * 1024
        memory = "hw:numa_mem.{}".format(node_id)
        extra_specs[memory] = int(memory_mb)

    @staticmethod
    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
        """Set the cpu in extra_specs.
        Args:
            numa        (dict):         A dictionary which includes numa information
            node_id     (int):          ID of numa node
            extra_specs (dict):         To be filled.

        """
        if not numa.get("vcpu"):
            return
        vcpu = numa["vcpu"]
        cpu = "hw:numa_cpus.{}".format(node_id)
        vcpu = ",".join(map(str, vcpu))
        extra_specs[cpu] = vcpu

    @staticmethod
    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has paired-threads.
        Args:
            numa        (dict):         A dictionary which includes numa information
            extra_specs (dict):         To be filled.

        Returns:
            threads     (int):          Number of virtual cpus

        """
        if not numa.get("paired-threads"):
            return

        # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
        threads = numa["paired-threads"] * 2
        extra_specs["hw:cpu_thread_policy"] = "require"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return threads

    @staticmethod
    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has cores.
        Args:
            numa        (dict):         A dictionary which includes numa information
            extra_specs (dict):         To be filled.

        Returns:
            cores       (int):          Number of virtual cpus

        """
        # cpu_thread_policy "isolate" implies that the host must not have an SMT
        # architecture, or a non-SMT architecture will be emulated
        if not numa.get("cores"):
            return
        cores = numa["cores"]
        extra_specs["hw:cpu_thread_policy"] = "isolate"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return cores

    @staticmethod
    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has threads.
        Args:
            numa        (dict):         A dictionary which includes numa information
            extra_specs (dict):         To be filled.

        Returns:
            threads     (int):          Number of virtual cpus

        """
        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
        if not numa.get("threads"):
            return
        threads = numa["threads"]
        extra_specs["hw:cpu_thread_policy"] = "prefer"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return threads

    def _process_numa_parameters_of_flavor(
        self, numas: List, extra_specs: Dict
    ) -> None:
        """Process numa parameters and fill up extra_specs.

        Args:
            numas       (list):         List of dictionary which includes numa information
            extra_specs (dict):         To be filled.

        """
        numa_nodes = len(numas)
        extra_specs["hw:numa_nodes"] = str(numa_nodes)
        cpu_cores, cpu_threads = 0, 0

        if self.vim_type == "VIO":
            self.process_vio_numa_nodes(numa_nodes, extra_specs)

        for numa in numas:
            if "id" in numa:
                node_id = numa["id"]
                # overwrite ram and vcpus
                # check if key "memory" is present in numa else use ram value at flavor
                self.process_numa_memory(numa, node_id, extra_specs)
                self.process_numa_vcpu(numa, node_id, extra_specs)

            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
            extra_specs["hw:cpu_sockets"] = str(numa_nodes)

            if "paired-threads" in numa:
                threads = self.process_numa_paired_threads(numa, extra_specs)
                cpu_threads += threads

            elif "cores" in numa:
                cores = self.process_numa_cores(numa, extra_specs)
                cpu_cores += cores

            elif "threads" in numa:
                threads = self.process_numa_threads(numa, extra_specs)
                cpu_threads += threads

        if cpu_cores:
            extra_specs["hw:cpu_cores"] = str(cpu_cores)
        if cpu_threads:
            extra_specs["hw:cpu_threads"] = str(cpu_threads)

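    # Illustrative outcome of _process_numa_parameters_of_flavor() for one numa
    # node with paired threads (hypothetical values):
    #
    #     numas = [{"id": 0, "memory": 4, "vcpu": [0, 1], "paired-threads": 2}]
    #     extra_specs = {}
    #     # after the call, extra_specs would contain:
    #     # {"hw:numa_nodes": "1", "hw:numa_mem.0": 4096, "hw:numa_cpus.0": "0,1",
    #     #  "hw:cpu_sockets": "1", "hw:cpu_thread_policy": "require",
    #     #  "hw:cpu_policy": "dedicated", "hw:cpu_threads": "4"}
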
    @staticmethod
    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
        """According to the number of numa nodes, update the extra_specs for VIO.

        Args:
            numa_nodes  (int):          Number of numa nodes
            extra_specs (dict):         Extra specs dict to be updated

        """
        # If there are several numas, we do not define specific affinity.
        extra_specs["vmware:latency_sensitivity_level"] = "high"

    def _change_flavor_name(
        self, name: str, name_suffix: int, flavor_data: dict
    ) -> str:
        """Change the flavor name if the name already exists.

        Args:
            name        (str):          Flavor name to be checked
            name_suffix (int):          Suffix to be appended to name
            flavor_data (dict):         Flavor dict

        Returns:
            name        (str):          New flavor name to be used

        """
        # Get used names
        fl = self.nova.flavors.list()
        fl_names = [f.name for f in fl]

        while name in fl_names:
            name_suffix += 1
            name = flavor_data["name"] + "-" + str(name_suffix)

        return name

    def _process_extended_config_of_flavor(
        self, extended: dict, extra_specs: dict
    ) -> None:
        """Process the extended dict to fill up extra_specs.
        Args:
            extended    (dict):         Keeping the extra specification of flavor
            extra_specs (dict):         Dict to be filled to be used during flavor creation

        """
        quotas = {
            "cpu-quota": "cpu",
            "mem-quota": "memory",
            "vif-quota": "vif",
            "disk-io-quota": "disk_io",
        }

        page_sizes = {
            "LARGE": "large",
            "SMALL": "small",
            "SIZE_2MB": "2MB",
            "SIZE_1GB": "1GB",
            "PREFER_LARGE": "any",
        }

        policies = {
            "cpu-pinning-policy": "hw:cpu_policy",
            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
            "mem-policy": "hw:numa_mempolicy",
        }

        numas = extended.get("numas")
        if numas:
            self._process_numa_parameters_of_flavor(numas, extra_specs)

        for quota, item in quotas.items():
            if quota in extended.keys():
                self.process_resource_quota(extended.get(quota), item, extra_specs)

        # Set the mempage size as specified in the descriptor
        if extended.get("mempage-size"):
            if extended["mempage-size"] in page_sizes.keys():
                extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
            else:
                # Normally, validations in NBI should not allow this condition.
                self.logger.debug(
                    "Invalid mempage-size %s. Will be ignored",
                    extended.get("mempage-size"),
                )

        for policy, hw_policy in policies.items():
            if extended.get(policy):
                extra_specs[hw_policy] = extended[policy].lower()

    @staticmethod
    def _get_flavor_details(flavor_data: dict) -> Tuple:
        """Returns the details of flavor
        Args:
            flavor_data (dict):         Dictionary that includes required flavor details

        Returns:
            ram, vcpus, extra_specs, extended (tuple): Main items of required flavor

        """
        return (
            flavor_data.get("ram", 64),
            flavor_data.get("vcpus", 1),
            {},
            flavor_data.get("extended"),
        )

    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        If change_name_if_used is True, it will change the name in case of conflict,
        because name repetition is not supported.

        Args:
            flavor_data (dict):             Flavor details to be processed
            change_name_if_used (bool):     Change name in case of conflict

        Returns:
            flavor_id (str):                flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )
                    if extended:
                        self._process_extended_config_of_flavor(extended, extra_specs)

                    # Create flavor

                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)

        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)

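    # Illustrative flavor_data accepted by new_flavor() (hypothetical values); the
    # optional "extended" section drives the EPA extra_specs processing above:
    #
    #     flavor_id = vim.new_flavor({
    #         "name": "vnf-flavor",
    #         "ram": 4096,
    #         "vcpus": 4,
    #         "disk": 10,
    #         "extended": {
    #             "mempage-size": "LARGE",
    #             "cpu-quota": {"limit": 10000, "shares": 100},
    #             "numas": [{"id": 0, "memory": 4, "paired-threads": 2}],
    #         },
    #     })
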
    def delete_flavor(self, flavor_id):
        """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
        try:
            self._reload_connection()
            self.nova.flavors.delete(flavor_id)

            return flavor_id
        # except nvExceptions.BadRequest as e:
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_image(self, image_dict):
        """
        Adds a tenant image to VIM. image_dict is a dictionary with:
            name: name
            disk_format: qcow2, vhd, vmdk, raw (by default), ...
            location: path or URI
            public: "yes" or "no"
            metadata: metadata of the image
        Returns the image_id
        """
        retry = 0
        max_retries = 3

        while retry < max_retries:
            retry += 1
            try:
                self._reload_connection()

                # determine format http://docs.openstack.org/developer/glance/formats.html
                if "disk_format" in image_dict:
                    disk_format = image_dict["disk_format"]
                else:  # autodiscover based on extension
                    if image_dict["location"].endswith(".qcow2"):
                        disk_format = "qcow2"
                    elif image_dict["location"].endswith(".vhd"):
                        disk_format = "vhd"
                    elif image_dict["location"].endswith(".vmdk"):
                        disk_format = "vmdk"
                    elif image_dict["location"].endswith(".vdi"):
                        disk_format = "vdi"
                    elif image_dict["location"].endswith(".iso"):
                        disk_format = "iso"
                    elif image_dict["location"].endswith(".aki"):
                        disk_format = "aki"
                    elif image_dict["location"].endswith(".ari"):
                        disk_format = "ari"
                    elif image_dict["location"].endswith(".ami"):
                        disk_format = "ami"
                    else:
                        disk_format = "raw"

                self.logger.debug(
                    "new_image: '%s' loading from '%s'",
                    image_dict["name"],
                    image_dict["location"],
                )
                if self.vim_type == "VIO":
                    container_format = "bare"
                    if "container_format" in image_dict:
                        container_format = image_dict["container_format"]

                    new_image = self.glance.images.create(
                        name=image_dict["name"],
                        container_format=container_format,
                        disk_format=disk_format,
                    )
                else:
                    new_image = self.glance.images.create(name=image_dict["name"])

                if image_dict["location"].startswith("http"):
                    # TODO there is not a method to direct download. It must be downloaded locally with requests
                    raise vimconn.VimConnNotImplemented("Cannot create image from URL")
                else:  # local path
                    with open(image_dict["location"], "rb") as fimage:
                        self.glance.images.upload(new_image.id, fimage)
                        # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                        # image_dict.get("public","yes")=="yes",
                        #    container_format="bare", data=fimage, disk_format=disk_format)

                metadata_to_load = image_dict.get("metadata") or {}

                # TODO location is a reserved word for current openstack versions. fixed for VIO, please check
                # for openstack
                if self.vim_type == "VIO":
                    metadata_to_load["upload_location"] = image_dict["location"]
                else:
                    metadata_to_load["location"] = image_dict["location"]

                self.glance.images.update(new_image.id, **metadata_to_load)

                return new_image.id
            except (
                nvExceptions.Conflict,
                ksExceptions.ClientException,
                nvExceptions.ClientException,
            ) as e:
                self._format_exception(e)
            except (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
            ) as e:
                if retry < max_retries:
                    continue

                self._format_exception(e)
            except IOError as e:  # can not open the file
                raise vimconn.VimConnConnectionException(
                    "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                    http_code=vimconn.HTTP_Bad_Request,
                )

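    # Illustrative image_dict accepted by new_image() (hypothetical path/values);
    # disk_format is autodiscovered from the file extension when omitted:
    #
    #     image_id = vim.new_image({
    #         "name": "ubuntu-20.04",
    #         "location": "/tmp/ubuntu-20.04.qcow2",
    #         "metadata": {"use_incremental": "no"},
    #     })
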
    def delete_image(self, image_id):
        """Deletes a tenant image from openstack VIM. Returns the old id"""
        try:
            self._reload_connection()
            self.glance.images.delete(image_id)

            return image_id
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            gl1Exceptions.HTTPNotFound,
            ConnectionError,
        ) as e:  # TODO remove
            self._format_exception(e)

    def get_image_id_from_path(self, path):
        """Get the image id from image path in the VIM database. Returns the image_id"""
        try:
            self._reload_connection()
            images = self.glance.images.list()

            for image in images:
                if image.metadata.get("location") == path:
                    return image.id

            raise vimconn.VimConnNotFoundException(
                "image with location '{}' not found".format(path)
            )
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            self._format_exception(e)

1766 def get_image_list(self, filter_dict={}):
1767 """Obtain tenant images from VIM
1768 Filter_dict can be:
1769 id: image id
1770 name: image name
1771 checksum: image checksum
1772 Returns the image list of dictionaries:
1773 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1774 List can be empty
1775 """
1776 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1777
1778 try:
1779 self._reload_connection()
1780 # filter_dict_os = filter_dict.copy()
1781 # Filter client-side by the supported fields: name, id and checksum; other keys are ignored.
1782 image_list = self.glance.images.list()
1783 filtered_list = []
1784
1785 for image in image_list:
1786 try:
1787 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1788 continue
1789
1790 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1791 continue
1792
1793 if (
1794 filter_dict.get("checksum")
1795 and image["checksum"] != filter_dict["checksum"]
1796 ):
1797 continue
1798
1799 filtered_list.append(image.copy())
1800 except gl1Exceptions.HTTPNotFound:
1801 pass
1802
1803 return filtered_list
1804 except (
1805 ksExceptions.ClientException,
1806 nvExceptions.ClientException,
1807 gl1Exceptions.CommunicationError,
1808 ConnectionError,
1809 ) as e:
1810 self._format_exception(e)
1811
1812 def __wait_for_vm(self, vm_id, status):
1813 """wait until vm is in the desired status and return True.
1814 If the VM gets in ERROR status, return false.
1815 If the timeout is reached generate an exception"""
1816 elapsed_time = 0
1817 while elapsed_time < server_timeout:
1818 vm_status = self.nova.servers.get(vm_id).status
1819
1820 if vm_status == status:
1821 return True
1822
1823 if vm_status == "ERROR":
1824 return False
1825
1826 time.sleep(5)
1827 elapsed_time += 5
1828
1829 # if we exceeded the timeout rollback
1830 if elapsed_time >= server_timeout:
1831 raise vimconn.VimConnException(
1832 "Timeout waiting for instance " + vm_id + " to get " + status,
1833 http_code=vimconn.HTTP_Request_Timeout,
1834 )
1835
1836 def _get_openstack_availablity_zones(self):
1837 """
1838 Get the availability zones available from openstack
1839 :return:
1840 """
1841 try:
1842 openstack_availability_zone = self.nova.availability_zones.list()
1843 openstack_availability_zone = [
1844 str(zone.zoneName)
1845 for zone in openstack_availability_zone
1846 if zone.zoneName != "internal"
1847 ]
1848
1849 return openstack_availability_zone
1850 except Exception:
1851 return None
1852
1853 def _set_availablity_zones(self):
1854 """
1855 Set vim availability zone
1856 :return:
1857 """
1858 if "availability_zone" in self.config:
1859 vim_availability_zones = self.config.get("availability_zone")
1860
1861 if isinstance(vim_availability_zones, str):
1862 self.availability_zone = [vim_availability_zones]
1863 elif isinstance(vim_availability_zones, list):
1864 self.availability_zone = vim_availability_zones
1865 else:
1866 self.availability_zone = self._get_openstack_availablity_zones()
1867
1868 def _get_vm_availability_zone(
1869 self, availability_zone_index, availability_zone_list
1870 ):
1871 """
1872 Return the availability zone to be used by the created VM.
1873 :return: The VIM availability zone to be used or None
1874 """
1875 if availability_zone_index is None:
1876 if not self.config.get("availability_zone"):
1877 return None
1878 elif isinstance(self.config.get("availability_zone"), str):
1879 return self.config["availability_zone"]
1880 else:
1881 # TODO consider using a different parameter at config for default AV and AV list match
1882 return self.config["availability_zone"][0]
1883
1884 vim_availability_zones = self.availability_zone
1885 # check if VIM offers enough availability zones as described in the VNFD
1886 if vim_availability_zones and len(availability_zone_list) <= len(
1887 vim_availability_zones
1888 ):
1889 # check if all the names of NFV AV match VIM AV names
1890 match_by_index = False
1891 for av in availability_zone_list:
1892 if av not in vim_availability_zones:
1893 match_by_index = True
1894 break
1895
1896 if match_by_index:
1897 return vim_availability_zones[availability_zone_index]
1898 else:
1899 return availability_zone_list[availability_zone_index]
1900 else:
1901 raise vimconn.VimConnConflictException(
1902 "No enough availability zones at VIM for this deployment"
1903 )
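# Example of the rule above (assumed zone names): with VIM zones
# ["nova", "zone-2"] and VNFD list ["nova", "zone-2"], every name matches,
# so the zone is taken from the VNFD list at availability_zone_index; with
# VNFD list ["A1", "A2"] (no name match), the index selects from the VIM zones.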
1904
1905 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1906 """Fill up the security_groups in the port_dict.
1907
1908 Args:
1909 net (dict): Network details
1910 port_dict (dict): Port details
1911
1912 """
1913 if (
1914 self.config.get("security_groups")
1915 and net.get("port_security") is not False
1916 and not self.config.get("no_port_security_extension")
1917 ):
1918 if not self.security_groups_id:
1919 self._get_ids_from_name()
1920
1921 port_dict["security_groups"] = self.security_groups_id
1922
1923 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1924 """Fill up the network binding depending on network type in the port_dict.
1925
1926 Args:
1927 net (dict): Network details
1928 port_dict (dict): Port details
1929
1930 """
1931 if not net.get("type"):
1932 raise vimconn.VimConnException("Type is missing in the network details.")
1933
1934 if net["type"] == "virtual":
1935 pass
1936
1937 # For VF
1938 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1939 port_dict["binding:vnic_type"] = "direct"
1940
1941 # VIO specific Changes
1942 if self.vim_type == "VIO":
1943 # Need to create port with port_security_enabled = False and no-security-groups
1944 port_dict["port_security_enabled"] = False
1945 port_dict["provider_security_groups"] = []
1946 port_dict["security_groups"] = []
1947
1948 else:
1949 # For PT PCI-PASSTHROUGH
1950 port_dict["binding:vnic_type"] = "direct-physical"
1951
1952 @staticmethod
1953 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1954 """Set the "ip" parameter in net dictionary.
1955
1956 Args:
1957 new_port (dict): Newly created port
1958 net (dict): Network details
1959
1960 """
1961 fixed_ips = new_port["port"].get("fixed_ips")
1962
1963 if fixed_ips:
1964 net["ip"] = fixed_ips[0].get("ip_address")
1965 else:
1966 net["ip"] = None
1967
1968 @staticmethod
1969 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1970 """Fill up the mac_address and fixed_ips in port_dict.
1971
1972 Args:
1973 net (dict): Network details
1974 port_dict (dict): Port details
1975
1976 """
1977 if net.get("mac_address"):
1978 port_dict["mac_address"] = net["mac_address"]
1979
1980 ip_dual_list = []
1981 if ip_list := net.get("ip_address"):
1982 if not isinstance(ip_list, list):
1983 ip_list = [ip_list]
1984 for ip in ip_list:
1985 ip_dict = {"ip_address": ip}
1986 ip_dual_list.append(ip_dict)
1987 port_dict["fixed_ips"] = ip_dual_list
1988 # TODO add "subnet_id": <subnet_id>
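# Illustrative result (hypothetical addresses): net = {"ip_address":
# ["10.0.0.5", "2001:db8::5"]} produces port_dict["fixed_ips"] =
# [{"ip_address": "10.0.0.5"}, {"ip_address": "2001:db8::5"}], i.e. a
# dual-stack port request.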
1989
1990 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1991 """Create new port using neutron.
1992
1993 Args:
1994 port_dict (dict): Port details
1995 created_items (dict): All created items
1996 net (dict): Network details
1997
1998 Returns:
1999 new_port (dict): Newly created port
2000
2001 """
2002 new_port = self.neutron.create_port({"port": port_dict})
2003 created_items["port:" + str(new_port["port"]["id"])] = True
2004 net["mac_address"] = new_port["port"]["mac_address"]
2005 net["vim_id"] = new_port["port"]["id"]
2006
2007 return new_port
2008
2009 def _create_port(
2010 self, net: dict, name: str, created_items: dict
2011 ) -> Tuple[dict, dict]:
2012 """Create port using net details.
2013
2014 Args:
2015 net (dict): Network details
2016 name (str): Name to be used as network name if net dict does not include name
2017 created_items (dict): All created items
2018
2019 Returns:
2020 new_port, port: Newly created port and the port dictionary for the nova nic list
2021
2022 """
2023
2024 port_dict = {
2025 "network_id": net["net_id"],
2026 "name": net.get("name"),
2027 "admin_state_up": True,
2028 }
2029
2030 if not port_dict["name"]:
2031 port_dict["name"] = name
2032
2033 self._prepare_port_dict_security_groups(net, port_dict)
2034
2035 self._prepare_port_dict_binding(net, port_dict)
2036
2037 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
2038
2039 new_port = self._create_new_port(port_dict, created_items, net)
2040
2041 vimconnector._set_fixed_ip(new_port, net)
2042
2043 port = {"port-id": new_port["port"]["id"]}
2044
2045 if tuple(int(x) for x in self.nova.api_version.get_string().split(".")) >= (2, 32):  # numeric compare; float() would mis-order e.g. "2.100"
2046 port["tag"] = new_port["port"]["name"]
2047
2048 return new_port, port
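# Sketch of the returned pair (hypothetical uuid): new_port is the raw
# neutron response, {"port": {"id": "<uuid>", "mac_address": ..., ...}};
# port is the nova nic entry, {"port-id": "<uuid>"} plus {"tag": <port name>}
# when the compute API microversion is >= 2.32.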
2049
2050 def _prepare_network_for_vminstance(
2051 self,
2052 name: str,
2053 net_list: list,
2054 created_items: dict,
2055 net_list_vim: list,
2056 external_network: list,
2057 no_secured_ports: list,
2058 ) -> None:
2059 """Create port and fill up net dictionary for new VM instance creation.
2060
2061 Args:
2062 name (str): Name of network
2063 net_list (list): List of networks
2064 created_items (dict): All created items belonging to a VM
2065 net_list_vim (list): List of ports
2066 external_network (list): List of external-networks
2067 no_secured_ports (list): Port security disabled ports
2068 """
2069
2070 self._reload_connection()
2071
2072 for net in net_list:
2073 # Skip non-connected iface
2074 if not net.get("net_id"):
2075 continue
2076
2077 new_port, port = self._create_port(net, name, created_items)
2078
2079 net_list_vim.append(port)
2080
2081 if net.get("floating_ip", False):
2082 net["exit_on_floating_ip_error"] = True
2083 external_network.append(net)
2084
2085 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2086 net["exit_on_floating_ip_error"] = False
2087 external_network.append(net)
2088 net["floating_ip"] = self.config.get("use_floating_ip")
2089
2090 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2091 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2092 if net.get("port_security") is False and not self.config.get(
2093 "no_port_security_extension"
2094 ):
2095 no_secured_ports.append(
2096 (
2097 new_port["port"]["id"],
2098 net.get("port_security_disable_strategy"),
2099 )
2100 )
2101
2102 def _prepare_persistent_root_volumes(
2103 self,
2104 name: str,
2105 vm_av_zone: str,
2106 disk: dict,
2107 base_disk_index: int,
2108 block_device_mapping: dict,
2109 existing_vim_volumes: list,
2110 created_items: dict,
2111 ) -> Optional[str]:
2112 """Prepare persistent root volumes for new VM instance.
2113
2114 Args:
2115 name (str): Name of VM instance
2116 vm_av_zone (str): Availability zone for the VM
2117 disk (dict): Disk details
2118 base_disk_index (int): Disk index
2119 block_device_mapping (dict): Block device details
2120 existing_vim_volumes (list): Existing disk details
2121 created_items (dict): All created items belonging to the VM
2122
2123 Returns:
2124 boot_volume_id (str): ID of boot volume
2125
2126 """
2127 # Disk may include only vim_volume_id or only vim_id.
2128 # Use an existing persistent root volume, found via vim_volume_id or vim_id
2129 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2130
2131 if disk.get(key_id):
2132 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2133 existing_vim_volumes.append({"id": disk[key_id]})
2134
2135 else:
2136 # Create persistent root volume
2137 volume = self.cinder.volumes.create(
2138 size=disk["size"],
2139 name=name + "vd" + chr(base_disk_index),
2140 imageRef=disk["image_id"],
2141 # Make sure volume is in the same AZ as the VM to be attached to
2142 availability_zone=vm_av_zone,
2143 )
2144 boot_volume_id = volume.id
2145 self.update_block_device_mapping(
2146 volume=volume,
2147 block_device_mapping=block_device_mapping,
2148 base_disk_index=base_disk_index,
2149 disk=disk,
2150 created_items=created_items,
2151 )
2152
2153 return boot_volume_id
2154
2155 @staticmethod
2156 def update_block_device_mapping(
2157 volume: object,
2158 block_device_mapping: dict,
2159 base_disk_index: int,
2160 disk: dict,
2161 created_items: dict,
2162 ) -> None:
2163 """Add volume information to block device mapping dict.
2164 Args:
2165 volume (object): Created volume object
2166 block_device_mapping (dict): Block device details
2167 base_disk_index (int): Disk index
2168 disk (dict): Disk details
2169 created_items (dict): All created items belonging to the VM
2170 """
2171 if not volume:
2172 raise vimconn.VimConnException("Volume is empty.")
2173
2174 if not hasattr(volume, "id"):
2175 raise vimconn.VimConnException(
2176 "Created volume is not valid, does not have id attribute."
2177 )
2178
2179 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2180 if disk.get("multiattach"): # multiattach volumes do not belong to VDUs
2181 return
2182 volume_txt = "volume:" + str(volume.id)
2183 if disk.get("keep"):
2184 volume_txt += ":keep"
2185 created_items[volume_txt] = True
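# Illustrative outcome (hypothetical uuid): with base_disk_index ord("b"),
# block_device_mapping gains {"vdb": "<volume-uuid>"} and created_items gains
# "volume:<volume-uuid>" (with a ":keep" suffix when disk["keep"] is set);
# multiattach volumes are mapped but not recorded in created_items.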
2186
2187 def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:
2188 try:
2189 volume = self.cinder.volumes.create(
2190 size=shared_volume_data["size"],
2191 name=shared_volume_data["name"],
2192 volume_type="multiattach",
2193 )
2194 return (volume.name, volume.id)
2195 except (ConnectionError, KeyError) as e:
2196 self._format_exception(e)
2197
2198 def _prepare_shared_volumes(
2199 self,
2200 name: str,
2201 disk: dict,
2202 base_disk_index: int,
2203 block_device_mapping: dict,
2204 existing_vim_volumes: list,
2205 created_items: dict,
2206 ):
2207 volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
2208 if volumes.get(disk["name"]):
2209 sv_id = volumes[disk["name"]]
2210 max_retries = 3
2211 vol_status = ""
2212 # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
2213 while max_retries:
2214 max_retries -= 1
2215 volume = self.cinder.volumes.get(sv_id)
2216 vol_status = volume.status
2217 if volume.status not in ("in-use", "available"):
2218 time.sleep(5)
2219 continue
2220 self.update_block_device_mapping(
2221 volume=volume,
2222 block_device_mapping=block_device_mapping,
2223 base_disk_index=base_disk_index,
2224 disk=disk,
2225 created_items=created_items,
2226 )
2227 return
2228 raise vimconn.VimConnException(
2229 "Shared volume is not prepared, status is: {}".format(vol_status),
2230 http_code=vimconn.HTTP_Internal_Server_Error,
2231 )
2232
2233 def _prepare_non_root_persistent_volumes(
2234 self,
2235 name: str,
2236 disk: dict,
2237 vm_av_zone: str,
2238 block_device_mapping: dict,
2239 base_disk_index: int,
2240 existing_vim_volumes: list,
2241 created_items: dict,
2242 ) -> None:
2243 """Prepare persistent volumes for new VM instance.
2244
2245 Args:
2246 name (str): Name of VM instance
2247 disk (dict): Disk details
2248 vm_av_zone (str): Availability zone for the VM
2249 block_device_mapping (dict): Block device details
2250 base_disk_index (int): Disk index
2251 existing_vim_volumes (list): Existing disk details
2252 created_items (dict): All created items belonging to the VM
2253 """
2254 # Non-root persistent volumes
2255 # Disk may include only vim_volume_id or only vim_id.
2256 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2257 if disk.get(key_id):
2258 # Use existing persistent volume
2259 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2260 existing_vim_volumes.append({"id": disk[key_id]})
2261 else:
2262 volume_name = f"{name}vd{chr(base_disk_index)}"
2263 volume = self.cinder.volumes.create(
2264 size=disk["size"],
2265 name=volume_name,
2266 # Make sure volume is in the same AZ as the VM to be attached to
2267 availability_zone=vm_av_zone,
2268 )
2269 self.update_block_device_mapping(
2270 volume=volume,
2271 block_device_mapping=block_device_mapping,
2272 base_disk_index=base_disk_index,
2273 disk=disk,
2274 created_items=created_items,
2275 )
2276
2277 def _wait_for_created_volumes_availability(
2278 self, elapsed_time: int, created_items: dict
2279 ) -> Optional[int]:
2280 """Wait till created volumes become available.
2281
2282 Args:
2283 elapsed_time (int): Time already elapsed while waiting
2284 created_items (dict): All created items belonging to the VM
2285
2286 Returns:
2287 elapsed_time (int): Time spent while waiting
2288
2289 """
2290 while elapsed_time < volume_timeout:
2291 for created_item in created_items:
2292 v, volume_id = (
2293 created_item.split(":")[0],
2294 created_item.split(":")[1],
2295 )
2296 if v == "volume":
2297 volume = self.cinder.volumes.get(volume_id)
2298 if (
2299 volume.volume_type == "multiattach"
2300 and volume.status == "in-use"
2301 ):
2302 return elapsed_time
2303 elif volume.status != "available":
2304 break
2305 else:
2306 # All ready: break from while
2307 break
2308
2309 time.sleep(5)
2310 elapsed_time += 5
2311
2312 return elapsed_time
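# created_items keys follow "<type>:<id>[:keep]", so the split above yields
# ("volume", "<id>") for volume entries and skips others such as
# "port:<id>" (illustrative ids).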
2313
2314 def _wait_for_existing_volumes_availability(
2315 self, elapsed_time: int, existing_vim_volumes: list
2316 ) -> Optional[int]:
2317 """Wait till existing volumes become available.
2318
2319 Args:
2320 elapsed_time (int): Time already elapsed while waiting
2321 existing_vim_volumes (list): Existing volume details
2322
2323 Returns:
2324 elapsed_time (int): Time spent while waiting
2325
2326 """
2327
2328 while elapsed_time < volume_timeout:
2329 for volume in existing_vim_volumes:
2330 v = self.cinder.volumes.get(volume["id"])
2331 if v.volume_type == "multiattach" and v.status == "in-use":
2332 return elapsed_time
2333 elif v.status != "available":
2334 break
2335 else: # all ready: break from while
2336 break
2337
2338 time.sleep(5)
2339 elapsed_time += 5
2340
2341 return elapsed_time
2342
2343 def _prepare_disk_for_vminstance(
2344 self,
2345 name: str,
2346 existing_vim_volumes: list,
2347 created_items: dict,
2348 vm_av_zone: str,
2349 block_device_mapping: dict,
2350 disk_list: list = None,
2351 ) -> None:
2352 """Prepare all volumes for new VM instance.
2353
2354 Args:
2355 name (str): Name of Instance
2356 existing_vim_volumes (list): List of existing volumes
2357 created_items (dict): All created items belonging to the VM
2358 vm_av_zone (str): VM availability zone
2359 block_device_mapping (dict): Block devices to be attached to VM
2360 disk_list (list): List of disks
2361
2362 """
2363 # Create additional volumes in case these are present in disk_list
2364 base_disk_index = ord("b")
2365 boot_volume_id = None
2366 elapsed_time = 0
2367 for disk in disk_list:
2368 if "image_id" in disk:
2369 # Root persistent volume
2370 base_disk_index = ord("a")
2371 boot_volume_id = self._prepare_persistent_root_volumes(
2372 name=name,
2373 vm_av_zone=vm_av_zone,
2374 disk=disk,
2375 base_disk_index=base_disk_index,
2376 block_device_mapping=block_device_mapping,
2377 existing_vim_volumes=existing_vim_volumes,
2378 created_items=created_items,
2379 )
2380 elif disk.get("multiattach"):
2381 self._prepare_shared_volumes(
2382 name=name,
2383 disk=disk,
2384 base_disk_index=base_disk_index,
2385 block_device_mapping=block_device_mapping,
2386 existing_vim_volumes=existing_vim_volumes,
2387 created_items=created_items,
2388 )
2389 else:
2390 # Non-root persistent volume
2391 self._prepare_non_root_persistent_volumes(
2392 name=name,
2393 disk=disk,
2394 vm_av_zone=vm_av_zone,
2395 block_device_mapping=block_device_mapping,
2396 base_disk_index=base_disk_index,
2397 existing_vim_volumes=existing_vim_volumes,
2398 created_items=created_items,
2399 )
2400 base_disk_index += 1
2401
2402 # Wait until created volumes reach 'available' status
2403 elapsed_time = self._wait_for_created_volumes_availability(
2404 elapsed_time, created_items
2405 )
2406 # Wait until existing volumes in vim reach 'available' status
2407 elapsed_time = self._wait_for_existing_volumes_availability(
2408 elapsed_time, existing_vim_volumes
2409 )
2410 # If we exceeded the timeout rollback
2411 if elapsed_time >= volume_timeout:
2412 raise vimconn.VimConnException(
2413 "Timeout creating volumes for instance " + name,
2414 http_code=vimconn.HTTP_Request_Timeout,
2415 )
2416 if boot_volume_id:
2417 self.cinder.volumes.set_bootable(boot_volume_id, True)
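# Illustrative disk_list entries handled above (hypothetical values):
#   {"image_id": "<glance-uuid>", "size": 10}  -> persistent root volume (vda)
#   {"name": "shared01", "multiattach": True}  -> existing shared volume lookup
#   {"size": 5}                                -> non-root persistent volume (vdb, vdc, ...)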
2418
2419 def _find_the_external_network_for_floating_ip(self):
2420 """Get the external network ip in order to create floating IP.
2421
2422 Returns:
2423 pool_id (str): External network pool ID
2424
2425 """
2426
2427 # Find the external network
2428 external_nets = list()
2429
2430 for net in self.neutron.list_networks()["networks"]:
2431 if net["router:external"]:
2432 external_nets.append(net)
2433
2434 if len(external_nets) == 0:
2435 raise vimconn.VimConnException(
2436 "Cannot create floating_ip automatically since "
2437 "no external network is present",
2438 http_code=vimconn.HTTP_Conflict,
2439 )
2440
2441 if len(external_nets) > 1:
2442 raise vimconn.VimConnException(
2443 "Cannot create floating_ip automatically since "
2444 "multiple external networks are present",
2445 http_code=vimconn.HTTP_Conflict,
2446 )
2447
2448 # Pool ID
2449 return external_nets[0].get("id")
2450
2451 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2452 """Trigger neutron to create a new floating IP using external network ID.
2453
2454 Args:
2455 param (dict): Input parameters to create a floating IP
2456 created_items (dict): All created items belonging to the new VM instance
2457
2458 Raises:
2459
2460 VimConnException
2461 """
2462 try:
2463 self.logger.debug("Creating floating IP")
2464 new_floating_ip = self.neutron.create_floatingip(param)
2465 free_floating_ip = new_floating_ip["floatingip"]["id"]
2466 created_items["floating_ip:" + str(free_floating_ip)] = True
2467
2468 except Exception as e:
2469 raise vimconn.VimConnException(
2470 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2471 http_code=vimconn.HTTP_Conflict,
2472 )
2473
2474 def _create_floating_ip(
2475 self, floating_network: dict, server: object, created_items: dict
2476 ) -> None:
2477 """Get the available Pool ID and create a new floating IP.
2478
2479 Args:
2480 floating_network (dict): Dict including external network ID
2481 server (object): Server object
2482 created_items (dict): All created items belonging to the new VM instance
2483
2484 """
2485
2486 # Pool_id is available
2487 if (
2488 isinstance(floating_network["floating_ip"], str)
2489 and floating_network["floating_ip"].lower() != "true"
2490 ):
2491 pool_id = floating_network["floating_ip"]
2492
2493 # Find the Pool_id
2494 else:
2495 pool_id = self._find_the_external_network_for_floating_ip()
2496
2497 param = {
2498 "floatingip": {
2499 "floating_network_id": pool_id,
2500 "tenant_id": server.tenant_id,
2501 }
2502 }
2503
2504 self._neutron_create_float_ip(param, created_items)
2505
2506 def _find_floating_ip(
2507 self,
2508 server: object,
2509 floating_ips: list,
2510 floating_network: dict,
2511 ) -> Optional[str]:
2512 """Find the available free floating IPs if there are.
2513
2514 Args:
2515 server (object): Server object
2516 floating_ips (list): List of floating IPs
2517 floating_network (dict): Details of floating network such as ID
2518
2519 Returns:
2520 free_floating_ip (str): Free floating ip address
2521
2522 """
2523 for fip in floating_ips:
2524 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2525 continue
2526
2527 if isinstance(floating_network["floating_ip"], str):
2528 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2529 continue
2530
2531 return fip["id"]
2532
2533 def _assign_floating_ip(
2534 self, free_floating_ip: str, floating_network: dict
2535 ) -> Dict:
2536 """Assign the free floating ip address to port.
2537
2538 Args:
2539 free_floating_ip (str): Floating IP to be assigned
2540 floating_network (dict): ID of floating network
2541
2542 Returns:
2543 fip (dict): Floating ip details
2544
2545 """
2546 # The vim_id key contains the neutron.port_id
2547 self.neutron.update_floatingip(
2548 free_floating_ip,
2549 {"floatingip": {"port_id": floating_network["vim_id"]}},
2550 )
2551 # Wait 5 seconds to ensure it is not re-assigned to another VM (race condition)
2552 time.sleep(5)
2553
2554 return self.neutron.show_floatingip(free_floating_ip)
2555
2556 def _get_free_floating_ip(
2557 self, server: object, floating_network: dict
2558 ) -> Optional[str]:
2559 """Get the free floating IP address.
2560
2561 Args:
2562 server (object): Server Object
2563 floating_network (dict): Floating network details
2564
2565 Returns:
2566 free_floating_ip (str): Free floating ip address
2567
2568 """
2569
2570 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2571
2572 # Randomize
2573 random.shuffle(floating_ips)
2574
2575 return self._find_floating_ip(server, floating_ips, floating_network)
2576
2577 def _prepare_external_network_for_vminstance(
2578 self,
2579 external_network: list,
2580 server: object,
2581 created_items: dict,
2582 vm_start_time: float,
2583 ) -> None:
2584 """Assign floating IP address for VM instance.
2585
2586 Args:
2587 external_network (list): ID of External network
2588 server (object): Server Object
2589 created_items (dict): All created items belonging to the new VM instance
2590 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2591
2592 Raises:
2593 VimConnException
2594
2595 """
2596 for floating_network in external_network:
2597 try:
2598 assigned = False
2599 floating_ip_retries = 3
2600 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2601 # several times
2602 while not assigned:
2603 free_floating_ip = self._get_free_floating_ip(
2604 server, floating_network
2605 )
2606
2607 if not free_floating_ip:
2608 self._create_floating_ip(
2609 floating_network, server, created_items
2610 )
continue  # loop again to pick up the newly created floating IP
2611
2612 try:
2613 # For race condition ensure not already assigned
2614 fip = self.neutron.show_floatingip(free_floating_ip)
2615
2616 if fip["floatingip"].get("port_id"):
2617 continue
2618
2619 # Assign floating ip
2620 fip = self._assign_floating_ip(
2621 free_floating_ip, floating_network
2622 )
2623
2624 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2625 self.logger.warning(
2626 "floating_ip {} re-assigned to other port".format(
2627 free_floating_ip
2628 )
2629 )
2630 continue
2631
2632 self.logger.debug(
2633 "Assigned floating_ip {} to VM {}".format(
2634 free_floating_ip, server.id
2635 )
2636 )
2637
2638 assigned = True
2639
2640 except Exception as e:
2641 # Openstack needs some time after VM creation to assign an IP, so retry if it fails
2642 vm_status = self.nova.servers.get(server.id).status
2643
2644 if vm_status not in ("ACTIVE", "ERROR"):
2645 if time.time() - vm_start_time < server_timeout:
2646 time.sleep(5)
2647 continue
2648 elif floating_ip_retries > 0:
2649 floating_ip_retries -= 1
2650 continue
2651
2652 raise vimconn.VimConnException(
2653 "Cannot create floating_ip: {} {}".format(
2654 type(e).__name__, e
2655 ),
2656 http_code=vimconn.HTTP_Conflict,
2657 )
2658
2659 except Exception as e:
2660 if not floating_network["exit_on_floating_ip_error"]:
2661 self.logger.error("Cannot create floating_ip. %s", str(e))
2662 continue
2663
2664 raise
2665
2666 def _update_port_security_for_vminstance(
2667 self,
2668 no_secured_ports: list,
2669 server: object,
2670 ) -> None:
2671 """Updates the port security according to no_secured_ports list.
2672
2673 Args:
2674 no_secured_ports (list): List of ports whose security will be disabled
2675 server (object): Server Object
2676
2677 Raises:
2678 VimConnException
2679
2680 """
2681 # Wait until the VM is active and then disable the port-security
2682 if no_secured_ports:
2683 self.__wait_for_vm(server.id, "ACTIVE")
2684
2685 for port in no_secured_ports:
2686 port_update = {
2687 "port": {"port_security_enabled": False, "security_groups": None}
2688 }
2689
2690 if port[1] == "allow-address-pairs":
2691 port_update = {
2692 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2693 }
2694
2695 try:
2696 self.neutron.update_port(port[0], port_update)
2697
2698 except Exception:
2699 raise vimconn.VimConnException(
2700 "It was not possible to disable port security for port {}".format(
2701 port[0]
2702 )
2703 )
2704
2705 def new_vminstance(
2706 self,
2707 name: str,
2708 description: str,
2709 start: bool,
2710 image_id: str,
2711 flavor_id: str,
2712 affinity_group_list: list,
2713 net_list: list,
2714 cloud_config=None,
2715 disk_list=None,
2716 availability_zone_index=None,
2717 availability_zone_list=None,
2718 ) -> tuple:
2719 """Adds a VM instance to VIM.
2720
2721 Args:
2722 name (str): name of VM
2723 description (str): description
2724 start (bool): indicates if VM must start or boot in pause mode. Ignored
2725 image_id (str) image uuid
2726 flavor_id (str) flavor uuid
2727 affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignore if empty.
2728 net_list (list): list of interfaces, each one is a dictionary with:
2729 name: name of network
2730 net_id: network uuid to connect
2731 vpci: virtual pci address to assign, ignored because openstack lacks it #TODO
2732 model: interface model, ignored #TODO
2733 mac_address: used for SR-IOV ifaces #TODO for other types
2734 use: 'data', 'bridge', 'mgmt'
2735 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2736 vim_id: filled/added by this function
2737 floating_ip: True/False (or it can be None)
2738 port_security: True/False
2739 cloud_config (dict): (optional) dictionary with:
2740 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2741 users: (optional) list of users to be inserted, each item is a dict with:
2742 name: (mandatory) user name,
2743 key-pairs: (optional) list of strings with the public key to be inserted to the user
2744 user-data: (optional) string is a text script to be passed directly to cloud-init
2745 config-files: (optional). List of files to be transferred. Each item is a dict with:
2746 dest: (mandatory) string with the destination absolute path
2747 encoding: (optional, by default text). Can be one of:
2748 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2749 content : (mandatory) string with the content of the file
2750 permissions: (optional) string with file permissions, typically octal notation '0644'
2751 owner: (optional) file owner, string with the format 'owner:group'
2752 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2753 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2754 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2755 size: (mandatory) string with the size of the disk in GB
2756 vim_id: (optional) should use this existing volume id
2757 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AV required
2758 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2759 availability_zone_index is None
2760 #TODO ip, security groups
2761
2762 Returns:
2763 A tuple with the instance identifier and created_items or raises an exception on error
2764 created_items can be None or a dictionary where this method can include key-values that will be passed to
2765 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2766 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2767 as not present.
2768
2769 """
2770 self.logger.debug(
2771 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2772 image_id,
2773 flavor_id,
2774 str(net_list),
2775 )
2776
2777 try:
2778 server = None
2779 created_items = {}
2780 net_list_vim = []
2781 # list of external networks to be connected to instance, later on used to create floating_ip
2782 external_network = []
2783 # List of ports with port-security disabled
2784 no_secured_ports = []
2785 block_device_mapping = {}
2786 existing_vim_volumes = []
2787 server_group_id = None
2788 scheduler_hints = {}
2789
2790 # Check the Openstack Connection
2791 self._reload_connection()
2792
2793 # Prepare network list
2794 self._prepare_network_for_vminstance(
2795 name=name,
2796 net_list=net_list,
2797 created_items=created_items,
2798 net_list_vim=net_list_vim,
2799 external_network=external_network,
2800 no_secured_ports=no_secured_ports,
2801 )
2802
2803 # Cloud config
2804 config_drive, userdata = self._create_user_data(cloud_config)
2805
2806 # Get availability Zone
2807 vm_av_zone = self._get_vm_availability_zone(
2808 availability_zone_index, availability_zone_list
2809 )
2810
2811 if disk_list:
2812 # Prepare disks
2813 self._prepare_disk_for_vminstance(
2814 name=name,
2815 existing_vim_volumes=existing_vim_volumes,
2816 created_items=created_items,
2817 vm_av_zone=vm_av_zone,
2818 block_device_mapping=block_device_mapping,
2819 disk_list=disk_list,
2820 )
2821
2822 if affinity_group_list:
2823 # Only first id on the list will be used. Openstack restriction
2824 server_group_id = affinity_group_list[0]["affinity_group_id"]
2825 scheduler_hints["group"] = server_group_id
2826
2827 self.logger.debug(
2828 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2829 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2830 "block_device_mapping={}, server_group={})".format(
2831 name,
2832 image_id,
2833 flavor_id,
2834 net_list_vim,
2835 self.config.get("security_groups"),
2836 vm_av_zone,
2837 self.config.get("keypair"),
2838 userdata,
2839 config_drive,
2840 block_device_mapping,
2841 server_group_id,
2842 )
2843 )
2844 # Create VM
2845 server = self.nova.servers.create(
2846 name=name,
2847 image=image_id,
2848 flavor=flavor_id,
2849 nics=net_list_vim,
2850 security_groups=self.config.get("security_groups"),
2851 # TODO remove security_groups in future versions. Already at neutron port
2852 availability_zone=vm_av_zone,
2853 key_name=self.config.get("keypair"),
2854 userdata=userdata,
2855 config_drive=config_drive,
2856 block_device_mapping=block_device_mapping,
2857 scheduler_hints=scheduler_hints,
2858 )
2859
2860 vm_start_time = time.time()
2861
2862 self._update_port_security_for_vminstance(no_secured_ports, server)
2863
2864 self._prepare_external_network_for_vminstance(
2865 external_network=external_network,
2866 server=server,
2867 created_items=created_items,
2868 vm_start_time=vm_start_time,
2869 )
2870
2871 return server.id, created_items
2872
2873 except Exception as e:
2874 server_id = None
2875 if server:
2876 server_id = server.id
2877
2878 try:
2879 created_items = self.remove_keep_tag_from_persistent_volumes(
2880 created_items
2881 )
2882
2883 self.delete_vminstance(server_id, created_items)
2884
2885 except Exception as e2:
2886 self.logger.error("new_vminstance rollback fail {}".format(e2))
2887
2888 self._format_exception(e)
2889
2890 @staticmethod
2891 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2892 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2893
2894 Args:
2895 created_items (dict): All created items belonging to the VM
2896
2897 Returns:
2898 updated_created_items (dict): Dict which does not include keep flag for volumes.
2899
2900 """
2901 return {
2902 key.replace(":keep", ""): value for (key, value) in created_items.items()
2903 }
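# Example (hypothetical uuid): {"volume:abc:keep": True} becomes
# {"volume:abc": True}, so the rollback in new_vminstance may also delete
# volumes originally flagged to be kept.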
2904
2905 def get_vminstance(self, vm_id):
2906 """Returns the VM instance information from VIM"""
2907 return self._find_nova_server(vm_id)
2908
2909 def get_vminstance_console(self, vm_id, console_type="vnc"):
2910 """
2911 Get a console for the virtual machine
2912 Params:
2913 vm_id: uuid of the VM
2914 console_type, can be:
2915 "novnc" (by default), "xvpvnc" for VNC types,
2916 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2917 Returns dict with the console parameters:
2918 protocol: ssh, ftp, http, https, ...
2919 server: usually ip address
2920 port: the http, ssh, ... port
2921 suffix: extra text, e.g. the http path and query string
2922 """
2923 self.logger.debug("Getting VM CONSOLE from VIM")
2924
2925 try:
2926 self._reload_connection()
2927 server = self.nova.servers.find(id=vm_id)
2928
2929 if console_type is None or console_type == "novnc":
2930 console_dict = server.get_vnc_console("novnc")
2931 elif console_type == "xvpvnc":
2932 console_dict = server.get_vnc_console(console_type)
2933 elif console_type == "rdp-html5":
2934 console_dict = server.get_rdp_console(console_type)
2935 elif console_type == "spice-html5":
2936 console_dict = server.get_spice_console(console_type)
2937 else:
2938 raise vimconn.VimConnException(
2939 "console type '{}' not allowed".format(console_type),
2940 http_code=vimconn.HTTP_Bad_Request,
2941 )
2942
2943 console_dict1 = console_dict.get("console")
2944
2945 if console_dict1:
2946 console_url = console_dict1.get("url")
2947
2948 if console_url:
2949 # parse console_url
2950 protocol_index = console_url.find("//")
2951 suffix_index = (
2952 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2953 )
2954 port_index = (
2955 console_url[protocol_index + 2 : suffix_index].find(":")
2956 + protocol_index
2957 + 2
2958 )
2959
2960 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2961 return (
2962 -vimconn.HTTP_Internal_Server_Error,
2963 "Unexpected response from VIM",
2964 )
2965
2966 console_dict = {
2967 "protocol": console_url[0:protocol_index],
2968 "server": console_url[protocol_index + 2 : port_index],
2969 "port": console_url[port_index:suffix_index],
2970 "suffix": console_url[suffix_index + 1 :],
2971 }
2973
2974 return console_dict
2975 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2976 except (
2977 nvExceptions.NotFound,
2978 ksExceptions.ClientException,
2979 nvExceptions.ClientException,
2980 nvExceptions.BadRequest,
2981 ConnectionError,
2982 ) as e:
2983 self._format_exception(e)
2984
2985 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2986 """Neutron delete ports by id.
2987 Args:
2988 k_id (str): Port id in the VIM
2989 """
2990 try:
2991 port_dict = self.neutron.list_ports()
2992 existing_ports = [port["id"] for port in port_dict.get("ports", [])]
2993
2994 if k_id in existing_ports:
2995 self.neutron.delete_port(k_id)
2996
2997 except Exception as e:
2998 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2999
3000 def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
3001 """Cinder delete volume by id.
3002 Args:
3003 shared_volume_vim_id (str): ID of shared volume in VIM
3004 """
3005 elapsed_time = 0
3006 try:
3007 while elapsed_time < server_timeout:
3008 vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
3009 if vol_status == "available":
3010 self.cinder.volumes.delete(shared_volume_vim_id)
3011 return True
3012
3013 time.sleep(5)
3014 elapsed_time += 5
3015
3016 if elapsed_time >= server_timeout:
3017 raise vimconn.VimConnException(
3018 "Timeout waiting for volume "
3019 + shared_volume_vim_id
3020 + " to be available",
3021 http_code=vimconn.HTTP_Request_Timeout,
3022 )
3023
3024 except Exception as e:
3025 self.logger.error(
3026 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3027 )
3028 self._format_exception(e)
3029
3030 def _delete_volumes_by_id_wth_cinder(
3031 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
3032 ) -> bool:
3033 """Cinder delete volume by id.
3034 Args:
3035 k (str): Full item name in created_items
3036 k_id (str): ID of volume in VIM
3037 volumes_to_hold (list): Volumes not to delete
3038 created_items (dict): All created items belonging to the VM
3039 """
3040 try:
3041 if k_id in volumes_to_hold:
3042 return
3043
3044 if self.cinder.volumes.get(k_id).status != "available":
3045 return True  # still busy: tell the caller to keep waiting
3046
3047 else:
3048 self.cinder.volumes.delete(k_id)
3049 created_items[k] = None
3050
3051 except Exception as e:
3052 self.logger.error(
3053 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3054 )
3055
3056 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
3057 """Neutron delete floating ip by id.
3058 Args:
3059 k (str): Full item name in created_items
3060 k_id (str): ID of floating ip in VIM
3061 created_items (dict): All created items belonging to the VM
3062 """
3063 try:
3064 self.neutron.delete_floatingip(k_id)
3065 created_items[k] = None
3066
3067 except Exception as e:
3068 self.logger.error(
3069 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3070 )
3071
3072 @staticmethod
3073 def _get_item_name_id(k: str) -> Tuple[str, str]:
3074 k_item, _, k_id = k.partition(":")
3075 return k_item, k_id
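# Example (illustrative id): "port:0a1b" -> ("port", "0a1b"); a key without
# ":" yields ("<key>", "").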
3076
3077 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
3078 """Delete VM ports attached to the networks before deleting virtual machine.
3079 Args:
3080 created_items (dict): All created items belonging to the VM
3081 """
3082
3083 for k, v in created_items.items():
3084 if not v: # skip already deleted
3085 continue
3086
3087 try:
3088 k_item, k_id = self._get_item_name_id(k)
3089 if k_item == "port":
3090 self._delete_ports_by_id_wth_neutron(k_id)
3091
3092 except Exception as e:
3093 self.logger.error(
3094 "Error deleting port: {}: {}".format(type(e).__name__, e)
3095 )
3096
3097 def _delete_created_items(
3098 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
3099 ) -> bool:
3100 """Delete Volumes and floating ip if they exist in created_items."""
3101 for k, v in created_items.items():
3102 if not v: # skip already deleted
3103 continue
3104
3105 try:
3106 k_item, k_id = self._get_item_name_id(k)
3107 if k_item == "volume":
3108 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
3109 k, k_id, volumes_to_hold, created_items
3110 )
3111
3112 if unavailable_vol:
3113 keep_waiting = True
3114
3115 elif k_item == "floating_ip":
3116 self._delete_floating_ip_by_id(k, k_id, created_items)
3117
3118 except Exception as e:
3119 self.logger.error("Error deleting {}: {}".format(k, e))
3120
3121 return keep_waiting
3122
3123 @staticmethod
3124 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3125 """Remove the volumes which has key flag from created_items
3126
3127 Args:
3128 created_items (dict): All created items belonging to the VM
3129 
3130 Returns:
3131 created_items (dict): created_items without the volumes flagged to keep
3132 """
3133 return {
3134 key: value
3135 for (key, value) in created_items.items()
3136 if len(key.split(":")) == 2
3137 }
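# Example (hypothetical uuid): "volume:abc" (two fields) is kept, while
# "volume:abc:keep" (three fields) is dropped, so keep-flagged volumes are
# never passed to the deletion loop in delete_vminstance.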
3138
3139 def delete_vminstance(
3140 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3141 ) -> None:
3142 """Removes a VM instance from VIM. Returns the old identifier.
3143 Args:
3144 vm_id (str): Identifier of VM instance
3145 created_items (dict): All created items belongs to VM
3146 volumes_to_hold (list): Volumes_to_hold
3147 """
3148 if created_items is None:
3149 created_items = {}
3150 if volumes_to_hold is None:
3151 volumes_to_hold = []
3152
3153 try:
3154 created_items = self._extract_items_wth_keep_flag_from_created_items(
3155 created_items
3156 )
3157
3158 self._reload_connection()
3159
3160 # Delete VM ports attached to the networks before the virtual machine
3161 if created_items:
3162 self._delete_vm_ports_attached_to_network(created_items)
3163
3164 if vm_id:
3165 self.nova.servers.delete(vm_id)
3166
3167 # Although detached, volumes should be in 'available' status before deleting.
3168 # We ensure that in the loop below
3169 keep_waiting = True
3170 elapsed_time = 0
3171
3172 while keep_waiting and elapsed_time < volume_timeout:
3173 keep_waiting = False
3174
3175 # Delete volumes and floating IP.
3176 keep_waiting = self._delete_created_items(
3177 created_items, volumes_to_hold, keep_waiting
3178 )
3179
3180 if keep_waiting:
3181 time.sleep(1)
3182 elapsed_time += 1
3183
3184 except (
3185 nvExceptions.NotFound,
3186 ksExceptions.ClientException,
3187 nvExceptions.ClientException,
3188 ConnectionError,
3189 ) as e:
3190 self._format_exception(e)
3191
3192 def refresh_vms_status(self, vm_list):
3193 """Get the status of the virtual machines and their interfaces/ports
3194 Params: the list of VM identifiers
3195 Returns a dictionary with:
3196 vm_id: #VIM id of this Virtual Machine
3197 status: #Mandatory. Text with one of:
3198 # DELETED (not found at vim)
3199 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3200 # OTHER (Vim reported other status not understood)
3201 # ERROR (VIM indicates an ERROR status)
3202 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3203 # CREATING (on building process), ERROR
3204 # ACTIVE:NoMgmtIP (active but none of its interfaces has an IP address)
3205 #
3206 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3207 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3208 interfaces:
3209 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3210 mac_address: #Text format XX:XX:XX:XX:XX:XX
3211 vim_net_id: #network id where this interface is connected
3212 vim_interface_id: #interface/port VIM id
3213 ip_address: #null, or text with IPv4, IPv6 address
3214 compute_node: #identification of compute node where PF,VF interface is allocated
3215 pci: #PCI address of the NIC that hosts the PF,VF
3216 vlan: #physical VLAN used for VF
3217 """
3218 vm_dict = {}
3219 self.logger.debug(
3220 "refresh_vms status: Getting tenant VM instance information from VIM"
3221 )
3222
3223 for vm_id in vm_list:
3224 vm = {}
3225
3226 try:
3227 vm_vim = self.get_vminstance(vm_id)
3228
3229 if vm_vim["status"] in vmStatus2manoFormat:
3230 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3231 else:
3232 vm["status"] = "OTHER"
3233 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3234
3235 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3236 vm_vim.pop("user_data", None)
3237 vm["vim_info"] = self.serialize(vm_vim)
3238
3239 vm["interfaces"] = []
3240 if vm_vim.get("fault"):
3241 vm["error_msg"] = str(vm_vim["fault"])
3242
3243 # get interfaces
3244 try:
3245 self._reload_connection()
3246 port_dict = self.neutron.list_ports(device_id=vm_id)
3247
3248 for port in port_dict["ports"]:
3249 interface = {}
3250 interface["vim_info"] = self.serialize(port)
3251 interface["mac_address"] = port.get("mac_address")
3252 interface["vim_net_id"] = port["network_id"]
3253 interface["vim_interface_id"] = port["id"]
3254 # check if OS-EXT-SRV-ATTR:host is there,
3255 # in case of non-admin credentials, it will be missing
3256
3257 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3258 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3259
3260 interface["pci"] = None
3261
3262 # check if binding:profile is there,
3263 # in case of non-admin credentials, it will be missing
3264 if port.get("binding:profile"):
3265 if port["binding:profile"].get("pci_slot"):
3266 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3267 # the slot to 0x00
3268 # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
3269 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3270 pci = port["binding:profile"]["pci_slot"]
3271 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3272 interface["pci"] = pci
3273
3274 interface["vlan"] = None
3275
3276 if port.get("binding:vif_details"):
3277 interface["vlan"] = port["binding:vif_details"].get("vlan")
3278
3279 # Get vlan from the network if not present in the port, for old openstacks and
3280 # for cases where the vlan is needed at PT
3281 if not interface["vlan"]:
3282 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3283 network = self.neutron.show_network(port["network_id"])
3284
3285 if (
3286 network["network"].get("provider:network_type")
3287 == "vlan"
3288 ):
3289 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3290 interface["vlan"] = network["network"].get(
3291 "provider:segmentation_id"
3292 )
3293
3294 ips = []
3295 # look for floating ip address
3296 try:
3297 floating_ip_dict = self.neutron.list_floatingips(
3298 port_id=port["id"]
3299 )
3300
3301 if floating_ip_dict.get("floatingips"):
3302 ips.append(
3303 floating_ip_dict["floatingips"][0].get(
3304 "floating_ip_address"
3305 )
3306 )
3307 except Exception:
3308 pass
3309
3310 for subnet in port["fixed_ips"]:
3311 ips.append(subnet["ip_address"])
3312
3313 interface["ip_address"] = ";".join(ips)
3314 vm["interfaces"].append(interface)
3315 except Exception as e:
3316 self.logger.error(
3317 "Error getting vm interface information {}: {}".format(
3318 type(e).__name__, e
3319 ),
3320 exc_info=True,
3321 )
3322 except vimconn.VimConnNotFoundException as e:
3323 self.logger.error("Exception getting vm status: %s", str(e))
3324 vm["status"] = "DELETED"
3325 vm["error_msg"] = str(e)
3326 except vimconn.VimConnException as e:
3327 self.logger.error("Exception getting vm status: %s", str(e))
3328 vm["status"] = "VIM_ERROR"
3329 vm["error_msg"] = str(e)
3330
3331 vm_dict[vm_id] = vm
3332
3333 return vm_dict
3334
3335 def action_vminstance(self, vm_id, action_dict, created_items={}):
3336 """Send and action over a VM instance from VIM
3337 Returns None or the console dict if the action was successfully sent to the VIM
3338 """
3339 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3340
3341 try:
3342 self._reload_connection()
3343 server = self.nova.servers.find(id=vm_id)
3344
3345 if "start" in action_dict:
3346 if action_dict["start"] == "rebuild":
3347 server.rebuild()
3348 else:
3349 if server.status == "PAUSED":
3350 server.unpause()
3351 elif server.status == "SUSPENDED":
3352 server.resume()
3353 elif server.status == "SHUTOFF":
3354 server.start()
3355 else:
3356 self.logger.debug(
3357 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3358 )
3359 raise vimconn.VimConnException(
3360 "Cannot 'start' instance while it is in active state",
3361 http_code=vimconn.HTTP_Bad_Request,
3362 )
3363
3364 elif "pause" in action_dict:
3365 server.pause()
3366 elif "resume" in action_dict:
3367 server.resume()
3368 elif "shutoff" in action_dict or "shutdown" in action_dict:
3369 self.logger.debug("server status %s", server.status)
3370 if server.status == "ACTIVE":
3371 server.stop()
3372 else:
3373 self.logger.debug("ERROR: VM is not in Active state")
3374 raise vimconn.VimConnException(
3375 "VM is not in active state, stop operation is not allowed",
3376 http_code=vimconn.HTTP_Bad_Request,
3377 )
3378 elif "forceOff" in action_dict:
3379 server.stop() # TODO
3380 elif "terminate" in action_dict:
3381 server.delete()
3382 elif "createImage" in action_dict:
3383 server.create_image()
3384 # "path":path_schema,
3385 # "description":description_schema,
3386 # "name":name_schema,
3387 # "metadata":metadata_schema,
3388 # "imageRef": id_schema,
3389 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3390 elif "rebuild" in action_dict:
3391 server.rebuild(server.image["id"])
3392 elif "reboot" in action_dict:
3393 server.reboot() # reboot_type="SOFT"
3394 elif "console" in action_dict:
3395 console_type = action_dict["console"]
3396
3397 if console_type is None or console_type == "novnc":
3398 console_dict = server.get_vnc_console("novnc")
3399 elif console_type == "xvpvnc":
3400 console_dict = server.get_vnc_console(console_type)
3401 elif console_type == "rdp-html5":
3402 console_dict = server.get_rdp_console(console_type)
3403 elif console_type == "spice-html5":
3404 console_dict = server.get_spice_console(console_type)
3405 else:
3406 raise vimconn.VimConnException(
3407 "console type '{}' not allowed".format(console_type),
3408 http_code=vimconn.HTTP_Bad_Request,
3409 )
3410
3411 try:
3412 console_url = console_dict["console"]["url"]
3413 # parse console_url
3414 protocol_index = console_url.find("//")
3415 suffix_index = (
3416 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3417 )
3418 port_index = (
3419 console_url[protocol_index + 2 : suffix_index].find(":")
3420 + protocol_index
3421 + 2
3422 )
3423
3424 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3425 raise vimconn.VimConnException(
3426 "Unexpected response from VIM " + str(console_dict)
3427 )
3428
3429 console_dict2 = {
3430 "protocol": console_url[0:protocol_index],
3431 "server": console_url[protocol_index + 2 : port_index],
3432 "port": int(console_url[port_index + 1 : suffix_index]),
3433 "suffix": console_url[suffix_index + 1 :],
3434 }
3435
3436 return console_dict2
3437 except Exception:
3438 raise vimconn.VimConnException(
3439 "Unexpected response from VIM " + str(console_dict)
3440 )
3441
3442 return None
3443 except (
3444 ksExceptions.ClientException,
3445 nvExceptions.ClientException,
3446 nvExceptions.NotFound,
3447 ConnectionError,
3448 ) as e:
3449 self._format_exception(e)
3450 # TODO insert exception vimconn.HTTP_Unauthorized
3451
3452 # ###### VIO Specific Changes #########
3453 def _generate_vlanID(self):
3454 """
3455 Method to get unused vlanID
3456 Args:
3457 None
3458 Returns:
3459 vlanID
3460 """
3461 # Get used VLAN IDs
3462 usedVlanIDs = []
3463 networks = self.get_network_list()
3464
3465 for net in networks:
3466 if net.get("provider:segmentation_id"):
3467 usedVlanIDs.append(net.get("provider:segmentation_id"))
3468
3469 used_vlanIDs = set(usedVlanIDs)
3470
3471 # find unused VLAN ID
3472 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3473 try:
3474 start_vlanid, end_vlanid = map(
3475 int, vlanID_range.replace(" ", "").split("-")
3476 )
3477
3478 for vlanID in range(start_vlanid, end_vlanid + 1):
3479 if vlanID not in used_vlanIDs:
3480 return vlanID
3481 except Exception as exp:
3482 raise vimconn.VimConnException(
3483 "Exception {} occurred while generating VLAN ID.".format(exp)
3484 )
3485 else:
3486 raise vimconn.VimConnConflictException(
3487 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3488 self.config.get("dataplane_net_vlan_range")
3489 )
3490 )
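# Illustrative config (assumed values): with
# config["dataplane_net_vlan_range"] = ["3000-3100", "3200-3300"], the scan
# above returns the first ID in those ranges not used by any existing
# network segment.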
3491
3492 def _generate_multisegment_vlanID(self):
3493 """
3494 Method to get unused vlanID
3495 Args:
3496 None
3497 Returns:
3498 vlanID
3499 """
3500 # Get used VLAN IDs
3501 usedVlanIDs = []
3502 networks = self.get_network_list()
3503 for net in networks:
3504 if net.get("provider:network_type") == "vlan" and net.get(
3505 "provider:segmentation_id"
3506 ):
3507 usedVlanIDs.append(net.get("provider:segmentation_id"))
3508 elif net.get("segments"):
3509 for segment in net.get("segments"):
3510 if segment.get("provider:network_type") == "vlan" and segment.get(
3511 "provider:segmentation_id"
3512 ):
3513 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3514
3515 used_vlanIDs = set(usedVlanIDs)
3516
3517 # find unused VLAN ID
3518 for vlanID_range in self.config.get("multisegment_vlan_range"):
3519 try:
3520 start_vlanid, end_vlanid = map(
3521 int, vlanID_range.replace(" ", "").split("-")
3522 )
3523
3524 for vlanID in range(start_vlanid, end_vlanid + 1):
3525 if vlanID not in used_vlanIDs:
3526 return vlanID
3527 except Exception as exp:
3528 raise vimconn.VimConnException(
3529 "Exception {} occurred while generating VLAN ID.".format(exp)
3530 )
3531 else:
3532 raise vimconn.VimConnConflictException(
3533 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3534 self.config.get("multisegment_vlan_range")
3535 )
3536 )
3537
3538 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3539 """
3540 Method to validate user given vlanID ranges
3541 Args: input_vlan_range (list), text_vlan_range (str)
3542 Returns: None
3543 """
3544 for vlanID_range in input_vlan_range:
3545 vlan_range = vlanID_range.replace(" ", "")
3546 # validate format
3547 vlanID_pattern = r"(\d)*-(\d)*$"
3548 match_obj = re.match(vlanID_pattern, vlan_range)
3549 if not match_obj:
3550 raise vimconn.VimConnConflictException(
3551 "Invalid VLAN range for {}: {}.You must provide "
3552 "'{}' in format [start_ID - end_ID].".format(
3553 text_vlan_range, vlanID_range, text_vlan_range
3554 )
3555 )
3556
3557 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3558 if start_vlanid <= 0:
3559 raise vimconn.VimConnConflictException(
3560 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3561 "networks valid IDs are 1 to 4094 ".format(
3562 text_vlan_range, vlanID_range
3563 )
3564 )
3565
3566 if end_vlanid > 4094:
3567 raise vimconn.VimConnConflictException(
3568 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3569 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3570 text_vlan_range, vlanID_range
3571 )
3572 )
3573
            if start_vlanid > end_vlanid:
                raise vimconn.VimConnConflictException(
                    "Invalid VLAN range for {}: {}. You must provide '{}' "
                    "in the format start_ID-end_ID with start_ID <= end_ID.".format(
                        text_vlan_range, vlanID_range, text_vlan_range
                    )
                )
3581
3582 def get_hosts_info(self):
3583 """Get the information of deployed hosts
3584 Returns the hosts content"""
        if self.debug:
            self.logger.debug("osconnector: Getting Host info from VIM")
3587
3588 try:
3589 h_list = []
3590 self._reload_connection()
3591 hypervisors = self.nova.hypervisors.list()
3592
3593 for hype in hypervisors:
3594 h_list.append(hype.to_dict())
3595
3596 return 1, {"hosts": h_list}
3597 except nvExceptions.NotFound as e:
3598 error_value = -vimconn.HTTP_Not_Found
3599 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3600 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3601 error_value = -vimconn.HTTP_Bad_Request
3602 error_text = (
3603 type(e).__name__
3604 + ": "
3605 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3606 )
3607
3608 # TODO insert exception vimconn.HTTP_Unauthorized
        # reaching this point means one of the exceptions above was raised
3610 self.logger.debug("get_hosts_info " + error_text)
3611
3612 return error_value, error_text
3613
3614 def get_hosts(self, vim_tenant):
3615 """Get the hosts and deployed instances
3616 Returns the hosts content"""
3617 r, hype_dict = self.get_hosts_info()
3618
3619 if r < 0:
3620 return r, hype_dict
3621
3622 hypervisors = hype_dict["hosts"]
3623
3624 try:
3625 servers = self.nova.servers.list()
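            # match each server to its hypervisor through the extended (admin-only)
            # attribute OS-EXT-SRV-ATTR:hypervisor_hostname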
3626 for hype in hypervisors:
3627 for server in servers:
3628 if (
3629 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3630 == hype["hypervisor_hostname"]
3631 ):
3632 if "vm" in hype:
3633 hype["vm"].append(server.id)
3634 else:
3635 hype["vm"] = [server.id]
3636
3637 return 1, hype_dict
3638 except nvExceptions.NotFound as e:
3639 error_value = -vimconn.HTTP_Not_Found
3640 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3641 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3642 error_value = -vimconn.HTTP_Bad_Request
3643 error_text = (
3644 type(e).__name__
3645 + ": "
3646 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3647 )
3648
3649 # TODO insert exception vimconn.HTTP_Unauthorized
        # reaching this point means one of the exceptions above was raised
3651 self.logger.debug("get_hosts " + error_text)
3652
3653 return error_value, error_text
3654
3655 def new_affinity_group(self, affinity_group_data):
3656 """Adds a server group to VIM
3657 affinity_group_data contains a dictionary with information, keys:
3658 name: name in VIM for the server group
3659 type: affinity or anti-affinity
3660 scope: Only nfvi-node allowed
3661 Returns the server group identifier"""
3662 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3663
3664 try:
3665 name = affinity_group_data["name"]
3666 policy = affinity_group_data["type"]
3667
3668 self._reload_connection()
3669 new_server_group = self.nova.server_groups.create(name, policy)
3670
3671 return new_server_group.id
3672 except (
3673 ksExceptions.ClientException,
3674 nvExceptions.ClientException,
3675 ConnectionError,
3676 KeyError,
3677 ) as e:
3678 self._format_exception(e)
3679
3680 def get_affinity_group(self, affinity_group_id):
3681 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3682 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3683 try:
3684 self._reload_connection()
3685 server_group = self.nova.server_groups.find(id=affinity_group_id)
3686
3687 return server_group.to_dict()
3688 except (
3689 nvExceptions.NotFound,
3690 nvExceptions.ClientException,
3691 ksExceptions.ClientException,
3692 ConnectionError,
3693 ) as e:
3694 self._format_exception(e)
3695
3696 def delete_affinity_group(self, affinity_group_id):
3697 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
        self.logger.debug("Deleting server group '%s'", affinity_group_id)
3699 try:
3700 self._reload_connection()
3701 self.nova.server_groups.delete(affinity_group_id)
3702
3703 return affinity_group_id
3704 except (
3705 nvExceptions.NotFound,
3706 ksExceptions.ClientException,
3707 nvExceptions.ClientException,
3708 ConnectionError,
3709 ) as e:
3710 self._format_exception(e)
3711
3712 def get_vdu_state(self, vm_id, host_is_required=False) -> list:
3713 """Getting the state of a VDU.
3714 Args:
3715 vm_id (str): ID of an instance
3716 host_is_required (Boolean): If the VIM account is non-admin, host info does not appear in server_dict
3717 and if this is set to True, it raises KeyError.
3718 Returns:
3719 vdu_data (list): VDU details including state, flavor, host_info, AZ
3720 """
        self.logger.debug("Getting the status of VM with VIM id %s", vm_id)
3723 try:
3724 self._reload_connection()
3725 server_dict = self._find_nova_server(vm_id)
3726 srv_attr = "OS-EXT-SRV-ATTR:host"
3727 host_info = (
3728 server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
3729 )
3730 vdu_data = [
3731 server_dict["status"],
3732 server_dict["flavor"]["id"],
3733 host_info,
3734 server_dict["OS-EXT-AZ:availability_zone"],
3735 ]
3736 self.logger.debug("vdu_data %s", vdu_data)
3737 return vdu_data
3738
3739 except Exception as e:
3740 self._format_exception(e)
3741
3742 def check_compute_availability(self, host, server_flavor_details):
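        """Check whether the given host has enough free RAM, disk and vCPUs for a
        server with the given flavor details ([ram_mb, disk_gb, vcpus]); returns
        the host when it does, None otherwise."""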
3743 self._reload_connection()
3744 hypervisor_search = self.nova.hypervisors.search(
3745 hypervisor_match=host, servers=True
3746 )
3747 for hypervisor in hypervisor_search:
3748 hypervisor_id = hypervisor.to_dict()["id"]
3749 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
            # to_dict() already yields a plain dict, so no JSON round trip is needed
            hypervisor_json = hypervisor_details.to_dict()
3753 resources_available = [
3754 hypervisor_json["free_ram_mb"],
3755 hypervisor_json["disk_available_least"],
3756 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
3757 ]
3758 compute_available = all(
3759 x > y for x, y in zip(resources_available, server_flavor_details)
3760 )
3761 if compute_available:
3762 return host
3763
3764 def check_availability_zone(
3765 self, old_az, server_flavor_details, old_host, host=None
3766 ):
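        """Check whether the availability zone of the instance can still host it:
        on the explicitly requested host when one is given, otherwise on any host
        of the zone other than the current one.
        Returns a dict {"zone_check": bool, "compute_availability": host or None}."""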
3767 self._reload_connection()
3768 az_check = {"zone_check": False, "compute_availability": None}
3769 aggregates_list = self.nova.aggregates.list()
3770 for aggregate in aggregates_list:
            # to_dict() already yields a plain dict, so no JSON round trip is needed
            aggregate_json = aggregate.to_dict()
3774 if aggregate_json["availability_zone"] == old_az:
3775 hosts_list = aggregate_json["hosts"]
3776 if host is not None:
3777 if host in hosts_list:
3778 az_check["zone_check"] = True
3779 available_compute_id = self.check_compute_availability(
3780 host, server_flavor_details
3781 )
3782 if available_compute_id is not None:
3783 az_check["compute_availability"] = available_compute_id
3784 else:
3785 for check_host in hosts_list:
3786 if check_host != old_host:
3787 available_compute_id = self.check_compute_availability(
3788 check_host, server_flavor_details
3789 )
3790 if available_compute_id is not None:
3791 az_check["zone_check"] = True
3792 az_check["compute_availability"] = available_compute_id
3793 break
3794 else:
3795 az_check["zone_check"] = True
3796 return az_check
3797
3798 def migrate_instance(self, vm_id, compute_host=None):
3799 """
        Migrate a vdu to another compute host
        param:
            vm_id: ID of an instance
            compute_host: host to migrate the vdu to; if None, a suitable host in the
                same availability zone is chosen automatically
        Returns the migration state and the target compute host
3804 """
3805 self._reload_connection()
3806 vm_state = False
3807 instance_state = self.get_vdu_state(vm_id, host_is_required=True)
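        # get_vdu_state returns [status, flavor id, host, availability zone]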
3808 server_flavor_id = instance_state[1]
3809 server_hypervisor_name = instance_state[2]
3810 server_availability_zone = instance_state[3]
3811 try:
3812 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3813 server_flavor_details = [
3814 server_flavor["ram"],
3815 server_flavor["disk"],
3816 server_flavor["vcpus"],
3817 ]
3818 if compute_host == server_hypervisor_name:
3819 raise vimconn.VimConnException(
3820 "Unable to migrate instance '{}' to the same host '{}'".format(
3821 vm_id, compute_host
3822 ),
3823 http_code=vimconn.HTTP_Bad_Request,
3824 )
3825 az_status = self.check_availability_zone(
3826 server_availability_zone,
3827 server_flavor_details,
3828 server_hypervisor_name,
3829 compute_host,
3830 )
3831 availability_zone_check = az_status["zone_check"]
3832 available_compute_id = az_status.get("compute_availability")
3833
3834 if availability_zone_check is False:
3835 raise vimconn.VimConnException(
3836 "Unable to migrate instance '{}' to a different availability zone".format(
3837 vm_id
3838 ),
3839 http_code=vimconn.HTTP_Bad_Request,
3840 )
3841 if available_compute_id is not None:
3842 # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
3843 self.nova.servers.live_migrate(
3844 server=vm_id,
3845 host=available_compute_id,
3846 block_migration=True,
3847 )
                state = "MIGRATING"
                # live_migrate is asynchronous: wait for the server to become ACTIVE
                # again, then verify that it actually landed on the requested host
                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                changed_compute_host = self.get_vdu_state(
                    vm_id, host_is_required=True
                )[2]
3855 if vm_state and changed_compute_host == available_compute_id:
3856 self.logger.debug(
3857 "Instance '{}' migrated to the new compute host '{}'".format(
3858 vm_id, changed_compute_host
3859 )
3860 )
3861 return state, available_compute_id
3862 else:
3863 raise vimconn.VimConnException(
3864 "Migration Failed. Instance '{}' not moved to the new host {}".format(
3865 vm_id, available_compute_id
3866 ),
3867 http_code=vimconn.HTTP_Bad_Request,
3868 )
3869 else:
3870 raise vimconn.VimConnException(
3871 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
3872 available_compute_id
3873 ),
3874 http_code=vimconn.HTTP_Bad_Request,
3875 )
3876 except (
3877 nvExceptions.BadRequest,
3878 nvExceptions.ClientException,
3879 nvExceptions.NotFound,
3880 ) as e:
3881 self._format_exception(e)
3882
3883 def resize_instance(self, vm_id, new_flavor_id):
3884 """
        Resize an instance to the given flavor
        param:
            vm_id: ID of an instance
            new_flavor_id: ID of the flavor to resize the instance to
        Returns the status of the resized instance
3891 """
3892 self._reload_connection()
        self.logger.debug("Resizing instance %s to flavor %s", vm_id, new_flavor_id)
3894 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
3895 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
3896 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
3897 try:
            if instance_status in ("ACTIVE", "SHUTOFF"):
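                # Nova does not support resizing to a flavor with a smaller disk, so fail early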
3899 if old_flavor_disk > new_flavor_disk:
3900 raise nvExceptions.BadRequest(
3901 400,
3902 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
3903 )
3904 else:
3905 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
3906 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
3907 if vm_state:
3908 instance_resized_status = self.confirm_resize(vm_id)
3909 return instance_resized_status
3910 else:
                        raise nvExceptions.BadRequest(
                            409,
                            message="Cannot 'resize': vm_state is in ERROR",
                        )
3915
3916 else:
                self.logger.debug("ERROR: Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.BadRequest(
                    409,
                    message="Cannot 'resize' instance: it is not in ACTIVE or SHUTOFF state",
                )
3922 except (
3923 nvExceptions.BadRequest,
3924 nvExceptions.ClientException,
3925 nvExceptions.NotFound,
3926 ) as e:
3927 self._format_exception(e)
3928
3929 def confirm_resize(self, vm_id):
3930 """
3931 Confirm the resize of an instance
3932 param:
3933 vm_id: ID of an instance
3934 """
3935 self._reload_connection()
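        # a resized server remains in VERIFY_RESIZE until the resize is confirmed;
        # once confirmed, wait for it to become ACTIVE again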
3936 self.nova.servers.confirm_resize(server=vm_id)
3937 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3938 self.__wait_for_vm(vm_id, "ACTIVE")
3939 instance_status = self.get_vdu_state(vm_id)[0]
3940 return instance_status
3941
3942 def get_monitoring_data(self):
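        """Get all servers (detailed) and all ports from the VIM for monitoring.
        Returns:
            all_servers (list), all_ports (dict)
        """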
3943 try:
            self.logger.debug("Getting servers and ports data from OpenStack VIM.")
3945 self._reload_connection()
3946 all_servers = self.nova.servers.list(detailed=True)
3947 try:
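                # with Nova API microversion >= 2.47 a server embeds its flavor
                # details but not the flavor id; recover the id from the flavor name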
3948 for server in all_servers:
3949 server.flavor["id"] = self.nova.flavors.find(
3950 name=server.flavor["original_name"]
3951 ).id
            except nvExceptions.NotFound as e:
                self.logger.warning(str(e))
3954 all_ports = self.neutron.list_ports()
3955 return all_servers, all_ports
3956 except (
3957 vimconn.VimConnException,
3958 vimconn.VimConnNotFoundException,
3959 vimconn.VimConnConnectionException,
3960 ) as e:
3961 raise vimconn.VimConnException(
3962 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
3963 )