Fixes multiattach issues in attaching and deletion
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python clients (keystone, nova, neutron, glance and cinder).
23
24 For the VNF forwarding graph, the OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
59 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 __date__ = "$22-sep-2017 23:59:59$"
61
62 """contain the openstack virtual machine status to openmano status"""
63 vmStatus2manoFormat = {
64 "ACTIVE": "ACTIVE",
65 "PAUSED": "PAUSED",
66 "SUSPENDED": "SUSPENDED",
67 "SHUTOFF": "INACTIVE",
68 "BUILD": "BUILD",
69 "ERROR": "ERROR",
70 "DELETED": "DELETED",
71 }
72 netStatus2manoFormat = {
73 "ACTIVE": "ACTIVE",
74 "PAUSED": "PAUSED",
75 "INACTIVE": "INACTIVE",
76 "BUILD": "BUILD",
77 "ERROR": "ERROR",
78 "DELETED": "DELETED",
79 }
80
81 supportedClassificationTypes = ["legacy_flow_classifier"]
82
83 # global var to have a timeout creating and deleting volumes
84 volume_timeout = 1800
85 server_timeout = 1800
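# Both timeouts are in seconds (30 minutes); pollers such as __wait_for_vm()
# below re-check the resource status every 5 seconds until the limit is hit.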
86
87
88 class SafeDumper(yaml.SafeDumper):
89 def represent_data(self, data):
90 # Openstack APIs use custom subclasses of dict and the YAML safe dumper
91 # is designed not to handle them (see pyyaml issue 142)
92 if isinstance(data, dict) and data.__class__ != dict:
93 # A simple solution is to convert those items back to dicts
94 data = dict(data.items())
95
96 return super(SafeDumper, self).represent_data(data)
97
98
99 class vimconnector(vimconn.VimConnector):
100 def __init__(
101 self,
102 uuid,
103 name,
104 tenant_id,
105 tenant_name,
106 url,
107 url_admin=None,
108 user=None,
109 passwd=None,
110 log_level=None,
111 config={},
112 persistent_info={},
113 ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not used
117 """
118 api_version = config.get("APIversion")
119
120 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124 )
125
126 vim_type = config.get("vim_type")
127
128 if vim_type and vim_type not in ("vio", "VIO"):
129 raise vimconn.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132 )
133
134 if config.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self._validate_vlan_ranges(
137 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138 )
139
140 if config.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self._validate_vlan_ranges(
143 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144 )
145
146 vimconn.VimConnector.__init__(
147 self,
148 uuid,
149 name,
150 tenant_id,
151 tenant_name,
152 url,
153 url_admin,
154 user,
155 passwd,
156 log_level,
157 config,
158 )
159
160 if self.config.get("insecure") and self.config.get("ca_cert"):
161 raise vimconn.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
163 )
164
165 self.verify = True
166
167 if self.config.get("insecure"):
168 self.verify = False
169
170 if self.config.get("ca_cert"):
171 self.verify = self.config.get("ca_cert")
172
173 if not url:
174 raise TypeError("url param can not be NoneType")
175
176 self.persistent_info = persistent_info
177 self.availability_zone = persistent_info.get("availability_zone", None)
178 self.session = persistent_info.get("session", {"reload_client": True})
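# persistent_info outlives a single call, so the authenticated clients cached
# in it are reused across operations; "reload_client" forces
# _reload_connection() to rebuild them (it is set by __setitem__ when
# credentials change).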
179 self.my_tenant_id = self.session.get("my_tenant_id")
180 self.nova = self.session.get("nova")
181 self.neutron = self.session.get("neutron")
182 self.cinder = self.session.get("cinder")
183 self.glance = self.session.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self.keystone = self.session.get("keystone")
186 self.api_version3 = self.session.get("api_version3")
187 self.vim_type = self.config.get("vim_type")
188
189 if self.vim_type:
190 self.vim_type = self.vim_type.upper()
191
192 if self.config.get("use_internal_endpoint"):
193 self.endpoint_type = "internalURL"
194 else:
195 self.endpoint_type = None
196
197 logging.getLogger("urllib3").setLevel(logging.WARNING)
198 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 logging.getLogger("novaclient").setLevel(logging.WARNING)
200 self.logger = logging.getLogger("ro.vim.openstack")
201
202 # allow security_groups to be a list or a single string
203 if isinstance(self.config.get("security_groups"), str):
204 self.config["security_groups"] = [self.config["security_groups"]]
205
206 self.security_groups_id = None
207
208 # ###### VIO Specific Changes #########
209 if self.vim_type == "VIO":
210 self.logger = logging.getLogger("ro.vim.vio")
211
212 if log_level:
213 self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individuals parameters.
217 Throw KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
227 Throw KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
260 def _reload_connection(self):
261 """Called before any operation, it check if credentials has changed
262 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
263 """
264 # TODO control the timing and possible token timeout, but it seems that python client does this task for us :-)
265 if self.session["reload_client"]:
266 if self.config.get("APIversion"):
267 self.api_version3 = (
268 self.config["APIversion"] == "v3.3"
269 or self.config["APIversion"] == "3"
270 )
271 else: # get from ending auth_url that end with v3 or with v2.0
272 self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
273 "/v3/"
274 )
275
276 self.session["api_version3"] = self.api_version3
277
278 if self.api_version3:
279 if self.config.get("project_domain_id") or self.config.get(
280 "project_domain_name"
281 ):
282 project_domain_id_default = None
283 else:
284 project_domain_id_default = "default"
285
286 if self.config.get("user_domain_id") or self.config.get(
287 "user_domain_name"
288 ):
289 user_domain_id_default = None
290 else:
291 user_domain_id_default = "default"
292 auth = v3.Password(
293 auth_url=self.url,
294 username=self.user,
295 password=self.passwd,
296 project_name=self.tenant_name,
297 project_id=self.tenant_id,
298 project_domain_id=self.config.get(
299 "project_domain_id", project_domain_id_default
300 ),
301 user_domain_id=self.config.get(
302 "user_domain_id", user_domain_id_default
303 ),
304 project_domain_name=self.config.get("project_domain_name"),
305 user_domain_name=self.config.get("user_domain_name"),
306 )
307 else:
308 auth = v2.Password(
309 auth_url=self.url,
310 username=self.user,
311 password=self.passwd,
312 tenant_name=self.tenant_name,
313 tenant_id=self.tenant_id,
314 )
315
316 sess = session.Session(auth=auth, verify=self.verify)
317 # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318 # Titanium cloud and StarlingX
319 region_name = self.config.get("region_name")
320
321 if self.api_version3:
322 self.keystone = ksClient_v3.Client(
323 session=sess,
324 endpoint_type=self.endpoint_type,
325 region_name=region_name,
326 )
327 else:
328 self.keystone = ksClient_v2.Client(
329 session=sess, endpoint_type=self.endpoint_type
330 )
331
332 self.session["keystone"] = self.keystone
333 # In order to enable microversion functionality an explicit microversion must be specified in "config".
334 # This implementation approach is due to the warning message in
335 # https://developer.openstack.org/api-guide/compute/microversions.html
336 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337 # always require a specific microversion.
338 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
339 version = self.config.get("microversion")
340
341 if not version:
342 version = "2.60"
343
344 # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345 # Titanium cloud and StarlingX
346 self.nova = self.session["nova"] = nClient.Client(
347 str(version),
348 session=sess,
349 endpoint_type=self.endpoint_type,
350 region_name=region_name,
351 )
352 self.neutron = self.session["neutron"] = neClient.Client(
353 "2.0",
354 session=sess,
355 endpoint_type=self.endpoint_type,
356 region_name=region_name,
357 )
358
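# Pick the Cinder API version from what the service catalog actually
# advertises: use v2 only when a "volumev2" endpoint is published, and fall
# back to v3 (the default in current OpenStack releases) otherwise.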
359 if sess.get_all_version_data(service_type="volumev2"):
360 self.cinder = self.session["cinder"] = cClient.Client(
361 2,
362 session=sess,
363 endpoint_type=self.endpoint_type,
364 region_name=region_name,
365 )
366 else:
367 self.cinder = self.session["cinder"] = cClient.Client(
368 3,
369 session=sess,
370 endpoint_type=self.endpoint_type,
371 region_name=region_name,
372 )
373
374 try:
375 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
376 except Exception:
377 self.logger.error("Cannot get project_id from session", exc_info=True)
378
379 if self.endpoint_type == "internalURL":
380 glance_service_id = self.keystone.services.list(name="glance")[0].id
381 glance_endpoint = self.keystone.endpoints.list(
382 glance_service_id, interface="internal"
383 )[0].url
384 else:
385 glance_endpoint = None
386
387 self.glance = self.session["glance"] = glClient.Client(
388 2, session=sess, endpoint=glance_endpoint
389 )
390 # using version 1 of glance client in new_image()
391 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
392 # endpoint=glance_endpoint)
393 self.session["reload_client"] = False
394 self.persistent_info["session"] = self.session
395 # add availability zone info inside self.persistent_info
396 self._set_availablity_zones()
397 self.persistent_info["availability_zone"] = self.availability_zone
398 # force to get again security_groups_ids next time they are needed
399 self.security_groups_id = None
400
401 def __net_os2mano(self, net_list_dict):
402 """Transform the net openstack format to mano format
403 net_list_dict can be a list of dict or a single dict"""
404 if type(net_list_dict) is dict:
405 net_list_ = (net_list_dict,)
406 elif type(net_list_dict) is list:
407 net_list_ = net_list_dict
408 else:
409 raise TypeError("param net_list_dict must be a list or a dictionary")
410 for net in net_list_:
411 if net.get("provider:network_type") == "vlan":
412 net["type"] = "data"
413 else:
414 net["type"] = "bridge"
415
416 def __classification_os2mano(self, class_list_dict):
417 """Transform the openstack format (Flow Classifier) to mano format
418 (Classification) class_list_dict can be a list of dict or a single dict
419 """
420 if isinstance(class_list_dict, dict):
421 class_list_ = [class_list_dict]
422 elif isinstance(class_list_dict, list):
423 class_list_ = class_list_dict
424 else:
425 raise TypeError("param class_list_dict must be a list or a dictionary")
426 for classification in class_list_:
427 id = classification.pop("id")
428 name = classification.pop("name")
429 description = classification.pop("description")
430 project_id = classification.pop("project_id")
431 tenant_id = classification.pop("tenant_id")
432 original_classification = copy.deepcopy(classification)
433 classification.clear()
434 classification["ctype"] = "legacy_flow_classifier"
435 classification["definition"] = original_classification
436 classification["id"] = id
437 classification["name"] = name
438 classification["description"] = description
439 classification["project_id"] = project_id
440 classification["tenant_id"] = tenant_id
441
442 def __sfi_os2mano(self, sfi_list_dict):
443 """Transform the openstack format (Port Pair) to mano format (SFI)
444 sfi_list_dict can be a list of dict or a single dict
445 """
446 if isinstance(sfi_list_dict, dict):
447 sfi_list_ = [sfi_list_dict]
448 elif isinstance(sfi_list_dict, list):
449 sfi_list_ = sfi_list_dict
450 else:
451 raise TypeError("param sfi_list_dict must be a list or a dictionary")
452
453 for sfi in sfi_list_:
454 sfi["ingress_ports"] = []
455 sfi["egress_ports"] = []
456
457 if sfi.get("ingress"):
458 sfi["ingress_ports"].append(sfi["ingress"])
459
460 if sfi.get("egress"):
461 sfi["egress_ports"].append(sfi["egress"])
462
463 del sfi["ingress"]
464 del sfi["egress"]
465 params = sfi.get("service_function_parameters")
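# networking-sfc signals SFC encapsulation support via the "correlation"
# key of service_function_parameters (e.g. "mpls" or "nsh"); OSM flattens
# this into the boolean sfc_encap.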
466 sfc_encap = False
467
468 if params:
469 correlation = params.get("correlation")
470
471 if correlation:
472 sfc_encap = True
473
474 sfi["sfc_encap"] = sfc_encap
475 del sfi["service_function_parameters"]
476
477 def __sf_os2mano(self, sf_list_dict):
478 """Transform the openstack format (Port Pair Group) to mano format (SF)
479 sf_list_dict can be a list of dict or a single dict
480 """
481 if isinstance(sf_list_dict, dict):
482 sf_list_ = [sf_list_dict]
483 elif isinstance(sf_list_dict, list):
484 sf_list_ = sf_list_dict
485 else:
486 raise TypeError("param sf_list_dict must be a list or a dictionary")
487
488 for sf in sf_list_:
489 del sf["port_pair_group_parameters"]
490 sf["sfis"] = sf["port_pairs"]
491 del sf["port_pairs"]
492
493 def __sfp_os2mano(self, sfp_list_dict):
494 """Transform the openstack format (Port Chain) to mano format (SFP)
495 sfp_list_dict can be a list of dict or a single dict
496 """
497 if isinstance(sfp_list_dict, dict):
498 sfp_list_ = [sfp_list_dict]
499 elif isinstance(sfp_list_dict, list):
500 sfp_list_ = sfp_list_dict
501 else:
502 raise TypeError("param sfp_list_dict must be a list or a dictionary")
503
504 for sfp in sfp_list_:
505 params = sfp.pop("chain_parameters")
506 sfc_encap = False
507
508 if params:
509 correlation = params.get("correlation")
510
511 if correlation:
512 sfc_encap = True
513
514 sfp["sfc_encap"] = sfc_encap
515 sfp["spi"] = sfp.pop("chain_id")
516 sfp["classifications"] = sfp.pop("flow_classifiers")
517 sfp["service_functions"] = sfp.pop("port_pair_groups")
518
519 # placeholder for now; read TODO note below
520 def _validate_classification(self, type, definition):
521 # only legacy_flow_classifier Type is supported at this point
522 return True
523 # TODO(igordcard): this method should be an abstract method of an
524 # abstract Classification class to be implemented by the specific
525 # Types. Also, abstract vimconnector should call the validation
526 # method before the implemented VIM connectors are called.
527
528 def _format_exception(self, exception):
529 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
530 message_error = str(exception)
531 tip = ""
532
533 if isinstance(
534 exception,
535 (
536 neExceptions.NetworkNotFoundClient,
537 nvExceptions.NotFound,
538 ksExceptions.NotFound,
539 gl1Exceptions.HTTPNotFound,
540 ),
541 ):
542 raise vimconn.VimConnNotFoundException(
543 type(exception).__name__ + ": " + message_error
544 )
545 elif isinstance(
546 exception,
547 (
548 HTTPException,
549 gl1Exceptions.HTTPException,
550 gl1Exceptions.CommunicationError,
551 ConnectionError,
552 ksExceptions.ConnectionError,
553 neExceptions.ConnectionFailed,
554 ),
555 ):
556 if type(exception).__name__ == "SSLError":
557 tip = " (maybe option 'insecure' must be added to the VIM)"
558
559 raise vimconn.VimConnConnectionException(
560 "Invalid URL or credentials{}: {}".format(tip, message_error)
561 )
562 elif isinstance(
563 exception,
564 (
565 KeyError,
566 nvExceptions.BadRequest,
567 ksExceptions.BadRequest,
568 ),
569 ):
570 raise vimconn.VimConnException(
571 type(exception).__name__ + ": " + message_error
572 )
573 elif isinstance(
574 exception,
575 (
576 nvExceptions.ClientException,
577 ksExceptions.ClientException,
578 neExceptions.NeutronException,
579 ),
580 ):
581 raise vimconn.VimConnUnexpectedResponse(
582 type(exception).__name__ + ": " + message_error
583 )
584 elif isinstance(exception, nvExceptions.Conflict):
585 raise vimconn.VimConnConflictException(
586 type(exception).__name__ + ": " + message_error
587 )
588 elif isinstance(exception, vimconn.VimConnException):
589 raise exception
590 else: # ()
591 self.logger.error("General Exception " + message_error, exc_info=True)
592
593 raise vimconn.VimConnConnectionException(
594 type(exception).__name__ + ": " + message_error
595 )
596
597 def _get_ids_from_name(self):
598 """
599 Obtain ids from tenant and security_groups names. Store them in self.security_groups_id.
600 :return: None
601 """
602 # get tenant_id if only tenant_name is supplied
603 self._reload_connection()
604
605 if not self.my_tenant_id:
606 raise vimconn.VimConnConnectionException(
607 "Error getting tenant information from name={} id={}".format(
608 self.tenant_name, self.tenant_id
609 )
610 )
611
612 if self.config.get("security_groups") and not self.security_groups_id:
613 # convert from name to id
614 neutron_sg_list = self.neutron.list_security_groups(
615 tenant_id=self.my_tenant_id
616 )["security_groups"]
617
618 self.security_groups_id = []
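# each configured security group may be given by name or by id; the inner
# for/else below raises only when no Neutron security group matches either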
619 for sg in self.config.get("security_groups"):
620 for neutron_sg in neutron_sg_list:
621 if sg in (neutron_sg["id"], neutron_sg["name"]):
622 self.security_groups_id.append(neutron_sg["id"])
623 break
624 else:
625 self.security_groups_id = None
626
627 raise vimconn.VimConnConnectionException(
628 "Not found security group {} for this tenant".format(sg)
629 )
630
631 def _find_nova_server(self, vm_id):
632 """
633 Returns the VM instance from Openstack and completes it with flavor ID
634 Do not use nova.servers.find directly: with microversion>=2.47 it does not return the flavor ID, so it is resolved here by flavor name.
635 """
636 try:
637 self._reload_connection()
638 server = self.nova.servers.find(id=vm_id)
639 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
640 server_dict = server.to_dict()
641 try:
642 server_dict["flavor"]["id"] = self.nova.flavors.find(
643 name=server_dict["flavor"]["original_name"]
644 ).id
645 except nvExceptions.NotFound as e:
646 self.logger.warning(str(e))
647 return server_dict
648 except (
649 ksExceptions.ClientException,
650 nvExceptions.ClientException,
651 nvExceptions.NotFound,
652 ConnectionError,
653 ) as e:
654 self._format_exception(e)
655
656 def check_vim_connectivity(self):
657 # just get network list to check connectivity and credentials
658 self.get_network_list(filter_dict={})
659
660 def get_tenant_list(self, filter_dict={}):
661 """Obtain tenants of VIM
662 filter_dict can contain the following keys:
663 name: filter by tenant name
664 id: filter by tenant uuid/id
665 <other VIM specific>
666 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
667 """
668 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
669
670 try:
671 self._reload_connection()
672
673 if self.api_version3:
674 project_class_list = self.keystone.projects.list(
675 name=filter_dict.get("name")
676 )
677 else:
678 project_class_list = self.keystone.tenants.findall(**filter_dict)
679
680 project_list = []
681
682 for project in project_class_list:
683 if filter_dict.get("id") and filter_dict["id"] != project.id:
684 continue
685
686 project_list.append(project.to_dict())
687
688 return project_list
689 except (
690 ksExceptions.ConnectionError,
691 ksExceptions.ClientException,
692 ConnectionError,
693 ) as e:
694 self._format_exception(e)
695
696 def new_tenant(self, tenant_name, tenant_description):
697 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
698 self.logger.debug("Adding a new tenant name: %s", tenant_name)
699
700 try:
701 self._reload_connection()
702
703 if self.api_version3:
704 project = self.keystone.projects.create(
705 tenant_name,
706 self.config.get("project_domain_id", "default"),
707 description=tenant_description,
708 is_domain=False,
709 )
710 else:
711 project = self.keystone.tenants.create(tenant_name, tenant_description)
712
713 return project.id
714 except (
715 ksExceptions.ConnectionError,
716 ksExceptions.ClientException,
717 ksExceptions.BadRequest,
718 ConnectionError,
719 ) as e:
720 self._format_exception(e)
721
722 def delete_tenant(self, tenant_id):
723 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
724 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
725
726 try:
727 self._reload_connection()
728
729 if self.api_version3:
730 self.keystone.projects.delete(tenant_id)
731 else:
732 self.keystone.tenants.delete(tenant_id)
733
734 return tenant_id
735 except (
736 ksExceptions.ConnectionError,
737 ksExceptions.ClientException,
738 ksExceptions.NotFound,
739 ConnectionError,
740 ) as e:
741 self._format_exception(e)
742
743 def new_network(
744 self,
745 net_name,
746 net_type,
747 ip_profile=None,
748 shared=False,
749 provider_network_profile=None,
750 ):
751 """Adds a tenant network to VIM
752 Params:
753 'net_name': name of the network
754 'net_type': one of:
755 'bridge': overlay isolated network
756 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
757 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
758 'ip_profile': is a dict containing the IP parameters of the network
759 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
760 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
761 'gateway_address': (Optional) ip_schema, that is X.X.X.X
762 'dns_address': (Optional) semicolon separated list of ip_schema, e.g. X.X.X.X[;X.X.X.X]
763 'dhcp_enabled': True or False
764 'dhcp_start_address': ip_schema, first IP to grant
765 'dhcp_count': number of IPs to grant.
766 'shared': if this network can be seen/use by other tenants/organization
767 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
768 physical-network: physnet-label}
769 Returns a tuple with the network identifier and created_items, or raises an exception on error
770 created_items can be None or a dictionary where this method can include key-values that will be passed to
771 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
772 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
773 as not present.
774 """
775 self.logger.debug(
776 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
777 )
778 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
779
# initialised before the try block so the exception handler below can
# always reference new_net and created_items
new_net = None
created_items = {}
780 try:
781 vlan = None
782
783 if provider_network_profile:
784 vlan = provider_network_profile.get("segmentation-id")
785
788 self._reload_connection()
789 network_dict = {"name": net_name, "admin_state_up": True}
790
791 if net_type in ("data", "ptp") or provider_network_profile:
792 provider_physical_network = None
793
794 if provider_network_profile and provider_network_profile.get(
795 "physical-network"
796 ):
797 provider_physical_network = provider_network_profile.get(
798 "physical-network"
799 )
800
801 # provider-network must be one of the dataplane_physical_net values if this is a list. If it is a string
802 # or not declared, just skip the check
803 if (
804 isinstance(
805 self.config.get("dataplane_physical_net"), (tuple, list)
806 )
807 and provider_physical_network
808 not in self.config["dataplane_physical_net"]
809 ):
810 raise vimconn.VimConnConflictException(
811 "Invalid parameter 'provider-network:physical-network' "
812 "for network creation. '{}' is not one of the declared "
813 "list at VIM_config:dataplane_physical_net".format(
814 provider_physical_network
815 )
816 )
817
818 # use the default dataplane_physical_net
819 if not provider_physical_network:
820 provider_physical_network = self.config.get(
821 "dataplane_physical_net"
822 )
823
824 # if it is a non-empty list, use the first value; if it is a string, use the value directly
825 if (
826 isinstance(provider_physical_network, (tuple, list))
827 and provider_physical_network
828 ):
829 provider_physical_network = provider_physical_network[0]
830
831 if not provider_physical_network:
832 raise vimconn.VimConnConflictException(
833 "missing information needed for underlay networks. Provide "
834 "'dataplane_physical_net' configuration at VIM or use the NS "
835 "instantiation parameter 'provider-network.physical-network'"
836 " for the VLD"
837 )
838
839 if not self.config.get("multisegment_support"):
840 network_dict[
841 "provider:physical_network"
842 ] = provider_physical_network
843
844 if (
845 provider_network_profile
846 and "network-type" in provider_network_profile
847 ):
848 network_dict[
849 "provider:network_type"
850 ] = provider_network_profile["network-type"]
851 else:
852 network_dict["provider:network_type"] = self.config.get(
853 "dataplane_network_type", "vlan"
854 )
855
856 if vlan:
857 network_dict["provider:segmentation_id"] = vlan
858 else:
859 # Multi-segment case
860 segment_list = []
861 segment1_dict = {
862 "provider:physical_network": "",
863 "provider:network_type": "vxlan",
864 }
865 segment_list.append(segment1_dict)
866 segment2_dict = {
867 "provider:physical_network": provider_physical_network,
868 "provider:network_type": "vlan",
869 }
870
871 if vlan:
# keep vlanID defined for the optional l2gw connection block below
vlanID = vlan
872 segment2_dict["provider:segmentation_id"] = vlanID
873 elif self.config.get("multisegment_vlan_range"):
874 vlanID = self._generate_multisegment_vlanID()
875 segment2_dict["provider:segmentation_id"] = vlanID
876
877 # else
878 # raise vimconn.VimConnConflictException(
879 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
880 # network")
881 segment_list.append(segment2_dict)
882 network_dict["segments"] = segment_list
883
884 # VIO Specific Changes. It needs a concrete VLAN
885 if self.vim_type == "VIO" and vlan is None:
886 if self.config.get("dataplane_net_vlan_range") is None:
887 raise vimconn.VimConnConflictException(
888 "You must provide 'dataplane_net_vlan_range' in format "
889 "[start_ID - end_ID] at VIM_config for creating underlay "
890 "networks"
891 )
892
893 network_dict["provider:segmentation_id"] = self._generate_vlanID()
894
895 network_dict["shared"] = shared
896
897 if self.config.get("disable_network_port_security"):
898 network_dict["port_security_enabled"] = False
899
900 if self.config.get("neutron_availability_zone_hints"):
901 hints = self.config.get("neutron_availability_zone_hints")
902
903 if isinstance(hints, str):
904 hints = [hints]
905
906 network_dict["availability_zone_hints"] = hints
907
908 new_net = self.neutron.create_network({"network": network_dict})
909 # print new_net
910 # create subnetwork, even if there is no profile
911
912 if not ip_profile:
913 ip_profile = {}
914
915 if not ip_profile.get("subnet_address"):
916 # Fake subnet is required
917 subnet_rand = random.SystemRandom().randint(0, 255)
918 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
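# a subnet is created even when the caller provides no IP profile (see the
# comment above); the random third octet merely reduces the chance of CIDR
# collisions between such fake subnets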
919
920 if "ip_version" not in ip_profile:
921 ip_profile["ip_version"] = "IPv4"
922
923 subnet = {
924 "name": net_name + "-subnet",
925 "network_id": new_net["network"]["id"],
926 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
927 "cidr": ip_profile["subnet_address"],
928 }
929
930 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
931 if ip_profile.get("gateway_address"):
932 subnet["gateway_ip"] = ip_profile["gateway_address"]
933 else:
934 subnet["gateway_ip"] = None
935
936 if ip_profile.get("dns_address"):
937 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
938
939 if "dhcp_enabled" in ip_profile:
940 subnet["enable_dhcp"] = (
941 False
942 if ip_profile["dhcp_enabled"] == "false"
943 or ip_profile["dhcp_enabled"] is False
944 else True
945 )
946
947 if ip_profile.get("dhcp_start_address"):
948 subnet["allocation_pools"] = []
949 subnet["allocation_pools"].append(dict())
950 subnet["allocation_pools"][0]["start"] = ip_profile[
951 "dhcp_start_address"
952 ]
953
954 if ip_profile.get("dhcp_count"):
955 # parts = ip_profile["dhcp_start_address"].split(".")
956 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
957 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
958 ip_int += ip_profile["dhcp_count"] - 1
959 ip_str = str(netaddr.IPAddress(ip_int))
960 subnet["allocation_pools"][0]["end"] = ip_str
961
962 if (
963 ip_profile.get("ipv6_address_mode")
964 and ip_profile["ip_version"] != "IPv4"
965 ):
966 subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
967 # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
968 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
969 subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
970
971 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
972 self.neutron.create_subnet({"subnet": subnet})
973
974 if net_type == "data" and self.config.get("multisegment_support"):
975 if self.config.get("l2gw_support"):
976 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
977 for l2gw in l2gw_list:
978 l2gw_conn = {
979 "l2_gateway_id": l2gw["id"],
980 "network_id": new_net["network"]["id"],
981 "segmentation_id": str(vlanID),
982 }
983 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
984 {"l2_gateway_connection": l2gw_conn}
985 )
986 created_items[
987 "l2gwconn:"
988 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
989 ] = True
990
991 return new_net["network"]["id"], created_items
992 except Exception as e:
993 # delete l2gw connections (if any) before deleting the network
994 for k, v in created_items.items():
995 if not v: # skip already deleted
996 continue
997
998 try:
999 k_item, _, k_id = k.partition(":")
1000
1001 if k_item == "l2gwconn":
1002 self.neutron.delete_l2_gateway_connection(k_id)
1003 except Exception as e2:
1004 self.logger.error(
1005 "Error deleting l2 gateway connection: {}: {}".format(
1006 type(e2).__name__, e2
1007 )
1008 )
1009
1010 if new_net:
1011 self.neutron.delete_network(new_net["network"]["id"])
1012
1013 self._format_exception(e)
1014
1015 def get_network_list(self, filter_dict={}):
1016 """Obtain tenant networks of VIM
1017 Filter_dict can be:
1018 name: network name
1019 id: network uuid
1020 shared: boolean
1021 tenant_id: tenant
1022 admin_state_up: boolean
1023 status: 'ACTIVE'
1024 Returns the network list of dictionaries
1025 """
1026 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
1027
1028 try:
1029 self._reload_connection()
1030 filter_dict_os = filter_dict.copy()
1031
1032 if self.api_version3 and "tenant_id" in filter_dict_os:
1033 # TODO check
1034 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
1035
1036 net_dict = self.neutron.list_networks(**filter_dict_os)
1037 net_list = net_dict["networks"]
1038 self.__net_os2mano(net_list)
1039
1040 return net_list
1041 except (
1042 neExceptions.ConnectionFailed,
1043 ksExceptions.ClientException,
1044 neExceptions.NeutronException,
1045 ConnectionError,
1046 ) as e:
1047 self._format_exception(e)
1048
1049 def get_network(self, net_id):
1050 """Obtain details of network from VIM
1051 Returns the network information from a network id"""
1052 self.logger.debug("Getting tenant network %s from VIM", net_id)
1053 filter_dict = {"id": net_id}
1054 net_list = self.get_network_list(filter_dict)
1055
1056 if len(net_list) == 0:
1057 raise vimconn.VimConnNotFoundException(
1058 "Network '{}' not found".format(net_id)
1059 )
1060 elif len(net_list) > 1:
1061 raise vimconn.VimConnConflictException(
1062 "Found more than one network with this criteria"
1063 )
1064
1065 net = net_list[0]
1066 subnets = []
1067 for subnet_id in net.get("subnets", ()):
1068 try:
1069 subnet = self.neutron.show_subnet(subnet_id)
1070 except Exception as e:
1071 self.logger.error(
1072 "osconnector.get_network(): Error getting subnet %s %s"
1073 % (net_id, str(e))
1074 )
1075 subnet = {"id": subnet_id, "fault": str(e)}
1076
1077 subnets.append(subnet)
1078
1079 net["subnets"] = subnets
1080 net["encapsulation"] = net.get("provider:network_type")
1081 net["encapsulation_type"] = net.get("provider:network_type")
1082 net["segmentation_id"] = net.get("provider:segmentation_id")
1083 net["encapsulation_id"] = net.get("provider:segmentation_id")
1084
1085 return net
1086
1087 def delete_network(self, net_id, created_items=None):
1088 """
1089 Removes a tenant network from VIM and its associated elements
1090 :param net_id: VIM identifier of the network, provided by method new_network
1091 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1092 Returns the network identifier or raises an exception upon error or when network is not found
1093 """
1094 self.logger.debug("Deleting network '%s' from VIM", net_id)
1095
1096 if created_items is None:
1097 created_items = {}
1098
1099 try:
1100 self._reload_connection()
1101 # delete l2gw connections (if any) before deleting the network
1102 for k, v in created_items.items():
1103 if not v: # skip already deleted
1104 continue
1105
1106 try:
1107 k_item, _, k_id = k.partition(":")
1108 if k_item == "l2gwconn":
1109 self.neutron.delete_l2_gateway_connection(k_id)
1110 except Exception as e:
1111 self.logger.error(
1112 "Error deleting l2 gateway connection: {}: {}".format(
1113 type(e).__name__, e
1114 )
1115 )
1116
1117 # delete VM ports attached to this networks before the network
1118 ports = self.neutron.list_ports(network_id=net_id)
1119 for p in ports["ports"]:
1120 try:
1121 self.neutron.delete_port(p["id"])
1122 except Exception as e:
1123 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1124
1125 self.neutron.delete_network(net_id)
1126
1127 return net_id
1128 except (
1129 neExceptions.ConnectionFailed,
1130 neExceptions.NetworkNotFoundClient,
1131 neExceptions.NeutronException,
1132 ksExceptions.ClientException,
1134 ConnectionError,
1135 ) as e:
1136 self._format_exception(e)
1137
1138 def refresh_nets_status(self, net_list):
1139 """Get the status of the networks
1140 Params: the list of network identifiers
1141 Returns a dictionary with:
1142 net_id: #VIM id of this network
1143 status: #Mandatory. Text with one of:
1144 # DELETED (not found at vim)
1145 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1146 # OTHER (Vim reported other status not understood)
1147 # ERROR (VIM indicates an ERROR status)
1148 # ACTIVE, INACTIVE, DOWN (admin down),
1149 # BUILD (on building process)
1150 #
1151 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1152 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1153 """
1154 net_dict = {}
1155
1156 for net_id in net_list:
1157 net = {}
1158
1159 try:
1160 net_vim = self.get_network(net_id)
1161
1162 if net_vim["status"] in netStatus2manoFormat:
1163 net["status"] = netStatus2manoFormat[net_vim["status"]]
1164 else:
1165 net["status"] = "OTHER"
1166 net["error_msg"] = "VIM status reported " + net_vim["status"]
1167
1168 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1169 net["status"] = "DOWN"
1170
1171 net["vim_info"] = self.serialize(net_vim)
1172
1173 if net_vim.get("fault"): # TODO
1174 net["error_msg"] = str(net_vim["fault"])
1175 except vimconn.VimConnNotFoundException as e:
1176 self.logger.error("Exception getting net status: %s", str(e))
1177 net["status"] = "DELETED"
1178 net["error_msg"] = str(e)
1179 except vimconn.VimConnException as e:
1180 self.logger.error("Exception getting net status: %s", str(e))
1181 net["status"] = "VIM_ERROR"
1182 net["error_msg"] = str(e)
1183 net_dict[net_id] = net
1184 return net_dict
1185
1186 def get_flavor(self, flavor_id):
1187 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1188 self.logger.debug("Getting flavor '%s'", flavor_id)
1189
1190 try:
1191 self._reload_connection()
1192 flavor = self.nova.flavors.find(id=flavor_id)
1193 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1194
1195 return flavor.to_dict()
1196 except (
1197 nvExceptions.NotFound,
1198 nvExceptions.ClientException,
1199 ksExceptions.ClientException,
1200 ConnectionError,
1201 ) as e:
1202 self._format_exception(e)
1203
1204 def get_flavor_id_from_data(self, flavor_dict):
1205 """Obtain flavor id that match the flavor description
1206 Returns the flavor_id or raises a vimconnNotFoundException
1207 flavor_dict: contains the required ram, vcpus, disk
1208 If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1209 and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1210 vimconnNotFoundException is raised
1211 """
1212 exact_match = False if self.config.get("use_existing_flavors") else True
1213
1214 try:
1215 self._reload_connection()
1216 flavor_candidate_id = None
1217 flavor_candidate_data = (10000, 10000, 10000, 10000, 10000)  # ram, vcpus, disk, ephemeral, swap
1218 flavor_target = (
1219 flavor_dict["ram"],
1220 flavor_dict["vcpus"],
1221 flavor_dict["disk"],
1222 flavor_dict.get("ephemeral", 0),
1223 flavor_dict.get("swap", 0),
1224 )
1225 # numa=None
1226 extended = flavor_dict.get("extended", {})
1227 if extended:
1228 # TODO
1229 raise vimconn.VimConnNotFoundException(
1230 "Flavor with EPA still not implemented"
1231 )
1232 # if len(numas) > 1:
1233 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1234 # numa=numas[0]
1235 # numas = extended.get("numas")
1236 for flavor in self.nova.flavors.list():
1237 epa = flavor.get_keys()
1238
1239 if epa:
1240 continue
1241 # TODO
1242
1243 flavor_data = (
1244 flavor.ram,
1245 flavor.vcpus,
1246 flavor.disk,
1247 flavor.ephemeral,
1248 flavor.swap if isinstance(flavor.swap, int) else 0,
1249 )
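# note: these 5-tuples (ram, vcpus, disk, ephemeral, swap) compare
# lexicographically, so a "closer" candidate is chosen by ram first, then
# vcpus, and so on; flavor.swap is "" when unset, hence the isinstance check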
1250 if flavor_data == flavor_target:
1251 return flavor.id
1252 elif (
1253 not exact_match
1254 and flavor_target < flavor_data < flavor_candidate_data
1255 ):
1256 flavor_candidate_id = flavor.id
1257 flavor_candidate_data = flavor_data
1258
1259 if not exact_match and flavor_candidate_id:
1260 return flavor_candidate_id
1261
1262 raise vimconn.VimConnNotFoundException(
1263 "Cannot find any flavor matching '{}'".format(flavor_dict)
1264 )
1265 except (
1266 nvExceptions.NotFound,
1267 nvExceptions.ClientException,
1268 ksExceptions.ClientException,
1269 ConnectionError,
1270 ) as e:
1271 self._format_exception(e)
1272
1273 @staticmethod
1274 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1275 """Process resource quota and fill up extra_specs.
1276 Args:
1277 quota (dict): Keeping the quota of resources
1278 prefix (str) Prefix
1279 extra_specs (dict) Dict to be filled to be used during flavor creation
1280
1281 """
1282 if "limit" in quota:
1283 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1284
1285 if "reserve" in quota:
1286 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1287
1288 if "shares" in quota:
1289 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1290 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1291
1292 @staticmethod
1293 def process_numa_memory(
1294 numa: dict, node_id: Optional[int], extra_specs: dict
1295 ) -> None:
1296 """Set the memory in extra_specs.
1297 Args:
1298 numa (dict): A dictionary which includes numa information
1299 node_id (int): ID of numa node
1300 extra_specs (dict): To be filled.
1301
1302 """
1303 if not numa.get("memory"):
1304 return
1305 memory_mb = numa["memory"] * 1024
1306 memory = "hw:numa_mem.{}".format(node_id)
1307 extra_specs[memory] = int(memory_mb)
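# OSM descriptors express NUMA memory in GB while nova's hw:numa_mem.<N>
# extra spec expects MiB, hence the multiplication by 1024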
1308
1309 @staticmethod
1310 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1311 """Set the cpu in extra_specs.
1312 Args:
1313 numa (dict): A dictionary which includes numa information
1314 node_id (int): ID of numa node
1315 extra_specs (dict): To be filled.
1316
1317 """
1318 if not numa.get("vcpu"):
1319 return
1320 vcpu = numa["vcpu"]
1321 cpu = "hw:numa_cpus.{}".format(node_id)
1322 vcpu = ",".join(map(str, vcpu))
1323 extra_specs[cpu] = vcpu
1324
1325 @staticmethod
1326 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1327 """Fill up extra_specs if numa has paired-threads.
1328 Args:
1329 numa (dict): A dictionary which includes numa information
1330 extra_specs (dict): To be filled.
1331
1332 Returns:
1333 threads (int) Number of virtual cpus
1334
1335 """
1336 if not numa.get("paired-threads"):
1337 return
1338
1339 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1340 threads = numa["paired-threads"] * 2
1341 extra_specs["hw:cpu_thread_policy"] = "require"
1342 extra_specs["hw:cpu_policy"] = "dedicated"
1343 return threads
1344
1345 @staticmethod
1346 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1347 """Fill up extra_specs if numa has cores.
1348 Args:
1349 numa (dict): A dictionary which includes numa information
1350 extra_specs (dict): To be filled.
1351
1352 Returns:
1353 cores (int) Number of virtual cpus
1354
1355 """
1356 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1357 # architecture, or a non-SMT architecture will be emulated
1358 if not numa.get("cores"):
1359 return
1360 cores = numa["cores"]
1361 extra_specs["hw:cpu_thread_policy"] = "isolate"
1362 extra_specs["hw:cpu_policy"] = "dedicated"
1363 return cores
1364
1365 @staticmethod
1366 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1367 """Fill up extra_specs if numa has threads.
1368 Args:
1369 numa (dict): A dictionary which includes numa information
1370 extra_specs (dict): To be filled.
1371
1372 Returns:
1373 threads (int) Number of virtual cpus
1374
1375 """
1376 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1377 if not numa.get("threads"):
1378 return
1379 threads = numa["threads"]
1380 extra_specs["hw:cpu_thread_policy"] = "prefer"
1381 extra_specs["hw:cpu_policy"] = "dedicated"
1382 return threads
1383
1384 def _process_numa_parameters_of_flavor(
1385 self, numas: List, extra_specs: Dict
1386 ) -> None:
1387 """Process numa parameters and fill up extra_specs.
1388
1389 Args:
1390 numas (list): List of dictionary which includes numa information
1391 extra_specs (dict): To be filled.
1392
1393 """
1394 numa_nodes = len(numas)
1395 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1396 cpu_cores, cpu_threads = 0, 0
1397
1398 if self.vim_type == "VIO":
1399 self.process_vio_numa_nodes(numa_nodes, extra_specs)
1400
1401 for numa in numas:
1402 if "id" in numa:
1403 node_id = numa["id"]
1404 # overwrite ram and vcpus
1405 # check if key "memory" is present in numa else use ram value at flavor
1406 self.process_numa_memory(numa, node_id, extra_specs)
1407 self.process_numa_vcpu(numa, node_id, extra_specs)
1408
1409 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1410 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1411
1412 if "paired-threads" in numa:
1413 threads = self.process_numa_paired_threads(numa, extra_specs)
1414 cpu_threads += threads or 0  # the helper returns None if the key is present but empty
1415
1416 elif "cores" in numa:
1417 cores = self.process_numa_cores(numa, extra_specs)
1418 cpu_cores += cores or 0
1419
1420 elif "threads" in numa:
1421 threads = self.process_numa_threads(numa, extra_specs)
1422 cpu_threads += threads or 0
1423
1424 if cpu_cores:
1425 extra_specs["hw:cpu_cores"] = str(cpu_cores)
1426 if cpu_threads:
1427 extra_specs["hw:cpu_threads"] = str(cpu_threads)
1428
1429 @staticmethod
1430 def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1431 """According to number of numa nodes, updates the extra_specs for VIO.
1432
1433 Args:
1434
1435 numa_nodes (int): Number of numa nodes
1436 extra_specs (dict): Extra specs dict to be updated
1437
1438 """
1439 # If there are several numas, we do not define specific affinity.
1440 extra_specs["vmware:latency_sensitivity_level"] = "high"
1441
1442 def _change_flavor_name(
1443 self, name: str, name_suffix: int, flavor_data: dict
1444 ) -> str:
1445 """Change the flavor name if the name already exists.
1446
1447 Args:
1448 name (str): Flavor name to be checked
1449 name_suffix (int): Suffix to be appended to name
1450 flavor_data (dict): Flavor dict
1451
1452 Returns:
1453 name (str): New flavor name to be used
1454
1455 """
1456 # Get used names
1457 fl = self.nova.flavors.list()
1458 fl_names = [f.name for f in fl]
1459
1460 while name in fl_names:
1461 name_suffix += 1
1462 name = flavor_data["name"] + "-" + str(name_suffix)
1463
1464 return name
1465
1466 def _process_extended_config_of_flavor(
1467 self, extended: dict, extra_specs: dict
1468 ) -> None:
1469 """Process the extended dict to fill up extra_specs.
1470 Args:
1471
1472 extended (dict): Keeping the extra specification of flavor
1473 extra_specs (dict) Dict to be filled to be used during flavor creation
1474
1475 """
1476 quotas = {
1477 "cpu-quota": "cpu",
1478 "mem-quota": "memory",
1479 "vif-quota": "vif",
1480 "disk-io-quota": "disk_io",
1481 }
1482
1483 page_sizes = {
1484 "LARGE": "large",
1485 "SMALL": "small",
1486 "SIZE_2MB": "2MB",
1487 "SIZE_1GB": "1GB",
1488 "PREFER_LARGE": "any",
1489 }
1490
1491 policies = {
1492 "cpu-pinning-policy": "hw:cpu_policy",
1493 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1494 "mem-policy": "hw:numa_mempolicy",
1495 }
1496
1497 numas = extended.get("numas")
1498 if numas:
1499 self._process_numa_parameters_of_flavor(numas, extra_specs)
1500
1501 for quota, item in quotas.items():
1502 if quota in extended.keys():
1503 self.process_resource_quota(extended.get(quota), item, extra_specs)
1504
1505 # Set the mempage size as specified in the descriptor
1506 if extended.get("mempage-size"):
1507 if extended["mempage-size"] in page_sizes.keys():
1508 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1509 else:
1510 # Normally, validations in NBI should not allow to this condition.
1511 self.logger.debug(
1512 "Invalid mempage-size %s. Will be ignored",
1513 extended.get("mempage-size"),
1514 )
1515
1516 for policy, hw_policy in policies.items():
1517 if extended.get(policy):
1518 extra_specs[hw_policy] = extended[policy].lower()
1519
1520 @staticmethod
1521 def _get_flavor_details(flavor_data: dict) -> Tuple:
1522 """Returns the details of flavor
1523 Args:
1524 flavor_data (dict): Dictionary that includes required flavor details
1525
1526 Returns:
1527 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1528
1529 """
1530 return (
1531 flavor_data.get("ram", 64),
1532 flavor_data.get("vcpus", 1),
1533 {},
1534 flavor_data.get("extended"),
1535 )
1536
1537 def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
1538 """Adds a tenant flavor to openstack VIM.
1539 if change_name_if_used is True, it will change name in case of conflict,
1540 because it is not supported name repetition.
1541
1542 Args:
1543 flavor_data (dict): Flavor details to be processed
1544 change_name_if_used (bool): Change name in case of conflict
1545
1546 Returns:
1547 flavor_id (str): flavor identifier
1548
1549 """
1550 self.logger.debug("Adding flavor '%s'", str(flavor_data))
1551 retry = 0
1552 max_retries = 3
1553 name_suffix = 0
1554
1555 try:
1556 name = flavor_data["name"]
1557 while retry < max_retries:
1558 retry += 1
1559 try:
1560 self._reload_connection()
1561
1562 if change_name_if_used:
1563 name = self._change_flavor_name(name, name_suffix, flavor_data)
1564
1565 ram, vcpus, extra_specs, extended = self._get_flavor_details(
1566 flavor_data
1567 )
1568 if extended:
1569 self._process_extended_config_of_flavor(extended, extra_specs)
1570
1571 # Create flavor
1572
1573 new_flavor = self.nova.flavors.create(
1574 name=name,
1575 ram=ram,
1576 vcpus=vcpus,
1577 disk=flavor_data.get("disk", 0),
1578 ephemeral=flavor_data.get("ephemeral", 0),
1579 swap=flavor_data.get("swap", 0),
1580 is_public=flavor_data.get("is_public", True),
1581 )
1582
1583 # Add metadata
1584 if extra_specs:
1585 new_flavor.set_keys(extra_specs)
1586
1587 return new_flavor.id
1588
1589 except nvExceptions.Conflict as e:
1590 if change_name_if_used and retry < max_retries:
1591 continue
1592
1593 self._format_exception(e)
1594
1595 except (
1596 ksExceptions.ClientException,
1597 nvExceptions.ClientException,
1598 ConnectionError,
1599 KeyError,
1600 ) as e:
1601 self._format_exception(e)
1602
1603 def delete_flavor(self, flavor_id):
1604 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1605 try:
1606 self._reload_connection()
1607 self.nova.flavors.delete(flavor_id)
1608
1609 return flavor_id
1610 # except nvExceptions.BadRequest as e:
1611 except (
1612 nvExceptions.NotFound,
1613 ksExceptions.ClientException,
1614 nvExceptions.ClientException,
1615 ConnectionError,
1616 ) as e:
1617 self._format_exception(e)
1618
1619 def new_image(self, image_dict):
1620 """
1621 Adds a tenant image to VIM. image_dict is a dictionary with:
1622 name: name
1623 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1624 location: path or URI
1625 public: "yes" or "no"
1626 metadata: metadata of the image
1627 Returns the image_id
1628 """
1629 retry = 0
1630 max_retries = 3
1631
1632 while retry < max_retries:
1633 retry += 1
1634 try:
1635 self._reload_connection()
1636
1637 # determine format http://docs.openstack.org/developer/glance/formats.html
1638 if "disk_format" in image_dict:
1639 disk_format = image_dict["disk_format"]
1640 else: # autodiscover based on extension
1641 if image_dict["location"].endswith(".qcow2"):
1642 disk_format = "qcow2"
1643 elif image_dict["location"].endswith(".vhd"):
1644 disk_format = "vhd"
1645 elif image_dict["location"].endswith(".vmdk"):
1646 disk_format = "vmdk"
1647 elif image_dict["location"].endswith(".vdi"):
1648 disk_format = "vdi"
1649 elif image_dict["location"].endswith(".iso"):
1650 disk_format = "iso"
1651 elif image_dict["location"].endswith(".aki"):
1652 disk_format = "aki"
1653 elif image_dict["location"].endswith(".ari"):
1654 disk_format = "ari"
1655 elif image_dict["location"].endswith(".ami"):
1656 disk_format = "ami"
1657 else:
1658 disk_format = "raw"
1659
1660 self.logger.debug(
1661 "new_image: '%s' loading from '%s'",
1662 image_dict["name"],
1663 image_dict["location"],
1664 )
1665 if self.vim_type == "VIO":
1666 container_format = "bare"
1667 if "container_format" in image_dict:
1668 container_format = image_dict["container_format"]
1669
1670 new_image = self.glance.images.create(
1671 name=image_dict["name"],
1672 container_format=container_format,
1673 disk_format=disk_format,
1674 )
1675 else:
1676 new_image = self.glance.images.create(name=image_dict["name"])
1677
1678 if image_dict["location"].startswith("http"):
1679 # TODO there is not a method to direct download. It must be downloaded locally with requests
1680 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1681 else: # local path
1682 with open(image_dict["location"], "rb") as fimage:  # binary mode: image payloads are raw bytes
1683 self.glance.images.upload(new_image.id, fimage)
1684 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1685 # image_dict.get("public","yes")=="yes",
1686 # container_format="bare", data=fimage, disk_format=disk_format)
1687
1688 metadata_to_load = image_dict.get("metadata") or {}  # tolerate images without metadata
1689
1690 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1691 # for openstack
1692 if self.vim_type == "VIO":
1693 metadata_to_load["upload_location"] = image_dict["location"]
1694 else:
1695 metadata_to_load["location"] = image_dict["location"]
1696
1697 self.glance.images.update(new_image.id, **metadata_to_load)
1698
1699 return new_image.id
1700 except (
1701 nvExceptions.Conflict,
1702 ksExceptions.ClientException,
1703 nvExceptions.ClientException,
1704 ) as e:
1705 self._format_exception(e)
1706 except (
1707 HTTPException,
1708 gl1Exceptions.HTTPException,
1709 gl1Exceptions.CommunicationError,
1710 ConnectionError,
1711 ) as e:
1712 if retry < max_retries:
1713 continue
1714
1715 self._format_exception(e)
1716 except IOError as e: # can not open the file
1717 raise vimconn.VimConnConnectionException(
1718 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1719 http_code=vimconn.HTTP_Bad_Request,
1720 )
1721
1722 def delete_image(self, image_id):
1723 """Deletes a tenant image from openstack VIM. Returns the old id"""
1724 try:
1725 self._reload_connection()
1726 self.glance.images.delete(image_id)
1727
1728 return image_id
1729 except (
1730 nvExceptions.NotFound,
1731 ksExceptions.ClientException,
1732 nvExceptions.ClientException,
1733 gl1Exceptions.CommunicationError,
1734 gl1Exceptions.HTTPNotFound,
1735 ConnectionError,
1736 ) as e: # TODO remove
1737 self._format_exception(e)
1738
1739 def get_image_id_from_path(self, path):
1740 """Get the image id from image path in the VIM database. Returns the image_id"""
1741 try:
1742 self._reload_connection()
1743 images = self.glance.images.list()
1744
1745 for image in images:
1746 if image.metadata.get("location") == path:
1747 return image.id
1748
1749 raise vimconn.VimConnNotFoundException(
1750 "image with location '{}' not found".format(path)
1751 )
1752 except (
1753 ksExceptions.ClientException,
1754 nvExceptions.ClientException,
1755 gl1Exceptions.CommunicationError,
1756 ConnectionError,
1757 ) as e:
1758 self._format_exception(e)
1759
1760 def get_image_list(self, filter_dict={}):
1761 """Obtain tenant images from VIM
1762 Filter_dict can be:
1763 id: image id
1764 name: image name
1765 checksum: image checksum
1766 Returns the image list of dictionaries:
1767 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1768 List can be empty
1769 """
1770 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1771
1772 try:
1773 self._reload_connection()
1774 # filter_dict_os = filter_dict.copy()
1775 # First we filter by the available filter fields: name, id. The others are removed.
1776 image_list = self.glance.images.list()
1777 filtered_list = []
1778
1779 for image in image_list:
1780 try:
1781 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1782 continue
1783
1784 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1785 continue
1786
1787 if (
1788 filter_dict.get("checksum")
1789 and image["checksum"] != filter_dict["checksum"]
1790 ):
1791 continue
1792
1793 filtered_list.append(image.copy())
1794 except gl1Exceptions.HTTPNotFound:
1795 pass
1796
1797 return filtered_list
1798 except (
1799 ksExceptions.ClientException,
1800 nvExceptions.ClientException,
1801 gl1Exceptions.CommunicationError,
1802 ConnectionError,
1803 ) as e:
1804 self._format_exception(e)
1805
1806 def __wait_for_vm(self, vm_id, status):
        """Wait until the VM is in the desired status and return True.
        If the VM gets into ERROR status, return False.
        If the timeout is reached, raise an exception."""
1810 elapsed_time = 0
1811 while elapsed_time < server_timeout:
1812 vm_status = self.nova.servers.get(vm_id).status
1813
1814 if vm_status == status:
1815 return True
1816
1817 if vm_status == "ERROR":
1818 return False
1819
1820 time.sleep(5)
1821 elapsed_time += 5
1822
        # If we exceeded the timeout, raise an exception
1824 if elapsed_time >= server_timeout:
1825 raise vimconn.VimConnException(
1826 "Timeout waiting for instance " + vm_id + " to get " + status,
1827 http_code=vimconn.HTTP_Request_Timeout,
1828 )
1829
1830 def _get_openstack_availablity_zones(self):
1831 """
        Get the availability zones available from OpenStack
1833 :return:
1834 """
1835 try:
1836 openstack_availability_zone = self.nova.availability_zones.list()
1837 openstack_availability_zone = [
1838 str(zone.zoneName)
1839 for zone in openstack_availability_zone
1840 if zone.zoneName != "internal"
1841 ]
1842
1843 return openstack_availability_zone
1844 except Exception:
1845 return None
1846
1847 def _set_availablity_zones(self):
1848 """
        Set the VIM availability zone
1850 :return:
1851 """
1852 if "availability_zone" in self.config:
1853 vim_availability_zones = self.config.get("availability_zone")
1854
1855 if isinstance(vim_availability_zones, str):
1856 self.availability_zone = [vim_availability_zones]
1857 elif isinstance(vim_availability_zones, list):
1858 self.availability_zone = vim_availability_zones
1859 else:
1860 self.availability_zone = self._get_openstack_availablity_zones()
1861
1862 def _get_vm_availability_zone(
1863 self, availability_zone_index, availability_zone_list
1864 ):
1865 """
        Return the availability zone to be used by the created VM.
1867 :return: The VIM availability zone to be used or None
1868 """
1869 if availability_zone_index is None:
1870 if not self.config.get("availability_zone"):
1871 return None
1872 elif isinstance(self.config.get("availability_zone"), str):
1873 return self.config["availability_zone"]
1874 else:
1875 # TODO consider using a different parameter at config for default AV and AV list match
1876 return self.config["availability_zone"][0]
1877
1878 vim_availability_zones = self.availability_zone
        # check if the VIM offers enough availability zones as described in the VNFD
1880 if vim_availability_zones and len(availability_zone_list) <= len(
1881 vim_availability_zones
1882 ):
1883 # check if all the names of NFV AV match VIM AV names
1884 match_by_index = False
1885 for av in availability_zone_list:
1886 if av not in vim_availability_zones:
1887 match_by_index = True
1888 break
1889
1890 if match_by_index:
1891 return vim_availability_zones[availability_zone_index]
1892 else:
1893 return availability_zone_list[availability_zone_index]
1894 else:
1895 raise vimconn.VimConnConflictException(
                "Not enough availability zones at VIM for this deployment"
1897 )
1898
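    # Illustrative sketch of the selection above (hypothetical zone names):
    # with VIM zones ["zone-a", "zone-b"] and availability_zone_list
    # ["az1", "az2"] from the VNFD (no name match), availability_zone_index 1
    # selects "zone-b" by index; if every VNFD name matched a VIM zone name,
    # "az2" itself would be returned.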
1899 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1900 """Fill up the security_groups in the port_dict.
1901
1902 Args:
1903 net (dict): Network details
1904 port_dict (dict): Port details
1905
1906 """
1907 if (
1908 self.config.get("security_groups")
1909 and net.get("port_security") is not False
1910 and not self.config.get("no_port_security_extension")
1911 ):
1912 if not self.security_groups_id:
1913 self._get_ids_from_name()
1914
1915 port_dict["security_groups"] = self.security_groups_id
1916
1917 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1918 """Fill up the network binding depending on network type in the port_dict.
1919
1920 Args:
1921 net (dict): Network details
1922 port_dict (dict): Port details
1923
1924 """
1925 if not net.get("type"):
1926 raise vimconn.VimConnException("Type is missing in the network details.")
1927
1928 if net["type"] == "virtual":
1929 pass
1930
1931 # For VF
1932 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1933 port_dict["binding:vnic_type"] = "direct"
1934
1935 # VIO specific Changes
1936 if self.vim_type == "VIO":
1937 # Need to create port with port_security_enabled = False and no-security-groups
1938 port_dict["port_security_enabled"] = False
1939 port_dict["provider_security_groups"] = []
1940 port_dict["security_groups"] = []
1941
1942 else:
1943 # For PT PCI-PASSTHROUGH
1944 port_dict["binding:vnic_type"] = "direct-physical"
1945
1946 @staticmethod
1947 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1948 """Set the "ip" parameter in net dictionary.
1949
1950 Args:
            new_port (dict): Newly created port
1952 net (dict): Network details
1953
1954 """
1955 fixed_ips = new_port["port"].get("fixed_ips")
1956
1957 if fixed_ips:
1958 net["ip"] = fixed_ips[0].get("ip_address")
1959 else:
1960 net["ip"] = None
1961
1962 @staticmethod
1963 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1964 """Fill up the mac_address and fixed_ips in port_dict.
1965
1966 Args:
1967 net (dict): Network details
1968 port_dict (dict): Port details
1969
1970 """
1971 if net.get("mac_address"):
1972 port_dict["mac_address"] = net["mac_address"]
1973
1974 ip_dual_list = []
1975 if ip_list := net.get("ip_address"):
1976 if not isinstance(ip_list, list):
1977 ip_list = [ip_list]
1978 for ip in ip_list:
1979 ip_dict = {"ip_address": ip}
1980 ip_dual_list.append(ip_dict)
1981 port_dict["fixed_ips"] = ip_dual_list
1982 # TODO add "subnet_id": <subnet_id>
1983
1984 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1985 """Create new port using neutron.
1986
1987 Args:
1988 port_dict (dict): Port details
1989 created_items (dict): All created items
1990 net (dict): Network details
1991
1992 Returns:
            new_port (dict): Newly created port
1994
1995 """
1996 new_port = self.neutron.create_port({"port": port_dict})
1997 created_items["port:" + str(new_port["port"]["id"])] = True
1998 net["mac_address"] = new_port["port"]["mac_address"]
1999 net["vim_id"] = new_port["port"]["id"]
2000
2001 return new_port
2002
2003 def _create_port(
2004 self, net: dict, name: str, created_items: dict
2005 ) -> Tuple[dict, dict]:
2006 """Create port using net details.
2007
2008 Args:
2009 net (dict): Network details
2010 name (str): Name to be used as network name if net dict does not include name
2011 created_items (dict): All created items
2012
2013 Returns:
            new_port, port (tuple): Newly created port and the port dict used in the nics list
2015
2016 """
2017
2018 port_dict = {
2019 "network_id": net["net_id"],
2020 "name": net.get("name"),
2021 "admin_state_up": True,
2022 }
2023
2024 if not port_dict["name"]:
2025 port_dict["name"] = name
2026
2027 self._prepare_port_dict_security_groups(net, port_dict)
2028
2029 self._prepare_port_dict_binding(net, port_dict)
2030
2031 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
2032
2033 new_port = self._create_new_port(port_dict, created_items, net)
2034
2035 vimconnector._set_fixed_ip(new_port, net)
2036
2037 port = {"port-id": new_port["port"]["id"]}
2038
        # Compare the API microversion numerically; a plain float() comparison
        # would order "2.9" above "2.32"
        api_version = tuple(
            int(n) for n in self.nova.api_version.get_string().split(".")
        )
        if api_version >= (2, 32):
2040 port["tag"] = new_port["port"]["name"]
2041
2042 return new_port, port
2043
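    # Illustrative sketch (hypothetical ids) of the dictionaries produced by
    # _create_port for an SR-IOV interface; _prepare_port_dict_binding sets the
    # vnic type, and "tag" is only added with nova API microversion 2.32+:
    #   port_dict -> {"network_id": "net-uuid", "name": "vm1-eth1",
    #                 "admin_state_up": True, "binding:vnic_type": "direct"}
    #   port      -> {"port-id": "port-uuid", "tag": "vm1-eth1"}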
2044 def _prepare_network_for_vminstance(
2045 self,
2046 name: str,
2047 net_list: list,
2048 created_items: dict,
2049 net_list_vim: list,
2050 external_network: list,
2051 no_secured_ports: list,
2052 ) -> None:
2053 """Create port and fill up net dictionary for new VM instance creation.
2054
2055 Args:
2056 name (str): Name of network
2057 net_list (list): List of networks
            created_items (dict): All created items belonging to the VM
2059 net_list_vim (list): List of ports
2060 external_network (list): List of external-networks
2061 no_secured_ports (list): Port security disabled ports
2062 """
2063
2064 self._reload_connection()
2065
2066 for net in net_list:
2067 # Skip non-connected iface
2068 if not net.get("net_id"):
2069 continue
2070
2071 new_port, port = self._create_port(net, name, created_items)
2072
2073 net_list_vim.append(port)
2074
2075 if net.get("floating_ip", False):
2076 net["exit_on_floating_ip_error"] = True
2077 external_network.append(net)
2078
2079 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2080 net["exit_on_floating_ip_error"] = False
2081 external_network.append(net)
2082 net["floating_ip"] = self.config.get("use_floating_ip")
2083
2084 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2085 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2086 if net.get("port_security") is False and not self.config.get(
2087 "no_port_security_extension"
2088 ):
2089 no_secured_ports.append(
2090 (
2091 new_port["port"]["id"],
2092 net.get("port_security_disable_strategy"),
2093 )
2094 )
2095
2096 def _prepare_persistent_root_volumes(
2097 self,
2098 name: str,
2099 vm_av_zone: list,
2100 disk: dict,
2101 base_disk_index: int,
2102 block_device_mapping: dict,
2103 existing_vim_volumes: list,
2104 created_items: dict,
2105 ) -> Optional[str]:
2106 """Prepare persistent root volumes for new VM instance.
2107
2108 Args:
2109 name (str): Name of VM instance
2110 vm_av_zone (list): List of availability zones
2111 disk (dict): Disk details
2112 base_disk_index (int): Disk index
2113 block_device_mapping (dict): Block device details
2114 existing_vim_volumes (list): Existing disk details
            created_items (dict): All created items belonging to the VM
2116
2117 Returns:
2118 boot_volume_id (str): ID of boot volume
2119
2120 """
        # Disk may include only vim_volume_id or only vim_id
        # Use existing persistent root volume finding with volume_id or vim_id
        key_id = "vim_volume_id" if "vim_volume_id" in disk else "vim_id"
2124
2125 if disk.get(key_id):
2126 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2127 existing_vim_volumes.append({"id": disk[key_id]})
2128
2129 else:
2130 # Create persistent root volume
2131 volume = self.cinder.volumes.create(
2132 size=disk["size"],
2133 name=name + "vd" + chr(base_disk_index),
2134 imageRef=disk["image_id"],
2135 # Make sure volume is in the same AZ as the VM to be attached to
2136 availability_zone=vm_av_zone,
2137 )
2138 boot_volume_id = volume.id
2139 self.update_block_device_mapping(
2140 volume=volume,
2141 block_device_mapping=block_device_mapping,
2142 base_disk_index=base_disk_index,
2143 disk=disk,
2144 created_items=created_items,
2145 )
2146
2147 return boot_volume_id
2148
2149 @staticmethod
2150 def update_block_device_mapping(
2151 volume: object,
2152 block_device_mapping: dict,
2153 base_disk_index: int,
2154 disk: dict,
2155 created_items: dict,
2156 ) -> None:
2157 """Add volume information to block device mapping dict.
2158 Args:
2159 volume (object): Created volume object
2160 block_device_mapping (dict): Block device details
2161 base_disk_index (int): Disk index
2162 disk (dict): Disk details
            created_items (dict): All created items belonging to the VM
2164 """
2165 if not volume:
2166 raise vimconn.VimConnException("Volume is empty.")
2167
2168 if not hasattr(volume, "id"):
2169 raise vimconn.VimConnException(
2170 "Created volume is not valid, does not have id attribute."
2171 )
2172
2173 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2174 if disk.get("multiattach"): # multiattach volumes do not belong to VDUs
2175 return
2176 volume_txt = "volume:" + str(volume.id)
2177 if disk.get("keep"):
2178 volume_txt += ":keep"
2179 created_items[volume_txt] = True
2180
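    # Illustrative outcome of update_block_device_mapping for a second disk
    # flagged "keep" (hypothetical volume id):
    #   block_device_mapping -> {"vdb": "vol-uuid"}
    #   created_items        -> {"volume:vol-uuid:keep": True}
    # Multiattach volumes are only added to block_device_mapping, never to
    # created_items, so they are not deleted together with the VDU.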
    def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:
        """Create a multiattach (shared) volume at the VIM.

        Args:
            shared_volume_data (dict): Includes the "name" and "size" (GB) of the volume

        Returns:
            (name, id) of the created volume
        """
2182 try:
2183 volume = self.cinder.volumes.create(
2184 size=shared_volume_data["size"],
2185 name=shared_volume_data["name"],
2186 volume_type="multiattach",
2187 )
2188 return (volume.name, volume.id)
2189 except (ConnectionError, KeyError) as e:
2190 self._format_exception(e)
2191
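    # Minimal usage sketch (assumes a cinder volume type named "multiattach"
    # exists at the VIM; "conn" is a vimconnector instance):
    #   name, vim_id = conn.new_shared_volumes({"name": "shared-data", "size": 10})
    # The returned name is later matched by _prepare_shared_volumes when the
    # volume is attached to each VM that declares the multiattach disk.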
2192 def _prepare_shared_volumes(
2193 self,
2194 name: str,
2195 disk: dict,
2196 base_disk_index: int,
2197 block_device_mapping: dict,
2198 existing_vim_volumes: list,
2199 created_items: dict,
2200 ):
2201 volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
2202 if volumes.get(disk["name"]):
2203 sv_id = volumes[disk["name"]]
2204 max_retries = 3
2205 vol_status = ""
2206 # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
2207 while max_retries:
2208 max_retries -= 1
2209 volume = self.cinder.volumes.get(sv_id)
2210 vol_status = volume.status
2211 if volume.status not in ("in-use", "available"):
2212 time.sleep(5)
2213 continue
2214 self.update_block_device_mapping(
2215 volume=volume,
2216 block_device_mapping=block_device_mapping,
2217 base_disk_index=base_disk_index,
2218 disk=disk,
2219 created_items=created_items,
2220 )
2221 return
2222 raise vimconn.VimConnException(
2223 "Shared volume is not prepared, status is: {}".format(vol_status),
2224 http_code=vimconn.HTTP_Internal_Server_Error,
2225 )
2226
2227 def _prepare_non_root_persistent_volumes(
2228 self,
2229 name: str,
2230 disk: dict,
2231 vm_av_zone: list,
2232 block_device_mapping: dict,
2233 base_disk_index: int,
2234 existing_vim_volumes: list,
2235 created_items: dict,
2236 ) -> None:
2237 """Prepare persistent volumes for new VM instance.
2238
2239 Args:
2240 name (str): Name of VM instance
2241 disk (dict): Disk details
2242 vm_av_zone (list): List of availability zones
2243 block_device_mapping (dict): Block device details
2244 base_disk_index (int): Disk index
2245 existing_vim_volumes (list): Existing disk details
            created_items (dict): All created items belonging to the VM
2247 """
2248 # Non-root persistent volumes
        # Disk may include only vim_volume_id or only vim_id
        key_id = "vim_volume_id" if "vim_volume_id" in disk else "vim_id"
2251 if disk.get(key_id):
2252 # Use existing persistent volume
2253 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2254 existing_vim_volumes.append({"id": disk[key_id]})
2255 else:
2256 volume_name = f"{name}vd{chr(base_disk_index)}"
2257 volume = self.cinder.volumes.create(
2258 size=disk["size"],
2259 name=volume_name,
2260 # Make sure volume is in the same AZ as the VM to be attached to
2261 availability_zone=vm_av_zone,
2262 )
2263 self.update_block_device_mapping(
2264 volume=volume,
2265 block_device_mapping=block_device_mapping,
2266 base_disk_index=base_disk_index,
2267 disk=disk,
2268 created_items=created_items,
2269 )
2270
2271 def _wait_for_created_volumes_availability(
2272 self, elapsed_time: int, created_items: dict
2273 ) -> Optional[int]:
2274 """Wait till created volumes become available.
2275
2276 Args:
2277 elapsed_time (int): Passed time while waiting
            created_items (dict): All created items belonging to the VM
2279
2280 Returns:
2281 elapsed_time (int): Time spent while waiting
2282
2283 """
2284 while elapsed_time < volume_timeout:
2285 for created_item in created_items:
2286 v, volume_id = (
2287 created_item.split(":")[0],
2288 created_item.split(":")[1],
2289 )
2290 if v == "volume":
2291 volume = self.cinder.volumes.get(volume_id)
2292 if (
2293 volume.volume_type == "multiattach"
2294 and volume.status == "in-use"
2295 ):
2296 return elapsed_time
2297 elif volume.status != "available":
2298 break
2299 else:
2300 # All ready: break from while
2301 break
2302
2303 time.sleep(5)
2304 elapsed_time += 5
2305
2306 return elapsed_time
2307
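    # Illustrative sketch of the created_items keys polled above (hypothetical
    # ids): a "volume:vol-uuid" entry is re-read until its status is
    # "available" (or "in-use" for multiattach volumes), while entries such as
    # "port:port-uuid" or "floating_ip:ip-uuid" are ignored by the loop.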
2308 def _wait_for_existing_volumes_availability(
2309 self, elapsed_time: int, existing_vim_volumes: list
2310 ) -> Optional[int]:
2311 """Wait till existing volumes become available.
2312
2313 Args:
2314 elapsed_time (int): Passed time while waiting
2315 existing_vim_volumes (list): Existing volume details
2316
2317 Returns:
2318 elapsed_time (int): Time spent while waiting
2319
2320 """
2321
2322 while elapsed_time < volume_timeout:
2323 for volume in existing_vim_volumes:
2324 v = self.cinder.volumes.get(volume["id"])
2325 if v.volume_type == "multiattach" and v.status == "in-use":
2326 return elapsed_time
2327 elif v.status != "available":
2328 break
2329 else: # all ready: break from while
2330 break
2331
2332 time.sleep(5)
2333 elapsed_time += 5
2334
2335 return elapsed_time
2336
2337 def _prepare_disk_for_vminstance(
2338 self,
2339 name: str,
2340 existing_vim_volumes: list,
2341 created_items: dict,
2342 vm_av_zone: list,
2343 block_device_mapping: dict,
2344 disk_list: list = None,
2345 ) -> None:
2346 """Prepare all volumes for new VM instance.
2347
2348 Args:
2349 name (str): Name of Instance
2350 existing_vim_volumes (list): List of existing volumes
            created_items (dict): All created items belonging to the VM
2352 vm_av_zone (list): VM availability zone
2353 block_device_mapping (dict): Block devices to be attached to VM
2354 disk_list (list): List of disks
2355
2356 """
2357 # Create additional volumes in case these are present in disk_list
2358 base_disk_index = ord("b")
2359 boot_volume_id = None
2360 elapsed_time = 0
2361 for disk in disk_list:
2362 if "image_id" in disk:
2363 # Root persistent volume
2364 base_disk_index = ord("a")
2365 boot_volume_id = self._prepare_persistent_root_volumes(
2366 name=name,
2367 vm_av_zone=vm_av_zone,
2368 disk=disk,
2369 base_disk_index=base_disk_index,
2370 block_device_mapping=block_device_mapping,
2371 existing_vim_volumes=existing_vim_volumes,
2372 created_items=created_items,
2373 )
2374 elif disk.get("multiattach"):
2375 self._prepare_shared_volumes(
2376 name=name,
2377 disk=disk,
2378 base_disk_index=base_disk_index,
2379 block_device_mapping=block_device_mapping,
2380 existing_vim_volumes=existing_vim_volumes,
2381 created_items=created_items,
2382 )
2383 else:
2384 # Non-root persistent volume
2385 self._prepare_non_root_persistent_volumes(
2386 name=name,
2387 disk=disk,
2388 vm_av_zone=vm_av_zone,
2389 block_device_mapping=block_device_mapping,
2390 base_disk_index=base_disk_index,
2391 existing_vim_volumes=existing_vim_volumes,
2392 created_items=created_items,
2393 )
2394 base_disk_index += 1
2395
        # Wait until created volumes are in "available" status
2397 elapsed_time = self._wait_for_created_volumes_availability(
2398 elapsed_time, created_items
2399 )
        # Wait until existing volumes in the VIM are in "available" status
2401 elapsed_time = self._wait_for_existing_volumes_availability(
2402 elapsed_time, existing_vim_volumes
2403 )
2404 # If we exceeded the timeout rollback
2405 if elapsed_time >= volume_timeout:
2406 raise vimconn.VimConnException(
2407 "Timeout creating volumes for instance " + name,
2408 http_code=vimconn.HTTP_Request_Timeout,
2409 )
2410 if boot_volume_id:
2411 self.cinder.volumes.set_bootable(boot_volume_id, True)
2412
2413 def _find_the_external_network_for_floating_ip(self):
        """Get the external network ID (pool) needed to create a floating IP.
2415
2416 Returns:
2417 pool_id (str): External network pool ID
2418
2419 """
2420
2421 # Find the external network
2422 external_nets = list()
2423
2424 for net in self.neutron.list_networks()["networks"]:
2425 if net["router:external"]:
2426 external_nets.append(net)
2427
2428 if len(external_nets) == 0:
2429 raise vimconn.VimConnException(
2430 "Cannot create floating_ip automatically since "
2431 "no external network is present",
2432 http_code=vimconn.HTTP_Conflict,
2433 )
2434
2435 if len(external_nets) > 1:
2436 raise vimconn.VimConnException(
2437 "Cannot create floating_ip automatically since "
2438 "multiple external networks are present",
2439 http_code=vimconn.HTTP_Conflict,
2440 )
2441
2442 # Pool ID
2443 return external_nets[0].get("id")
2444
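    # Illustrative sketch: with exactly one network marked "router:external"
    # at the VIM, e.g. {"id": "ext-net-uuid", "router:external": True}, the
    # pool id returned is "ext-net-uuid"; zero or several external networks
    # raise a VimConnException instead.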
2445 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2446 """Trigger neutron to create a new floating IP using external network ID.
2447
2448 Args:
2449 param (dict): Input parameters to create a floating IP
            created_items (dict): All created items belonging to the new VM instance
2451
2452 Raises:
2453
2454 VimConnException
2455 """
2456 try:
2457 self.logger.debug("Creating floating IP")
2458 new_floating_ip = self.neutron.create_floatingip(param)
2459 free_floating_ip = new_floating_ip["floatingip"]["id"]
2460 created_items["floating_ip:" + str(free_floating_ip)] = True
2461
2462 except Exception as e:
2463 raise vimconn.VimConnException(
2464 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2465 http_code=vimconn.HTTP_Conflict,
2466 )
2467
2468 def _create_floating_ip(
2469 self, floating_network: dict, server: object, created_items: dict
2470 ) -> None:
2471 """Get the available Pool ID and create a new floating IP.
2472
2473 Args:
2474 floating_network (dict): Dict including external network ID
2475 server (object): Server object
            created_items (dict): All created items belonging to the new VM instance
2477
2478 """
2479
2480 # Pool_id is available
2481 if (
2482 isinstance(floating_network["floating_ip"], str)
2483 and floating_network["floating_ip"].lower() != "true"
2484 ):
2485 pool_id = floating_network["floating_ip"]
2486
2487 # Find the Pool_id
2488 else:
2489 pool_id = self._find_the_external_network_for_floating_ip()
2490
2491 param = {
2492 "floatingip": {
2493 "floating_network_id": pool_id,
2494 "tenant_id": server.tenant_id,
2495 }
2496 }
2497
2498 self._neutron_create_float_ip(param, created_items)
2499
2500 def _find_floating_ip(
2501 self,
2502 server: object,
2503 floating_ips: list,
2504 floating_network: dict,
2505 ) -> Optional[str]:
2506 """Find the available free floating IPs if there are.
2507
2508 Args:
2509 server (object): Server object
2510 floating_ips (list): List of floating IPs
2511 floating_network (dict): Details of floating network such as ID
2512
2513 Returns:
2514 free_floating_ip (str): Free floating ip address
2515
2516 """
2517 for fip in floating_ips:
2518 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2519 continue
2520
2521 if isinstance(floating_network["floating_ip"], str):
2522 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2523 continue
2524
2525 return fip["id"]
2526
2527 def _assign_floating_ip(
2528 self, free_floating_ip: str, floating_network: dict
2529 ) -> Dict:
2530 """Assign the free floating ip address to port.
2531
2532 Args:
2533 free_floating_ip (str): Floating IP to be assigned
2534 floating_network (dict): ID of floating network
2535
2536 Returns:
            fip (dict): Floating ip details
2538
2539 """
2540 # The vim_id key contains the neutron.port_id
2541 self.neutron.update_floatingip(
2542 free_floating_ip,
2543 {"floatingip": {"port_id": floating_network["vim_id"]}},
2544 )
        # Wait 5 seconds so the caller can detect whether the IP was re-assigned to another VM (race condition)
2546 time.sleep(5)
2547
2548 return self.neutron.show_floatingip(free_floating_ip)
2549
2550 def _get_free_floating_ip(
2551 self, server: object, floating_network: dict
2552 ) -> Optional[str]:
2553 """Get the free floating IP address.
2554
2555 Args:
2556 server (object): Server Object
2557 floating_network (dict): Floating network details
2558
2559 Returns:
2560 free_floating_ip (str): Free floating ip addr
2561
2562 """
2563
2564 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2565
2566 # Randomize
2567 random.shuffle(floating_ips)
2568
2569 return self._find_floating_ip(server, floating_ips, floating_network)
2570
2571 def _prepare_external_network_for_vminstance(
2572 self,
2573 external_network: list,
2574 server: object,
2575 created_items: dict,
2576 vm_start_time: float,
2577 ) -> None:
2578 """Assign floating IP address for VM instance.
2579
2580 Args:
2581 external_network (list): ID of External network
2582 server (object): Server Object
            created_items (dict): All created items belonging to the new VM instance
2584 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2585
2586 Raises:
2587 VimConnException
2588
2589 """
2590 for floating_network in external_network:
2591 try:
2592 assigned = False
2593 floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two ROs trying to assign the same
                # floating IP, so retry several times
2596 while not assigned:
2597 free_floating_ip = self._get_free_floating_ip(
2598 server, floating_network
2599 )
2600
                    if not free_floating_ip:
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )
                        # Loop again so the newly created floating IP is picked up
                        continue
2605
2606 try:
2607 # For race condition ensure not already assigned
2608 fip = self.neutron.show_floatingip(free_floating_ip)
2609
2610 if fip["floatingip"].get("port_id"):
2611 continue
2612
2613 # Assign floating ip
2614 fip = self._assign_floating_ip(
2615 free_floating_ip, floating_network
2616 )
2617
2618 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2619 self.logger.warning(
2620 "floating_ip {} re-assigned to other port".format(
2621 free_floating_ip
2622 )
2623 )
2624 continue
2625
2626 self.logger.debug(
2627 "Assigned floating_ip {} to VM {}".format(
2628 free_floating_ip, server.id
2629 )
2630 )
2631
2632 assigned = True
2633
2634 except Exception as e:
                        # OpenStack needs some time after VM creation to assign an IP, so retry if it fails
2636 vm_status = self.nova.servers.get(server.id).status
2637
2638 if vm_status not in ("ACTIVE", "ERROR"):
2639 if time.time() - vm_start_time < server_timeout:
2640 time.sleep(5)
2641 continue
2642 elif floating_ip_retries > 0:
2643 floating_ip_retries -= 1
2644 continue
2645
2646 raise vimconn.VimConnException(
2647 "Cannot create floating_ip: {} {}".format(
2648 type(e).__name__, e
2649 ),
2650 http_code=vimconn.HTTP_Conflict,
2651 )
2652
2653 except Exception as e:
2654 if not floating_network["exit_on_floating_ip_error"]:
2655 self.logger.error("Cannot create floating_ip. %s", str(e))
2656 continue
2657
2658 raise
2659
2660 def _update_port_security_for_vminstance(
2661 self,
2662 no_secured_ports: list,
2663 server: object,
2664 ) -> None:
2665 """Updates the port security according to no_secured_ports list.
2666
2667 Args:
            no_secured_ports (list): List of ports whose security will be disabled
2669 server (object): Server Object
2670
2671 Raises:
2672 VimConnException
2673
2674 """
2675 # Wait until the VM is active and then disable the port-security
2676 if no_secured_ports:
2677 self.__wait_for_vm(server.id, "ACTIVE")
2678
2679 for port in no_secured_ports:
2680 port_update = {
2681 "port": {"port_security_enabled": False, "security_groups": None}
2682 }
2683
2684 if port[1] == "allow-address-pairs":
2685 port_update = {
2686 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2687 }
2688
2689 try:
2690 self.neutron.update_port(port[0], port_update)
2691
                except Exception as e:
                    raise vimconn.VimConnException(
                        "It was not possible to disable port security for port {}: {}".format(
                            port[0], e
                        )
                    )
2698
2699 def new_vminstance(
2700 self,
2701 name: str,
2702 description: str,
2703 start: bool,
2704 image_id: str,
2705 flavor_id: str,
2706 affinity_group_list: list,
2707 net_list: list,
2708 cloud_config=None,
2709 disk_list=None,
2710 availability_zone_index=None,
2711 availability_zone_list=None,
2712 ) -> tuple:
2713 """Adds a VM instance to VIM.
2714
2715 Args:
2716 name (str): name of VM
2717 description (str): description
2718 start (bool): indicates if VM must start or boot in pause mode. Ignored
2719 image_id (str) image uuid
2720 flavor_id (str) flavor uuid
2721 affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
2722 net_list (list): list of interfaces, each one is a dictionary with:
2723 name: name of network
2724 net_id: network uuid to connect
                vpci: virtual pci address to assign, ignored because openstack lacks it #TODO
2726 model: interface model, ignored #TODO
2727 mac_address: used for SR-IOV ifaces #TODO for other types
2728 use: 'data', 'bridge', 'mgmt'
2729 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2730 vim_id: filled/added by this function
2731 floating_ip: True/False (or it can be None)
2732 port_security: True/False
2733 cloud_config (dict): (optional) dictionary with:
2734 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2735 users: (optional) list of users to be inserted, each item is a dict with:
2736 name: (mandatory) user name,
2737 key-pairs: (optional) list of strings with the public key to be inserted to the user
2738 user-data: (optional) string is a text script to be passed directly to cloud-init
2739 config-files: (optional). List of files to be transferred. Each item is a dict with:
2740 dest: (mandatory) string with the destination absolute path
2741 encoding: (optional, by default text). Can be one of:
2742 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2743 content : (mandatory) string with the content of the file
2744 permissions: (optional) string with file permissions, typically octal notation '0644'
2745 owner: (optional) file owner, string with the format 'owner:group'
2746 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2747 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2748 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2749 size: (mandatory) string with the size of the disk in GB
2750 vim_id: (optional) should use this existing volume id
            availability_zone_index: Index of availability_zone_list to use for this VM. None if no AV is required
2752 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2753 availability_zone_index is None
2754 #TODO ip, security groups
2755
2756 Returns:
2757 A tuple with the instance identifier and created_items or raises an exception on error
2758 created_items can be None or a dictionary where this method can include key-values that will be passed to
2759 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2760 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2761 as not present.
2762
2763 """
2764 self.logger.debug(
2765 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2766 image_id,
2767 flavor_id,
2768 str(net_list),
2769 )
2770
2771 try:
2772 server = None
2773 created_items = {}
2774 net_list_vim = []
2775 # list of external networks to be connected to instance, later on used to create floating_ip
2776 external_network = []
2777 # List of ports with port-security disabled
2778 no_secured_ports = []
2779 block_device_mapping = {}
2780 existing_vim_volumes = []
2781 server_group_id = None
            scheduler_hints = {}
2783
2784 # Check the Openstack Connection
2785 self._reload_connection()
2786
2787 # Prepare network list
2788 self._prepare_network_for_vminstance(
2789 name=name,
2790 net_list=net_list,
2791 created_items=created_items,
2792 net_list_vim=net_list_vim,
2793 external_network=external_network,
2794 no_secured_ports=no_secured_ports,
2795 )
2796
2797 # Cloud config
2798 config_drive, userdata = self._create_user_data(cloud_config)
2799
2800 # Get availability Zone
2801 vm_av_zone = self._get_vm_availability_zone(
2802 availability_zone_index, availability_zone_list
2803 )
2804
2805 if disk_list:
2806 # Prepare disks
2807 self._prepare_disk_for_vminstance(
2808 name=name,
2809 existing_vim_volumes=existing_vim_volumes,
2810 created_items=created_items,
2811 vm_av_zone=vm_av_zone,
2812 block_device_mapping=block_device_mapping,
2813 disk_list=disk_list,
2814 )
2815
2816 if affinity_group_list:
2817 # Only first id on the list will be used. Openstack restriction
2818 server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduler_hints["group"] = server_group_id
2820
2821 self.logger.debug(
2822 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2823 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2824 "block_device_mapping={}, server_group={})".format(
2825 name,
2826 image_id,
2827 flavor_id,
2828 net_list_vim,
2829 self.config.get("security_groups"),
2830 vm_av_zone,
2831 self.config.get("keypair"),
2832 userdata,
2833 config_drive,
2834 block_device_mapping,
2835 server_group_id,
2836 )
2837 )
2838 # Create VM
2839 server = self.nova.servers.create(
2840 name=name,
2841 image=image_id,
2842 flavor=flavor_id,
2843 nics=net_list_vim,
2844 security_groups=self.config.get("security_groups"),
2845 # TODO remove security_groups in future versions. Already at neutron port
2846 availability_zone=vm_av_zone,
2847 key_name=self.config.get("keypair"),
2848 userdata=userdata,
2849 config_drive=config_drive,
2850 block_device_mapping=block_device_mapping,
                scheduler_hints=scheduler_hints,
2852 )
2853
2854 vm_start_time = time.time()
2855
2856 self._update_port_security_for_vminstance(no_secured_ports, server)
2857
2858 self._prepare_external_network_for_vminstance(
2859 external_network=external_network,
2860 server=server,
2861 created_items=created_items,
2862 vm_start_time=vm_start_time,
2863 )
2864
2865 return server.id, created_items
2866
2867 except Exception as e:
2868 server_id = None
2869 if server:
2870 server_id = server.id
2871
2872 try:
2873 created_items = self.remove_keep_tag_from_persistent_volumes(
2874 created_items
2875 )
2876
2877 self.delete_vminstance(server_id, created_items)
2878
2879 except Exception as e2:
2880 self.logger.error("new_vminstance rollback fail {}".format(e2))
2881
2882 self._format_exception(e)
2883
2884 @staticmethod
2885 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2886 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2887
2888 Args:
            created_items (dict): All created items belonging to the VM
2890
2891 Returns:
2892 updated_created_items (dict): Dict which does not include keep flag for volumes.
2893
2894 """
2895 return {
2896 key.replace(":keep", ""): value for (key, value) in created_items.items()
2897 }
2898
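    # Illustrative example (hypothetical ids): during new_vminstance rollback
    # the keep flag is dropped so the volume can be deleted with the failed VM:
    #   {"volume:vol-uuid:keep": True, "port:port-uuid": True}
    #   -> {"volume:vol-uuid": True, "port:port-uuid": True}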
2899 def get_vminstance(self, vm_id):
2900 """Returns the VM instance information from VIM"""
2901 return self._find_nova_server(vm_id)
2902
2903 def get_vminstance_console(self, vm_id, console_type="vnc"):
2904 """
2905 Get a console for the virtual machine
2906 Params:
2907 vm_id: uuid of the VM
2908 console_type, can be:
2909 "novnc" (by default), "xvpvnc" for VNC types,
2910 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2911 Returns dict with the console parameters:
2912 protocol: ssh, ftp, http, https, ...
2913 server: usually ip address
2914 port: the http, ssh, ... port
2915 suffix: extra text, e.g. the http path and query string
2916 """
2917 self.logger.debug("Getting VM CONSOLE from VIM")
2918
2919 try:
2920 self._reload_connection()
2921 server = self.nova.servers.find(id=vm_id)
2922
2923 if console_type is None or console_type == "novnc":
2924 console_dict = server.get_vnc_console("novnc")
2925 elif console_type == "xvpvnc":
2926 console_dict = server.get_vnc_console(console_type)
2927 elif console_type == "rdp-html5":
2928 console_dict = server.get_rdp_console(console_type)
2929 elif console_type == "spice-html5":
2930 console_dict = server.get_spice_console(console_type)
2931 else:
2932 raise vimconn.VimConnException(
2933 "console type '{}' not allowed".format(console_type),
2934 http_code=vimconn.HTTP_Bad_Request,
2935 )
2936
2937 console_dict1 = console_dict.get("console")
2938
2939 if console_dict1:
2940 console_url = console_dict1.get("url")
2941
2942 if console_url:
2943 # parse console_url
2944 protocol_index = console_url.find("//")
2945 suffix_index = (
2946 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2947 )
2948 port_index = (
2949 console_url[protocol_index + 2 : suffix_index].find(":")
2950 + protocol_index
2951 + 2
2952 )
2953
2954 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2955 return (
2956 -vimconn.HTTP_Internal_Server_Error,
2957 "Unexpected response from VIM",
2958 )
2959
                    console_dict = {
                        "protocol": console_url[0:protocol_index],
                        "server": console_url[protocol_index + 2 : port_index],
                        # port_index points at the ":", so skip it when slicing
                        "port": console_url[port_index + 1 : suffix_index],
                        "suffix": console_url[suffix_index + 1 :],
                    }
2967
2968 return console_dict
2969 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2970 except (
2971 nvExceptions.NotFound,
2972 ksExceptions.ClientException,
2973 nvExceptions.ClientException,
2974 nvExceptions.BadRequest,
2975 ConnectionError,
2976 ) as e:
2977 self._format_exception(e)
2978
2979 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2980 """Neutron delete ports by id.
2981 Args:
2982 k_id (str): Port id in the VIM
2983 """
2984 try:
2985 port_dict = self.neutron.list_ports()
            existing_ports = [port["id"] for port in port_dict.get("ports", [])]
2987
2988 if k_id in existing_ports:
2989 self.neutron.delete_port(k_id)
2990
2991 except Exception as e:
2992 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2993
2994 def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
2995 """Cinder delete volume by id.
2996 Args:
2997 shared_volume_vim_id (str): ID of shared volume in VIM
2998 """
2999 elapsed_time = 0
3000 try:
3001 while elapsed_time < server_timeout:
3002 vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
3003 if vol_status == "available":
3004 self.cinder.volumes.delete(shared_volume_vim_id)
3005 return True
3006
3007 time.sleep(5)
3008 elapsed_time += 5
3009
3010 if elapsed_time >= server_timeout:
3011 raise vimconn.VimConnException(
3012 "Timeout waiting for volume "
3013 + shared_volume_vim_id
3014 + " to be available",
3015 http_code=vimconn.HTTP_Request_Timeout,
3016 )
3017
3018 except Exception as e:
3019 self.logger.error(
3020 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3021 )
3022 self._format_exception(e)
3023
3024 def _delete_volumes_by_id_wth_cinder(
3025 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
3026 ) -> bool:
        """Cinder delete volume by id.
        Args:
            k (str): Full item name in created_items
            k_id (str): ID of the volume in VIM
            volumes_to_hold (list): Volumes not to delete
            created_items (dict): All created items belonging to the VM
        Returns:
            True if the volume is not yet available, so deletion must be retried
        """
3034 try:
3035 if k_id in volumes_to_hold:
3036 return
3037
3038 if self.cinder.volumes.get(k_id).status != "available":
3039 return True
3040
3041 else:
3042 self.cinder.volumes.delete(k_id)
3043 created_items[k] = None
3044
3045 except Exception as e:
3046 self.logger.error(
3047 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3048 )
3049
3050 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
3051 """Neutron delete floating ip by id.
3052 Args:
3053 k (str): Full item name in created_items
3054 k_id (str): ID of floating ip in VIM
            created_items (dict): All created items belonging to the VM
3056 """
3057 try:
3058 self.neutron.delete_floatingip(k_id)
3059 created_items[k] = None
3060
3061 except Exception as e:
3062 self.logger.error(
3063 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3064 )
3065
3066 @staticmethod
3067 def _get_item_name_id(k: str) -> Tuple[str, str]:
3068 k_item, _, k_id = k.partition(":")
3069 return k_item, k_id
3070
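    # Illustrative behaviour of _get_item_name_id (hypothetical ids):
    #   "port:1234-abcd"      -> ("port", "1234-abcd")
    #   "volume:5678-ef:keep" -> ("volume", "5678-ef:keep")
    # str.partition() splits only on the first ":", so a ":keep" suffix stays
    # attached to the id until it is stripped elsewhere.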
3071 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
3072 """Delete VM ports attached to the networks before deleting virtual machine.
3073 Args:
            created_items (dict): All created items belonging to the VM
3075 """
3076
3077 for k, v in created_items.items():
3078 if not v: # skip already deleted
3079 continue
3080
3081 try:
3082 k_item, k_id = self._get_item_name_id(k)
3083 if k_item == "port":
3084 self._delete_ports_by_id_wth_neutron(k_id)
3085
3086 except Exception as e:
3087 self.logger.error(
3088 "Error deleting port: {}: {}".format(type(e).__name__, e)
3089 )
3090
3091 def _delete_created_items(
3092 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
3093 ) -> bool:
3094 """Delete Volumes and floating ip if they exist in created_items."""
3095 for k, v in created_items.items():
3096 if not v: # skip already deleted
3097 continue
3098
3099 try:
3100 k_item, k_id = self._get_item_name_id(k)
3101 if k_item == "volume":
3102 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
3103 k, k_id, volumes_to_hold, created_items
3104 )
3105
3106 if unavailable_vol:
3107 keep_waiting = True
3108
3109 elif k_item == "floating_ip":
3110 self._delete_floating_ip_by_id(k, k_id, created_items)
3111
3112 except Exception as e:
3113 self.logger.error("Error deleting {}: {}".format(k, e))
3114
3115 return keep_waiting
3116
3117 @staticmethod
3118 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
        """Remove the volumes that have the keep flag from created_items.

        Args:
            created_items (dict): All created items belonging to the VM

        Returns:
            created_items (dict): created_items without the volumes flagged to be kept
        """
3127 return {
3128 key: value
3129 for (key, value) in created_items.items()
3130 if len(key.split(":")) == 2
3131 }
3132
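    # Illustrative example (hypothetical ids): only keys with exactly two
    # ":"-separated parts survive, so volumes flagged ":keep" are excluded
    # from deletion:
    #   {"volume:vol-1": True, "volume:vol-2:keep": True, "port:p-1": True}
    #   -> {"volume:vol-1": True, "port:p-1": True}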
3133 def delete_vminstance(
3134 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3135 ) -> None:
3136 """Removes a VM instance from VIM. Returns the old identifier.
3137 Args:
3138 vm_id (str): Identifier of VM instance
            created_items (dict): All created items belonging to the VM
3140 volumes_to_hold (list): Volumes_to_hold
3141 """
3142 if created_items is None:
3143 created_items = {}
3144 if volumes_to_hold is None:
3145 volumes_to_hold = []
3146
3147 try:
3148 created_items = self._extract_items_wth_keep_flag_from_created_items(
3149 created_items
3150 )
3151
3152 self._reload_connection()
3153
3154 # Delete VM ports attached to the networks before the virtual machine
3155 if created_items:
3156 self._delete_vm_ports_attached_to_network(created_items)
3157
3158 if vm_id:
3159 self.nova.servers.delete(vm_id)
3160
            # Although detached, volumes should be in "available" status before deleting.
            # We ensure that in this loop
3163 keep_waiting = True
3164 elapsed_time = 0
3165
3166 while keep_waiting and elapsed_time < volume_timeout:
3167 keep_waiting = False
3168
3169 # Delete volumes and floating IP.
3170 keep_waiting = self._delete_created_items(
3171 created_items, volumes_to_hold, keep_waiting
3172 )
3173
3174 if keep_waiting:
3175 time.sleep(1)
3176 elapsed_time += 1
3177
3178 except (
3179 nvExceptions.NotFound,
3180 ksExceptions.ClientException,
3181 nvExceptions.ClientException,
3182 ConnectionError,
3183 ) as e:
3184 self._format_exception(e)
3185
3186 def refresh_vms_status(self, vm_list):
3187 """Get the status of the virtual machines and their interfaces/ports
3188 Params: the list of VM identifiers
3189 Returns a dictionary with:
3190 vm_id: #VIM id of this Virtual Machine
3191 status: #Mandatory. Text with one of:
3192 # DELETED (not found at vim)
3193 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3194 # OTHER (Vim reported other status not understood)
3195 # ERROR (VIM indicates an ERROR status)
3196 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3197 # CREATING (on building process), ERROR
            # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
3199 #
3200 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3201 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3202 interfaces:
3203 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3204 mac_address: #Text format XX:XX:XX:XX:XX:XX
3205 vim_net_id: #network id where this interface is connected
3206 vim_interface_id: #interface/port VIM id
3207 ip_address: #null, or text with IPv4, IPv6 address
3208 compute_node: #identification of compute node where PF,VF interface is allocated
3209 pci: #PCI address of the NIC that hosts the PF,VF
3210 vlan: #physical VLAN used for VF
3211 """
3212 vm_dict = {}
3213 self.logger.debug(
3214 "refresh_vms status: Getting tenant VM instance information from VIM"
3215 )
3216
3217 for vm_id in vm_list:
3218 vm = {}
3219
3220 try:
3221 vm_vim = self.get_vminstance(vm_id)
3222
3223 if vm_vim["status"] in vmStatus2manoFormat:
3224 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3225 else:
3226 vm["status"] = "OTHER"
3227 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3228
3229 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3230 vm_vim.pop("user_data", None)
3231 vm["vim_info"] = self.serialize(vm_vim)
3232
3233 vm["interfaces"] = []
3234 if vm_vim.get("fault"):
3235 vm["error_msg"] = str(vm_vim["fault"])
3236
3237 # get interfaces
3238 try:
3239 self._reload_connection()
3240 port_dict = self.neutron.list_ports(device_id=vm_id)
3241
3242 for port in port_dict["ports"]:
3243 interface = {}
3244 interface["vim_info"] = self.serialize(port)
3245 interface["mac_address"] = port.get("mac_address")
3246 interface["vim_net_id"] = port["network_id"]
3247 interface["vim_interface_id"] = port["id"]
3248 # check if OS-EXT-SRV-ATTR:host is there,
3249 # in case of non-admin credentials, it will be missing
3250
3251 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3252 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3253
3254 interface["pci"] = None
3255
3256 # check if binding:profile is there,
3257 # in case of non-admin credentials, it will be missing
3258 if port.get("binding:profile"):
3259 if port["binding:profile"].get("pci_slot"):
3260 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3261 # the slot to 0x00
                            # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
3263 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3264 pci = port["binding:profile"]["pci_slot"]
3265 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3266 interface["pci"] = pci
3267
3268 interface["vlan"] = None
3269
3270 if port.get("binding:vif_details"):
3271 interface["vlan"] = port["binding:vif_details"].get("vlan")
3272
                        # Get the vlan from the network if not present in the port, for old
                        # OpenStack versions and cases where the vlan is needed at PT
3275 if not interface["vlan"]:
3276 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3277 network = self.neutron.show_network(port["network_id"])
3278
3279 if (
3280 network["network"].get("provider:network_type")
3281 == "vlan"
3282 ):
3283 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3284 interface["vlan"] = network["network"].get(
3285 "provider:segmentation_id"
3286 )
3287
3288 ips = []
3289 # look for floating ip address
3290 try:
3291 floating_ip_dict = self.neutron.list_floatingips(
3292 port_id=port["id"]
3293 )
3294
3295 if floating_ip_dict.get("floatingips"):
3296 ips.append(
3297 floating_ip_dict["floatingips"][0].get(
3298 "floating_ip_address"
3299 )
3300 )
3301 except Exception:
3302 pass
3303
3304 for subnet in port["fixed_ips"]:
3305 ips.append(subnet["ip_address"])
3306
3307 interface["ip_address"] = ";".join(ips)
3308 vm["interfaces"].append(interface)
3309 except Exception as e:
3310 self.logger.error(
3311 "Error getting vm interface information {}: {}".format(
3312 type(e).__name__, e
3313 ),
3314 exc_info=True,
3315 )
3316 except vimconn.VimConnNotFoundException as e:
3317 self.logger.error("Exception getting vm status: %s", str(e))
3318 vm["status"] = "DELETED"
3319 vm["error_msg"] = str(e)
3320 except vimconn.VimConnException as e:
3321 self.logger.error("Exception getting vm status: %s", str(e))
3322 vm["status"] = "VIM_ERROR"
3323 vm["error_msg"] = str(e)
3324
3325 vm_dict[vm_id] = vm
3326
3327 return vm_dict
3328
3329 def action_vminstance(self, vm_id, action_dict, created_items={}):
        """Send an action to a VM instance in the VIM
3331 Returns None or the console dict if the action was successfully sent to the VIM
3332 """
3333 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3334
3335 try:
3336 self._reload_connection()
3337 server = self.nova.servers.find(id=vm_id)
3338
3339 if "start" in action_dict:
3340 if action_dict["start"] == "rebuild":
3341 server.rebuild()
3342 else:
3343 if server.status == "PAUSED":
3344 server.unpause()
3345 elif server.status == "SUSPENDED":
3346 server.resume()
3347 elif server.status == "SHUTOFF":
3348 server.start()
3349 else:
3350 self.logger.debug(
                            "ERROR: Instance is not in SHUTOFF/PAUSED/SUSPENDED state"
3352 )
3353 raise vimconn.VimConnException(
3354 "Cannot 'start' instance while it is in active state",
3355 http_code=vimconn.HTTP_Bad_Request,
3356 )
3357
3358 elif "pause" in action_dict:
3359 server.pause()
3360 elif "resume" in action_dict:
3361 server.resume()
3362 elif "shutoff" in action_dict or "shutdown" in action_dict:
3363 self.logger.debug("server status %s", server.status)
3364 if server.status == "ACTIVE":
3365 server.stop()
3366 else:
3367 self.logger.debug("ERROR: VM is not in Active state")
3368 raise vimconn.VimConnException(
3369 "VM is not in active state, stop operation is not allowed",
3370 http_code=vimconn.HTTP_Bad_Request,
3371 )
3372 elif "forceOff" in action_dict:
3373 server.stop() # TODO
3374 elif "terminate" in action_dict:
3375 server.delete()
3376 elif "createImage" in action_dict:
3377 server.create_image()
3378 # "path":path_schema,
3379 # "description":description_schema,
3380 # "name":name_schema,
3381 # "metadata":metadata_schema,
3382 # "imageRef": id_schema,
3383 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3384 elif "rebuild" in action_dict:
3385 server.rebuild(server.image["id"])
3386 elif "reboot" in action_dict:
3387 server.reboot() # reboot_type="SOFT"
3388 elif "console" in action_dict:
3389 console_type = action_dict["console"]
3390
3391 if console_type is None or console_type == "novnc":
3392 console_dict = server.get_vnc_console("novnc")
3393 elif console_type == "xvpvnc":
3394 console_dict = server.get_vnc_console(console_type)
3395 elif console_type == "rdp-html5":
3396 console_dict = server.get_rdp_console(console_type)
3397 elif console_type == "spice-html5":
3398 console_dict = server.get_spice_console(console_type)
3399 else:
3400 raise vimconn.VimConnException(
3401 "console type '{}' not allowed".format(console_type),
3402 http_code=vimconn.HTTP_Bad_Request,
3403 )
3404
3405 try:
3406 console_url = console_dict["console"]["url"]
3407 # parse console_url
3408 protocol_index = console_url.find("//")
3409 suffix_index = (
3410 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3411 )
3412 port_index = (
3413 console_url[protocol_index + 2 : suffix_index].find(":")
3414 + protocol_index
3415 + 2
3416 )
3417
3418 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3419 raise vimconn.VimConnException(
3420 "Unexpected response from VIM " + str(console_dict)
3421 )
3422
3423 console_dict2 = {
3424 "protocol": console_url[0:protocol_index],
3425 "server": console_url[protocol_index + 2 : port_index],
3426 "port": int(console_url[port_index + 1 : suffix_index]),
3427 "suffix": console_url[suffix_index + 1 :],
3428 }
3429
3430 return console_dict2
3431 except Exception:
3432 raise vimconn.VimConnException(
3433 "Unexpected response from VIM " + str(console_dict)
3434 )
3435
3436 return None
3437 except (
3438 ksExceptions.ClientException,
3439 nvExceptions.ClientException,
3440 nvExceptions.NotFound,
3441 ConnectionError,
3442 ) as e:
3443 self._format_exception(e)
3444 # TODO insert exception vimconn.HTTP_Unauthorized
3445
3446 # ###### VIO Specific Changes #########
3447 def _generate_vlanID(self):
3448 """
3449 Method to get unused vlanID
3450 Args:
3451 None
3452 Returns:
3453 vlanID
3454 """
3455 # Get used VLAN IDs
3456 usedVlanIDs = []
3457 networks = self.get_network_list()
3458
3459 for net in networks:
3460 if net.get("provider:segmentation_id"):
3461 usedVlanIDs.append(net.get("provider:segmentation_id"))
3462
3463 used_vlanIDs = set(usedVlanIDs)
3464
3465 # find unused VLAN ID
3466 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3467 try:
3468 start_vlanid, end_vlanid = map(
3469 int, vlanID_range.replace(" ", "").split("-")
3470 )
3471
3472 for vlanID in range(start_vlanid, end_vlanid + 1):
3473 if vlanID not in used_vlanIDs:
3474 return vlanID
3475 except Exception as exp:
3476 raise vimconn.VimConnException(
3477 "Exception {} occurred while generating VLAN ID.".format(exp)
3478 )
3479 else:
3480 raise vimconn.VimConnConflictException(
3481 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3482 self.config.get("dataplane_net_vlan_range")
3483 )
3484 )
3485
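    # Illustrative sketch (hypothetical config): with
    # config["dataplane_net_vlan_range"] = ["3000-3010"] and VLANs 3000 and
    # 3001 already used by existing networks, _generate_vlanID returns 3002,
    # the first unused ID in the range; if every ID in every range is in use,
    # a VimConnConflictException is raised.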
3486 def _generate_multisegment_vlanID(self):
3487 """
3488 Method to get unused vlanID
3489 Args:
3490 None
3491 Returns:
3492 vlanID
3493 """
3494 # Get used VLAN IDs
3495 usedVlanIDs = []
3496 networks = self.get_network_list()
3497 for net in networks:
3498 if net.get("provider:network_type") == "vlan" and net.get(
3499 "provider:segmentation_id"
3500 ):
3501 usedVlanIDs.append(net.get("provider:segmentation_id"))
3502 elif net.get("segments"):
3503 for segment in net.get("segments"):
3504 if segment.get("provider:network_type") == "vlan" and segment.get(
3505 "provider:segmentation_id"
3506 ):
3507 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3508
3509 used_vlanIDs = set(usedVlanIDs)
3510
3511 # find unused VLAN ID
3512 for vlanID_range in self.config.get("multisegment_vlan_range"):
3513 try:
3514 start_vlanid, end_vlanid = map(
3515 int, vlanID_range.replace(" ", "").split("-")
3516 )
3517
3518 for vlanID in range(start_vlanid, end_vlanid + 1):
3519 if vlanID not in used_vlanIDs:
3520 return vlanID
3521 except Exception as exp:
3522 raise vimconn.VimConnException(
3523 "Exception {} occurred while generating VLAN ID.".format(exp)
3524 )
3525 else:
3526 raise vimconn.VimConnConflictException(
3527 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3528 self.config.get("multisegment_vlan_range")
3529 )
3530 )
3531
3532 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3533 """
        Method to validate user-given vlanID ranges
        Args:
            input_vlan_range: list of "start-end" VLAN range strings to validate
            text_vlan_range: name of the range parameter, used in error messages
        Returns: None
3537 """
3538 for vlanID_range in input_vlan_range:
3539 vlan_range = vlanID_range.replace(" ", "")
3540 # validate format
            vlanID_pattern = r"(\d+)-(\d+)$"  # both start and end IDs are required
3542 match_obj = re.match(vlanID_pattern, vlan_range)
3543 if not match_obj:
3544 raise vimconn.VimConnConflictException(
                    "Invalid VLAN range for {}: {}. You must provide "
3546 "'{}' in format [start_ID - end_ID].".format(
3547 text_vlan_range, vlanID_range, text_vlan_range
3548 )
3549 )
3550
3551 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3552 if start_vlanid <= 0:
3553 raise vimconn.VimConnConflictException(
                    "Invalid VLAN range for {}: {}. Start ID cannot be zero or negative. For VLAN "
3555 "networks valid IDs are 1 to 4094 ".format(
3556 text_vlan_range, vlanID_range
3557 )
3558 )
3559
3560 if end_vlanid > 4094:
3561 raise vimconn.VimConnConflictException(
                    "Invalid VLAN range for {}: {}. End VLAN ID cannot be "
3563 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3564 text_vlan_range, vlanID_range
3565 )
3566 )
3567
3568 if start_vlanid > end_vlanid:
3569 raise vimconn.VimConnConflictException(
3570 "Invalid VLAN range for {}: {}. You must provide '{}'"
3571 " in format start_ID - end_ID and start_ID < end_ID ".format(
3572 text_vlan_range, vlanID_range, text_vlan_range
3573 )
3574 )
3575
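# Illustrative examples (not part of the driver) of what the validation above
# accepts and rejects, assuming a hypothetical connector instance "conn":
#
#   >>> conn._validate_vlan_ranges(["100-200"], "dataplane_net_vlan_range")  # OK
#   >>> conn._validate_vlan_ranges(["0-200"], "...")     # raises: start must be > 0
#   >>> conn._validate_vlan_ranges(["100-5000"], "...")  # raises: end > 4094
#   >>> conn._validate_vlan_ranges(["300-200"], "...")   # raises: start > end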
3576 def get_hosts_info(self):
3577 """Get the information of deployed hosts
3578 Returns the hosts content"""
3579 if self.debug:
3580 print("osconnector: Getting Host info from VIM")
3581
3582 try:
3583 h_list = []
3584 self._reload_connection()
3585 hypervisors = self.nova.hypervisors.list()
3586
3587 for hype in hypervisors:
3588 h_list.append(hype.to_dict())
3589
3590 return 1, {"hosts": h_list}
3591 except nvExceptions.NotFound as e:
3592 error_value = -vimconn.HTTP_Not_Found
3593 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3594 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3595 error_value = -vimconn.HTTP_Bad_Request
3596 error_text = (
3597 type(e).__name__
3598 + ": "
3599 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3600 )
3601
3602 # TODO insert exception vimconn.HTTP_Unauthorized
3603 # reaching this point means an exception was raised above
3604 self.logger.debug("get_hosts_info " + error_text)
3605
3606 return error_value, error_text
3607
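# Note the legacy return convention used by get_hosts_info and get_hosts
# below: a positive first element signals success, a negative HTTP-style code
# signals failure. A caller sketch (illustrative only):
#
#   >>> r, content = conn.get_hosts_info()
#   >>> if r < 0:
#   ...     print("error:", content)  # content is the error text
#   ... else:
#   ...     print([h["hypervisor_hostname"] for h in content["hosts"]])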
3608 def get_hosts(self, vim_tenant):
3609 """Get the hosts and deployed instances
3610 Returns the hosts content"""
3611 r, hype_dict = self.get_hosts_info()
3612
3613 if r < 0:
3614 return r, hype_dict
3615
3616 hypervisors = hype_dict["hosts"]
3617
3618 try:
3619 servers = self.nova.servers.list()
3620 for hype in hypervisors:
3621 for server in servers:
3622 if (
3623 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3624 == hype["hypervisor_hostname"]
3625 ):
3626 if "vm" in hype:
3627 hype["vm"].append(server.id)
3628 else:
3629 hype["vm"] = [server.id]
3630
3631 return 1, hype_dict
3632 except nvExceptions.NotFound as e:
3633 error_value = -vimconn.HTTP_Not_Found
3634 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3635 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3636 error_value = -vimconn.HTTP_Bad_Request
3637 error_text = (
3638 type(e).__name__
3639 + ": "
3640 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3641 )
3642
3643 # TODO insert exception vimconn.HTTP_Unauthorized
3644 # reaching this point means an exception was raised above
3645 self.logger.debug("get_hosts " + error_text)
3646
3647 return error_value, error_text
3648
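# On success, each host dict gains a "vm" key listing the IDs of the servers
# scheduled on that hypervisor, e.g. (illustrative shape, IDs hypothetical):
#
#   (1, {"hosts": [{"hypervisor_hostname": "compute-0",
#                   "vm": ["6f1b...", "a2c4..."], ...}]})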
3649 def new_affinity_group(self, affinity_group_data):
3650 """Adds a server group to VIM
3651 affinity_group_data contains a dictionary with information, keys:
3652 name: name in VIM for the server group
3653 type: affinity or anti-affinity
3654 scope: Only nfvi-node allowed
3655 Returns the server group identifier"""
3656 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3657
3658 try:
3659 name = affinity_group_data["name"]
3660 policy = affinity_group_data["type"]
3661
3662 self._reload_connection()
3663 new_server_group = self.nova.server_groups.create(name, policy)
3664
3665 return new_server_group.id
3666 except (
3667 ksExceptions.ClientException,
3668 nvExceptions.ClientException,
3669 ConnectionError,
3670 KeyError,
3671 ) as e:
3672 self._format_exception(e)
3673
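# Usage sketch (illustrative only): "type" maps directly to the Nova server
# group policy, so "affinity" or "anti-affinity" is expected here.
#
#   >>> sg_id = conn.new_affinity_group(
#   ...     {"name": "vnf-placement", "type": "anti-affinity", "scope": "nfvi-node"}
#   ... )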
3674 def get_affinity_group(self, affinity_group_id):
3675 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3676 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3677 try:
3678 self._reload_connection()
3679 server_group = self.nova.server_groups.find(id=affinity_group_id)
3680
3681 return server_group.to_dict()
3682 except (
3683 nvExceptions.NotFound,
3684 nvExceptions.ClientException,
3685 ksExceptions.ClientException,
3686 ConnectionError,
3687 ) as e:
3688 self._format_exception(e)
3689
3690 def delete_affinity_group(self, affinity_group_id):
3691 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3692 self.logger.debug("Deleting server group '%s'", affinity_group_id)
3693 try:
3694 self._reload_connection()
3695 self.nova.server_groups.delete(affinity_group_id)
3696
3697 return affinity_group_id
3698 except (
3699 nvExceptions.NotFound,
3700 ksExceptions.ClientException,
3701 nvExceptions.ClientException,
3702 ConnectionError,
3703 ) as e:
3704 self._format_exception(e)
3705
3706 def get_vdu_state(self, vm_id):
3707 """
3708 Get the state of a VDU. Returns [status, flavor_id, host, availability_zone]
3709 param:
3710 vm_id: ID of an instance
3711 """
3712 self.logger.debug("Getting the status of VM")
3713 self.logger.debug("VIM VM ID %s", vm_id)
3714 self._reload_connection()
3715 server_dict = self._find_nova_server(vm_id)
3716 vdu_data = [
3717 server_dict["status"],
3718 server_dict["flavor"]["id"],
3719 server_dict["OS-EXT-SRV-ATTR:host"],
3720 server_dict["OS-EXT-AZ:availability_zone"],
3721 ]
3722 self.logger.debug("vdu_data %s", vdu_data)
3723 return vdu_data
3724
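# The returned list is positional; an illustrative unpacking (variable names
# are this comment's, not the driver's):
#
#   >>> status, flavor_id, host, az = conn.get_vdu_state(vm_id)
#   >>> status
#   'ACTIVE'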
3725 def check_compute_availability(self, host, server_flavor_details):
3726 self._reload_connection()
3727 hypervisor_search = self.nova.hypervisors.search(
3728 hypervisor_match=host, servers=True
3729 )
3730 for hypervisor in hypervisor_search:
3731 hypervisor_id = hypervisor.to_dict()["id"]
3732 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
3733 hypervisor_dict = hypervisor_details.to_dict()
3734 # the dict from to_dict() can be used directly; no JSON round-trip needed
3735 hypervisor_json = hypervisor_dict
3736 resources_available = [
3737 hypervisor_json["free_ram_mb"],
3738 hypervisor_json["disk_available_least"],
3739 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
3740 ]
3741 compute_available = all(
3742 x > y for x, y in zip(resources_available, server_flavor_details)
3743 )
3744 if compute_available:
3745 return host
3746
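# The comparison above is element-wise: free RAM (MB), least available disk
# (GB) and free vCPUs must each strictly exceed the flavor's requirements,
# given as [ram, disk, vcpus]. Illustrative check with hypothetical numbers:
#
#   >>> resources_available = [8192, 100, 6]   # free on the host
#   >>> server_flavor_details = [4096, 40, 4]  # required by the flavor
#   >>> all(x > y for x, y in zip(resources_available, server_flavor_details))
#   True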
3747 def check_availability_zone(
3748 self, old_az, server_flavor_details, old_host, host=None
3749 ):
3750 self._reload_connection()
3751 az_check = {"zone_check": False, "compute_availability": None}
3752 aggregates_list = self.nova.aggregates.list()
3753 for aggregate in aggregates_list:
3754 aggregate_details = aggregate.to_dict()
3755 # use the dict directly instead of a JSON round-trip
3756 aggregate_json = aggregate_details
3757 if aggregate_json["availability_zone"] == old_az:
3758 hosts_list = aggregate_json["hosts"]
3759 if host is not None:
3760 if host in hosts_list:
3761 az_check["zone_check"] = True
3762 available_compute_id = self.check_compute_availability(
3763 host, server_flavor_details
3764 )
3765 if available_compute_id is not None:
3766 az_check["compute_availability"] = available_compute_id
3767 else:
3768 for check_host in hosts_list:
3769 if check_host != old_host:
3770 available_compute_id = self.check_compute_availability(
3771 check_host, server_flavor_details
3772 )
3773 if available_compute_id is not None:
3774 az_check["zone_check"] = True
3775 az_check["compute_availability"] = available_compute_id
3776 break
3777 else:
3778 az_check["zone_check"] = True
3779 return az_check
3780
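# Possible shapes of the returned dict (illustrative, host name hypothetical):
#
#   {"zone_check": False, "compute_availability": None}      # target AZ differs
#   {"zone_check": True,  "compute_availability": None}      # AZ ok, no capacity
#   {"zone_check": True,  "compute_availability": "host-3"}  # AZ ok, host found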
3781 def migrate_instance(self, vm_id, compute_host=None):
3782 """
3783 Migrate a vdu
3784 param:
3785 vm_id: ID of an instance
3786 compute_host: Host to migrate the vdu to
3787 """
3788 self._reload_connection()
3789 vm_state = False
3790 instance_state = self.get_vdu_state(vm_id)
3791 server_flavor_id = instance_state[1]
3792 server_hypervisor_name = instance_state[2]
3793 server_availability_zone = instance_state[3]
3794 try:
3795 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3796 server_flavor_details = [
3797 server_flavor["ram"],
3798 server_flavor["disk"],
3799 server_flavor["vcpus"],
3800 ]
3801 if compute_host == server_hypervisor_name:
3802 raise vimconn.VimConnException(
3803 "Unable to migrate instance '{}' to the same host '{}'".format(
3804 vm_id, compute_host
3805 ),
3806 http_code=vimconn.HTTP_Bad_Request,
3807 )
3808 az_status = self.check_availability_zone(
3809 server_availability_zone,
3810 server_flavor_details,
3811 server_hypervisor_name,
3812 compute_host,
3813 )
3814 availability_zone_check = az_status["zone_check"]
3815 available_compute_id = az_status.get("compute_availability")
3816
3817 if availability_zone_check is False:
3818 raise vimconn.VimConnException(
3819 "Unable to migrate instance '{}' to a different availability zone".format(
3820 vm_id
3821 ),
3822 http_code=vimconn.HTTP_Bad_Request,
3823 )
3824 if available_compute_id is not None:
3825 self.nova.servers.live_migrate(
3826 server=vm_id,
3827 host=available_compute_id,
3828 block_migration=True,
3829 disk_over_commit=False,
3830 )
3831 state = "MIGRATING"
3832 changed_compute_host = ""
3833 if state == "MIGRATING":
3834 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
3835 changed_compute_host = self.get_vdu_state(vm_id)[2]
3836 if vm_state and changed_compute_host == available_compute_id:
3837 self.logger.debug(
3838 "Instance '{}' migrated to the new compute host '{}'".format(
3839 vm_id, changed_compute_host
3840 )
3841 )
3842 return state, available_compute_id
3843 else:
3844 raise vimconn.VimConnException(
3845 "Migration Failed. Instance '{}' not moved to the new host {}".format(
3846 vm_id, available_compute_id
3847 ),
3848 http_code=vimconn.HTTP_Bad_Request,
3849 )
3850 else:
3851 raise vimconn.VimConnException(
3852 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
3853 available_compute_id
3854 ),
3855 http_code=vimconn.HTTP_Bad_Request,
3856 )
3857 except (
3858 nvExceptions.BadRequest,
3859 nvExceptions.ClientException,
3860 nvExceptions.NotFound,
3861 ) as e:
3862 self._format_exception(e)
3863
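# Usage sketch (illustrative only): with no compute_host given, the first host
# in the same availability zone with enough free capacity is chosen.
#
#   >>> state, target = conn.migrate_instance(vm_id)
#   >>> state, target
#   ('MIGRATING', 'compute-2')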
3864 def resize_instance(self, vm_id, new_flavor_id):
3865 """
3866 Resize the VM based on the given
3867 flavor details
3868 param:
3869 vm_id : ID of an instance
3870 new_flavor_id : ID of the flavor to resize to
3871 Returns the status of the resized instance
3872 """
3873 self._reload_connection()
3874 self.logger.debug("Resizing instance '%s' to a new flavor", vm_id)
3875 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
3876 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
3877 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
3878 try:
3879 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
3880 if old_flavor_disk > new_flavor_disk:
3881 raise nvExceptions.BadRequest(
3882 400,
3883 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
3884 )
3885 else:
3886 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
3887 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
3888 if vm_state:
3889 instance_resized_status = self.confirm_resize(vm_id)
3890 return instance_resized_status
3891 else:
3892 raise nvExceptions.BadRequest(
3893 409,
3894 message="Cannot 'resize' vm_state is in ERROR",
3895 )
3896
3897 else:
3898 self.logger.debug("ERROR: Instance is not in ACTIVE or SHUTOFF state")
3899 raise nvExceptions.BadRequest(
3900 409,
3901 message="Cannot 'resize' instance while it is in vm_state resized",
3902 )
3903 except (
3904 nvExceptions.BadRequest,
3905 nvExceptions.ClientException,
3906 nvExceptions.NotFound,
3907 ) as e:
3908 self._format_exception(e)
3909
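# Usage sketch (illustrative only): resize is a two-step Nova workflow,
# resize then confirm, driven by this method and confirm_resize() below.
#
#   >>> conn.resize_instance(vm_id, new_flavor_id)
#   'ACTIVE'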
3910 def confirm_resize(self, vm_id):
3911 """
3912 Confirm the resize of an instance
3913 param:
3914 vm_id: ID of an instance
3915 """
3916 self._reload_connection()
3917 self.nova.servers.confirm_resize(server=vm_id)
3918 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3919 self.__wait_for_vm(vm_id, "ACTIVE")
3920 instance_status = self.get_vdu_state(vm_id)[0]
3921 return instance_status
3922
3923 def get_monitoring_data(self):
3924 try:
3925 self.logger.debug("Getting servers and ports data from the OpenStack VIM.")
3926 self._reload_connection()
3927 all_servers = self.nova.servers.list(detailed=True)
3928 try:
3929 for server in all_servers:
3930 server.flavor["id"] = self.nova.flavors.find(
3931 name=server.flavor["original_name"]
3932 ).id
3933 except nvExceptions.NotFound as e:
3934 self.logger.warning(str(e))
3935 all_ports = self.neutron.list_ports()
3936 return all_servers, all_ports
3937 except (
3938 vimconn.VimConnException,
3939 vimconn.VimConnNotFoundException,
3940 vimconn.VimConnConnectionException,
3941 ) as e:
3942 raise vimconn.VimConnException(
3943 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
3944 )
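# Consumption sketch (illustrative only): servers come back as novaclient
# objects, ports as the neutron response dict keyed by "ports".
#
#   >>> servers, ports = conn.get_monitoring_data()
#   >>> [(s.id, s.status) for s in servers][:1]
#   [('6f1b...', 'ACTIVE')]
#   >>> len(ports["ports"])
#   42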