Add support of nova client microversion 2.60
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the OpenStack python clients (keystone, nova, neutron, cinder and glance).
23
24 For the VNF forwarding graph, the OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
59 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 __date__ = "$22-sep-2017 23:59:59$"
61
62 """contain the openstack virtual machine status to openmano status"""
63 vmStatus2manoFormat = {
64 "ACTIVE": "ACTIVE",
65 "PAUSED": "PAUSED",
66 "SUSPENDED": "SUSPENDED",
67 "SHUTOFF": "INACTIVE",
68 "BUILD": "BUILD",
69 "ERROR": "ERROR",
70 "DELETED": "DELETED",
71 }
72 netStatus2manoFormat = {
73 "ACTIVE": "ACTIVE",
74 "PAUSED": "PAUSED",
75 "INACTIVE": "INACTIVE",
76 "BUILD": "BUILD",
77 "ERROR": "ERROR",
78 "DELETED": "DELETED",
79 }
80
81 supportedClassificationTypes = ["legacy_flow_classifier"]
82
83 # global variables with the timeout (in seconds) used when creating and deleting volumes and servers
84 volume_timeout = 1800
85 server_timeout = 1800
86
87
88 class SafeDumper(yaml.SafeDumper):
89 def represent_data(self, data):
90 # Openstack APIs use custom subclasses of dict, and the YAML safe dumper
91 # is designed not to handle them (reference issue 142 of pyyaml)
92 if isinstance(data, dict) and data.__class__ != dict:
93 # A simple solution is to convert those items back to dicts
94 data = dict(data.items())
95
96 return super(SafeDumper, self).represent_data(data)
97
98
99 class vimconnector(vimconn.VimConnector):
100 def __init__(
101 self,
102 uuid,
103 name,
104 tenant_id,
105 tenant_name,
106 url,
107 url_admin=None,
108 user=None,
109 passwd=None,
110 log_level=None,
111 config={},
112 persistent_info={},
113 ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not used
117 """
118 api_version = config.get("APIversion")
119
120 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124 )
125
126 vim_type = config.get("vim_type")
127
128 if vim_type and vim_type not in ("vio", "VIO"):
129 raise vimconn.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132 )
133
134 if config.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self._validate_vlan_ranges(
137 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138 )
139
140 if config.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self._validate_vlan_ranges(
143 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144 )
145
146 vimconn.VimConnector.__init__(
147 self,
148 uuid,
149 name,
150 tenant_id,
151 tenant_name,
152 url,
153 url_admin,
154 user,
155 passwd,
156 log_level,
157 config,
158 )
159
160 if self.config.get("insecure") and self.config.get("ca_cert"):
161 raise vimconn.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
163 )
164
165 self.verify = True
166
167 if self.config.get("insecure"):
168 self.verify = False
169
170 if self.config.get("ca_cert"):
171 self.verify = self.config.get("ca_cert")
172
173 if not url:
174 raise TypeError("url param can not be NoneType")
175
176 self.persistent_info = persistent_info
177 self.availability_zone = persistent_info.get("availability_zone", None)
178 self.session = persistent_info.get("session", {"reload_client": True})
179 self.my_tenant_id = self.session.get("my_tenant_id")
180 self.nova = self.session.get("nova")
181 self.neutron = self.session.get("neutron")
182 self.cinder = self.session.get("cinder")
183 self.glance = self.session.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self.keystone = self.session.get("keystone")
186 self.api_version3 = self.session.get("api_version3")
187 self.vim_type = self.config.get("vim_type")
188
189 if self.vim_type:
190 self.vim_type = self.vim_type.upper()
191
192 if self.config.get("use_internal_endpoint"):
193 self.endpoint_type = "internalURL"
194 else:
195 self.endpoint_type = None
196
197 logging.getLogger("urllib3").setLevel(logging.WARNING)
198 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 logging.getLogger("novaclient").setLevel(logging.WARNING)
200 self.logger = logging.getLogger("ro.vim.openstack")
201
202 # allow security_groups to be a list or a single string
203 if isinstance(self.config.get("security_groups"), str):
204 self.config["security_groups"] = [self.config["security_groups"]]
205
206 self.security_groups_id = None
207
208 # ###### VIO Specific Changes #########
209 if self.vim_type == "VIO":
210 self.logger = logging.getLogger("ro.vim.vio")
211
212 if log_level:
213 self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individuals parameters.
217 Throw KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
227 Throw KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
260 def _reload_connection(self):
261 """Called before any operation, it check if credentials has changed
262 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
263 """
264 # TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-)
265 if self.session["reload_client"]:
266 if self.config.get("APIversion"):
267 self.api_version3 = (
268 self.config["APIversion"] == "v3.3"
269 or self.config["APIversion"] == "3"
270 )
271 else:  # get it from the auth_url ending: it ends with v3 or with v2.0
272 self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
273 "/v3/"
274 )
275
276 self.session["api_version3"] = self.api_version3
277
278 if self.api_version3:
279 if self.config.get("project_domain_id") or self.config.get(
280 "project_domain_name"
281 ):
282 project_domain_id_default = None
283 else:
284 project_domain_id_default = "default"
285
286 if self.config.get("user_domain_id") or self.config.get(
287 "user_domain_name"
288 ):
289 user_domain_id_default = None
290 else:
291 user_domain_id_default = "default"
292 auth = v3.Password(
293 auth_url=self.url,
294 username=self.user,
295 password=self.passwd,
296 project_name=self.tenant_name,
297 project_id=self.tenant_id,
298 project_domain_id=self.config.get(
299 "project_domain_id", project_domain_id_default
300 ),
301 user_domain_id=self.config.get(
302 "user_domain_id", user_domain_id_default
303 ),
304 project_domain_name=self.config.get("project_domain_name"),
305 user_domain_name=self.config.get("user_domain_name"),
306 )
307 else:
308 auth = v2.Password(
309 auth_url=self.url,
310 username=self.user,
311 password=self.passwd,
312 tenant_name=self.tenant_name,
313 tenant_id=self.tenant_id,
314 )
315
316 sess = session.Session(auth=auth, verify=self.verify)
317 # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318 # Titanium cloud and StarlingX
319 region_name = self.config.get("region_name")
320
321 if self.api_version3:
322 self.keystone = ksClient_v3.Client(
323 session=sess,
324 endpoint_type=self.endpoint_type,
325 region_name=region_name,
326 )
327 else:
328 self.keystone = ksClient_v2.Client(
329 session=sess, endpoint_type=self.endpoint_type
330 )
331
332 self.session["keystone"] = self.keystone
333 # In order to enable microversion functionality an explicit microversion must be specified in "config".
334 # This implementation approach is due to the warning message in
335 # https://developer.openstack.org/api-guide/compute/microversions.html
336 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337 # always require a specific microversion.
338 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
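# A hedged example of the expected datacenter config (shape assumed from the
# comment above; "microversion" is the key read just below):
#   config:
#     microversion: "2.32"
# When no microversion is configured, "2.60" is requested below, per this
# commit ("Add support of nova client microversion 2.60").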
339 version = self.config.get("microversion")
340
341 if not version:
342 version = "2.60"
343
344 # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345 # Titanium cloud and StarlingX
346 self.nova = self.session["nova"] = nClient.Client(
347 str(version),
348 session=sess,
349 endpoint_type=self.endpoint_type,
350 region_name=region_name,
351 )
352 self.neutron = self.session["neutron"] = neClient.Client(
353 "2.0",
354 session=sess,
355 endpoint_type=self.endpoint_type,
356 region_name=region_name,
357 )
358
359 if sess.get_all_version_data(service_type="volumev2"):
360 self.cinder = self.session["cinder"] = cClient.Client(
361 2,
362 session=sess,
363 endpoint_type=self.endpoint_type,
364 region_name=region_name,
365 )
366 else:
367 self.cinder = self.session["cinder"] = cClient.Client(
368 3,
369 session=sess,
370 endpoint_type=self.endpoint_type,
371 region_name=region_name,
372 )
373
374 try:
375 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
376 except Exception:
377 self.logger.error("Cannot get project_id from session", exc_info=True)
378
379 if self.endpoint_type == "internalURL":
380 glance_service_id = self.keystone.services.list(name="glance")[0].id
381 glance_endpoint = self.keystone.endpoints.list(
382 glance_service_id, interface="internal"
383 )[0].url
384 else:
385 glance_endpoint = None
386
387 self.glance = self.session["glance"] = glClient.Client(
388 2, session=sess, endpoint=glance_endpoint
389 )
390 # using version 1 of glance client in new_image()
391 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
392 # endpoint=glance_endpoint)
393 self.session["reload_client"] = False
394 self.persistent_info["session"] = self.session
395 # add availability zone info inside self.persistent_info
396 self._set_availablity_zones()
397 self.persistent_info["availability_zone"] = self.availability_zone
398 # force security_groups_id to be refreshed the next time it is needed
399 self.security_groups_id = None
400
401 def __net_os2mano(self, net_list_dict):
402 """Transform the net openstack format to mano format
403 net_list_dict can be a list of dict or a single dict"""
404 if type(net_list_dict) is dict:
405 net_list_ = (net_list_dict,)
406 elif type(net_list_dict) is list:
407 net_list_ = net_list_dict
408 else:
409 raise TypeError("param net_list_dict must be a list or a dictionary")
410 for net in net_list_:
411 if net.get("provider:network_type") == "vlan":
412 net["type"] = "data"
413 else:
414 net["type"] = "bridge"
415
416 def __classification_os2mano(self, class_list_dict):
417 """Transform the openstack format (Flow Classifier) to mano format
418 (Classification) class_list_dict can be a list of dict or a single dict
419 """
420 if isinstance(class_list_dict, dict):
421 class_list_ = [class_list_dict]
422 elif isinstance(class_list_dict, list):
423 class_list_ = class_list_dict
424 else:
425 raise TypeError("param class_list_dict must be a list or a dictionary")
426 for classification in class_list_:
427 id = classification.pop("id")
428 name = classification.pop("name")
429 description = classification.pop("description")
430 project_id = classification.pop("project_id")
431 tenant_id = classification.pop("tenant_id")
432 original_classification = copy.deepcopy(classification)
433 classification.clear()
434 classification["ctype"] = "legacy_flow_classifier"
435 classification["definition"] = original_classification
436 classification["id"] = id
437 classification["name"] = name
438 classification["description"] = description
439 classification["project_id"] = project_id
440 classification["tenant_id"] = tenant_id
441
442 def __sfi_os2mano(self, sfi_list_dict):
443 """Transform the openstack format (Port Pair) to mano format (SFI)
444 sfi_list_dict can be a list of dict or a single dict
445 """
446 if isinstance(sfi_list_dict, dict):
447 sfi_list_ = [sfi_list_dict]
448 elif isinstance(sfi_list_dict, list):
449 sfi_list_ = sfi_list_dict
450 else:
451 raise TypeError("param sfi_list_dict must be a list or a dictionary")
452
453 for sfi in sfi_list_:
454 sfi["ingress_ports"] = []
455 sfi["egress_ports"] = []
456
457 if sfi.get("ingress"):
458 sfi["ingress_ports"].append(sfi["ingress"])
459
460 if sfi.get("egress"):
461 sfi["egress_ports"].append(sfi["egress"])
462
463 del sfi["ingress"]
464 del sfi["egress"]
465 params = sfi.get("service_function_parameters")
466 sfc_encap = False
467
468 if params:
469 correlation = params.get("correlation")
470
471 if correlation:
472 sfc_encap = True
473
474 sfi["sfc_encap"] = sfc_encap
475 del sfi["service_function_parameters"]
476
477 def __sf_os2mano(self, sf_list_dict):
478 """Transform the openstack format (Port Pair Group) to mano format (SF)
479 sf_list_dict can be a list of dict or a single dict
480 """
481 if isinstance(sf_list_dict, dict):
482 sf_list_ = [sf_list_dict]
483 elif isinstance(sf_list_dict, list):
484 sf_list_ = sf_list_dict
485 else:
486 raise TypeError("param sf_list_dict must be a list or a dictionary")
487
488 for sf in sf_list_:
489 del sf["port_pair_group_parameters"]
490 sf["sfis"] = sf["port_pairs"]
491 del sf["port_pairs"]
492
493 def __sfp_os2mano(self, sfp_list_dict):
494 """Transform the openstack format (Port Chain) to mano format (SFP)
495 sfp_list_dict can be a list of dict or a single dict
496 """
497 if isinstance(sfp_list_dict, dict):
498 sfp_list_ = [sfp_list_dict]
499 elif isinstance(sfp_list_dict, list):
500 sfp_list_ = sfp_list_dict
501 else:
502 raise TypeError("param sfp_list_dict must be a list or a dictionary")
503
504 for sfp in sfp_list_:
505 params = sfp.pop("chain_parameters")
506 sfc_encap = False
507
508 if params:
509 correlation = params.get("correlation")
510
511 if correlation:
512 sfc_encap = True
513
514 sfp["sfc_encap"] = sfc_encap
515 sfp["spi"] = sfp.pop("chain_id")
516 sfp["classifications"] = sfp.pop("flow_classifiers")
517 sfp["service_functions"] = sfp.pop("port_pair_groups")
518
519 # placeholder for now; read TODO note below
520 def _validate_classification(self, type, definition):
521 # only legacy_flow_classifier Type is supported at this point
522 return True
523 # TODO(igordcard): this method should be an abstract method of an
524 # abstract Classification class to be implemented by the specific
525 # Types. Also, abstract vimconnector should call the validation
526 # method before the implemented VIM connectors are called.
527
528 def _format_exception(self, exception):
529 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
530 message_error = str(exception)
531 tip = ""
532
533 if isinstance(
534 exception,
535 (
536 neExceptions.NetworkNotFoundClient,
537 nvExceptions.NotFound,
538 ksExceptions.NotFound,
539 gl1Exceptions.HTTPNotFound,
540 ),
541 ):
542 raise vimconn.VimConnNotFoundException(
543 type(exception).__name__ + ": " + message_error
544 )
545 elif isinstance(
546 exception,
547 (
548 HTTPException,
549 gl1Exceptions.HTTPException,
550 gl1Exceptions.CommunicationError,
551 ConnectionError,
552 ksExceptions.ConnectionError,
553 neExceptions.ConnectionFailed,
554 ),
555 ):
556 if type(exception).__name__ == "SSLError":
557 tip = " (maybe option 'insecure' must be added to the VIM)"
558
559 raise vimconn.VimConnConnectionException(
560 "Invalid URL or credentials{}: {}".format(tip, message_error)
561 )
562 elif isinstance(
563 exception,
564 (
565 KeyError,
566 nvExceptions.BadRequest,
567 ksExceptions.BadRequest,
568 ),
569 ):
570 raise vimconn.VimConnException(
571 type(exception).__name__ + ": " + message_error
572 )
573 elif isinstance(
574 exception,
575 (
576 nvExceptions.ClientException,
577 ksExceptions.ClientException,
578 neExceptions.NeutronException,
579 ),
580 ):
581 raise vimconn.VimConnUnexpectedResponse(
582 type(exception).__name__ + ": " + message_error
583 )
584 elif isinstance(exception, nvExceptions.Conflict):
585 raise vimconn.VimConnConflictException(
586 type(exception).__name__ + ": " + message_error
587 )
588 elif isinstance(exception, vimconn.VimConnException):
589 raise exception
590 else:
591 self.logger.error("General Exception " + message_error, exc_info=True)
592
593 raise vimconn.VimConnConnectionException(
594 type(exception).__name__ + ": " + message_error
595 )
596
597 def _get_ids_from_name(self):
598 """
599 Obtain ids from the names of tenant and security_groups. Store them at self.security_groups_id
600 :return: None
601 """
602 # get tenant_id if only tenant_name is supplied
603 self._reload_connection()
604
605 if not self.my_tenant_id:
606 raise vimconn.VimConnConnectionException(
607 "Error getting tenant information from name={} id={}".format(
608 self.tenant_name, self.tenant_id
609 )
610 )
611
612 if self.config.get("security_groups") and not self.security_groups_id:
613 # convert from name to id
614 neutron_sg_list = self.neutron.list_security_groups(
615 tenant_id=self.my_tenant_id
616 )["security_groups"]
617
618 self.security_groups_id = []
619 for sg in self.config.get("security_groups"):
620 for neutron_sg in neutron_sg_list:
621 if sg in (neutron_sg["id"], neutron_sg["name"]):
622 self.security_groups_id.append(neutron_sg["id"])
623 break
624 else:
625 self.security_groups_id = None
626
627 raise vimconn.VimConnConnectionException(
628 "Not found security group {} for this tenant".format(sg)
629 )
630
631 def _find_nova_server(self, vm_id):
632 """
633 Returns the VM instance from Openstack and completes it with flavor ID
634 Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
635 """
636 try:
637 self._reload_connection()
638 server = self.nova.servers.find(id=vm_id)
639 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
640 server_dict = server.to_dict()
641 try:
642 server_dict["flavor"]["id"] = self.nova.flavors.find(
643 name=server_dict["flavor"]["original_name"]
644 ).id
645 except nvExceptions.NotFound as e:
646 self.logger.warning(str(e.message))
647 return server_dict
648 except (
649 ksExceptions.ClientException,
650 nvExceptions.ClientException,
651 nvExceptions.NotFound,
652 ConnectionError,
653 ) as e:
654 self._format_exception(e)
655
656 def check_vim_connectivity(self):
657 # just get network list to check connectivity and credentials
658 self.get_network_list(filter_dict={})
659
660 def get_tenant_list(self, filter_dict={}):
661 """Obtain tenants of VIM
662 filter_dict can contain the following keys:
663 name: filter by tenant name
664 id: filter by tenant uuid/id
665 <other VIM specific>
666 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
667 """
668 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
669
670 try:
671 self._reload_connection()
672
673 if self.api_version3:
674 project_class_list = self.keystone.projects.list(
675 name=filter_dict.get("name")
676 )
677 else:
678 project_class_list = self.keystone.tenants.findall(**filter_dict)
679
680 project_list = []
681
682 for project in project_class_list:
683 if filter_dict.get("id") and filter_dict["id"] != project.id:
684 continue
685
686 project_list.append(project.to_dict())
687
688 return project_list
689 except (
690 ksExceptions.ConnectionError,
691 ksExceptions.ClientException,
692 ConnectionError,
693 ) as e:
694 self._format_exception(e)
695
696 def new_tenant(self, tenant_name, tenant_description):
697 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
698 self.logger.debug("Adding a new tenant name: %s", tenant_name)
699
700 try:
701 self._reload_connection()
702
703 if self.api_version3:
704 project = self.keystone.projects.create(
705 tenant_name,
706 self.config.get("project_domain_id", "default"),
707 description=tenant_description,
708 is_domain=False,
709 )
710 else:
711 project = self.keystone.tenants.create(tenant_name, tenant_description)
712
713 return project.id
714 except (
715 ksExceptions.ConnectionError,
716 ksExceptions.ClientException,
717 ksExceptions.BadRequest,
718 ConnectionError,
719 ) as e:
720 self._format_exception(e)
721
722 def delete_tenant(self, tenant_id):
723 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
724 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
725
726 try:
727 self._reload_connection()
728
729 if self.api_version3:
730 self.keystone.projects.delete(tenant_id)
731 else:
732 self.keystone.tenants.delete(tenant_id)
733
734 return tenant_id
735 except (
736 ksExceptions.ConnectionError,
737 ksExceptions.ClientException,
738 ksExceptions.NotFound,
739 ConnectionError,
740 ) as e:
741 self._format_exception(e)
742
743 def new_network(
744 self,
745 net_name,
746 net_type,
747 ip_profile=None,
748 shared=False,
749 provider_network_profile=None,
750 ):
751 """Adds a tenant network to VIM
752 Params:
753 'net_name': name of the network
754 'net_type': one of:
755 'bridge': overlay isolated network
756 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
757 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
758 'ip_profile': is a dict containing the IP parameters of the network
759 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
760 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
761 'gateway_address': (Optional) ip_schema, that is X.X.X.X
762 'dns_address': (Optional) semicolon separated list of ip_schema, e.g. X.X.X.X[;X.X.X.X]
763 'dhcp_enabled': True or False
764 'dhcp_start_address': ip_schema, first IP to grant
765 'dhcp_count': number of IPs to grant.
766 'shared': if this network can be seen/used by other tenants/organizations
767 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
768 physical-network: physnet-label}
769 Returns a tuple with the network identifier and created_items, or raises an exception on error
770 created_items can be None or a dictionary where this method can include key-values that will be passed to
771 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
772 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
773 as not present.
774 """
775 self.logger.debug(
776 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
777 )
778 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
779
780 try:
781 vlan = None
782
783 if provider_network_profile:
784 vlan = provider_network_profile.get("segmentation-id")
785
786 new_net = None
787 created_items = {}
788 self._reload_connection()
789 network_dict = {"name": net_name, "admin_state_up": True}
790
791 if net_type in ("data", "ptp") or provider_network_profile:
792 provider_physical_network = None
793
794 if provider_network_profile and provider_network_profile.get(
795 "physical-network"
796 ):
797 provider_physical_network = provider_network_profile.get(
798 "physical-network"
799 )
800
801 # provider-network must be one of the dataplane_physical_net values if this is a list. If it is a string
802 # or not declared, just skip the check
803 if (
804 isinstance(
805 self.config.get("dataplane_physical_net"), (tuple, list)
806 )
807 and provider_physical_network
808 not in self.config["dataplane_physical_net"]
809 ):
810 raise vimconn.VimConnConflictException(
811 "Invalid parameter 'provider-network:physical-network' "
812 "for network creation. '{}' is not one of the declared "
813 "list at VIM_config:dataplane_physical_net".format(
814 provider_physical_network
815 )
816 )
817
818 # use the default dataplane_physical_net
819 if not provider_physical_network:
820 provider_physical_network = self.config.get(
821 "dataplane_physical_net"
822 )
823
824 # if it is a non-empty list, use the first value. If it is a string, use the value directly
825 if (
826 isinstance(provider_physical_network, (tuple, list))
827 and provider_physical_network
828 ):
829 provider_physical_network = provider_physical_network[0]
830
831 if not provider_physical_network:
832 raise vimconn.VimConnConflictException(
833 "missing information needed for underlay networks. Provide "
834 "'dataplane_physical_net' configuration at VIM or use the NS "
835 "instantiation parameter 'provider-network.physical-network'"
836 " for the VLD"
837 )
838
839 if not self.config.get("multisegment_support"):
840 network_dict[
841 "provider:physical_network"
842 ] = provider_physical_network
843
844 if (
845 provider_network_profile
846 and "network-type" in provider_network_profile
847 ):
848 network_dict[
849 "provider:network_type"
850 ] = provider_network_profile["network-type"]
851 else:
852 network_dict["provider:network_type"] = self.config.get(
853 "dataplane_network_type", "vlan"
854 )
855
856 if vlan:
857 network_dict["provider:segmentation_id"] = vlan
858 else:
859 # Multi-segment case
860 segment_list = []
861 segment1_dict = {
862 "provider:physical_network": "",
863 "provider:network_type": "vxlan",
864 }
865 segment_list.append(segment1_dict)
866 segment2_dict = {
867 "provider:physical_network": provider_physical_network,
868 "provider:network_type": "vlan",
869 }
870
871 if vlan:
872 segment2_dict["provider:segmentation_id"] = vlan
873 elif self.config.get("multisegment_vlan_range"):
874 vlanID = self._generate_multisegment_vlanID()
875 segment2_dict["provider:segmentation_id"] = vlanID
876
877 # else
878 # raise vimconn.VimConnConflictException(
879 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
880 # network")
881 segment_list.append(segment2_dict)
882 network_dict["segments"] = segment_list
883
884 # VIO Specific Changes. It needs a concrete VLAN
885 if self.vim_type == "VIO" and vlan is None:
886 if self.config.get("dataplane_net_vlan_range") is None:
887 raise vimconn.VimConnConflictException(
888 "You must provide 'dataplane_net_vlan_range' in format "
889 "[start_ID - end_ID] at VIM_config for creating underlay "
890 "networks"
891 )
892
893 network_dict["provider:segmentation_id"] = self._generate_vlanID()
894
895 network_dict["shared"] = shared
896
897 if self.config.get("disable_network_port_security"):
898 network_dict["port_security_enabled"] = False
899
900 if self.config.get("neutron_availability_zone_hints"):
901 hints = self.config.get("neutron_availability_zone_hints")
902
903 if isinstance(hints, str):
904 hints = [hints]
905
906 network_dict["availability_zone_hints"] = hints
907
908 new_net = self.neutron.create_network({"network": network_dict})
909 # print new_net
910 # create subnetwork, even if there is no profile
911
912 if not ip_profile:
913 ip_profile = {}
914
915 if not ip_profile.get("subnet_address"):
916 # Fake subnet is required
917 subnet_rand = random.SystemRandom().randint(0, 255)
918 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
919
920 if "ip_version" not in ip_profile:
921 ip_profile["ip_version"] = "IPv4"
922
923 subnet = {
924 "name": net_name + "-subnet",
925 "network_id": new_net["network"]["id"],
926 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
927 "cidr": ip_profile["subnet_address"],
928 }
929
930 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
931 if ip_profile.get("gateway_address"):
932 subnet["gateway_ip"] = ip_profile["gateway_address"]
933 else:
934 subnet["gateway_ip"] = None
935
936 if ip_profile.get("dns_address"):
937 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
938
939 if "dhcp_enabled" in ip_profile:
940 subnet["enable_dhcp"] = (
941 False
942 if ip_profile["dhcp_enabled"] == "false"
943 or ip_profile["dhcp_enabled"] is False
944 else True
945 )
946
947 if ip_profile.get("dhcp_start_address"):
948 subnet["allocation_pools"] = []
949 subnet["allocation_pools"].append(dict())
950 subnet["allocation_pools"][0]["start"] = ip_profile[
951 "dhcp_start_address"
952 ]
953
954 if ip_profile.get("dhcp_count"):
955 # parts = ip_profile["dhcp_start_address"].split(".")
956 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
957 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
958 ip_int += ip_profile["dhcp_count"] - 1
959 ip_str = str(netaddr.IPAddress(ip_int))
960 subnet["allocation_pools"][0]["end"] = ip_str
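# Worked example (illustrative): dhcp_start_address "10.0.0.2" with
# dhcp_count 100 gives ip_int = int(IPAddress("10.0.0.2")) + 99, so the
# allocation pool becomes {"start": "10.0.0.2", "end": "10.0.0.101"}.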
961
962 if (
963 ip_profile.get("ipv6_address_mode")
964 and ip_profile["ip_version"] != "IPv4"
965 ):
966 subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
967 # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
968 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
969 subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
970
971 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
972 self.neutron.create_subnet({"subnet": subnet})
973
974 if net_type == "data" and self.config.get("multisegment_support"):
975 if self.config.get("l2gw_support"):
976 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
977 for l2gw in l2gw_list:
978 l2gw_conn = {
979 "l2_gateway_id": l2gw["id"],
980 "network_id": new_net["network"]["id"],
981 "segmentation_id": str(vlanID),
982 }
983 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
984 {"l2_gateway_connection": l2gw_conn}
985 )
986 created_items[
987 "l2gwconn:"
988 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
989 ] = True
990
991 return new_net["network"]["id"], created_items
992 except Exception as e:
993 # delete l2gw connections (if any) before deleting the network
994 for k, v in created_items.items():
995 if not v: # skip already deleted
996 continue
997
998 try:
999 k_item, _, k_id = k.partition(":")
1000
1001 if k_item == "l2gwconn":
1002 self.neutron.delete_l2_gateway_connection(k_id)
1003 except Exception as e2:
1004 self.logger.error(
1005 "Error deleting l2 gateway connection: {}: {}".format(
1006 type(e2).__name__, e2
1007 )
1008 )
1009
1010 if new_net:
1011 self.neutron.delete_network(new_net["network"]["id"])
1012
1013 self._format_exception(e)
1014
1015 def get_network_list(self, filter_dict={}):
1016 """Obtain tenant networks of VIM
1017 Filter_dict can be:
1018 name: network name
1019 id: network uuid
1020 shared: boolean
1021 tenant_id: tenant
1022 admin_state_up: boolean
1023 status: 'ACTIVE'
1024 Returns the network list of dictionaries
1025 """
1026 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
1027
1028 try:
1029 self._reload_connection()
1030 filter_dict_os = filter_dict.copy()
1031
1032 if self.api_version3 and "tenant_id" in filter_dict_os:
1033 # TODO check
1034 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
1035
1036 net_dict = self.neutron.list_networks(**filter_dict_os)
1037 net_list = net_dict["networks"]
1038 self.__net_os2mano(net_list)
1039
1040 return net_list
1041 except (
1042 neExceptions.ConnectionFailed,
1043 ksExceptions.ClientException,
1044 neExceptions.NeutronException,
1045 ConnectionError,
1046 ) as e:
1047 self._format_exception(e)
1048
1049 def get_network(self, net_id):
1050 """Obtain details of network from VIM
1051 Returns the network information from a network id"""
1052 self.logger.debug("Getting tenant network %s from VIM", net_id)
1053 filter_dict = {"id": net_id}
1054 net_list = self.get_network_list(filter_dict)
1055
1056 if len(net_list) == 0:
1057 raise vimconn.VimConnNotFoundException(
1058 "Network '{}' not found".format(net_id)
1059 )
1060 elif len(net_list) > 1:
1061 raise vimconn.VimConnConflictException(
1062 "Found more than one network with this criteria"
1063 )
1064
1065 net = net_list[0]
1066 subnets = []
1067 for subnet_id in net.get("subnets", ()):
1068 try:
1069 subnet = self.neutron.show_subnet(subnet_id)
1070 except Exception as e:
1071 self.logger.error(
1072 "osconnector.get_network(): Error getting subnet %s %s"
1073 % (net_id, str(e))
1074 )
1075 subnet = {"id": subnet_id, "fault": str(e)}
1076
1077 subnets.append(subnet)
1078
1079 net["subnets"] = subnets
1080 net["encapsulation"] = net.get("provider:network_type")
1081 net["encapsulation_type"] = net.get("provider:network_type")
1082 net["segmentation_id"] = net.get("provider:segmentation_id")
1083 net["encapsulation_id"] = net.get("provider:segmentation_id")
1084
1085 return net
1086
1087 def delete_network(self, net_id, created_items=None):
1088 """
1089 Removes a tenant network from VIM and its associated elements
1090 :param net_id: VIM identifier of the network, provided by method new_network
1091 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1092 Returns the network identifier or raises an exception upon error or when network is not found
1093 """
1094 self.logger.debug("Deleting network '%s' from VIM", net_id)
1095
1096 if created_items is None:
1097 created_items = {}
1098
1099 try:
1100 self._reload_connection()
1101 # delete l2gw connections (if any) before deleting the network
1102 for k, v in created_items.items():
1103 if not v: # skip already deleted
1104 continue
1105
1106 try:
1107 k_item, _, k_id = k.partition(":")
1108 if k_item == "l2gwconn":
1109 self.neutron.delete_l2_gateway_connection(k_id)
1110 except Exception as e:
1111 self.logger.error(
1112 "Error deleting l2 gateway connection: {}: {}".format(
1113 type(e).__name__, e
1114 )
1115 )
1116
1117 # delete VM ports attached to this network before the network itself
1118 ports = self.neutron.list_ports(network_id=net_id)
1119 for p in ports["ports"]:
1120 try:
1121 self.neutron.delete_port(p["id"])
1122 except Exception as e:
1123 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1124
1125 self.neutron.delete_network(net_id)
1126
1127 return net_id
1128 except (
1129 neExceptions.ConnectionFailed,
1130 neExceptions.NetworkNotFoundClient,
1131 neExceptions.NeutronException,
1132 ksExceptions.ClientException,
1134 ConnectionError,
1135 ) as e:
1136 self._format_exception(e)
1137
1138 def refresh_nets_status(self, net_list):
1139 """Get the status of the networks
1140 Params: the list of network identifiers
1141 Returns a dictionary with:
1142 net_id: #VIM id of this network
1143 status: #Mandatory. Text with one of:
1144 # DELETED (not found at vim)
1145 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1146 # OTHER (Vim reported other status not understood)
1147 # ERROR (VIM indicates an ERROR status)
1148 # ACTIVE, INACTIVE, DOWN (admin down),
1149 # BUILD (on building process)
1150 #
1151 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1152 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1153 """
1154 net_dict = {}
1155
1156 for net_id in net_list:
1157 net = {}
1158
1159 try:
1160 net_vim = self.get_network(net_id)
1161
1162 if net_vim["status"] in netStatus2manoFormat:
1163 net["status"] = netStatus2manoFormat[net_vim["status"]]
1164 else:
1165 net["status"] = "OTHER"
1166 net["error_msg"] = "VIM status reported " + net_vim["status"]
1167
1168 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1169 net["status"] = "DOWN"
1170
1171 net["vim_info"] = self.serialize(net_vim)
1172
1173 if net_vim.get("fault"): # TODO
1174 net["error_msg"] = str(net_vim["fault"])
1175 except vimconn.VimConnNotFoundException as e:
1176 self.logger.error("Exception getting net status: %s", str(e))
1177 net["status"] = "DELETED"
1178 net["error_msg"] = str(e)
1179 except vimconn.VimConnException as e:
1180 self.logger.error("Exception getting net status: %s", str(e))
1181 net["status"] = "VIM_ERROR"
1182 net["error_msg"] = str(e)
1183 net_dict[net_id] = net
1184 return net_dict
1185
1186 def get_flavor(self, flavor_id):
1187 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1188 self.logger.debug("Getting flavor '%s'", flavor_id)
1189
1190 try:
1191 self._reload_connection()
1192 flavor = self.nova.flavors.find(id=flavor_id)
1193 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1194
1195 return flavor.to_dict()
1196 except (
1197 nvExceptions.NotFound,
1198 nvExceptions.ClientException,
1199 ksExceptions.ClientException,
1200 ConnectionError,
1201 ) as e:
1202 self._format_exception(e)
1203
1204 def get_flavor_id_from_data(self, flavor_dict):
1205 """Obtain flavor id that match the flavor description
1206 Returns the flavor_id or raises a VimConnNotFoundException
1207 flavor_dict: contains the required ram, vcpus, disk
1208 If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1209 and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1210 VimConnNotFoundException is raised
1211 """
1212 exact_match = not self.config.get("use_existing_flavors")
1213
1214 try:
1215 self._reload_connection()
1216 flavor_candidate_id = None
1217 flavor_candidate_data = (10000, 10000, 10000)
1218 flavor_target = (
1219 flavor_dict["ram"],
1220 flavor_dict["vcpus"],
1221 flavor_dict["disk"],
1222 flavor_dict.get("ephemeral", 0),
1223 flavor_dict.get("swap", 0),
1224 )
1225 # numa=None
1226 extended = flavor_dict.get("extended", {})
1227 if extended:
1228 # TODO
1229 raise vimconn.VimConnNotFoundException(
1230 "Flavor with EPA still not implemented"
1231 )
1232 # if len(numas) > 1:
1233 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1234 # numa=numas[0]
1235 # numas = extended.get("numas")
1236 for flavor in self.nova.flavors.list():
1237 epa = flavor.get_keys()
1238
1239 if epa:
1240 continue
1241 # TODO
1242
1243 flavor_data = (
1244 flavor.ram,
1245 flavor.vcpus,
1246 flavor.disk,
1247 flavor.ephemeral,
1248 flavor.swap if isinstance(flavor.swap, int) else 0,
1249 )
1250 if flavor_data == flavor_target:
1251 return flavor.id
1252 elif (
1253 not exact_match
1254 and flavor_target < flavor_data < flavor_candidate_data
1255 ):
1256 flavor_candidate_id = flavor.id
1257 flavor_candidate_data = flavor_data
1258
1259 if not exact_match and flavor_candidate_id:
1260 return flavor_candidate_id
1261
1262 raise vimconn.VimConnNotFoundException(
1263 "Cannot find any flavor matching '{}'".format(flavor_dict)
1264 )
1265 except (
1266 nvExceptions.NotFound,
1267 nvExceptions.ClientException,
1268 ksExceptions.ClientException,
1269 ConnectionError,
1270 ) as e:
1271 self._format_exception(e)
1272
1273 @staticmethod
1274 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1275 """Process resource quota and fill up extra_specs.
1276 Args:
1277 quota (dict): Contains the quota of resources
1278 prefix (str): Prefix for the extra_specs keys
1279 extra_specs (dict) Dict to be filled to be used during flavor creation
1280
1281 """
1282 if "limit" in quota:
1283 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1284
1285 if "reserve" in quota:
1286 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1287
1288 if "shares" in quota:
1289 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1290 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1291
1292 @staticmethod
1293 def process_numa_memory(
1294 numa: dict, node_id: Optional[int], extra_specs: dict
1295 ) -> None:
1296 """Set the memory in extra_specs.
1297 Args:
1298 numa (dict): A dictionary which includes numa information
1299 node_id (int): ID of numa node
1300 extra_specs (dict): To be filled.
1301
1302 """
1303 if not numa.get("memory"):
1304 return
1305 memory_mb = numa["memory"] * 1024
1306 memory = "hw:numa_mem.{}".format(node_id)
1307 extra_specs[memory] = int(memory_mb)
1308
1309 @staticmethod
1310 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1311 """Set the cpu in extra_specs.
1312 Args:
1313 numa (dict): A dictionary which includes numa information
1314 node_id (int): ID of numa node
1315 extra_specs (dict): To be filled.
1316
1317 """
1318 if not numa.get("vcpu"):
1319 return
1320 vcpu = numa["vcpu"]
1321 cpu = "hw:numa_cpus.{}".format(node_id)
1322 vcpu = ",".join(map(str, vcpu))
1323 extra_specs[cpu] = vcpu
1324
1325 @staticmethod
1326 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1327 """Fill up extra_specs if numa has paired-threads.
1328 Args:
1329 numa (dict): A dictionary which includes numa information
1330 extra_specs (dict): To be filled.
1331
1332 Returns:
1333 threads (int) Number of virtual cpus
1334
1335 """
1336 if not numa.get("paired-threads"):
1337 return
1338
1339 # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
1340 threads = numa["paired-threads"] * 2
1341 extra_specs["hw:cpu_thread_policy"] = "require"
1342 extra_specs["hw:cpu_policy"] = "dedicated"
1343 return threads
1344
1345 @staticmethod
1346 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1347 """Fill up extra_specs if numa has cores.
1348 Args:
1349 numa (dict): A dictionary which includes numa information
1350 extra_specs (dict): To be filled.
1351
1352 Returns:
1353 cores (int) Number of virtual cpus
1354
1355 """
1356 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1357 # architecture, or a non-SMT architecture will be emulated
1358 if not numa.get("cores"):
1359 return
1360 cores = numa["cores"]
1361 extra_specs["hw:cpu_thread_policy"] = "isolate"
1362 extra_specs["hw:cpu_policy"] = "dedicated"
1363 return cores
1364
1365 @staticmethod
1366 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1367 """Fill up extra_specs if numa has threads.
1368 Args:
1369 numa (dict): A dictionary which includes numa information
1370 extra_specs (dict): To be filled.
1371
1372 Returns:
1373 threads (int) Number of virtual cpus
1374
1375 """
1376 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1377 if not numa.get("threads"):
1378 return
1379 threads = numa["threads"]
1380 extra_specs["hw:cpu_thread_policy"] = "prefer"
1381 extra_specs["hw:cpu_policy"] = "dedicated"
1382 return threads
1383
1384 def _process_numa_parameters_of_flavor(
1385 self, numas: List, extra_specs: Dict
1386 ) -> None:
1387 """Process numa parameters and fill up extra_specs.
1388
1389 Args:
1390 numas (list): List of dictionary which includes numa information
1391 extra_specs (dict): To be filled.
1392
1393 """
1394 numa_nodes = len(numas)
1395 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1396 cpu_cores, cpu_threads = 0, 0
1397
1398 if self.vim_type == "VIO":
1399 self.process_vio_numa_nodes(numa_nodes, extra_specs)
1400
1401 for numa in numas:
1402 if "id" in numa:
1403 node_id = numa["id"]
1404 # overwrite ram and vcpus
1405 # check if key "memory" is present in numa else use ram value at flavor
1406 self.process_numa_memory(numa, node_id, extra_specs)
1407 self.process_numa_vcpu(numa, node_id, extra_specs)
1408
1409 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1410 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1411
1412 if "paired-threads" in numa:
1413 threads = self.process_numa_paired_threads(numa, extra_specs)
1414 cpu_threads += threads
1415
1416 elif "cores" in numa:
1417 cores = self.process_numa_cores(numa, extra_specs)
1418 cpu_cores += cores
1419
1420 elif "threads" in numa:
1421 threads = self.process_numa_threads(numa, extra_specs)
1422 cpu_threads += threads
1423
1424 if cpu_cores:
1425 extra_specs["hw:cpu_cores"] = str(cpu_cores)
1426 if cpu_threads:
1427 extra_specs["hw:cpu_threads"] = str(cpu_threads)
1428
1429 @staticmethod
1430 def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1431 """According to number of numa nodes, updates the extra_specs for VIO.
1432
1433 Args:
1434
1435 numa_nodes (int): Number of numa nodes
1436 extra_specs (dict): Extra specs dict to be updated
1437
1438 """
1439 # If there are several numas, we do not define specific affinity.
1440 extra_specs["vmware:latency_sensitivity_level"] = "high"
1441
1442 def _change_flavor_name(
1443 self, name: str, name_suffix: int, flavor_data: dict
1444 ) -> str:
1445 """Change the flavor name if the name already exists.
1446
1447 Args:
1448 name (str): Flavor name to be checked
1449 name_suffix (int): Suffix to be appended to name
1450 flavor_data (dict): Flavor dict
1451
1452 Returns:
1453 name (str): New flavor name to be used
1454
1455 """
1456 # Get used names
1457 fl = self.nova.flavors.list()
1458 fl_names = [f.name for f in fl]
1459
1460 while name in fl_names:
1461 name_suffix += 1
1462 name = flavor_data["name"] + "-" + str(name_suffix)
1463
1464 return name
1465
1466 def _process_extended_config_of_flavor(
1467 self, extended: dict, extra_specs: dict
1468 ) -> None:
1469 """Process the extended dict to fill up extra_specs.
1470 Args:
1471
1472 extended (dict): Contains the extended specification of the flavor
1473 extra_specs (dict) Dict to be filled to be used during flavor creation
1474
1475 """
1476 quotas = {
1477 "cpu-quota": "cpu",
1478 "mem-quota": "memory",
1479 "vif-quota": "vif",
1480 "disk-io-quota": "disk_io",
1481 }
1482
1483 page_sizes = {
1484 "LARGE": "large",
1485 "SMALL": "small",
1486 "SIZE_2MB": "2MB",
1487 "SIZE_1GB": "1GB",
1488 "PREFER_LARGE": "any",
1489 }
1490
1491 policies = {
1492 "cpu-pinning-policy": "hw:cpu_policy",
1493 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1494 "mem-policy": "hw:numa_mempolicy",
1495 }
1496
1497 numas = extended.get("numas")
1498 if numas:
1499 self._process_numa_parameters_of_flavor(numas, extra_specs)
1500
1501 for quota, item in quotas.items():
1502 if quota in extended:
1503 self.process_resource_quota(extended.get(quota), item, extra_specs)
1504
1505 # Set the mempage size as specified in the descriptor
1506 if extended.get("mempage-size"):
1507 if extended["mempage-size"] in page_sizes:
1508 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1509 else:
1510 # Normally, validations in NBI should not allow this condition.
1511 self.logger.debug(
1512 "Invalid mempage-size %s. Will be ignored",
1513 extended.get("mempage-size"),
1514 )
1515
1516 for policy, hw_policy in policies.items():
1517 if extended.get(policy):
1518 extra_specs[hw_policy] = extended[policy].lower()
1519
1520 @staticmethod
1521 def _get_flavor_details(flavor_data: dict) -> Tuple:
1522 """Returns the details of flavor
1523 Args:
1524 flavor_data (dict): Dictionary that includes required flavor details
1525
1526 Returns:
1527 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1528
1529 """
1530 return (
1531 flavor_data.get("ram", 64),
1532 flavor_data.get("vcpus", 1),
1533 {},
1534 flavor_data.get("extended"),
1535 )
1536
1537 def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
1538 """Adds a tenant flavor to openstack VIM.
1539 if change_name_if_used is True, it will change the name in case of conflict,
1540 because name repetition is not supported.
1541
1542 Args:
1543 flavor_data (dict): Flavor details to be processed
1544 change_name_if_used (bool): Change name in case of conflict
1545
1546 Returns:
1547 flavor_id (str): flavor identifier
1548
1549 """
1550 self.logger.debug("Adding flavor '%s'", str(flavor_data))
1551 retry = 0
1552 max_retries = 3
1553 name_suffix = 0
1554
1555 try:
1556 name = flavor_data["name"]
1557 while retry < max_retries:
1558 retry += 1
1559 try:
1560 self._reload_connection()
1561
1562 if change_name_if_used:
1563 name = self._change_flavor_name(name, name_suffix, flavor_data)
1564
1565 ram, vcpus, extra_specs, extended = self._get_flavor_details(
1566 flavor_data
1567 )
1568 if extended:
1569 self._process_extended_config_of_flavor(extended, extra_specs)
1570
1571 # Create flavor
1572
1573 new_flavor = self.nova.flavors.create(
1574 name=name,
1575 ram=ram,
1576 vcpus=vcpus,
1577 disk=flavor_data.get("disk", 0),
1578 ephemeral=flavor_data.get("ephemeral", 0),
1579 swap=flavor_data.get("swap", 0),
1580 is_public=flavor_data.get("is_public", True),
1581 )
1582
1583 # Add metadata
1584 if extra_specs:
1585 new_flavor.set_keys(extra_specs)
1586
1587 return new_flavor.id
1588
1589 except nvExceptions.Conflict as e:
1590 if change_name_if_used and retry < max_retries:
1591 continue
1592
1593 self._format_exception(e)
1594
1595 except (
1596 ksExceptions.ClientException,
1597 nvExceptions.ClientException,
1598 ConnectionError,
1599 KeyError,
1600 ) as e:
1601 self._format_exception(e)
1602
1603 def delete_flavor(self, flavor_id):
1604 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1605 try:
1606 self._reload_connection()
1607 self.nova.flavors.delete(flavor_id)
1608
1609 return flavor_id
1610 # except nvExceptions.BadRequest as e:
1611 except (
1612 nvExceptions.NotFound,
1613 ksExceptions.ClientException,
1614 nvExceptions.ClientException,
1615 ConnectionError,
1616 ) as e:
1617 self._format_exception(e)
1618
1619 def new_image(self, image_dict):
1620 """
1621 Adds a tenant image to VIM. image_dict is a dictionary with:
1622 name: name
1623 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1624 location: path or URI
1625 public: "yes" or "no"
1626 metadata: metadata of the image
1627 Returns the image_id
1628 """
1629 retry = 0
1630 max_retries = 3
1631
1632 while retry < max_retries:
1633 retry += 1
1634 try:
1635 self._reload_connection()
1636
1637 # determine format http://docs.openstack.org/developer/glance/formats.html
1638 if "disk_format" in image_dict:
1639 disk_format = image_dict["disk_format"]
1640 else: # autodiscover based on extension
1641 if image_dict["location"].endswith(".qcow2"):
1642 disk_format = "qcow2"
1643 elif image_dict["location"].endswith(".vhd"):
1644 disk_format = "vhd"
1645 elif image_dict["location"].endswith(".vmdk"):
1646 disk_format = "vmdk"
1647 elif image_dict["location"].endswith(".vdi"):
1648 disk_format = "vdi"
1649 elif image_dict["location"].endswith(".iso"):
1650 disk_format = "iso"
1651 elif image_dict["location"].endswith(".aki"):
1652 disk_format = "aki"
1653 elif image_dict["location"].endswith(".ari"):
1654 disk_format = "ari"
1655 elif image_dict["location"].endswith(".ami"):
1656 disk_format = "ami"
1657 else:
1658 disk_format = "raw"
1659
1660 self.logger.debug(
1661 "new_image: '%s' loading from '%s'",
1662 image_dict["name"],
1663 image_dict["location"],
1664 )
1665 if self.vim_type == "VIO":
1666 container_format = "bare"
1667 if "container_format" in image_dict:
1668 container_format = image_dict["container_format"]
1669
1670 new_image = self.glance.images.create(
1671 name=image_dict["name"],
1672 container_format=container_format,
1673 disk_format=disk_format,
1674 )
1675 else:
1676 new_image = self.glance.images.create(name=image_dict["name"])
1677
1678 if image_dict["location"].startswith("http"):
1679 # TODO there is no method for direct download. It must be downloaded locally with requests
1680 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1681 else: # local path
1682 with open(image_dict["location"]) as fimage:
1683 self.glance.images.upload(new_image.id, fimage)
1684 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1685 # image_dict.get("public","yes")=="yes",
1686 # container_format="bare", data=fimage, disk_format=disk_format)
1687
1688 metadata_to_load = image_dict.get("metadata") or {}
1689
1690 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1691 # for openstack
1692 if self.vim_type == "VIO":
1693 metadata_to_load["upload_location"] = image_dict["location"]
1694 else:
1695 metadata_to_load["location"] = image_dict["location"]
1696
1697 self.glance.images.update(new_image.id, **metadata_to_load)
1698
1699 return new_image.id
1700 except (
1701 nvExceptions.Conflict,
1702 ksExceptions.ClientException,
1703 nvExceptions.ClientException,
1704 ) as e:
1705 self._format_exception(e)
1706 except (
1707 HTTPException,
1708 gl1Exceptions.HTTPException,
1709 gl1Exceptions.CommunicationError,
1710 ConnectionError,
1711 ) as e:
1712 if retry < max_retries:  # retry on transient connection errors; raise after the last attempt
1713 continue
1714
1715 self._format_exception(e)
1716 except IOError as e: # can not open the file
1717 raise vimconn.VimConnConnectionException(
1718 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1719 http_code=vimconn.HTTP_Bad_Request,
1720 )
1721
1722 def delete_image(self, image_id):
1723 """Deletes a tenant image from openstack VIM. Returns the old id"""
1724 try:
1725 self._reload_connection()
1726 self.glance.images.delete(image_id)
1727
1728 return image_id
1729 except (
1730 nvExceptions.NotFound,
1731 ksExceptions.ClientException,
1732 nvExceptions.ClientException,
1733 gl1Exceptions.CommunicationError,
1734 gl1Exceptions.HTTPNotFound,
1735 ConnectionError,
1736 ) as e: # TODO remove
1737 self._format_exception(e)
1738
1739 def get_image_id_from_path(self, path):
1740 """Get the image id from image path in the VIM database. Returns the image_id"""
1741 try:
1742 self._reload_connection()
1743 images = self.glance.images.list()
1744
1745 for image in images:
1746 if image.metadata.get("location") == path:
1747 return image.id
1748
1749 raise vimconn.VimConnNotFoundException(
1750 "image with location '{}' not found".format(path)
1751 )
1752 except (
1753 ksExceptions.ClientException,
1754 nvExceptions.ClientException,
1755 gl1Exceptions.CommunicationError,
1756 ConnectionError,
1757 ) as e:
1758 self._format_exception(e)
1759
1760 def get_image_list(self, filter_dict={}):
1761 """Obtain tenant images from VIM
1762 Filter_dict can be:
1763 id: image id
1764 name: image name
1765 checksum: image checksum
1766 Returns the image list of dictionaries:
1767 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1768 List can be empty
1769 """
1770 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1771
1772 try:
1773 self._reload_connection()
1774 # filter_dict_os = filter_dict.copy()
1775 # Filter by the supported fields (name, id, checksum); other filter keys are ignored.
1776 image_list = self.glance.images.list()
1777 filtered_list = []
1778
1779 for image in image_list:
1780 try:
1781 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1782 continue
1783
1784 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1785 continue
1786
1787 if (
1788 filter_dict.get("checksum")
1789 and image["checksum"] != filter_dict["checksum"]
1790 ):
1791 continue
1792
1793 filtered_list.append(image.copy())
1794 except gl1Exceptions.HTTPNotFound:
1795 pass
1796
1797 return filtered_list
1798 except (
1799 ksExceptions.ClientException,
1800 nvExceptions.ClientException,
1801 gl1Exceptions.CommunicationError,
1802 ConnectionError,
1803 ) as e:
1804 self._format_exception(e)
1805
1806 def __wait_for_vm(self, vm_id, status):
1807 """wait until vm is in the desired status and return True.
1808 If the VM gets in ERROR status, return false.
1809 If the timeout is reached generate an exception"""
1810 elapsed_time = 0
1811 while elapsed_time < server_timeout:
1812 vm_status = self.nova.servers.get(vm_id).status
1813
1814 if vm_status == status:
1815 return True
1816
1817 if vm_status == "ERROR":
1818 return False
1819
1820 time.sleep(5)
1821 elapsed_time += 5
1822
1823 # if we exceeded the timeout, raise so the caller can roll back
1824 if elapsed_time >= server_timeout:
1825 raise vimconn.VimConnException(
1826 "Timeout waiting for instance " + vm_id + " to get " + status,
1827 http_code=vimconn.HTTP_Request_Timeout,
1828 )
1829
1830 def _get_openstack_availablity_zones(self):
1831 """
1832 Get the list of available availability zones from openstack
1833 :return:
1834 """
1835 try:
1836 openstack_availability_zone = self.nova.availability_zones.list()
1837 openstack_availability_zone = [
1838 str(zone.zoneName)
1839 for zone in openstack_availability_zone
1840 if zone.zoneName != "internal"
1841 ]
1842
1843 return openstack_availability_zone
1844 except Exception:
1845 return None
1846
1847 def _set_availablity_zones(self):
1848 """
1849 Set vim availability zone
1850 :return:
1851 """
1852 if "availability_zone" in self.config:
1853 vim_availability_zones = self.config.get("availability_zone")
1854
1855 if isinstance(vim_availability_zones, str):
1856 self.availability_zone = [vim_availability_zones]
1857 elif isinstance(vim_availability_zones, list):
1858 self.availability_zone = vim_availability_zones
1859 else:
1860 self.availability_zone = self._get_openstack_availablity_zones()
1861
1862 def _get_vm_availability_zone(
1863 self, availability_zone_index, availability_zone_list
1864 ):
1865 """
1866 Return the availability zone to be used by the created VM.
1867 :return: The VIM availability zone to be used or None
1868 """
1869 if availability_zone_index is None:
1870 if not self.config.get("availability_zone"):
1871 return None
1872 elif isinstance(self.config.get("availability_zone"), str):
1873 return self.config["availability_zone"]
1874 else:
1875 # TODO consider using a different parameter at config for default AV and AV list match
1876 return self.config["availability_zone"][0]
1877
1878 vim_availability_zones = self.availability_zone
1879 # check if VIM offers enough availability zones as described in the VNFD
1880 if vim_availability_zones and len(availability_zone_list) <= len(
1881 vim_availability_zones
1882 ):
1883 # check if all the names of NFV AV match VIM AV names
1884 match_by_index = False
1885 for av in availability_zone_list:
1886 if av not in vim_availability_zones:
1887 match_by_index = True
1888 break
1889
1890 if match_by_index:
1891 return vim_availability_zones[availability_zone_index]
1892 else:
1893 return availability_zone_list[availability_zone_index]
1894 else:
1895 raise vimconn.VimConnConflictException(
1896 "No enough availability zones at VIM for this deployment"
1897 )
1898
1899 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1900 """Fill up the security_groups in the port_dict.
1901
1902 Args:
1903 net (dict): Network details
1904 port_dict (dict): Port details
1905
1906 """
1907 if (
1908 self.config.get("security_groups")
1909 and net.get("port_security") is not False
1910 and not self.config.get("no_port_security_extension")
1911 ):
1912 if not self.security_groups_id:
1913 self._get_ids_from_name()
1914
1915 port_dict["security_groups"] = self.security_groups_id
1916
1917 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1918 """Fill up the network binding depending on network type in the port_dict.
1919
1920 Args:
1921 net (dict): Network details
1922 port_dict (dict): Port details
1923
1924 """
1925 if not net.get("type"):
1926 raise vimconn.VimConnException("Type is missing in the network details.")
1927
1928 if net["type"] == "virtual":
1929 pass
1930
1931 # For VF
1932 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1933 port_dict["binding:vnic_type"] = "direct"
1934
1935 # VIO specific Changes
1936 if self.vim_type == "VIO":
1937 # Need to create port with port_security_enabled = False and no-security-groups
1938 port_dict["port_security_enabled"] = False
1939 port_dict["provider_security_groups"] = []
1940 port_dict["security_groups"] = []
1941
1942 else:
1943 # For PT PCI-PASSTHROUGH
1944 port_dict["binding:vnic_type"] = "direct-physical"
1945
1946 @staticmethod
1947 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1948 """Set the "ip" parameter in net dictionary.
1949
1950 Args:
1951 new_port (dict): New created port
1952 net (dict): Network details
1953
1954 """
1955 fixed_ips = new_port["port"].get("fixed_ips")
1956
1957 if fixed_ips:
1958 net["ip"] = fixed_ips[0].get("ip_address")
1959 else:
1960 net["ip"] = None
1961
1962 @staticmethod
1963 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1964 """Fill up the mac_address and fixed_ips in port_dict.
1965
1966 Args:
1967 net (dict): Network details
1968 port_dict (dict): Port details
1969
1970 """
1971 if net.get("mac_address"):
1972 port_dict["mac_address"] = net["mac_address"]
1973
1974 ip_dual_list = []
1975 if ip_list := net.get("ip_address"):
1976 if not isinstance(ip_list, list):
1977 ip_list = [ip_list]
1978 for ip in ip_list:
1979 ip_dict = {"ip_address": ip}
1980 ip_dual_list.append(ip_dict)
1981 port_dict["fixed_ips"] = ip_dual_list
1982 # TODO add "subnet_id": <subnet_id>
1983
1984 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1985 """Create new port using neutron.
1986
1987 Args:
1988 port_dict (dict): Port details
1989 created_items (dict): All created items
1990 net (dict): Network details
1991
1992 Returns:
1993 new_port (dict): New created port
1994
1995 """
1996 new_port = self.neutron.create_port({"port": port_dict})
1997 created_items["port:" + str(new_port["port"]["id"])] = True
1998 net["mac_address"] = new_port["port"]["mac_address"]
1999 net["vim_id"] = new_port["port"]["id"]
2000
2001 return new_port
2002
2003 def _create_port(
2004 self, net: dict, name: str, created_items: dict
2005 ) -> Tuple[dict, dict]:
2006 """Create port using net details.
2007
2008 Args:
2009 net (dict): Network details
2010 name (str): Name to be used as the port name if net dict does not include one
2011 created_items (dict): All created items
2012
2013 Returns:
2014 new_port, port: New created port and the port dictionary
2015
2016 """
2017
2018 port_dict = {
2019 "network_id": net["net_id"],
2020 "name": net.get("name"),
2021 "admin_state_up": True,
2022 }
2023
2024 if not port_dict["name"]:
2025 port_dict["name"] = name
2026
2027 self._prepare_port_dict_security_groups(net, port_dict)
2028
2029 self._prepare_port_dict_binding(net, port_dict)
2030
2031 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
2032
2033 new_port = self._create_new_port(port_dict, created_items, net)
2034
2035 vimconnector._set_fixed_ip(new_port, net)
2036
2037 port = {"port-id": new_port["port"]["id"]}
2038
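# Nova microversion 2.32 introduced device tagging for NICs, hence the guard below.
# Note that comparing versions with float() is fragile: e.g. float("2.100") == 2.1,
# which would compare lower than 2.32 even though microversion 2.100 is newer.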
2039 if float(self.nova.api_version.get_string()) >= 2.32:
2040 port["tag"] = new_port["port"]["name"]
2041
2042 return new_port, port
2043
2044 def _prepare_network_for_vminstance(
2045 self,
2046 name: str,
2047 net_list: list,
2048 created_items: dict,
2049 net_list_vim: list,
2050 external_network: list,
2051 no_secured_ports: list,
2052 ) -> None:
2053 """Create port and fill up net dictionary for new VM instance creation.
2054
2055 Args:
2056 name (str): Name of the VM instance, used as the default port name
2057 net_list (list): List of networks
2058 created_items (dict): All created items belonging to a VM
2059 net_list_vim (list): List of ports
2060 external_network (list): List of external-networks
2061 no_secured_ports (list): Port security disabled ports
2062 """
2063
2064 self._reload_connection()
2065
2066 for net in net_list:
2067 # Skip non-connected iface
2068 if not net.get("net_id"):
2069 continue
2070
2071 new_port, port = self._create_port(net, name, created_items)
2072
2073 net_list_vim.append(port)
2074
2075 if net.get("floating_ip", False):
2076 net["exit_on_floating_ip_error"] = True
2077 external_network.append(net)
2078
2079 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2080 net["exit_on_floating_ip_error"] = False
2081 external_network.append(net)
2082 net["floating_ip"] = self.config.get("use_floating_ip")
2083
2084 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2085 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2086 if net.get("port_security") is False and not self.config.get(
2087 "no_port_security_extension"
2088 ):
2089 no_secured_ports.append(
2090 (
2091 new_port["port"]["id"],
2092 net.get("port_security_disable_strategy"),
2093 )
2094 )
2095
2096 def _prepare_persistent_root_volumes(
2097 self,
2098 name: str,
2099 vm_av_zone: list,
2100 disk: dict,
2101 base_disk_index: int,
2102 block_device_mapping: dict,
2103 existing_vim_volumes: list,
2104 created_items: dict,
2105 ) -> Optional[str]:
2106 """Prepare persistent root volumes for new VM instance.
2107
2108 Args:
2109 name (str): Name of VM instance
2110 vm_av_zone (list): List of availability zones
2111 disk (dict): Disk details
2112 base_disk_index (int): Disk index
2113 block_device_mapping (dict): Block device details
2114 existing_vim_volumes (list): Existing disk details
2115 created_items (dict): All created items belonging to the VM
2116
2117 Returns:
2118 boot_volume_id (str): ID of boot volume
2119
2120 """
2121 # Disk may include only vim_volume_id or only vim_id.
2122 # Use existing persistent root volume finding with volume_id or vim_id
2123 key_id = "vim_volume_id" if "vim_volume_id" in disk else "vim_id"
2124 boot_volume_id = None  # keep defined even when an existing root volume is reused
2125 if disk.get(key_id):
2126 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2127 existing_vim_volumes.append({"id": disk[key_id]})
2128
2129 else:
2130 # Create persistent root volume
2131 volume = self.cinder.volumes.create(
2132 size=disk["size"],
2133 name=name + "vd" + chr(base_disk_index),
2134 imageRef=disk["image_id"],
2135 # Make sure volume is in the same AZ as the VM to be attached to
2136 availability_zone=vm_av_zone,
2137 )
2138 boot_volume_id = volume.id
2139 self.update_block_device_mapping(
2140 volume=volume,
2141 block_device_mapping=block_device_mapping,
2142 base_disk_index=base_disk_index,
2143 disk=disk,
2144 created_items=created_items,
2145 )
2146
2147 return boot_volume_id
2148
2149 @staticmethod
2150 def update_block_device_mapping(
2151 volume: object,
2152 block_device_mapping: dict,
2153 base_disk_index: int,
2154 disk: dict,
2155 created_items: dict,
2156 ) -> None:
2157 """Add volume information to block device mapping dict.
2158 Args:
2159 volume (object): Created volume object
2160 block_device_mapping (dict): Block device details
2161 base_disk_index (int): Disk index
2162 disk (dict): Disk details
2163 created_items (dict): All created items belonging to the VM
2164 """
2165 if not volume:
2166 raise vimconn.VimConnException("Volume is empty.")
2167
2168 if not hasattr(volume, "id"):
2169 raise vimconn.VimConnException(
2170 "Created volume is not valid, does not have id attribute."
2171 )
2172
2173 volume_txt = "volume:" + str(volume.id)
2174 if disk.get("keep"):
2175 volume_txt += ":keep"
2176 created_items[volume_txt] = True
2177 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2178
2179 def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:
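# Assumption: a Cinder volume type named "multiattach" (with the multiattach
# property enabled) must exist at the VIM; the create call below fails otherwise.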
2180 try:
2181 volume = self.cinder.volumes.create(
2182 size=shared_volume_data["size"],
2183 name=shared_volume_data["name"],
2184 volume_type="multiattach",
2185 )
2186 return (volume.name, volume.id)
2187 except (ConnectionError, KeyError) as e:
2188 self._format_exception(e)
2189
2190 def _prepare_shared_volumes(
2191 self,
2192 name: str,
2193 disk: dict,
2194 base_disk_index: int,
2195 block_device_mapping: dict,
2196 existing_vim_volumes: list,
2197 created_items: dict,
2198 ):
2199 volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
2200 if volumes.get(disk["name"]):
2201 sv_id = volumes[disk["name"]]
2202 volume = self.cinder.volumes.get(sv_id)
2203 self.update_block_device_mapping(
2204 volume=volume,
2205 block_device_mapping=block_device_mapping,
2206 base_disk_index=base_disk_index,
2207 disk=disk,
2208 created_items=created_items,
2209 )
2210
2211 def _prepare_non_root_persistent_volumes(
2212 self,
2213 name: str,
2214 disk: dict,
2215 vm_av_zone: list,
2216 block_device_mapping: dict,
2217 base_disk_index: int,
2218 existing_vim_volumes: list,
2219 created_items: dict,
2220 ) -> None:
2221 """Prepare persistent volumes for new VM instance.
2222
2223 Args:
2224 name (str): Name of VM instance
2225 disk (dict): Disk details
2226 vm_av_zone (list): List of availability zones
2227 block_device_mapping (dict): Block device details
2228 base_disk_index (int): Disk index
2229 existing_vim_volumes (list): Existing disk details
2230 created_items (dict): All created items belonging to the VM
2231 """
2232 # Non-root persistent volumes
2233 # Disk may include only vim_volume_id or only vim_id.
2234 key_id = "vim_volume_id" if "vim_volume_id" in disk else "vim_id"
2235 if disk.get(key_id):
2236 # Use existing persistent volume
2237 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2238 existing_vim_volumes.append({"id": disk[key_id]})
2239 else:
2240 volume_name = f"{name}vd{chr(base_disk_index)}"
2241 volume = self.cinder.volumes.create(
2242 size=disk["size"],
2243 name=volume_name,
2244 # Make sure volume is in the same AZ as the VM to be attached to
2245 availability_zone=vm_av_zone,
2246 )
2247 self.update_block_device_mapping(
2248 volume=volume,
2249 block_device_mapping=block_device_mapping,
2250 base_disk_index=base_disk_index,
2251 disk=disk,
2252 created_items=created_items,
2253 )
2254
2255 def _wait_for_created_volumes_availability(
2256 self, elapsed_time: int, created_items: dict
2257 ) -> Optional[int]:
2258 """Wait till created volumes become available.
2259
2260 Args:
2261 elapsed_time (int): Passed time while waiting
2262 created_items (dict): All created items belonging to the VM
2263
2264 Returns:
2265 elapsed_time (int): Time spent while waiting
2266
2267 """
2268 while elapsed_time < volume_timeout:
2269 for created_item in created_items:
2270 # created_items keys follow "<type>:<id>[:keep]"; take the first two fields
2271 v, volume_id = created_item.split(":")[:2]
2274 if v == "volume":
2275 volume = self.cinder.volumes.get(volume_id)
2276 if (
2277 volume.volume_type == "multiattach"
2278 and volume.status == "in-use"
2279 ):
2280 return elapsed_time
2281 elif volume.status != "available":
2282 break
2283 else:
2284 # All ready: break from while
2285 break
2286
2287 time.sleep(5)
2288 elapsed_time += 5
2289
2290 return elapsed_time
2291
2292 def _wait_for_existing_volumes_availability(
2293 self, elapsed_time: int, existing_vim_volumes: list
2294 ) -> Optional[int]:
2295 """Wait till existing volumes become available.
2296
2297 Args:
2298 elapsed_time (int): Passed time while waiting
2299 existing_vim_volumes (list): Existing volume details
2300
2301 Returns:
2302 elapsed_time (int): Time spent while waiting
2303
2304 """
2305
2306 while elapsed_time < volume_timeout:
2307 for volume in existing_vim_volumes:
2308 v = self.cinder.volumes.get(volume["id"])
2309 if v.volume_type == "multiattach" and v.status == "in-use":
2310 return elapsed_time
2311 elif v.status != "available":
2312 break
2313 else: # all ready: break from while
2314 break
2315
2316 time.sleep(5)
2317 elapsed_time += 5
2318
2319 return elapsed_time
2320
2321 def _prepare_disk_for_vminstance(
2322 self,
2323 name: str,
2324 existing_vim_volumes: list,
2325 created_items: dict,
2326 vm_av_zone: list,
2327 block_device_mapping: dict,
2328 disk_list: list = None,
2329 ) -> None:
2330 """Prepare all volumes for new VM instance.
2331
2332 Args:
2333 name (str): Name of Instance
2334 existing_vim_volumes (list): List of existing volumes
2335 created_items (dict): All created items belonging to the VM
2336 vm_av_zone (list): VM availability zone
2337 block_device_mapping (dict): Block devices to be attached to VM
2338 disk_list (list): List of disks
2339
2340 """
2341 # Create additional volumes in case these are present in disk_list
2342 base_disk_index = ord("b")
2343 boot_volume_id = None
2344 elapsed_time = 0
2345 for disk in disk_list:
2346 if "image_id" in disk:
2347 # Root persistent volume
2348 base_disk_index = ord("a")
2349 boot_volume_id = self._prepare_persistent_root_volumes(
2350 name=name,
2351 vm_av_zone=vm_av_zone,
2352 disk=disk,
2353 base_disk_index=base_disk_index,
2354 block_device_mapping=block_device_mapping,
2355 existing_vim_volumes=existing_vim_volumes,
2356 created_items=created_items,
2357 )
2358 elif disk.get("multiattach"):
2359 self._prepare_shared_volumes(
2360 name=name,
2361 disk=disk,
2362 base_disk_index=base_disk_index,
2363 block_device_mapping=block_device_mapping,
2364 existing_vim_volumes=existing_vim_volumes,
2365 created_items=created_items,
2366 )
2367 else:
2368 # Non-root persistent volume
2369 self._prepare_non_root_persistent_volumes(
2370 name=name,
2371 disk=disk,
2372 vm_av_zone=vm_av_zone,
2373 block_device_mapping=block_device_mapping,
2374 base_disk_index=base_disk_index,
2375 existing_vim_volumes=existing_vim_volumes,
2376 created_items=created_items,
2377 )
2378 base_disk_index += 1
2379
2380 # Wait until created volumes reach 'available' status
2381 elapsed_time = self._wait_for_created_volumes_availability(
2382 elapsed_time, created_items
2383 )
2384 # Wait until existing volumes at the VIM reach 'available' status
2385 elapsed_time = self._wait_for_existing_volumes_availability(
2386 elapsed_time, existing_vim_volumes
2387 )
2388 # If we exceeded the timeout, raise so the caller can roll back
2389 if elapsed_time >= volume_timeout:
2390 raise vimconn.VimConnException(
2391 "Timeout creating volumes for instance " + name,
2392 http_code=vimconn.HTTP_Request_Timeout,
2393 )
2394 if boot_volume_id:
2395 self.cinder.volumes.set_bootable(boot_volume_id, True)
2396
2397 def _find_the_external_network_for_floating_ip(self):
2398 """Get the external network ip in order to create floating IP.
2399
2400 Returns:
2401 pool_id (str): External network pool ID
2402
2403 """
2404
2405 # Find the external network
2406 external_nets = list()
2407
2408 for net in self.neutron.list_networks()["networks"]:
2409 if net["router:external"]:
2410 external_nets.append(net)
2411
2412 if len(external_nets) == 0:
2413 raise vimconn.VimConnException(
2414 "Cannot create floating_ip automatically since "
2415 "no external network is present",
2416 http_code=vimconn.HTTP_Conflict,
2417 )
2418
2419 if len(external_nets) > 1:
2420 raise vimconn.VimConnException(
2421 "Cannot create floating_ip automatically since "
2422 "multiple external networks are present",
2423 http_code=vimconn.HTTP_Conflict,
2424 )
2425
2426 # Pool ID
2427 return external_nets[0].get("id")
2428
2429 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2430 """Trigger neutron to create a new floating IP using external network ID.
2431
2432 Args:
2433 param (dict): Input parameters to create a floating IP
2434 created_items (dict): All created items belonging to the new VM instance
2435
2436 Raises:
2437
2438 VimConnException
2439 """
2440 try:
2441 self.logger.debug("Creating floating IP")
2442 new_floating_ip = self.neutron.create_floatingip(param)
2443 free_floating_ip = new_floating_ip["floatingip"]["id"]
2444 created_items["floating_ip:" + str(free_floating_ip)] = True
2445
2446 except Exception as e:
2447 raise vimconn.VimConnException(
2448 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2449 http_code=vimconn.HTTP_Conflict,
2450 )
2451
2452 def _create_floating_ip(
2453 self, floating_network: dict, server: object, created_items: dict
2454 ) -> None:
2455 """Get the available Pool ID and create a new floating IP.
2456
2457 Args:
2458 floating_network (dict): Dict including external network ID
2459 server (object): Server object
2460 created_items (dict): All created items belonging to the new VM instance
2461
2462 """
2463
2464 # Pool_id is available
2465 if (
2466 isinstance(floating_network["floating_ip"], str)
2467 and floating_network["floating_ip"].lower() != "true"
2468 ):
2469 pool_id = floating_network["floating_ip"]
2470
2471 # Find the Pool_id
2472 else:
2473 pool_id = self._find_the_external_network_for_floating_ip()
2474
2475 param = {
2476 "floatingip": {
2477 "floating_network_id": pool_id,
2478 "tenant_id": server.tenant_id,
2479 }
2480 }
2481
2482 self._neutron_create_float_ip(param, created_items)
2483
2484 def _find_floating_ip(
2485 self,
2486 server: object,
2487 floating_ips: list,
2488 floating_network: dict,
2489 ) -> Optional[str]:
2490 """Find the available free floating IPs if there are.
2491
2492 Args:
2493 server (object): Server object
2494 floating_ips (list): List of floating IPs
2495 floating_network (dict): Details of floating network such as ID
2496
2497 Returns:
2498 free_floating_ip (str): Free floating ip address
2499
2500 """
2501 for fip in floating_ips:
2502 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2503 continue
2504
2505 if isinstance(floating_network["floating_ip"], str):
2506 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2507 continue
2508
2509 return fip["id"]
2510
2511 def _assign_floating_ip(
2512 self, free_floating_ip: str, floating_network: dict
2513 ) -> Dict:
2514 """Assign the free floating ip address to port.
2515
2516 Args:
2517 free_floating_ip (str): Floating IP to be assigned
2518 floating_network (dict): ID of floating network
2519
2520 Returns:
2521 fip (dict): Floating ip details
2522
2523 """
2524 # The vim_id key contains the neutron.port_id
2525 self.neutron.update_floatingip(
2526 free_floating_ip,
2527 {"floatingip": {"port_id": floating_network["vim_id"]}},
2528 )
2529 # Race-condition guard: wait 5 seconds and re-read to verify the IP was not re-assigned to another VM
2530 time.sleep(5)
2531
2532 return self.neutron.show_floatingip(free_floating_ip)
2533
2534 def _get_free_floating_ip(
2535 self, server: object, floating_network: dict
2536 ) -> Optional[str]:
2537 """Get the free floating IP address.
2538
2539 Args:
2540 server (object): Server Object
2541 floating_network (dict): Floating network details
2542
2543 Returns:
2544 free_floating_ip (str): Free floating ip addr
2545
2546 """
2547
2548 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2549
2550 # Randomize
2551 random.shuffle(floating_ips)
2552
2553 return self._find_floating_ip(server, floating_ips, floating_network)
2554
2555 def _prepare_external_network_for_vminstance(
2556 self,
2557 external_network: list,
2558 server: object,
2559 created_items: dict,
2560 vm_start_time: float,
2561 ) -> None:
2562 """Assign floating IP address for VM instance.
2563
2564 Args:
2565 external_network (list): ID of External network
2566 server (object): Server Object
2567 created_items (dict): All created items belonging to the new VM instance
2568 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2569
2570 Raises:
2571 VimConnException
2572
2573 """
2574 for floating_network in external_network:
2575 try:
2576 assigned = False
2577 floating_ip_retries = 3
2578 # In case of RO in HA there can be conflicts (two RO instances trying to assign the
2579 # same floating IP), so retry several times
2580 while not assigned:
2581 free_floating_ip = self._get_free_floating_ip(
2582 server, floating_network
2583 )
2584
2585 if not free_floating_ip:
2586 self._create_floating_ip(
2587 floating_network, server, created_items
2588 )
2589
2590 try:
2591 # For race condition ensure not already assigned
2592 fip = self.neutron.show_floatingip(free_floating_ip)
2593
2594 if fip["floatingip"].get("port_id"):
2595 continue
2596
2597 # Assign floating ip
2598 fip = self._assign_floating_ip(
2599 free_floating_ip, floating_network
2600 )
2601
2602 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2603 self.logger.warning(
2604 "floating_ip {} re-assigned to other port".format(
2605 free_floating_ip
2606 )
2607 )
2608 continue
2609
2610 self.logger.debug(
2611 "Assigned floating_ip {} to VM {}".format(
2612 free_floating_ip, server.id
2613 )
2614 )
2615
2616 assigned = True
2617
2618 except Exception as e:
2619 # Openstack needs some time after VM creation to assign an IP, so retry on failure
2620 vm_status = self.nova.servers.get(server.id).status
2621
2622 if vm_status not in ("ACTIVE", "ERROR"):
2623 if time.time() - vm_start_time < server_timeout:
2624 time.sleep(5)
2625 continue
2626 elif floating_ip_retries > 0:
2627 floating_ip_retries -= 1
2628 continue
2629
2630 raise vimconn.VimConnException(
2631 "Cannot create floating_ip: {} {}".format(
2632 type(e).__name__, e
2633 ),
2634 http_code=vimconn.HTTP_Conflict,
2635 )
2636
2637 except Exception as e:
2638 if not floating_network["exit_on_floating_ip_error"]:
2639 self.logger.error("Cannot create floating_ip. %s", str(e))
2640 continue
2641
2642 raise
2643
2644 def _update_port_security_for_vminstance(
2645 self,
2646 no_secured_ports: list,
2647 server: object,
2648 ) -> None:
2649 """Updates the port security according to no_secured_ports list.
2650
2651 Args:
2652 no_secured_ports (list): List of ports that security will be disabled
2653 server (object): Server Object
2654
2655 Raises:
2656 VimConnException
2657
2658 """
2659 # Wait until the VM is active and then disable the port-security
2660 if no_secured_ports:
2661 self.__wait_for_vm(server.id, "ACTIVE")
2662
2663 for port in no_secured_ports:
2664 port_update = {
2665 "port": {"port_security_enabled": False, "security_groups": None}
2666 }
2667
2668 if port[1] == "allow-address-pairs":
2669 port_update = {
2670 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2671 }
2672
2673 try:
2674 self.neutron.update_port(port[0], port_update)
2675
2676 except Exception:
2677 raise vimconn.VimConnException(
2678 "It was not possible to disable port security for port {}".format(
2679 port[0]
2680 )
2681 )
2682
2683 def new_vminstance(
2684 self,
2685 name: str,
2686 description: str,
2687 start: bool,
2688 image_id: str,
2689 flavor_id: str,
2690 affinity_group_list: list,
2691 net_list: list,
2692 cloud_config=None,
2693 disk_list=None,
2694 availability_zone_index=None,
2695 availability_zone_list=None,
2696 ) -> tuple:
2697 """Adds a VM instance to VIM.
2698
2699 Args:
2700 name (str): name of VM
2701 description (str): description
2702 start (bool): indicates if VM must start or boot in pause mode. Ignored
2703 image_id (str): image uuid
2704 flavor_id (str): flavor uuid
2705 affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignored if empty.
2706 net_list (list): list of interfaces, each one is a dictionary with:
2707 name: name of network
2708 net_id: network uuid to connect
2709 vpci: virtual PCI address to assign; ignored because openstack lacks support #TODO
2710 model: interface model, ignored #TODO
2711 mac_address: used for SR-IOV ifaces #TODO for other types
2712 use: 'data', 'bridge', 'mgmt'
2713 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2714 vim_id: filled/added by this function
2715 floating_ip: True/False (or it can be None)
2716 port_security: True/False
2717 cloud_config (dict): (optional) dictionary with:
2718 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2719 users: (optional) list of users to be inserted, each item is a dict with:
2720 name: (mandatory) user name,
2721 key-pairs: (optional) list of strings with the public key to be inserted to the user
2722 user-data: (optional) string is a text script to be passed directly to cloud-init
2723 config-files: (optional). List of files to be transferred. Each item is a dict with:
2724 dest: (mandatory) string with the destination absolute path
2725 encoding: (optional, by default text). Can be one of:
2726 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2727 content : (mandatory) string with the content of the file
2728 permissions: (optional) string with file permissions, typically octal notation '0644'
2729 owner: (optional) file owner, string with the format 'owner:group'
2730 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2731 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2732 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2733 size: (mandatory) string with the size of the disk in GB
2734 vim_id: (optional) should use this existing volume id
2735 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
2736 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2737 availability_zone_index is None
2738 #TODO ip, security groups
2739
2740 Returns:
2741 A tuple with the instance identifier and created_items or raises an exception on error
2742 created_items can be None or a dictionary where this method can include key-values that will be passed to
2743 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2744 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2745 as not present.
2746
2747 """
2748 self.logger.debug(
2749 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2750 image_id,
2751 flavor_id,
2752 str(net_list),
2753 )
2754
2755 try:
2756 server = None
2757 created_items = {}
2758 net_list_vim = []
2759 # list of external networks to be connected to instance, later on used to create floating_ip
2760 external_network = []
2761 # List of ports with port-security disabled
2762 no_secured_ports = []
2763 block_device_mapping = {}
2764 existing_vim_volumes = []
2765 server_group_id = None
2766 scheduler_hints = {}
2767
2768 # Check the Openstack Connection
2769 self._reload_connection()
2770
2771 # Prepare network list
2772 self._prepare_network_for_vminstance(
2773 name=name,
2774 net_list=net_list,
2775 created_items=created_items,
2776 net_list_vim=net_list_vim,
2777 external_network=external_network,
2778 no_secured_ports=no_secured_ports,
2779 )
2780
2781 # Cloud config
2782 config_drive, userdata = self._create_user_data(cloud_config)
2783
2784 # Get availability Zone
2785 vm_av_zone = self._get_vm_availability_zone(
2786 availability_zone_index, availability_zone_list
2787 )
2788
2789 if disk_list:
2790 # Prepare disks
2791 self._prepare_disk_for_vminstance(
2792 name=name,
2793 existing_vim_volumes=existing_vim_volumes,
2794 created_items=created_items,
2795 vm_av_zone=vm_av_zone,
2796 block_device_mapping=block_device_mapping,
2797 disk_list=disk_list,
2798 )
2799
2800 if affinity_group_list:
2801 # Only first id on the list will be used. Openstack restriction
2802 server_group_id = affinity_group_list[0]["affinity_group_id"]
2803 scheduler_hints["group"] = server_group_id
2804
2805 self.logger.debug(
2806 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2807 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2808 "block_device_mapping={}, server_group={})".format(
2809 name,
2810 image_id,
2811 flavor_id,
2812 net_list_vim,
2813 self.config.get("security_groups"),
2814 vm_av_zone,
2815 self.config.get("keypair"),
2816 userdata,
2817 config_drive,
2818 block_device_mapping,
2819 server_group_id,
2820 )
2821 )
2822 # Create VM
2823 server = self.nova.servers.create(
2824 name=name,
2825 image=image_id,
2826 flavor=flavor_id,
2827 nics=net_list_vim,
2828 security_groups=self.config.get("security_groups"),
2829 # TODO remove security_groups in future versions. Already at neutron port
2830 availability_zone=vm_av_zone,
2831 key_name=self.config.get("keypair"),
2832 userdata=userdata,
2833 config_drive=config_drive,
2834 block_device_mapping=block_device_mapping,
2835 scheduler_hints=scheduler_hints,
2836 )
2837
2838 vm_start_time = time.time()
2839
2840 self._update_port_security_for_vminstance(no_secured_ports, server)
2841
2842 self._prepare_external_network_for_vminstance(
2843 external_network=external_network,
2844 server=server,
2845 created_items=created_items,
2846 vm_start_time=vm_start_time,
2847 )
2848
2849 return server.id, created_items
2850
2851 except Exception as e:
2852 server_id = None
2853 if server:
2854 server_id = server.id
2855
2856 try:
2857 created_items = self.remove_keep_tag_from_persistent_volumes(
2858 created_items
2859 )
2860
2861 self.delete_vminstance(server_id, created_items)
2862
2863 except Exception as e2:
2864 self.logger.error("new_vminstance rollback fail {}".format(e2))
2865
2866 self._format_exception(e)
2867
2868 @staticmethod
2869 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2870 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2871
2872 Args:
2873 created_items (dict): All created items belonging to the VM
2874
2875 Returns:
2876 updated_created_items (dict): Dict which does not include keep flag for volumes.
2877
2878 """
2879 return {
2880 key.replace(":keep", ""): value for (key, value) in created_items.items()
2881 }
2882
2883 def get_vminstance(self, vm_id):
2884 """Returns the VM instance information from VIM"""
2885 return self._find_nova_server(vm_id)
2886
2887 def get_vminstance_console(self, vm_id, console_type="vnc"):
2888 """
2889 Get a console for the virtual machine
2890 Params:
2891 vm_id: uuid of the VM
2892 console_type, can be:
2893 "novnc" (by default), "xvpvnc" for VNC types,
2894 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2895 Returns dict with the console parameters:
2896 protocol: ssh, ftp, http, https, ...
2897 server: usually ip address
2898 port: the http, ssh, ... port
2899 suffix: extra text, e.g. the http path and query string
2900 """
2901 self.logger.debug("Getting VM CONSOLE from VIM")
2902
2903 try:
2904 self._reload_connection()
2905 server = self.nova.servers.find(id=vm_id)
2906
2907 if console_type is None or console_type == "novnc":
2908 console_dict = server.get_vnc_console("novnc")
2909 elif console_type == "xvpvnc":
2910 console_dict = server.get_vnc_console(console_type)
2911 elif console_type == "rdp-html5":
2912 console_dict = server.get_rdp_console(console_type)
2913 elif console_type == "spice-html5":
2914 console_dict = server.get_spice_console(console_type)
2915 else:
2916 raise vimconn.VimConnException(
2917 "console type '{}' not allowed".format(console_type),
2918 http_code=vimconn.HTTP_Bad_Request,
2919 )
2920
2921 console_dict1 = console_dict.get("console")
2922
2923 if console_dict1:
2924 console_url = console_dict1.get("url")
2925
2926 if console_url:
2927 # parse console_url
2928 protocol_index = console_url.find("//")
2929 suffix_index = (
2930 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2931 )
2932 port_index = (
2933 console_url[protocol_index + 2 : suffix_index].find(":")
2934 + protocol_index
2935 + 2
2936 )
2937
2938 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2939 return (
2940 -vimconn.HTTP_Internal_Server_Error,
2941 "Unexpected response from VIM",
2942 )
2943
2944 console_dict = {
2945 "protocol": console_url[0:protocol_index],
2946 "server": console_url[protocol_index + 2 : port_index],
2947 "port": console_url[port_index + 1 : suffix_index],
2948 "suffix": console_url[suffix_index + 1 :],
2949 }
2951
2952 return console_dict
2953 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2954 except (
2955 nvExceptions.NotFound,
2956 ksExceptions.ClientException,
2957 nvExceptions.ClientException,
2958 nvExceptions.BadRequest,
2959 ConnectionError,
2960 ) as e:
2961 self._format_exception(e)
2962
2963 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2964 """Neutron delete ports by id.
2965 Args:
2966 k_id (str): Port id in the VIM
2967 """
2968 try:
2969 port_dict = self.neutron.list_ports()
2970 existing_ports = [port["id"] for port in port_dict.get("ports", [])]
2971
2972 if k_id in existing_ports:
2973 self.neutron.delete_port(k_id)
2974
2975 except Exception as e:
2976 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2977
2978 def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
2979 """Cinder delete volume by id.
2980 Args:
2981 shared_volume_vim_id (str): ID of shared volume in VIM
2982 """
2983 try:
2984 if self.cinder.volumes.get(shared_volume_vim_id).status != "available":
2985 return True
2986
2987 else:
2988 self.cinder.volumes.delete(shared_volume_vim_id)
2989 return False  # deletion requested; the volume is no longer held
2990 except Exception as e:
2991 self.logger.error(
2992 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2993 )
2994
2995 def _delete_volumes_by_id_wth_cinder(
2996 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2997 ) -> bool:
2998 """Cinder delete volume by id.
2999 Args:
3000 k (str): Full item name in created_items
3001 k_id (str): ID of the volume in VIM
3002 volumes_to_hold (list): Volumes not to delete
3003 created_items (dict): All created items belonging to the VM
3004 """
3005 try:
3006 if k_id in volumes_to_hold:
3007 return False
3008
3009 if self.cinder.volumes.get(k_id).status != "available":
3010 return True
3011
3012 else:
3013 self.cinder.volumes.delete(k_id)
3014 created_items[k] = None
3015
3016 except Exception as e:
3017 self.logger.error(
3018 "Error deleting volume: {}: {}".format(type(e).__name__, e)
3019 )
3020
3021 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
3022 """Neutron delete floating ip by id.
3023 Args:
3024 k (str): Full item name in created_items
3025 k_id (str): ID of floating ip in VIM
3026 created_items (dict): All created items belonging to the VM
3027 """
3028 try:
3029 self.neutron.delete_floatingip(k_id)
3030 created_items[k] = None
3031
3032 except Exception as e:
3033 self.logger.error(
3034 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
3035 )
3036
3037 @staticmethod
3038 def _get_item_name_id(k: str) -> Tuple[str, str]:
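# Examples (hypothetical ids): "port:8d1b" -> ("port", "8d1b");
# "volume:4f2a:keep" -> ("volume", "4f2a:keep"), since partition() splits only at the first ":".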
3039 k_item, _, k_id = k.partition(":")
3040 return k_item, k_id
3041
3042 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
3043 """Delete VM ports attached to the networks before deleting virtual machine.
3044 Args:
3045 created_items (dict): All created items belonging to the VM
3046 """
3047
3048 for k, v in created_items.items():
3049 if not v: # skip already deleted
3050 continue
3051
3052 try:
3053 k_item, k_id = self._get_item_name_id(k)
3054 if k_item == "port":
3055 self._delete_ports_by_id_wth_neutron(k_id)
3056
3057 except Exception as e:
3058 self.logger.error(
3059 "Error deleting port: {}: {}".format(type(e).__name__, e)
3060 )
3061
3062 def _delete_created_items(
3063 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
3064 ) -> bool:
3065 """Delete Volumes and floating ip if they exist in created_items."""
3066 for k, v in created_items.items():
3067 if not v: # skip already deleted
3068 continue
3069
3070 try:
3071 k_item, k_id = self._get_item_name_id(k)
3072 if k_item == "volume":
3073 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
3074 k, k_id, volumes_to_hold, created_items
3075 )
3076
3077 if unavailable_vol:
3078 keep_waiting = True
3079
3080 elif k_item == "floating_ip":
3081 self._delete_floating_ip_by_id(k, k_id, created_items)
3082
3083 except Exception as e:
3084 self.logger.error("Error deleting {}: {}".format(k, e))
3085
3086 return keep_waiting
3087
3088 @staticmethod
3089 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3090 """Remove the volumes which has key flag from created_items
3091
3092 Args:
3093 created_items (dict): All created items belongs to VM
3094
3095 Returns:
3096 created_items (dict): Persistent volumes eliminated created_items
3097 """
3098 return {
3099 key: value
3100 for (key, value) in created_items.items()
3101 if len(key.split(":")) == 2
3102 }
3103
3104 def delete_vminstance(
3105 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3106 ) -> None:
3107 """Removes a VM instance from VIM. Returns the old identifier.
3108 Args:
3109 vm_id (str): Identifier of VM instance
3110 created_items (dict): All created items belonging to the VM
3111 volumes_to_hold (list): Volumes_to_hold
3112 """
3113 if created_items is None:
3114 created_items = {}
3115 if volumes_to_hold is None:
3116 volumes_to_hold = []
3117
3118 try:
3119 created_items = self._extract_items_wth_keep_flag_from_created_items(
3120 created_items
3121 )
3122
3123 self._reload_connection()
3124
3125 # Delete VM ports attached to the networks before the virtual machine
3126 if created_items:
3127 self._delete_vm_ports_attached_to_network(created_items)
3128
3129 if vm_id:
3130 self.nova.servers.delete(vm_id)
3131
3132 # Although detached, volumes should be in 'available' status before deleting.
3133 # We ensure that in this loop
3134 keep_waiting = True
3135 elapsed_time = 0
3136
3137 while keep_waiting and elapsed_time < volume_timeout:
3138 keep_waiting = False
3139
3140 # Delete volumes and floating IP.
3141 keep_waiting = self._delete_created_items(
3142 created_items, volumes_to_hold, keep_waiting
3143 )
3144
3145 if keep_waiting:
3146 time.sleep(1)
3147 elapsed_time += 1
3148
3149 except (
3150 nvExceptions.NotFound,
3151 ksExceptions.ClientException,
3152 nvExceptions.ClientException,
3153 ConnectionError,
3154 ) as e:
3155 self._format_exception(e)
3156
3157 def refresh_vms_status(self, vm_list):
3158 """Get the status of the virtual machines and their interfaces/ports
3159 Params: the list of VM identifiers
3160 Returns a dictionary with:
3161 vm_id: #VIM id of this Virtual Machine
3162 status: #Mandatory. Text with one of:
3163 # DELETED (not found at vim)
3164 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3165 # OTHER (Vim reported other status not understood)
3166 # ERROR (VIM indicates an ERROR status)
3167 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3168 # CREATING (on building process), ERROR
3169 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
3170 #
3171 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3172 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3173 interfaces:
3174 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3175 mac_address: #Text format XX:XX:XX:XX:XX:XX
3176 vim_net_id: #network id where this interface is connected
3177 vim_interface_id: #interface/port VIM id
3178 ip_address: #null, or text with IPv4, IPv6 address
3179 compute_node: #identification of compute node where PF,VF interface is allocated
3180 pci: #PCI address of the NIC that hosts the PF,VF
3181 vlan: #physical VLAN used for VF
3182 """
3183 vm_dict = {}
3184 self.logger.debug(
3185 "refresh_vms status: Getting tenant VM instance information from VIM"
3186 )
3187
3188 for vm_id in vm_list:
3189 vm = {}
3190
3191 try:
3192 vm_vim = self.get_vminstance(vm_id)
3193
3194 if vm_vim["status"] in vmStatus2manoFormat:
3195 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3196 else:
3197 vm["status"] = "OTHER"
3198 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3199
3200 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3201 vm_vim.pop("user_data", None)
3202 vm["vim_info"] = self.serialize(vm_vim)
3203
3204 vm["interfaces"] = []
3205 if vm_vim.get("fault"):
3206 vm["error_msg"] = str(vm_vim["fault"])
3207
3208 # get interfaces
3209 try:
3210 self._reload_connection()
3211 port_dict = self.neutron.list_ports(device_id=vm_id)
3212
3213 for port in port_dict["ports"]:
3214 interface = {}
3215 interface["vim_info"] = self.serialize(port)
3216 interface["mac_address"] = port.get("mac_address")
3217 interface["vim_net_id"] = port["network_id"]
3218 interface["vim_interface_id"] = port["id"]
3219 # check if OS-EXT-SRV-ATTR:host is there,
3220 # in case of non-admin credentials, it will be missing
3221
3222 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3223 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3224
3225 interface["pci"] = None
3226
3227 # check if binding:profile is there,
3228 # in case of non-admin credentials, it will be missing
3229 if port.get("binding:profile"):
3230 if port["binding:profile"].get("pci_slot"):
3231 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3232 # the slot to 0x00
3233 # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
3234 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3235 pci = port["binding:profile"]["pci_slot"]
3236 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3237 interface["pci"] = pci
3238
3239 interface["vlan"] = None
3240
3241 if port.get("binding:vif_details"):
3242 interface["vlan"] = port["binding:vif_details"].get("vlan")
3243
3244 # Get vlan from network in case not present in port for those old openstacks and cases where
3245 # it is needed vlan at PT
3246 if not interface["vlan"]:
3247 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3248 network = self.neutron.show_network(port["network_id"])
3249
3250 if (
3251 network["network"].get("provider:network_type")
3252 == "vlan"
3253 ):
3254 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3255 interface["vlan"] = network["network"].get(
3256 "provider:segmentation_id"
3257 )
3258
3259 ips = []
3260 # look for floating ip address
3261 try:
3262 floating_ip_dict = self.neutron.list_floatingips(
3263 port_id=port["id"]
3264 )
3265
3266 if floating_ip_dict.get("floatingips"):
3267 ips.append(
3268 floating_ip_dict["floatingips"][0].get(
3269 "floating_ip_address"
3270 )
3271 )
3272 except Exception:
3273 pass
3274
3275 for subnet in port["fixed_ips"]:
3276 ips.append(subnet["ip_address"])
3277
3278 interface["ip_address"] = ";".join(ips)
3279 vm["interfaces"].append(interface)
3280 except Exception as e:
3281 self.logger.error(
3282 "Error getting vm interface information {}: {}".format(
3283 type(e).__name__, e
3284 ),
3285 exc_info=True,
3286 )
3287 except vimconn.VimConnNotFoundException as e:
3288 self.logger.error("Exception getting vm status: %s", str(e))
3289 vm["status"] = "DELETED"
3290 vm["error_msg"] = str(e)
3291 except vimconn.VimConnException as e:
3292 self.logger.error("Exception getting vm status: %s", str(e))
3293 vm["status"] = "VIM_ERROR"
3294 vm["error_msg"] = str(e)
3295
3296 vm_dict[vm_id] = vm
3297
3298 return vm_dict
3299
3300 def action_vminstance(self, vm_id, action_dict, created_items={}):
3301 """Send and action over a VM instance from VIM
3302 Returns None or the console dict if the action was successfully sent to the VIM
3303 """
3304 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3305
3306 try:
3307 self._reload_connection()
3308 server = self.nova.servers.find(id=vm_id)
3309
3310 if "start" in action_dict:
3311 if action_dict["start"] == "rebuild":
3312 server.rebuild()
3313 else:
3314 if server.status == "PAUSED":
3315 server.unpause()
3316 elif server.status == "SUSPENDED":
3317 server.resume()
3318 elif server.status == "SHUTOFF":
3319 server.start()
3320 else:
3321 self.logger.debug(
3322 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3323 )
3324 raise vimconn.VimConnException(
3325 "Cannot 'start' instance while it is in active state",
3326 http_code=vimconn.HTTP_Bad_Request,
3327 )
3328
3329 elif "pause" in action_dict:
3330 server.pause()
3331 elif "resume" in action_dict:
3332 server.resume()
3333 elif "shutoff" in action_dict or "shutdown" in action_dict:
3334 self.logger.debug("server status %s", server.status)
3335 if server.status == "ACTIVE":
3336 server.stop()
3337 else:
3338 self.logger.debug("ERROR: VM is not in Active state")
3339 raise vimconn.VimConnException(
3340 "VM is not in active state, stop operation is not allowed",
3341 http_code=vimconn.HTTP_Bad_Request,
3342 )
3343 elif "forceOff" in action_dict:
3344 server.stop() # TODO
3345 elif "terminate" in action_dict:
3346 server.delete()
3347 elif "createImage" in action_dict:
3348 server.create_image()
3349 # "path":path_schema,
3350 # "description":description_schema,
3351 # "name":name_schema,
3352 # "metadata":metadata_schema,
3353 # "imageRef": id_schema,
3354 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3355 elif "rebuild" in action_dict:
3356 server.rebuild(server.image["id"])
3357 elif "reboot" in action_dict:
3358 server.reboot() # reboot_type="SOFT"
3359 elif "console" in action_dict:
3360 console_type = action_dict["console"]
3361
3362 if console_type is None or console_type == "novnc":
3363 console_dict = server.get_vnc_console("novnc")
3364 elif console_type == "xvpvnc":
3365 console_dict = server.get_vnc_console(console_type)
3366 elif console_type == "rdp-html5":
3367 console_dict = server.get_rdp_console(console_type)
3368 elif console_type == "spice-html5":
3369 console_dict = server.get_spice_console(console_type)
3370 else:
3371 raise vimconn.VimConnException(
3372 "console type '{}' not allowed".format(console_type),
3373 http_code=vimconn.HTTP_Bad_Request,
3374 )
3375
3376 try:
3377 console_url = console_dict["console"]["url"]
3378 # parse console_url
3379 protocol_index = console_url.find("//")
3380 suffix_index = (
3381 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3382 )
3383 port_index = (
3384 console_url[protocol_index + 2 : suffix_index].find(":")
3385 + protocol_index
3386 + 2
3387 )
3388
3389 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3390 raise vimconn.VimConnException(
3391 "Unexpected response from VIM " + str(console_dict)
3392 )
3393
3394 console_dict2 = {
3395 "protocol": console_url[0:protocol_index],
3396 "server": console_url[protocol_index + 2 : port_index],
3397 "port": int(console_url[port_index + 1 : suffix_index]),
3398 "suffix": console_url[suffix_index + 1 :],
3399 }
3400
3401 return console_dict2
3402 except Exception:
3403 raise vimconn.VimConnException(
3404 "Unexpected response from VIM " + str(console_dict)
3405 )
3406
3407 return None
3408 except (
3409 ksExceptions.ClientException,
3410 nvExceptions.ClientException,
3411 nvExceptions.NotFound,
3412 ConnectionError,
3413 ) as e:
3414 self._format_exception(e)
3415 # TODO insert exception vimconn.HTTP_Unauthorized
3416
3417 # ###### VIO Specific Changes #########
3418 def _generate_vlanID(self):
3419 """
3420 Method to get unused vlanID
3421 Args:
3422 None
3423 Returns:
3424 vlanID
3425 """
3426 # Get used VLAN IDs
3427 usedVlanIDs = []
3428 networks = self.get_network_list()
3429
3430 for net in networks:
3431 if net.get("provider:segmentation_id"):
3432 usedVlanIDs.append(net.get("provider:segmentation_id"))
3433
3434 used_vlanIDs = set(usedVlanIDs)
3435
3436 # find unused VLAN ID
3437 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3438 try:
3439 start_vlanid, end_vlanid = map(
3440 int, vlanID_range.replace(" ", "").split("-")
3441 )
3442
3443 for vlanID in range(start_vlanid, end_vlanid + 1):
3444 if vlanID not in used_vlanIDs:
3445 return vlanID
3446 except Exception as exp:
3447 raise vimconn.VimConnException(
3448 "Exception {} occurred while generating VLAN ID.".format(exp)
3449 )
3450 else:
3451 raise vimconn.VimConnConflictException(
3452 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3453 self.config.get("dataplane_net_vlan_range")
3454 )
3455 )
3456
3457 def _generate_multisegment_vlanID(self):
3458 """
3459 Method to get unused vlanID
3460 Args:
3461 None
3462 Returns:
3463 vlanID
3464 """
3465 # Get used VLAN IDs
3466 usedVlanIDs = []
3467 networks = self.get_network_list()
3468 for net in networks:
3469 if net.get("provider:network_type") == "vlan" and net.get(
3470 "provider:segmentation_id"
3471 ):
3472 usedVlanIDs.append(net.get("provider:segmentation_id"))
3473 elif net.get("segments"):
3474 for segment in net.get("segments"):
3475 if segment.get("provider:network_type") == "vlan" and segment.get(
3476 "provider:segmentation_id"
3477 ):
3478 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3479
3480 used_vlanIDs = set(usedVlanIDs)
3481
3482 # find unused VLAN ID
3483 for vlanID_range in self.config.get("multisegment_vlan_range"):
3484 try:
3485 start_vlanid, end_vlanid = map(
3486 int, vlanID_range.replace(" ", "").split("-")
3487 )
3488
3489 for vlanID in range(start_vlanid, end_vlanid + 1):
3490 if vlanID not in used_vlanIDs:
3491 return vlanID
3492 except Exception as exp:
3493 raise vimconn.VimConnException(
3494 "Exception {} occurred while generating VLAN ID.".format(exp)
3495 )
3496 else:
3497 raise vimconn.VimConnConflictException(
3498 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3499 self.config.get("multisegment_vlan_range")
3500 )
3501 )
3502
3503 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3504 """
3505 Method to validate user given vlanID ranges
3506 Args: None
3507 Returns: None
3508 """
3509 for vlanID_range in input_vlan_range:
3510 vlan_range = vlanID_range.replace(" ", "")
3511 # validate format
3512 vlanID_pattern = r"(\d)*-(\d)*$"
3513 match_obj = re.match(vlanID_pattern, vlan_range)
3514 if not match_obj:
3515 raise vimconn.VimConnConflictException(
3516 "Invalid VLAN range for {}: {}.You must provide "
3517 "'{}' in format [start_ID - end_ID].".format(
3518 text_vlan_range, vlanID_range, text_vlan_range
3519 )
3520 )
3521
3522 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3523 if start_vlanid <= 0:
3524 raise vimconn.VimConnConflictException(
3525 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3526 "networks valid IDs are 1 to 4094 ".format(
3527 text_vlan_range, vlanID_range
3528 )
3529 )
3530
3531 if end_vlanid > 4094:
3532 raise vimconn.VimConnConflictException(
3533 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3534 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3535 text_vlan_range, vlanID_range
3536 )
3537 )
3538
3539 if start_vlanid > end_vlanid:
3540 raise vimconn.VimConnConflictException(
3541 "Invalid VLAN range for {}: {}. You must provide '{}'"
3542 " in format start_ID - end_ID and start_ID < end_ID ".format(
3543 text_vlan_range, vlanID_range, text_vlan_range
3544 )
3545 )
3546
3547 def get_hosts_info(self):
3548 """Get the information of deployed hosts
3549 Returns the hosts content"""
3550 if self.debug:
3551 print("osconnector: Getting Host info from VIM")
3552
3553 try:
3554 h_list = []
3555 self._reload_connection()
3556 hypervisors = self.nova.hypervisors.list()
3557
3558 for hype in hypervisors:
3559 h_list.append(hype.to_dict())
3560
3561 return 1, {"hosts": h_list}
3562 except nvExceptions.NotFound as e:
3563 error_value = -vimconn.HTTP_Not_Found
3564 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3565 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3566 error_value = -vimconn.HTTP_Bad_Request
3567 error_text = (
3568 type(e).__name__
3569 + ": "
3570 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3571 )
3572
3573 # TODO insert exception vimconn.HTTP_Unauthorized
3574 # if reaching here is because an exception
3575 self.logger.debug("get_hosts_info " + error_text)
3576
3577 return error_value, error_text
3578
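# Illustrative result shape (not part of the module): on success
# get_hosts_info() returns a (1, dict) tuple; on failure a negative HTTP
# code and an error string:
#
#     r, content = self.get_hosts_info()
#     # r == 1 and content == {"hosts": [{"hypervisor_hostname": ..., ...}, ...]}
#     # or r < 0 and content is an error text
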
3579 def get_hosts(self, vim_tenant):
3580 """Get the hosts and deployed instances
3581 Returns the hosts content"""
3582 r, hype_dict = self.get_hosts_info()
3583
3584 if r < 0:
3585 return r, hype_dict
3586
3587 hypervisors = hype_dict["hosts"]
3588
3589 try:
3590 servers = self.nova.servers.list()
3591 for hype in hypervisors:
3592 for server in servers:
3593 if (
3594 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3595 == hype["hypervisor_hostname"]
3596 ):
3597 if "vm" in hype:
3598 hype["vm"].append(server.id)
3599 else:
3600 hype["vm"] = [server.id]
3601
3602 return 1, hype_dict
3603 except nvExceptions.NotFound as e:
3604 error_value = -vimconn.HTTP_Not_Found
3605 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3606 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3607 error_value = -vimconn.HTTP_Bad_Request
3608 error_text = (
3609 type(e).__name__
3610 + ": "
3611 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3612 )
3613
3614 # TODO insert exception vimconn.HTTP_Unauthorized
3615 # if reaching here is because an exception
3616 self.logger.debug("get_hosts " + error_text)
3617
3618 return error_value, error_text
3619
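# Illustrative result shape (not part of the module): get_hosts() extends
# each hypervisor dict returned by get_hosts_info() with a "vm" list of the
# server IDs running on that host:
#
#     r, content = self.get_hosts(vim_tenant)
#     # content["hosts"][0] -> {"hypervisor_hostname": "compute-0",
#     #                         "vm": ["<server-uuid>", ...], ...}
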
3620 def new_affinity_group(self, affinity_group_data):
3621 """Adds a server group to VIM
3622 affinity_group_data contains a dictionary with information, keys:
3623 name: name in VIM for the server group
3624 type: affinity or anti-affinity
3625 scope: Only nfvi-node allowed
3626 Returns the server group identifier"""
3627 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3628
3629 try:
3630 name = affinity_group_data["name"]
3631 policy = affinity_group_data["type"]
3632
3633 self._reload_connection()
3634 new_server_group = self.nova.server_groups.create(name, policy)
3635
3636 return new_server_group.id
3637 except (
3638 ksExceptions.ClientException,
3639 nvExceptions.ClientException,
3640 ConnectionError,
3641 KeyError,
3642 ) as e:
3643 self._format_exception(e)
3644
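# Illustrative round trip (not part of the module) for the server-group
# methods in this class; names and IDs are made up:
#
#     sg_id = self.new_affinity_group(
#         {"name": "vnf-anti-aff", "type": "anti-affinity", "scope": "nfvi-node"}
#     )
#     details = self.get_affinity_group(sg_id)  # dict with "policies", "members", ...
#     self.delete_affinity_group(sg_id)         # returns sg_id
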
3645 def get_affinity_group(self, affinity_group_id):
3646 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3647 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3648 try:
3649 self._reload_connection()
3650 server_group = self.nova.server_groups.find(id=affinity_group_id)
3651
3652 return server_group.to_dict()
3653 except (
3654 nvExceptions.NotFound,
3655 nvExceptions.ClientException,
3656 ksExceptions.ClientException,
3657 ConnectionError,
3658 ) as e:
3659 self._format_exception(e)
3660
3661 def delete_affinity_group(self, affinity_group_id):
3662 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3663 self.logger.debug("Getting server group '%s'", affinity_group_id)
3664 try:
3665 self._reload_connection()
3666 self.nova.server_groups.delete(affinity_group_id)
3667
3668 return affinity_group_id
3669 except (
3670 nvExceptions.NotFound,
3671 ksExceptions.ClientException,
3672 nvExceptions.ClientException,
3673 ConnectionError,
3674 ) as e:
3675 self._format_exception(e)
3676
3677 def get_vdu_state(self, vm_id):
3678 """
3679 Getting the state of a vdu
3680 param:
3681 vm_id: ID of an instance
3682 """
3683 self.logger.debug("Getting the status of VM")
3684 self.logger.debug("VIM VM ID %s", vm_id)
3685 self._reload_connection()
3686 server_dict = self._find_nova_server(vm_id)
3687 vdu_data = [
3688 server_dict["status"],
3689 server_dict["flavor"]["id"],
3690 server_dict["OS-EXT-SRV-ATTR:host"],
3691 server_dict["OS-EXT-AZ:availability_zone"],
3692 ]
3693 self.logger.debug("vdu_data %s", vdu_data)
3694 return vdu_data
3695
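# Illustrative return value (not part of the module): get_vdu_state()
# returns [status, flavor_id, hypervisor_hostname, availability_zone]:
#
#     status, flavor_id, host, az = self.get_vdu_state("<server-uuid>")
#     # e.g. ["ACTIVE", "<flavor-uuid>", "compute-0", "nova"]
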
3696 def check_compute_availability(self, host, server_flavor_details):
3697 self._reload_connection()
3698 hypervisor_search = self.nova.hypervisors.search(
3699 hypervisor_match=host, servers=True
3700 )
3701 for hypervisor in hypervisor_search:
3702 hypervisor_id = hypervisor.to_dict()["id"]
3703 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
3704 # hypervisor_details.to_dict() already yields a plain dict,
3705 # so its fields can be read directly without a JSON round-trip
3706 hypervisor_dict = hypervisor_details.to_dict()
3707 resources_available = [
3708 hypervisor_dict["free_ram_mb"],
3709 hypervisor_dict["disk_available_least"],
3710 hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
3711 ]
3712 compute_available = all(
3713 x > y for x, y in zip(resources_available, server_flavor_details)
3714 )
3715 if compute_available:
3716 return host
3717
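# Illustrative check (not part of the module): the host is returned only if
# its free RAM, least-available disk and free vCPUs all exceed the
# requested flavor values, compared pairwise in that order:
#
#     host = self.check_compute_availability("compute-0", [4096, 40, 2])
#     # "compute-0" if it has > 4096 MB free RAM, > 40 GB disk and > 2 free
#     # vCPUs; otherwise the method falls through and returns None
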
3718 def check_availability_zone(
3719 self, old_az, server_flavor_details, old_host, host=None
3720 ):
3721 self._reload_connection()
3722 az_check = {"zone_check": False, "compute_availability": None}
3723 aggregates_list = self.nova.aggregates.list()
3724 for aggregate in aggregates_list:
3725 # aggregate.to_dict() already yields a plain dict,
3726 # so it can be inspected directly without a JSON round-trip
3727 aggregate_details = aggregate.to_dict()
3728 if aggregate_details["availability_zone"] == old_az:
3729 hosts_list = aggregate_details["hosts"]
3730 if host is not None:
3731 if host in hosts_list:
3732 az_check["zone_check"] = True
3733 available_compute_id = self.check_compute_availability(
3734 host, server_flavor_details
3735 )
3736 if available_compute_id is not None:
3737 az_check["compute_availability"] = available_compute_id
3738 else:
3739 for check_host in hosts_list:
3740 if check_host != old_host:
3741 available_compute_id = self.check_compute_availability(
3742 check_host, server_flavor_details
3743 )
3744 if available_compute_id is not None:
3745 az_check["zone_check"] = True
3746 az_check["compute_availability"] = available_compute_id
3747 break
3748 else:
3749 az_check["zone_check"] = True
3750 return az_check
3751
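# Illustrative outcomes (not part of the module) of check_availability_zone:
#
#     az = self.check_availability_zone("nova", [4096, 40, 2], "compute-0", "compute-1")
#     # {"zone_check": True, "compute_availability": "compute-1"} if
#     # compute-1 belongs to zone "nova" and has capacity; with host=None,
#     # any host in the zone other than the old one is considered
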
3752 def migrate_instance(self, vm_id, compute_host=None):
3753 """
3754 Migrate a vdu
3755 param:
3756 vm_id: ID of an instance
3757 compute_host: Host to migrate the vdu to
3758 """
3759 self._reload_connection()
3760 vm_state = False
3761 instance_state = self.get_vdu_state(vm_id)
3762 server_flavor_id = instance_state[1]
3763 server_hypervisor_name = instance_state[2]
3764 server_availability_zone = instance_state[3]
3765 try:
3766 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3767 server_flavor_details = [
3768 server_flavor["ram"],
3769 server_flavor["disk"],
3770 server_flavor["vcpus"],
3771 ]
3772 if compute_host == server_hypervisor_name:
3773 raise vimconn.VimConnException(
3774 "Unable to migrate instance '{}' to the same host '{}'".format(
3775 vm_id, compute_host
3776 ),
3777 http_code=vimconn.HTTP_Bad_Request,
3778 )
3779 az_status = self.check_availability_zone(
3780 server_availability_zone,
3781 server_flavor_details,
3782 server_hypervisor_name,
3783 compute_host,
3784 )
3785 availability_zone_check = az_status["zone_check"]
3786 available_compute_id = az_status.get("compute_availability")
3787
3788 if availability_zone_check is False:
3789 raise vimconn.VimConnException(
3790 "Unable to migrate instance '{}' to a different availability zone".format(
3791 vm_id
3792 ),
3793 http_code=vimconn.HTTP_Bad_Request,
3794 )
3795 if available_compute_id is not None:
3796 self.nova.servers.live_migrate(
3797 server=vm_id,
3798 host=available_compute_id,
3799 block_migration=True,
3800 disk_over_commit=False,
3801 )
3802 state = "MIGRATING"
3803 changed_compute_host = ""
3804 if state == "MIGRATING":
3805 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
3806 changed_compute_host = self.get_vdu_state(vm_id)[2]
3807 if vm_state and changed_compute_host == available_compute_id:
3808 self.logger.debug(
3809 "Instance '{}' migrated to the new compute host '{}'".format(
3810 vm_id, changed_compute_host
3811 )
3812 )
3813 return state, available_compute_id
3814 else:
3815 raise vimconn.VimConnException(
3816 "Migration Failed. Instance '{}' not moved to the new host {}".format(
3817 vm_id, available_compute_id
3818 ),
3819 http_code=vimconn.HTTP_Bad_Request,
3820 )
3821 else:
3822 raise vimconn.VimConnException(
3823 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
3824 available_compute_id
3825 ),
3826 http_code=vimconn.HTTP_Bad_Request,
3827 )
3828 except (
3829 nvExceptions.BadRequest,
3830 nvExceptions.ClientException,
3831 nvExceptions.NotFound,
3832 ) as e:
3833 self._format_exception(e)
3834
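# Illustrative usage (not part of the module); the server UUID is made up:
#
#     state, new_host = self.migrate_instance("<server-uuid>", compute_host="compute-1")
#     # state == "MIGRATING" and new_host == "compute-1" once the live
#     # migration completes; a VimConnException is raised if the target
#     # equals the current host, lies in another availability zone or
#     # lacks resources
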
3835 def resize_instance(self, vm_id, new_flavor_id):
3836 """
3837 Resize the instance to the given flavor
3838 param:
3839 vm_id : ID of an instance
3840 new_flavor_id : ID of the flavor to resize the instance to
3841 Returns:
3842 the status of the resized instance
3843 """
3844 self._reload_connection()
3845 self.logger.debug("resize the flavor of an instance")
3846 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
3847 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
3848 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
3849 try:
3850 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
3851 if old_flavor_disk > new_flavor_disk:
3852 raise nvExceptions.BadRequest(
3853 400,
3854 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
3855 )
3856 else:
3857 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
3858 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
3859 if vm_state:
3860 instance_resized_status = self.confirm_resize(vm_id)
3861 return instance_resized_status
3862 else:
3863 raise nvExceptions.Conflict(
3864 409,
3865 message="Cannot 'resize': instance did not reach VERIFY_RESIZE state",
3866 )
3867
3868 else:
3869 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
3870 raise nvExceptions.BadRequest(
3871 409,
3872 message="Cannot 'resize' instance while it is in vm_state resized",
3873 )
3874 except (
3875 nvExceptions.BadRequest,
3876 nvExceptions.ClientException,
3877 nvExceptions.NotFound,
3878 ) as e:
3879 self._format_exception(e)
3880
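# Illustrative usage (not part of the module); resizing to a flavor with a
# smaller disk is rejected and the instance must be ACTIVE or SHUTOFF:
#
#     status = self.resize_instance("<server-uuid>", "<bigger-flavor-uuid>")
#     # waits for VERIFY_RESIZE, confirms the resize and returns e.g. "ACTIVE"
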
3881 def confirm_resize(self, vm_id):
3882 """
3883 Confirm the resize of an instance
3884 param:
3885 vm_id: ID of an instance
3886 """
3887 self._reload_connection()
3888 self.nova.servers.confirm_resize(server=vm_id)
3889 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3890 self.__wait_for_vm(vm_id, "ACTIVE")
3891 instance_status = self.get_vdu_state(vm_id)[0]
3892 return instance_status
3893
3894 def get_monitoring_data(self):
3895 try:
3896 self.logger.debug("Getting servers and ports data from Openstack VIMs.")
3897 self._reload_connection()
3898 all_servers = self.nova.servers.list(detailed=True)
3899 try:
3900 for server in all_servers:
3901 server.flavor["id"] = self.nova.flavors.find(
3902 name=server.flavor["original_name"]
3903 ).id
3904 except nvExceptions.NotFound as e:
3905 self.logger.warning(str(e))
3906 all_ports = self.neutron.list_ports()
3907 return all_servers, all_ports
3908 except (
3909 vimconn.VimConnException,
3910 vimconn.VimConnNotFoundException,
3911 vimconn.VimConnConnectionException,
3912 ) as e:
3913 raise vimconn.VimConnException(
3914 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
3915 )
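
# Illustrative result shape (not part of the module): get_monitoring_data()
# returns the detailed Nova server objects (with flavor["id"] filled in from
# the flavor's original name) together with the raw Neutron port listing:
#
#     servers, ports = self.get_monitoring_data()
#     # servers -> list of novaclient Server objects
#     # ports   -> {"ports": [{...}, ...]}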