1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python openstack clients (keystone, nova, neutron, cinder and glance).
23
24 For the VNF forwarding graph, the OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
59 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 __date__ = "$22-sep-2017 23:59:59$"
61
62 """Map the openstack virtual machine status to openmano status"""
63 vmStatus2manoFormat = {
64 "ACTIVE": "ACTIVE",
65 "PAUSED": "PAUSED",
66 "SUSPENDED": "SUSPENDED",
67 "SHUTOFF": "INACTIVE",
68 "BUILD": "BUILD",
69 "ERROR": "ERROR",
70 "DELETED": "DELETED",
71 }
72 netStatus2manoFormat = {
73 "ACTIVE": "ACTIVE",
74 "PAUSED": "PAUSED",
75 "INACTIVE": "INACTIVE",
76 "BUILD": "BUILD",
77 "ERROR": "ERROR",
78 "DELETED": "DELETED",
79 }
80
81 supportedClassificationTypes = ["legacy_flow_classifier"]
82
83 # global variables with timeouts (seconds) for creating and deleting volumes and servers
84 volume_timeout = 1800
85 server_timeout = 1800
86
87
88 class SafeDumper(yaml.SafeDumper):
89 def represent_data(self, data):
90 # Openstack APIs use custom subclasses of dict, and the YAML safe dumper
91 # is not designed to handle that (see issue 142 of pyyaml)
92 if isinstance(data, dict) and data.__class__ != dict:
93 # A simple solution is to convert those items back to dicts
94 data = dict(data.items())
95
96 return super(SafeDumper, self).represent_data(data)
97
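# Illustrative sketch (not part of the original module) of why the SafeDumper
# subclass above is needed: openstack clients often return dict subclasses
# that the stock yaml.SafeDumper refuses to serialize; this dumper converts
# them back to plain dicts first. The class name below is hypothetical,
# for demonstration only.
#
#     class ResourceDict(dict):
#         pass
#
#     data = ResourceDict(id="42", status="ACTIVE")
#     yaml.dump(data, Dumper=SafeDumper, default_flow_style=True)
#     # -> something like "{id: '42', status: ACTIVE}\n"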
98
99 class vimconnector(vimconn.VimConnector):
100 def __init__(
101 self,
102 uuid,
103 name,
104 tenant_id,
105 tenant_name,
106 url,
107 url_admin=None,
108 user=None,
109 passwd=None,
110 log_level=None,
111 config={},
112 persistent_info={},
113 ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not used
117 """
118 api_version = config.get("APIversion")
119
120 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124 )
125
126 vim_type = config.get("vim_type")
127
128 if vim_type and vim_type not in ("vio", "VIO"):
129 raise vimconn.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132 )
133
134 if config.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self._validate_vlan_ranges(
137 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138 )
139
140 if config.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self._validate_vlan_ranges(
143 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144 )
145
146 vimconn.VimConnector.__init__(
147 self,
148 uuid,
149 name,
150 tenant_id,
151 tenant_name,
152 url,
153 url_admin,
154 user,
155 passwd,
156 log_level,
157 config,
158 )
159
160 if self.config.get("insecure") and self.config.get("ca_cert"):
161 raise vimconn.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
163 )
164
165 self.verify = True
166
167 if self.config.get("insecure"):
168 self.verify = False
169
170 if self.config.get("ca_cert"):
171 self.verify = self.config.get("ca_cert")
172
173 if not url:
174 raise TypeError("url param can not be NoneType")
175
176 self.persistent_info = persistent_info
177 self.availability_zone = persistent_info.get("availability_zone", None)
178 self.session = persistent_info.get("session", {"reload_client": True})
179 self.my_tenant_id = self.session.get("my_tenant_id")
180 self.nova = self.session.get("nova")
181 self.neutron = self.session.get("neutron")
182 self.cinder = self.session.get("cinder")
183 self.glance = self.session.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self.keystone = self.session.get("keystone")
186 self.api_version3 = self.session.get("api_version3")
187 self.vim_type = self.config.get("vim_type")
188
189 if self.vim_type:
190 self.vim_type = self.vim_type.upper()
191
192 if self.config.get("use_internal_endpoint"):
193 self.endpoint_type = "internalURL"
194 else:
195 self.endpoint_type = None
196
197 logging.getLogger("urllib3").setLevel(logging.WARNING)
198 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 logging.getLogger("novaclient").setLevel(logging.WARNING)
200 self.logger = logging.getLogger("ro.vim.openstack")
201
202 # allow security_groups to be a list or a single string
203 if isinstance(self.config.get("security_groups"), str):
204 self.config["security_groups"] = [self.config["security_groups"]]
205
206 self.security_groups_id = None
207
208 # ###### VIO Specific Changes #########
209 if self.vim_type == "VIO":
210 self.logger = logging.getLogger("ro.vim.vio")
211
212 if log_level:
213 self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individual parameters.
217 Throws KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individual parameters and mark the session as dirty to force a connection reload.
227 Throws KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
260 def _reload_connection(self):
261 """Called before any operation; checks if credentials have changed.
262 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
263 """
264 # TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-)
265 if self.session["reload_client"]:
266 if self.config.get("APIversion"):
267 self.api_version3 = (
268 self.config["APIversion"] == "v3.3"
269 or self.config["APIversion"] == "3"
270 )
271 else: # get from ending auth_url that end with v3 or with v2.0
272 self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
273 "/v3/"
274 )
275
276 self.session["api_version3"] = self.api_version3
277
278 if self.api_version3:
279 if self.config.get("project_domain_id") or self.config.get(
280 "project_domain_name"
281 ):
282 project_domain_id_default = None
283 else:
284 project_domain_id_default = "default"
285
286 if self.config.get("user_domain_id") or self.config.get(
287 "user_domain_name"
288 ):
289 user_domain_id_default = None
290 else:
291 user_domain_id_default = "default"
292 auth = v3.Password(
293 auth_url=self.url,
294 username=self.user,
295 password=self.passwd,
296 project_name=self.tenant_name,
297 project_id=self.tenant_id,
298 project_domain_id=self.config.get(
299 "project_domain_id", project_domain_id_default
300 ),
301 user_domain_id=self.config.get(
302 "user_domain_id", user_domain_id_default
303 ),
304 project_domain_name=self.config.get("project_domain_name"),
305 user_domain_name=self.config.get("user_domain_name"),
306 )
307 else:
308 auth = v2.Password(
309 auth_url=self.url,
310 username=self.user,
311 password=self.passwd,
312 tenant_name=self.tenant_name,
313 tenant_id=self.tenant_id,
314 )
315
316 sess = session.Session(auth=auth, verify=self.verify)
317 # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318 # Titanium cloud and StarlingX
319 region_name = self.config.get("region_name")
320
321 if self.api_version3:
322 self.keystone = ksClient_v3.Client(
323 session=sess,
324 endpoint_type=self.endpoint_type,
325 region_name=region_name,
326 )
327 else:
328 self.keystone = ksClient_v2.Client(
329 session=sess, endpoint_type=self.endpoint_type
330 )
331
332 self.session["keystone"] = self.keystone
333 # In order to enable microversion functionality an explicit microversion must be specified in "config".
334 # This implementation approach is due to the warning message in
335 # https://developer.openstack.org/api-guide/compute/microversions.html
336 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337 # always require a specific microversion.
338 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
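# Example VIM config snippet (hypothetical values) enabling device role
# tagging through an explicit compute microversion, as described above:
#
#     config:
#       microversion: "2.32"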
339 version = self.config.get("microversion")
340
341 if not version:
342 version = "2.1"
343
344 # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345 # Titanium cloud and StarlingX
346 self.nova = self.session["nova"] = nClient.Client(
347 str(version),
348 session=sess,
349 endpoint_type=self.endpoint_type,
350 region_name=region_name,
351 )
352 self.neutron = self.session["neutron"] = neClient.Client(
353 "2.0",
354 session=sess,
355 endpoint_type=self.endpoint_type,
356 region_name=region_name,
357 )
358
359 if sess.get_all_version_data(service_type="volumev2"):
360 self.cinder = self.session["cinder"] = cClient.Client(
361 2,
362 session=sess,
363 endpoint_type=self.endpoint_type,
364 region_name=region_name,
365 )
366 else:
367 self.cinder = self.session["cinder"] = cClient.Client(
368 3,
369 session=sess,
370 endpoint_type=self.endpoint_type,
371 region_name=region_name,
372 )
373
374 try:
375 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
376 except Exception:
377 self.logger.error("Cannot get project_id from session", exc_info=True)
378
379 if self.endpoint_type == "internalURL":
380 glance_service_id = self.keystone.services.list(name="glance")[0].id
381 glance_endpoint = self.keystone.endpoints.list(
382 glance_service_id, interface="internal"
383 )[0].url
384 else:
385 glance_endpoint = None
386
387 self.glance = self.session["glance"] = glClient.Client(
388 2, session=sess, endpoint=glance_endpoint
389 )
390 # using version 1 of glance client in new_image()
391 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
392 # endpoint=glance_endpoint)
393 self.session["reload_client"] = False
394 self.persistent_info["session"] = self.session
395 # add availability zone info inside self.persistent_info
396 self._set_availablity_zones()
397 self.persistent_info["availability_zone"] = self.availability_zone
398 # force to get again security_groups_ids next time they are needed
399 self.security_groups_id = None
400
401 def __net_os2mano(self, net_list_dict):
402 """Transform the net openstack format to mano format
403 net_list_dict can be a list of dict or a single dict"""
404 if isinstance(net_list_dict, dict):
405 net_list_ = (net_list_dict,)
406 elif isinstance(net_list_dict, list):
407 net_list_ = net_list_dict
408 else:
409 raise TypeError("param net_list_dict must be a list or a dictionary")
410 for net in net_list_:
411 if net.get("provider:network_type") == "vlan":
412 net["type"] = "data"
413 else:
414 net["type"] = "bridge"
415
416 def __classification_os2mano(self, class_list_dict):
417 """Transform the openstack format (Flow Classifier) to mano format
418 (Classification) class_list_dict can be a list of dict or a single dict
419 """
420 if isinstance(class_list_dict, dict):
421 class_list_ = [class_list_dict]
422 elif isinstance(class_list_dict, list):
423 class_list_ = class_list_dict
424 else:
425 raise TypeError("param class_list_dict must be a list or a dictionary")
426 for classification in class_list_:
427 id = classification.pop("id")
428 name = classification.pop("name")
429 description = classification.pop("description")
430 project_id = classification.pop("project_id")
431 tenant_id = classification.pop("tenant_id")
432 original_classification = copy.deepcopy(classification)
433 classification.clear()
434 classification["ctype"] = "legacy_flow_classifier"
435 classification["definition"] = original_classification
436 classification["id"] = id
437 classification["name"] = name
438 classification["description"] = description
439 classification["project_id"] = project_id
440 classification["tenant_id"] = tenant_id
441
442 def __sfi_os2mano(self, sfi_list_dict):
443 """Transform the openstack format (Port Pair) to mano format (SFI)
444 sfi_list_dict can be a list of dict or a single dict
445 """
446 if isinstance(sfi_list_dict, dict):
447 sfi_list_ = [sfi_list_dict]
448 elif isinstance(sfi_list_dict, list):
449 sfi_list_ = sfi_list_dict
450 else:
451 raise TypeError("param sfi_list_dict must be a list or a dictionary")
452
453 for sfi in sfi_list_:
454 sfi["ingress_ports"] = []
455 sfi["egress_ports"] = []
456
457 if sfi.get("ingress"):
458 sfi["ingress_ports"].append(sfi["ingress"])
459
460 if sfi.get("egress"):
461 sfi["egress_ports"].append(sfi["egress"])
462
463 del sfi["ingress"]
464 del sfi["egress"]
465 params = sfi.get("service_function_parameters")
466 sfc_encap = False
467
468 if params:
469 correlation = params.get("correlation")
470
471 if correlation:
472 sfc_encap = True
473
474 sfi["sfc_encap"] = sfc_encap
475 del sfi["service_function_parameters"]
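# Illustrative example of the transformation above (hypothetical values):
# a Neutron Port Pair such as
#     {"id": "pp1", "ingress": "port-a", "egress": "port-b",
#      "service_function_parameters": {"correlation": "mpls"}}
# becomes the following SFI dict:
#     {"id": "pp1", "ingress_ports": ["port-a"], "egress_ports": ["port-b"],
#      "sfc_encap": True}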
476
477 def __sf_os2mano(self, sf_list_dict):
478 """Transform the openstack format (Port Pair Group) to mano format (SF)
479 sf_list_dict can be a list of dict or a single dict
480 """
481 if isinstance(sf_list_dict, dict):
482 sf_list_ = [sf_list_dict]
483 elif isinstance(sf_list_dict, list):
484 sf_list_ = sf_list_dict
485 else:
486 raise TypeError("param sf_list_dict must be a list or a dictionary")
487
488 for sf in sf_list_:
489 del sf["port_pair_group_parameters"]
490 sf["sfis"] = sf["port_pairs"]
491 del sf["port_pairs"]
492
493 def __sfp_os2mano(self, sfp_list_dict):
494 """Transform the openstack format (Port Chain) to mano format (SFP)
495 sfp_list_dict can be a list of dict or a single dict
496 """
497 if isinstance(sfp_list_dict, dict):
498 sfp_list_ = [sfp_list_dict]
499 elif isinstance(sfp_list_dict, list):
500 sfp_list_ = sfp_list_dict
501 else:
502 raise TypeError("param sfp_list_dict must be a list or a dictionary")
503
504 for sfp in sfp_list_:
505 params = sfp.pop("chain_parameters")
506 sfc_encap = False
507
508 if params:
509 correlation = params.get("correlation")
510
511 if correlation:
512 sfc_encap = True
513
514 sfp["sfc_encap"] = sfc_encap
515 sfp["spi"] = sfp.pop("chain_id")
516 sfp["classifications"] = sfp.pop("flow_classifiers")
517 sfp["service_functions"] = sfp.pop("port_pair_groups")
518
519 # placeholder for now; read TODO note below
520 def _validate_classification(self, type, definition):
521 # only legacy_flow_classifier Type is supported at this point
522 return True
523 # TODO(igordcard): this method should be an abstract method of an
524 # abstract Classification class to be implemented by the specific
525 # Types. Also, abstract vimconnector should call the validation
526 # method before the implemented VIM connectors are called.
527
528 def _format_exception(self, exception):
529 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
530 message_error = str(exception)
531 tip = ""
532
533 if isinstance(
534 exception,
535 (
536 neExceptions.NetworkNotFoundClient,
537 nvExceptions.NotFound,
538 ksExceptions.NotFound,
539 gl1Exceptions.HTTPNotFound,
540 ),
541 ):
542 raise vimconn.VimConnNotFoundException(
543 type(exception).__name__ + ": " + message_error
544 )
545 elif isinstance(
546 exception,
547 (
548 HTTPException,
549 gl1Exceptions.HTTPException,
550 gl1Exceptions.CommunicationError,
551 ConnectionError,
552 ksExceptions.ConnectionError,
553 neExceptions.ConnectionFailed,
554 ),
555 ):
556 if type(exception).__name__ == "SSLError":
557 tip = " (maybe option 'insecure' must be added to the VIM)"
558
559 raise vimconn.VimConnConnectionException(
560 "Invalid URL or credentials{}: {}".format(tip, message_error)
561 )
562 elif isinstance(
563 exception,
564 (
565 KeyError,
566 nvExceptions.BadRequest,
567 ksExceptions.BadRequest,
568 ),
569 ):
570 raise vimconn.VimConnException(
571 type(exception).__name__ + ": " + message_error
572 )
573 elif isinstance(
574 exception,
575 (
576 nvExceptions.ClientException,
577 ksExceptions.ClientException,
578 neExceptions.NeutronException,
579 ),
580 ):
581 raise vimconn.VimConnUnexpectedResponse(
582 type(exception).__name__ + ": " + message_error
583 )
584 elif isinstance(exception, nvExceptions.Conflict):
585 raise vimconn.VimConnConflictException(
586 type(exception).__name__ + ": " + message_error
587 )
588 elif isinstance(exception, vimconn.VimConnException):
589 raise exception
590 else: # ()
591 self.logger.error("General Exception " + message_error, exc_info=True)
592
593 raise vimconn.VimConnConnectionException(
594 type(exception).__name__ + ": " + message_error
595 )
596
597 def _get_ids_from_name(self):
598 """
599 Obtain ids from the names of tenant and security_groups. Store them at self.security_groups_id
600 :return: None
601 """
602 # get tenant_id if only tenant_name is supplied
603 self._reload_connection()
604
605 if not self.my_tenant_id:
606 raise vimconn.VimConnConnectionException(
607 "Error getting tenant information from name={} id={}".format(
608 self.tenant_name, self.tenant_id
609 )
610 )
611
612 if self.config.get("security_groups") and not self.security_groups_id:
613 # convert from name to id
614 neutron_sg_list = self.neutron.list_security_groups(
615 tenant_id=self.my_tenant_id
616 )["security_groups"]
617
618 self.security_groups_id = []
619 for sg in self.config.get("security_groups"):
620 for neutron_sg in neutron_sg_list:
621 if sg in (neutron_sg["id"], neutron_sg["name"]):
622 self.security_groups_id.append(neutron_sg["id"])
623 break
624 else:
625 self.security_groups_id = None
626
627 raise vimconn.VimConnConnectionException(
628 "Not found security group {} for this tenant".format(sg)
629 )
630
631 def check_vim_connectivity(self):
632 # just get network list to check connectivity and credentials
633 self.get_network_list(filter_dict={})
634
635 def get_tenant_list(self, filter_dict={}):
636 """Obtain tenants of VIM
637 filter_dict can contain the following keys:
638 name: filter by tenant name
639 id: filter by tenant uuid/id
640 <other VIM specific>
641 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
642 """
643 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
644
645 try:
646 self._reload_connection()
647
648 if self.api_version3:
649 project_class_list = self.keystone.projects.list(
650 name=filter_dict.get("name")
651 )
652 else:
653 project_class_list = self.keystone.tenants.findall(**filter_dict)
654
655 project_list = []
656
657 for project in project_class_list:
658 if filter_dict.get("id") and filter_dict["id"] != project.id:
659 continue
660
661 project_list.append(project.to_dict())
662
663 return project_list
664 except (
665 ksExceptions.ConnectionError,
666 ksExceptions.ClientException,
667 ConnectionError,
668 ) as e:
669 self._format_exception(e)
670
671 def new_tenant(self, tenant_name, tenant_description):
672 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
673 self.logger.debug("Adding a new tenant name: %s", tenant_name)
674
675 try:
676 self._reload_connection()
677
678 if self.api_version3:
679 project = self.keystone.projects.create(
680 tenant_name,
681 self.config.get("project_domain_id", "default"),
682 description=tenant_description,
683 is_domain=False,
684 )
685 else:
686 project = self.keystone.tenants.create(tenant_name, tenant_description)
687
688 return project.id
689 except (
690 ksExceptions.ConnectionError,
691 ksExceptions.ClientException,
692 ksExceptions.BadRequest,
693 ConnectionError,
694 ) as e:
695 self._format_exception(e)
696
697 def delete_tenant(self, tenant_id):
698 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
699 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
700
701 try:
702 self._reload_connection()
703
704 if self.api_version3:
705 self.keystone.projects.delete(tenant_id)
706 else:
707 self.keystone.tenants.delete(tenant_id)
708
709 return tenant_id
710 except (
711 ksExceptions.ConnectionError,
712 ksExceptions.ClientException,
713 ksExceptions.NotFound,
714 ConnectionError,
715 ) as e:
716 self._format_exception(e)
717
718 def new_network(
719 self,
720 net_name,
721 net_type,
722 ip_profile=None,
723 shared=False,
724 provider_network_profile=None,
725 ):
726 """Adds a tenant network to VIM
727 Params:
728 'net_name': name of the network
729 'net_type': one of:
730 'bridge': overlay isolated network
731 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
732 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
733 'ip_profile': is a dict containing the IP parameters of the network
734 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
735 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
736 'gateway_address': (Optional) ip_schema, that is X.X.X.X
737 'dns_address': (Optional) semicolon separated list of ip_schema, e.g. X.X.X.X[;X.X.X.X]
738 'dhcp_enabled': True or False
739 'dhcp_start_address': ip_schema, first IP to grant
740 'dhcp_count': number of IPs to grant.
741 'shared': if this network can be seen/used by other tenants/organizations
742 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
743 physical-network: physnet-label}
744 Returns a tuple with the network identifier and created_items, or raises an exception on error
745 created_items can be None or a dictionary where this method can include key-values that will be passed to
746 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
747 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
748 as not present.
749 """
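# Illustrative call (hypothetical values), showing the expected ip_profile
# shape; it is not executed anywhere in this module:
#
#     net_id, created_items = vim.new_network(
#         "mgmt-net",
#         "bridge",
#         ip_profile={
#             "ip_version": "IPv4",
#             "subnet_address": "10.0.0.0/24",
#             "gateway_address": "10.0.0.1",
#             "dns_address": "8.8.8.8;1.1.1.1",
#             "dhcp_enabled": True,
#             "dhcp_start_address": "10.0.0.10",
#             "dhcp_count": 100,
#         },
#     )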
750 self.logger.debug(
751 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
752 )
753 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
754
755 try:
756 vlan = None
757
758 if provider_network_profile:
759 vlan = provider_network_profile.get("segmentation-id")
760
761 new_net = None
762 created_items = {}
763 self._reload_connection()
764 network_dict = {"name": net_name, "admin_state_up": True}
765
766 if net_type in ("data", "ptp") or provider_network_profile:
767 provider_physical_network = None
768
769 if provider_network_profile and provider_network_profile.get(
770 "physical-network"
771 ):
772 provider_physical_network = provider_network_profile.get(
773 "physical-network"
774 )
775
776 # provider-network must be one of the dataplane_physical_net values if this is a list. If it is a string
777 # or not declared, just skip the check
778 if (
779 isinstance(
780 self.config.get("dataplane_physical_net"), (tuple, list)
781 )
782 and provider_physical_network
783 not in self.config["dataplane_physical_net"]
784 ):
785 raise vimconn.VimConnConflictException(
786 "Invalid parameter 'provider-network:physical-network' "
787 "for network creation. '{}' is not one of the declared "
788 "list at VIM_config:dataplane_physical_net".format(
789 provider_physical_network
790 )
791 )
792
793 # use the default dataplane_physical_net
794 if not provider_physical_network:
795 provider_physical_network = self.config.get(
796 "dataplane_physical_net"
797 )
798
799 # if it is a non-empty list, use the first value. If it is a string, use the value directly
800 if (
801 isinstance(provider_physical_network, (tuple, list))
802 and provider_physical_network
803 ):
804 provider_physical_network = provider_physical_network[0]
805
806 if not provider_physical_network:
807 raise vimconn.VimConnConflictException(
808 "missing information needed for underlay networks. Provide "
809 "'dataplane_physical_net' configuration at VIM or use the NS "
810 "instantiation parameter 'provider-network.physical-network'"
811 " for the VLD"
812 )
813
814 if not self.config.get("multisegment_support"):
815 network_dict[
816 "provider:physical_network"
817 ] = provider_physical_network
818
819 if (
820 provider_network_profile
821 and "network-type" in provider_network_profile
822 ):
823 network_dict[
824 "provider:network_type"
825 ] = provider_network_profile["network-type"]
826 else:
827 network_dict["provider:network_type"] = self.config.get(
828 "dataplane_network_type", "vlan"
829 )
830
831 if vlan:
832 network_dict["provider:segmentation_id"] = vlan
833 else:
834 # Multi-segment case
835 segment_list = []
836 segment1_dict = {
837 "provider:physical_network": "",
838 "provider:network_type": "vxlan",
839 }
840 segment_list.append(segment1_dict)
841 segment2_dict = {
842 "provider:physical_network": provider_physical_network,
843 "provider:network_type": "vlan",
844 }
845
846 if vlan:
847 segment2_dict["provider:segmentation_id"] = vlanID = vlan  # keep vlanID defined for the l2gw connections below
848 elif self.config.get("multisegment_vlan_range"):
849 vlanID = self._generate_multisegment_vlanID()
850 segment2_dict["provider:segmentation_id"] = vlanID
851
852 # else
853 # raise vimconn.VimConnConflictException(
854 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
855 # network")
856 segment_list.append(segment2_dict)
857 network_dict["segments"] = segment_list
858
859 # VIO Specific Changes. It needs a concrete VLAN
860 if self.vim_type == "VIO" and vlan is None:
861 if self.config.get("dataplane_net_vlan_range") is None:
862 raise vimconn.VimConnConflictException(
863 "You must provide 'dataplane_net_vlan_range' in format "
864 "[start_ID - end_ID] at VIM_config for creating underlay "
865 "networks"
866 )
867
868 network_dict["provider:segmentation_id"] = self._generate_vlanID()
869
870 network_dict["shared"] = shared
871
872 if self.config.get("disable_network_port_security"):
873 network_dict["port_security_enabled"] = False
874
875 if self.config.get("neutron_availability_zone_hints"):
876 hints = self.config.get("neutron_availability_zone_hints")
877
878 if isinstance(hints, str):
879 hints = [hints]
880
881 network_dict["availability_zone_hints"] = hints
882
883 new_net = self.neutron.create_network({"network": network_dict})
884 # print new_net
885 # create subnetwork, even if there is no profile
886
887 if not ip_profile:
888 ip_profile = {}
889
890 if not ip_profile.get("subnet_address"):
891 # Fake subnet is required
892 subnet_rand = random.randint(0, 255)
893 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
894
895 if "ip_version" not in ip_profile:
896 ip_profile["ip_version"] = "IPv4"
897
898 subnet = {
899 "name": net_name + "-subnet",
900 "network_id": new_net["network"]["id"],
901 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
902 "cidr": ip_profile["subnet_address"],
903 }
904
905 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
906 if ip_profile.get("gateway_address"):
907 subnet["gateway_ip"] = ip_profile["gateway_address"]
908 else:
909 subnet["gateway_ip"] = None
910
911 if ip_profile.get("dns_address"):
912 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
913
914 if "dhcp_enabled" in ip_profile:
915 subnet["enable_dhcp"] = (
916 False
917 if ip_profile["dhcp_enabled"] == "false"
918 or ip_profile["dhcp_enabled"] is False
919 else True
920 )
921
922 if ip_profile.get("dhcp_start_address"):
923 subnet["allocation_pools"] = []
924 subnet["allocation_pools"].append(dict())
925 subnet["allocation_pools"][0]["start"] = ip_profile[
926 "dhcp_start_address"
927 ]
928
929 if ip_profile.get("dhcp_count"):
930 # parts = ip_profile["dhcp_start_address"].split(".")
931 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
932 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
933 ip_int += ip_profile["dhcp_count"] - 1
934 ip_str = str(netaddr.IPAddress(ip_int))
935 subnet["allocation_pools"][0]["end"] = ip_str
936
937 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
938 self.neutron.create_subnet({"subnet": subnet})
939
940 if net_type == "data" and self.config.get("multisegment_support"):
941 if self.config.get("l2gw_support"):
942 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
943 for l2gw in l2gw_list:
944 l2gw_conn = {
945 "l2_gateway_id": l2gw["id"],
946 "network_id": new_net["network"]["id"],
947 "segmentation_id": str(vlanID),
948 }
949 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
950 {"l2_gateway_connection": l2gw_conn}
951 )
952 created_items[
953 "l2gwconn:"
954 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
955 ] = True
956
957 return new_net["network"]["id"], created_items
958 except Exception as e:
959 # delete l2gw connections (if any) before deleting the network
960 for k, v in created_items.items():
961 if not v: # skip already deleted
962 continue
963
964 try:
965 k_item, _, k_id = k.partition(":")
966
967 if k_item == "l2gwconn":
968 self.neutron.delete_l2_gateway_connection(k_id)
969 except Exception as e2:
970 self.logger.error(
971 "Error deleting l2 gateway connection: {}: {}".format(
972 type(e2).__name__, e2
973 )
974 )
975
976 if new_net:
977 self.neutron.delete_network(new_net["network"]["id"])
978
979 self._format_exception(e)
980
981 def get_network_list(self, filter_dict={}):
982 """Obtain tenant networks of VIM
983 Filter_dict can be:
984 name: network name
985 id: network uuid
986 shared: boolean
987 tenant_id: tenant
988 admin_state_up: boolean
989 status: 'ACTIVE'
990 Returns the network list of dictionaries
991 """
992 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
993
994 try:
995 self._reload_connection()
996 filter_dict_os = filter_dict.copy()
997
998 if self.api_version3 and "tenant_id" in filter_dict_os:
999 # TODO check
1000 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
1001
1002 net_dict = self.neutron.list_networks(**filter_dict_os)
1003 net_list = net_dict["networks"]
1004 self.__net_os2mano(net_list)
1005
1006 return net_list
1007 except (
1008 neExceptions.ConnectionFailed,
1009 ksExceptions.ClientException,
1010 neExceptions.NeutronException,
1011 ConnectionError,
1012 ) as e:
1013 self._format_exception(e)
1014
1015 def get_network(self, net_id):
1016 """Obtain details of network from VIM
1017 Returns the network information from a network id"""
1018 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1019 filter_dict = {"id": net_id}
1020 net_list = self.get_network_list(filter_dict)
1021
1022 if len(net_list) == 0:
1023 raise vimconn.VimConnNotFoundException(
1024 "Network '{}' not found".format(net_id)
1025 )
1026 elif len(net_list) > 1:
1027 raise vimconn.VimConnConflictException(
1028 "Found more than one network with this criteria"
1029 )
1030
1031 net = net_list[0]
1032 subnets = []
1033 for subnet_id in net.get("subnets", ()):
1034 try:
1035 subnet = self.neutron.show_subnet(subnet_id)
1036 except Exception as e:
1037 self.logger.error(
1038 "osconnector.get_network(): Error getting subnet %s %s"
1039 % (net_id, str(e))
1040 )
1041 subnet = {"id": subnet_id, "fault": str(e)}
1042
1043 subnets.append(subnet)
1044
1045 net["subnets"] = subnets
1046 net["encapsulation"] = net.get("provider:network_type")
1047 net["encapsulation_type"] = net.get("provider:network_type")
1048 net["segmentation_id"] = net.get("provider:segmentation_id")
1049 net["encapsulation_id"] = net.get("provider:segmentation_id")
1050
1051 return net
1052
1053 def delete_network(self, net_id, created_items=None):
1054 """
1055 Removes a tenant network from VIM and its associated elements
1056 :param net_id: VIM identifier of the network, provided by method new_network
1057 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1058 Returns the network identifier or raises an exception upon error or when network is not found
1059 """
1060 self.logger.debug("Deleting network '%s' from VIM", net_id)
1061
1062 if created_items is None:
1063 created_items = {}
1064
1065 try:
1066 self._reload_connection()
1067 # delete l2gw connections (if any) before deleting the network
1068 for k, v in created_items.items():
1069 if not v: # skip already deleted
1070 continue
1071
1072 try:
1073 k_item, _, k_id = k.partition(":")
1074 if k_item == "l2gwconn":
1075 self.neutron.delete_l2_gateway_connection(k_id)
1076 except Exception as e:
1077 self.logger.error(
1078 "Error deleting l2 gateway connection: {}: {}".format(
1079 type(e).__name__, e
1080 )
1081 )
1082
1083 # delete VM ports attached to this networks before the network
1084 ports = self.neutron.list_ports(network_id=net_id)
1085 for p in ports["ports"]:
1086 try:
1087 self.neutron.delete_port(p["id"])
1088 except Exception as e:
1089 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1090
1091 self.neutron.delete_network(net_id)
1092
1093 return net_id
1094 except (
1095 neExceptions.ConnectionFailed,
1096 neExceptions.NetworkNotFoundClient,
1097 neExceptions.NeutronException,
1098 ksExceptions.ClientException,
1100 ConnectionError,
1101 ) as e:
1102 self._format_exception(e)
1103
1104 def refresh_nets_status(self, net_list):
1105 """Get the status of the networks
1106 Params: the list of network identifiers
1107 Returns a dictionary with:
1108 net_id: #VIM id of this network
1109 status: #Mandatory. Text with one of:
1110 # DELETED (not found at vim)
1111 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1112 # OTHER (Vim reported other status not understood)
1113 # ERROR (VIM indicates an ERROR status)
1114 # ACTIVE, INACTIVE, DOWN (admin down),
1115 # BUILD (on building process)
1116 #
1117 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1118 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1119 """
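# Illustrative return value (hypothetical ids and status):
#     {"net-uuid-1": {"status": "ACTIVE", "vim_info": "..."},
#      "net-uuid-2": {"status": "DELETED", "error_msg": "Network not found"}}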
1120 net_dict = {}
1121
1122 for net_id in net_list:
1123 net = {}
1124
1125 try:
1126 net_vim = self.get_network(net_id)
1127
1128 if net_vim["status"] in netStatus2manoFormat:
1129 net["status"] = netStatus2manoFormat[net_vim["status"]]
1130 else:
1131 net["status"] = "OTHER"
1132 net["error_msg"] = "VIM status reported " + net_vim["status"]
1133
1134 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1135 net["status"] = "DOWN"
1136
1137 net["vim_info"] = self.serialize(net_vim)
1138
1139 if net_vim.get("fault"): # TODO
1140 net["error_msg"] = str(net_vim["fault"])
1141 except vimconn.VimConnNotFoundException as e:
1142 self.logger.error("Exception getting net status: %s", str(e))
1143 net["status"] = "DELETED"
1144 net["error_msg"] = str(e)
1145 except vimconn.VimConnException as e:
1146 self.logger.error("Exception getting net status: %s", str(e))
1147 net["status"] = "VIM_ERROR"
1148 net["error_msg"] = str(e)
1149 net_dict[net_id] = net
1150 return net_dict
1151
1152 def get_flavor(self, flavor_id):
1153 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1154 self.logger.debug("Getting flavor '%s'", flavor_id)
1155
1156 try:
1157 self._reload_connection()
1158 flavor = self.nova.flavors.find(id=flavor_id)
1159 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1160
1161 return flavor.to_dict()
1162 except (
1163 nvExceptions.NotFound,
1164 nvExceptions.ClientException,
1165 ksExceptions.ClientException,
1166 ConnectionError,
1167 ) as e:
1168 self._format_exception(e)
1169
1170 def get_flavor_id_from_data(self, flavor_dict):
1171 """Obtain a flavor id that matches the flavor description
1172 Returns the flavor_id or raises a VimConnNotFoundException
1173 flavor_dict: contains the required ram, vcpus, disk
1174 If 'use_existing_flavors' is set to True at config, the closest flavor that provides same or more ram, vcpus
1175 and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a
1176 VimConnNotFoundException is raised
1177 """
1178 exact_match = False if self.config.get("use_existing_flavors") else True
1179
1180 try:
1181 self._reload_connection()
1182 flavor_candidate_id = None
1183 flavor_candidate_data = (10000, 10000, 10000, 10000, 10000)  # same shape as flavor_target
1184 flavor_target = (
1185 flavor_dict["ram"],
1186 flavor_dict["vcpus"],
1187 flavor_dict["disk"],
1188 flavor_dict.get("ephemeral", 0),
1189 flavor_dict.get("swap", 0),
1190 )
1191 # numa=None
1192 extended = flavor_dict.get("extended", {})
1193 if extended:
1194 # TODO
1195 raise vimconn.VimConnNotFoundException(
1196 "Flavor with EPA still not implemented"
1197 )
1198 # if len(numas) > 1:
1199 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1200 # numa=numas[0]
1201 # numas = extended.get("numas")
1202 for flavor in self.nova.flavors.list():
1203 epa = flavor.get_keys()
1204
1205 if epa:
1206 continue
1207 # TODO
1208
1209 flavor_data = (
1210 flavor.ram,
1211 flavor.vcpus,
1212 flavor.disk,
1213 flavor.ephemeral,
1214 flavor.swap if isinstance(flavor.swap, int) else 0,
1215 )
1216 if flavor_data == flavor_target:
1217 return flavor.id
1218 elif (
1219 not exact_match
1220 and flavor_target < flavor_data < flavor_candidate_data
1221 ):
1222 flavor_candidate_id = flavor.id
1223 flavor_candidate_data = flavor_data
1224
1225 if not exact_match and flavor_candidate_id:
1226 return flavor_candidate_id
1227
1228 raise vimconn.VimConnNotFoundException(
1229 "Cannot find any flavor matching '{}'".format(flavor_dict)
1230 )
1231 except (
1232 nvExceptions.NotFound,
1233 nvExceptions.ClientException,
1234 ksExceptions.ClientException,
1235 ConnectionError,
1236 ) as e:
1237 self._format_exception(e)
1238
1239 @staticmethod
1240 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1241 """Process resource quota and fill up extra_specs.
1242 Args:
1243 quota (dict): Resource quota to be applied
1244 prefix (str): Prefix for the extra_specs keys
1245 extra_specs (dict): Dict to be filled, to be used during flavor creation
1246
1247 """
1248 if "limit" in quota:
1249 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1250
1251 if "reserve" in quota:
1252 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1253
1254 if "shares" in quota:
1255 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1256 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
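# Illustrative example (hypothetical values):
#     process_resource_quota({"limit": 10000, "reserve": 5000, "shares": 2000},
#                            "cpu", extra_specs)
# fills extra_specs with:
#     {"quota:cpu_limit": 10000,
#      "quota:cpu_reservation": 5000,
#      "quota:cpu_shares_level": "custom",
#      "quota:cpu_shares_share": 2000}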
1257
1258 @staticmethod
1259 def process_numa_memory(
1260 numa: dict, node_id: Optional[int], extra_specs: dict
1261 ) -> None:
1262 """Set the memory in extra_specs.
1263 Args:
1264 numa (dict): A dictionary which includes numa information
1265 node_id (int): ID of numa node
1266 extra_specs (dict): To be filled.
1267
1268 """
1269 if not numa.get("memory"):
1270 return
1271 memory_mb = numa["memory"] * 1024
1272 memory = "hw:numa_mem.{}".format(node_id)
1273 extra_specs[memory] = int(memory_mb)
1274
1275 @staticmethod
1276 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1277 """Set the cpu in extra_specs.
1278 Args:
1279 numa (dict): A dictionary which includes numa information
1280 node_id (int): ID of numa node
1281 extra_specs (dict): To be filled.
1282
1283 """
1284 if not numa.get("vcpu"):
1285 return
1286 vcpu = numa["vcpu"]
1287 cpu = "hw:numa_cpus.{}".format(node_id)
1288 vcpu = ",".join(map(str, vcpu))
1289 extra_specs[cpu] = vcpu
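# Illustrative example (hypothetical values): for numa={"memory": 2,
# "vcpu": [0, 1]} and node_id=0, the two helpers above produce:
#     {"hw:numa_mem.0": 2048, "hw:numa_cpus.0": "0,1"}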
1290
1291 @staticmethod
1292 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1293 """Fill up extra_specs if numa has paired-threads.
1294 Args:
1295 numa (dict): A dictionary which includes numa information
1296 extra_specs (dict): To be filled.
1297
1298 Returns:
1299 threads (int) Number of virtual cpus
1300
1301 """
1302 if not numa.get("paired-threads"):
1303 return
1304
1305 # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
1306 threads = numa["paired-threads"] * 2
1307 extra_specs["hw:cpu_thread_policy"] = "require"
1308 extra_specs["hw:cpu_policy"] = "dedicated"
1309 return threads
1310
1311 @staticmethod
1312 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1313 """Fill up extra_specs if numa has cores.
1314 Args:
1315 numa (dict): A dictionary which includes numa information
1316 extra_specs (dict): To be filled.
1317
1318 Returns:
1319 cores (int) Number of virtual cpus
1320
1321 """
1322 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1323 # architecture, or a non-SMT architecture will be emulated
1324 if not numa.get("cores"):
1325 return
1326 cores = numa["cores"]
1327 extra_specs["hw:cpu_thread_policy"] = "isolate"
1328 extra_specs["hw:cpu_policy"] = "dedicated"
1329 return cores
1330
1331 @staticmethod
1332 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1333 """Fill up extra_specs if numa has threads.
1334 Args:
1335 numa (dict): A dictionary which includes numa information
1336 extra_specs (dict): To be filled.
1337
1338 Returns:
1339 threads (int) Number of virtual cpus
1340
1341 """
1342 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1343 if not numa.get("threads"):
1344 return
1345 threads = numa["threads"]
1346 extra_specs["hw:cpu_thread_policy"] = "prefer"
1347 extra_specs["hw:cpu_policy"] = "dedicated"
1348 return threads
1349
1350 def _process_numa_parameters_of_flavor(
1351 self, numas: List, extra_specs: Dict
1352 ) -> None:
1353 """Process numa parameters and fill up extra_specs.
1354
1355 Args:
1356 numas (list): List of dictionary which includes numa information
1357 extra_specs (dict): To be filled.
1358
1359 """
1360 numa_nodes = len(numas)
1361 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1362 cpu_cores, cpu_threads = 0, 0
1363
1364 if self.vim_type == "VIO":
1365 self.process_vio_numa_nodes(numa_nodes, extra_specs)
1366
1367 for numa in numas:
1368 if "id" in numa:
1369 node_id = numa["id"]
1370 # overwrite ram and vcpus
1371 # check if key "memory" is present in numa else use ram value at flavor
1372 self.process_numa_memory(numa, node_id, extra_specs)
1373 self.process_numa_vcpu(numa, node_id, extra_specs)
1374
1375 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1376 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1377
1378 if "paired-threads" in numa:
1379 threads = self.process_numa_paired_threads(numa, extra_specs)
1380 cpu_threads += threads
1381
1382 elif "cores" in numa:
1383 cores = self.process_numa_cores(numa, extra_specs)
1384 cpu_cores += cores
1385
1386 elif "threads" in numa:
1387 threads = self.process_numa_threads(numa, extra_specs)
1388 cpu_threads += threads
1389
1390 if cpu_cores:
1391 extra_specs["hw:cpu_cores"] = str(cpu_cores)
1392 if cpu_threads:
1393 extra_specs["hw:cpu_threads"] = str(cpu_threads)
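# Illustrative example (hypothetical values), on a non-VIO VIM: for
#     numas = [{"id": 0, "memory": 2, "paired-threads": 2}]
# this method fills extra_specs with approximately:
#     {"hw:numa_nodes": "1", "hw:numa_mem.0": 2048, "hw:cpu_sockets": "1",
#      "hw:cpu_thread_policy": "require", "hw:cpu_policy": "dedicated",
#      "hw:cpu_threads": "4"}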
1394
1395 @staticmethod
1396 def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1397 """According to number of numa nodes, updates the extra_specs for VIO.
1398
1399 Args:
1400
1401 numa_nodes (int): Number of numa nodes
1402 extra_specs (dict): Extra specs dict to be updated
1403
1404 """
1405 # If there is no numa, numa_nodes equals 0.
1406 if not numa_nodes:
1407 extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
1408
1409 # If there are several numas, we do not define specific affinity.
1410 extra_specs["vmware:latency_sensitivity_level"] = "high"
1411
1412 def _change_flavor_name(
1413 self, name: str, name_suffix: int, flavor_data: dict
1414 ) -> str:
1415 """Change the flavor name if the name already exists.
1416
1417 Args:
1418 name (str): Flavor name to be checked
1419 name_suffix (int): Suffix to be appended to name
1420 flavor_data (dict): Flavor dict
1421
1422 Returns:
1423 name (str): New flavor name to be used
1424
1425 """
1426 # Get used names
1427 fl = self.nova.flavors.list()
1428 fl_names = [f.name for f in fl]
1429
1430 while name in fl_names:
1431 name_suffix += 1
1432 name = flavor_data["name"] + "-" + str(name_suffix)
1433
1434 return name
1435
1436 def _process_extended_config_of_flavor(
1437 self, extended: dict, extra_specs: dict
1438 ) -> None:
1439 """Process the extended dict to fill up extra_specs.
1440 Args:
1441
1442 extended (dict): Keeping the extra specification of flavor
1443 extra_specs (dict) Dict to be filled to be used during flavor creation
1444
1445 """
1446 quotas = {
1447 "cpu-quota": "cpu",
1448 "mem-quota": "memory",
1449 "vif-quota": "vif",
1450 "disk-io-quota": "disk_io",
1451 }
1452
1453 page_sizes = {
1454 "LARGE": "large",
1455 "SMALL": "small",
1456 "SIZE_2MB": "2MB",
1457 "SIZE_1GB": "1GB",
1458 "PREFER_LARGE": "any",
1459 }
1460
1461 policies = {
1462 "cpu-pinning-policy": "hw:cpu_policy",
1463 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1464 "mem-policy": "hw:numa_mempolicy",
1465 }
1466
1467 numas = extended.get("numas")
1468 if numas:
1469 self._process_numa_parameters_of_flavor(numas, extra_specs)
1470
1471 for quota, item in quotas.items():
1472 if quota in extended.keys():
1473 self.process_resource_quota(extended.get(quota), item, extra_specs)
1474
1475 # Set the mempage size as specified in the descriptor
1476 if extended.get("mempage-size"):
1477 if extended["mempage-size"] in page_sizes.keys():
1478 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1479 else:
1480 # Normally, validations in NBI should not allow this condition.
1481 self.logger.debug(
1482 "Invalid mempage-size %s. Will be ignored",
1483 extended.get("mempage-size"),
1484 )
1485
1486 for policy, hw_policy in policies.items():
1487 if extended.get(policy):
1488 extra_specs[hw_policy] = extended[policy].lower()
1489
1490 @staticmethod
1491 def _get_flavor_details(flavor_data: dict) -> Tuple:
1492 """Returns the details of flavor
1493 Args:
1494 flavor_data (dict): Dictionary that includes required flavor details
1495
1496 Returns:
1497 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1498
1499 """
1500 return (
1501 flavor_data.get("ram", 64),
1502 flavor_data.get("vcpus", 1),
1503 {},
1504 flavor_data.get("extended"),
1505 )
1506
1507 def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
1508 """Adds a tenant flavor to openstack VIM.
1509 if change_name_if_used is True, it will change name in case of conflict,
1510 because name repetition is not supported.
1511
1512 Args:
1513 flavor_data (dict): Flavor details to be processed
1514 change_name_if_used (bool): Change name in case of conflict
1515
1516 Returns:
1517 flavor_id (str): flavor identifier
1518
1519 """
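# Illustrative flavor_data (hypothetical values), matching the keys consumed
# by _get_flavor_details and _process_extended_config_of_flavor:
#     {"name": "small", "ram": 2048, "vcpus": 2, "disk": 10,
#      "extended": {"mempage-size": "LARGE",
#                   "numas": [{"id": 0, "memory": 2, "vcpu": [0, 1]}]}}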
1520 self.logger.debug("Adding flavor '%s'", str(flavor_data))
1521 retry = 0
1522 max_retries = 3
1523 name_suffix = 0
1524
1525 try:
1526 name = flavor_data["name"]
1527 while retry < max_retries:
1528 retry += 1
1529 try:
1530 self._reload_connection()
1531
1532 if change_name_if_used:
1533 name = self._change_flavor_name(name, name_suffix, flavor_data)
1534
1535 ram, vcpus, extra_specs, extended = self._get_flavor_details(
1536 flavor_data
1537 )
1538 if extended:
1539 self._process_extended_config_of_flavor(extended, extra_specs)
1540
1541 # Create flavor
1542
1543 new_flavor = self.nova.flavors.create(
1544 name=name,
1545 ram=ram,
1546 vcpus=vcpus,
1547 disk=flavor_data.get("disk", 0),
1548 ephemeral=flavor_data.get("ephemeral", 0),
1549 swap=flavor_data.get("swap", 0),
1550 is_public=flavor_data.get("is_public", True),
1551 )
1552
1553 # Add metadata
1554 if extra_specs:
1555 new_flavor.set_keys(extra_specs)
1556
1557 return new_flavor.id
1558
1559 except nvExceptions.Conflict as e:
1560 if change_name_if_used and retry < max_retries:
1561 continue
1562
1563 self._format_exception(e)
1564
1565 except (
1566 ksExceptions.ClientException,
1567 nvExceptions.ClientException,
1568 ConnectionError,
1569 KeyError,
1570 ) as e:
1571 self._format_exception(e)
1572
1573 def delete_flavor(self, flavor_id):
1574 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1575 try:
1576 self._reload_connection()
1577 self.nova.flavors.delete(flavor_id)
1578
1579 return flavor_id
1580 # except nvExceptions.BadRequest as e:
1581 except (
1582 nvExceptions.NotFound,
1583 ksExceptions.ClientException,
1584 nvExceptions.ClientException,
1585 ConnectionError,
1586 ) as e:
1587 self._format_exception(e)
1588
1589 def new_image(self, image_dict):
1590 """
1591 Adds a tenant image to VIM. image_dict is a dictionary with:
1592 name: name
1593 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1594 location: path or URI
1595 public: "yes" or "no"
1596 metadata: metadata of the image
1597 Returns the image_id
1598 """
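# Illustrative image_dict (hypothetical values):
#     {"name": "ubuntu20.04", "disk_format": "qcow2",
#      "location": "/tmp/ubuntu20.04.qcow2", "metadata": {"distro": "ubuntu"}}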
1599 retry = 0
1600 max_retries = 3
1601
1602 while retry < max_retries:
1603 retry += 1
1604 try:
1605 self._reload_connection()
1606
1607 # determine format http://docs.openstack.org/developer/glance/formats.html
1608 if "disk_format" in image_dict:
1609 disk_format = image_dict["disk_format"]
1610 else: # autodiscover based on extension
1611 if image_dict["location"].endswith(".qcow2"):
1612 disk_format = "qcow2"
1613 elif image_dict["location"].endswith(".vhd"):
1614 disk_format = "vhd"
1615 elif image_dict["location"].endswith(".vmdk"):
1616 disk_format = "vmdk"
1617 elif image_dict["location"].endswith(".vdi"):
1618 disk_format = "vdi"
1619 elif image_dict["location"].endswith(".iso"):
1620 disk_format = "iso"
1621 elif image_dict["location"].endswith(".aki"):
1622 disk_format = "aki"
1623 elif image_dict["location"].endswith(".ari"):
1624 disk_format = "ari"
1625 elif image_dict["location"].endswith(".ami"):
1626 disk_format = "ami"
1627 else:
1628 disk_format = "raw"
1629
1630 self.logger.debug(
1631 "new_image: '%s' loading from '%s'",
1632 image_dict["name"],
1633 image_dict["location"],
1634 )
1635 if self.vim_type == "VIO":
1636 container_format = "bare"
1637 if "container_format" in image_dict:
1638 container_format = image_dict["container_format"]
1639
1640 new_image = self.glance.images.create(
1641 name=image_dict["name"],
1642 container_format=container_format,
1643 disk_format=disk_format,
1644 )
1645 else:
1646 new_image = self.glance.images.create(name=image_dict["name"])
1647
1648 if image_dict["location"].startswith("http"):
1649 # TODO there is no method for direct download. It must be downloaded locally with requests
1650 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1651 else: # local path
1652 with open(image_dict["location"]) as fimage:
1653 self.glance.images.upload(new_image.id, fimage)
1654 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1655 # image_dict.get("public","yes")=="yes",
1656 # container_format="bare", data=fimage, disk_format=disk_format)
1657
1658 metadata_to_load = image_dict.get("metadata") or {}  # avoid a None dereference below when metadata is absent
1659
1660 # TODO 'location' is a reserved word for current openstack versions. Fixed for VIO;
1661 # please check for openstack
1662 if self.vim_type == "VIO":
1663 metadata_to_load["upload_location"] = image_dict["location"]
1664 else:
1665 metadata_to_load["location"] = image_dict["location"]
1666
1667 self.glance.images.update(new_image.id, **metadata_to_load)
1668
1669 return new_image.id
1670 except (
1671 nvExceptions.Conflict,
1672 ksExceptions.ClientException,
1673 nvExceptions.ClientException,
1674 ) as e:
1675 self._format_exception(e)
1676 except (
1677 HTTPException,
1678 gl1Exceptions.HTTPException,
1679 gl1Exceptions.CommunicationError,
1680 ConnectionError,
1681 ) as e:
1682 if retry < max_retries:
1683 continue  # retry transient connection errors; raise on the last attempt
1684
1685 self._format_exception(e)
1686 except IOError as e: # can not open the file
1687 raise vimconn.VimConnConnectionException(
1688 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1689 http_code=vimconn.HTTP_Bad_Request,
1690 )
1691
1692 def delete_image(self, image_id):
1693 """Deletes a tenant image from openstack VIM. Returns the old id"""
1694 try:
1695 self._reload_connection()
1696 self.glance.images.delete(image_id)
1697
1698 return image_id
1699 except (
1700 nvExceptions.NotFound,
1701 ksExceptions.ClientException,
1702 nvExceptions.ClientException,
1703 gl1Exceptions.CommunicationError,
1704 gl1Exceptions.HTTPNotFound,
1705 ConnectionError,
1706 ) as e: # TODO remove
1707 self._format_exception(e)
1708
1709 def get_image_id_from_path(self, path):
1710 """Get the image id from image path in the VIM database. Returns the image_id"""
1711 try:
1712 self._reload_connection()
1713 images = self.glance.images.list()
1714
1715 for image in images:
1716 if image.metadata.get("location") == path:
1717 return image.id
1718
1719 raise vimconn.VimConnNotFoundException(
1720 "image with location '{}' not found".format(path)
1721 )
1722 except (
1723 ksExceptions.ClientException,
1724 nvExceptions.ClientException,
1725 gl1Exceptions.CommunicationError,
1726 ConnectionError,
1727 ) as e:
1728 self._format_exception(e)
1729
1730 def get_image_list(self, filter_dict={}):
1731 """Obtain tenant images from VIM
1732 Filter_dict can be:
1733 id: image id
1734 name: image name
1735 checksum: image checksum
1736 Returns the image list of dictionaries:
1737 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1738 List can be empty
1739 """
1740 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1741
1742 try:
1743 self._reload_connection()
1744 # filter_dict_os = filter_dict.copy()
1745 # First we filter by the available filter fields: name, id. The others are removed.
1746 image_list = self.glance.images.list()
1747 filtered_list = []
1748
1749 for image in image_list:
1750 try:
1751 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1752 continue
1753
1754 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1755 continue
1756
1757 if (
1758 filter_dict.get("checksum")
1759 and image["checksum"] != filter_dict["checksum"]
1760 ):
1761 continue
1762
1763 filtered_list.append(image.copy())
1764 except gl1Exceptions.HTTPNotFound:
1765 pass
1766
1767 return filtered_list
1768 except (
1769 ksExceptions.ClientException,
1770 nvExceptions.ClientException,
1771 gl1Exceptions.CommunicationError,
1772 ConnectionError,
1773 ) as e:
1774 self._format_exception(e)
1775
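    # Illustrative usage sketch (not part of the connector API; "conn" and the
    # filter values below are hypothetical):
    #
    #   images = conn.get_image_list(
    #       filter_dict={
    #           "name": "ubuntu-20.04",
    #           "checksum": "d41d8cd98f00b204e9800998ecf8427e",
    #       }
    #   )
    #   image_id = images[0]["id"] if images else None
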
1776 def __wait_for_vm(self, vm_id, status):
1777         """Wait until the VM is in the desired status and return True.
1778         Return False if the VM reaches ERROR status.
1779         Raise an exception if the timeout is reached."""
1780 elapsed_time = 0
1781 while elapsed_time < server_timeout:
1782 vm_status = self.nova.servers.get(vm_id).status
1783
1784 if vm_status == status:
1785 return True
1786
1787 if vm_status == "ERROR":
1788 return False
1789
1790 time.sleep(5)
1791 elapsed_time += 5
1792
1793 # if we exceeded the timeout rollback
1794 if elapsed_time >= server_timeout:
1795 raise vimconn.VimConnException(
1796 "Timeout waiting for instance " + vm_id + " to get " + status,
1797 http_code=vimconn.HTTP_Request_Timeout,
1798 )
1799
1800 def _get_openstack_availablity_zones(self):
1801 """
1802         Get the available availability zones from OpenStack
1803 :return:
1804 """
1805 try:
1806 openstack_availability_zone = self.nova.availability_zones.list()
1807 openstack_availability_zone = [
1808 str(zone.zoneName)
1809 for zone in openstack_availability_zone
1810 if zone.zoneName != "internal"
1811 ]
1812
1813 return openstack_availability_zone
1814 except Exception:
1815 return None
1816
1817 def _set_availablity_zones(self):
1818 """
1819         Set the VIM availability zone
1820 :return:
1821 """
1822 if "availability_zone" in self.config:
1823 vim_availability_zones = self.config.get("availability_zone")
1824
1825 if isinstance(vim_availability_zones, str):
1826 self.availability_zone = [vim_availability_zones]
1827 elif isinstance(vim_availability_zones, list):
1828 self.availability_zone = vim_availability_zones
1829 else:
1830 self.availability_zone = self._get_openstack_availablity_zones()
1831
1832 def _get_vm_availability_zone(
1833 self, availability_zone_index, availability_zone_list
1834 ):
1835 """
1836         Return the availability zone to be used by the created VM.
1837 :return: The VIM availability zone to be used or None
1838 """
1839 if availability_zone_index is None:
1840 if not self.config.get("availability_zone"):
1841 return None
1842 elif isinstance(self.config.get("availability_zone"), str):
1843 return self.config["availability_zone"]
1844 else:
1845 # TODO consider using a different parameter at config for default AV and AV list match
1846 return self.config["availability_zone"][0]
1847
1848 vim_availability_zones = self.availability_zone
1849         # Check if the VIM offers enough availability zones as described in the VNFD
1850 if vim_availability_zones and len(availability_zone_list) <= len(
1851 vim_availability_zones
1852 ):
1853             # Check whether all NFV AZ names match VIM AZ names
1854 match_by_index = False
1855 for av in availability_zone_list:
1856 if av not in vim_availability_zones:
1857 match_by_index = True
1858 break
1859
1860 if match_by_index:
1861 return vim_availability_zones[availability_zone_index]
1862 else:
1863 return availability_zone_list[availability_zone_index]
1864 else:
1865 raise vimconn.VimConnConflictException(
1866                 "Not enough availability zones at VIM for this deployment"
1867 )
1868
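    # Illustrative sketch of the selection above (all zone names hypothetical):
    # with self.availability_zone = ["az-a", "az-b"] and
    # availability_zone_list = ["zone-1", "zone-2"], the requested names do not
    # exist at the VIM, so match_by_index becomes True and index 1 resolves to
    # "az-b". With availability_zone_list = ["az-b", "az-a"] all names match,
    # so index 1 resolves directly to "az-a".
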
1869 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1870 """Fill up the security_groups in the port_dict.
1871
1872 Args:
1873 net (dict): Network details
1874 port_dict (dict): Port details
1875
1876 """
1877 if (
1878 self.config.get("security_groups")
1879 and net.get("port_security") is not False
1880 and not self.config.get("no_port_security_extension")
1881 ):
1882 if not self.security_groups_id:
1883 self._get_ids_from_name()
1884
1885 port_dict["security_groups"] = self.security_groups_id
1886
1887 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1888 """Fill up the network binding depending on network type in the port_dict.
1889
1890 Args:
1891 net (dict): Network details
1892 port_dict (dict): Port details
1893
1894 """
1895 if not net.get("type"):
1896 raise vimconn.VimConnException("Type is missing in the network details.")
1897
1898 if net["type"] == "virtual":
1899 pass
1900
1901 # For VF
1902 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1903 port_dict["binding:vnic_type"] = "direct"
1904
1905 # VIO specific Changes
1906 if self.vim_type == "VIO":
1907 # Need to create port with port_security_enabled = False and no-security-groups
1908 port_dict["port_security_enabled"] = False
1909 port_dict["provider_security_groups"] = []
1910 port_dict["security_groups"] = []
1911
1912 else:
1913 # For PT PCI-PASSTHROUGH
1914 port_dict["binding:vnic_type"] = "direct-physical"
1915
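
    # Illustrative sketch of the bindings set above (hypothetical input):
    # net = {"type": "SR-IOV"} yields port_dict["binding:vnic_type"] = "direct"
    # (plus, on VIO only, port_security_enabled=False and empty security-group
    # lists); any other non-virtual type (the PCI-PASSTHROUGH case) yields
    # port_dict["binding:vnic_type"] = "direct-physical"; a "virtual" net
    # leaves port_dict untouched.
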
1916 @staticmethod
1917 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1918 """Set the "ip" parameter in net dictionary.
1919
1920 Args:
1921 new_port (dict): New created port
1922 net (dict): Network details
1923
1924 """
1925 fixed_ips = new_port["port"].get("fixed_ips")
1926
1927 if fixed_ips:
1928 net["ip"] = fixed_ips[0].get("ip_address")
1929 else:
1930 net["ip"] = None
1931
1932 @staticmethod
1933 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1934 """Fill up the mac_address and fixed_ips in port_dict.
1935
1936 Args:
1937 net (dict): Network details
1938 port_dict (dict): Port details
1939
1940 """
1941 if net.get("mac_address"):
1942 port_dict["mac_address"] = net["mac_address"]
1943
1944 if net.get("ip_address"):
1945 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1946 # TODO add "subnet_id": <subnet_id>
1947
1948 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1949 """Create new port using neutron.
1950
1951 Args:
1952 port_dict (dict): Port details
1953 created_items (dict): All created items
1954 net (dict): Network details
1955
1956 Returns:
1957 new_port (dict): New created port
1958
1959 """
1960 new_port = self.neutron.create_port({"port": port_dict})
1961 created_items["port:" + str(new_port["port"]["id"])] = True
1962 net["mac_adress"] = new_port["port"]["mac_address"]
1963 net["vim_id"] = new_port["port"]["id"]
1964
1965 return new_port
1966
1967 def _create_port(
1968 self, net: dict, name: str, created_items: dict
1969 ) -> Tuple[dict, dict]:
1970 """Create port using net details.
1971
1972 Args:
1973 net (dict): Network details
1974 name (str): Name to be used as network name if net dict does not include name
1975 created_items (dict): All created items
1976
1977 Returns:
1978 new_port, port New created port, port dictionary
1979
1980 """
1981
1982 port_dict = {
1983 "network_id": net["net_id"],
1984 "name": net.get("name"),
1985 "admin_state_up": True,
1986 }
1987
1988 if not port_dict["name"]:
1989 port_dict["name"] = name
1990
1991 self._prepare_port_dict_security_groups(net, port_dict)
1992
1993 self._prepare_port_dict_binding(net, port_dict)
1994
1995 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1996
1997 new_port = self._create_new_port(port_dict, created_items, net)
1998
1999 vimconnector._set_fixed_ip(new_port, net)
2000
2001 port = {"port-id": new_port["port"]["id"]}
2002
2003 if float(self.nova.api_version.get_string()) >= 2.32:
2004 port["tag"] = new_port["port"]["name"]
2005
2006 return new_port, port
2007
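    # Illustrative result sketch (ids are hypothetical): for
    # net = {"net_id": "11aa..", "type": "virtual", "ip_address": "10.0.0.10"}
    # this returns the full Neutron port as new_port, and
    # port = {"port-id": "22bb..", "tag": "<port name>"}; the "tag" key is only
    # added when the Nova API microversion is 2.32 or higher, since tagged NIC
    # attachment at boot is not supported below that version.
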
2008 def _prepare_network_for_vminstance(
2009 self,
2010 name: str,
2011 net_list: list,
2012 created_items: dict,
2013 net_list_vim: list,
2014 external_network: list,
2015 no_secured_ports: list,
2016 ) -> None:
2017 """Create port and fill up net dictionary for new VM instance creation.
2018
2019 Args:
2020 name (str): Name of network
2021 net_list (list): List of networks
2022 created_items (dict): All created items belongs to a VM
2023 net_list_vim (list): List of ports
2024 external_network (list): List of external-networks
2025 no_secured_ports (list): Port security disabled ports
2026 """
2027
2028 self._reload_connection()
2029
2030 for net in net_list:
2031 # Skip non-connected iface
2032 if not net.get("net_id"):
2033 continue
2034
2035 new_port, port = self._create_port(net, name, created_items)
2036
2037 net_list_vim.append(port)
2038
2039 if net.get("floating_ip", False):
2040 net["exit_on_floating_ip_error"] = True
2041 external_network.append(net)
2042
2043 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2044 net["exit_on_floating_ip_error"] = False
2045 external_network.append(net)
2046 net["floating_ip"] = self.config.get("use_floating_ip")
2047
2048 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2049 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2050 if net.get("port_security") is False and not self.config.get(
2051 "no_port_security_extension"
2052 ):
2053 no_secured_ports.append(
2054 (
2055 new_port["port"]["id"],
2056 net.get("port_security_disable_strategy"),
2057 )
2058 )
2059
2060 def _prepare_persistent_root_volumes(
2061 self,
2062 name: str,
2063 vm_av_zone: list,
2064 disk: dict,
2065 base_disk_index: int,
2066 block_device_mapping: dict,
2067 existing_vim_volumes: list,
2068 created_items: dict,
2069 ) -> Optional[str]:
2070 """Prepare persistent root volumes for new VM instance.
2071
2072 Args:
2073 name (str): Name of VM instance
2074 vm_av_zone (list): List of availability zones
2075 disk (dict): Disk details
2076 base_disk_index (int): Disk index
2077 block_device_mapping (dict): Block device details
2078 existing_vim_volumes (list): Existing disk details
2079 created_items (dict): All created items belongs to VM
2080
2081 Returns:
2082 boot_volume_id (str): ID of boot volume
2083
2084 """
2085         # Disk may include only vim_volume_id or only vim_id.
2086         # Use an existing persistent root volume, found via vim_volume_id or vim_id
2087 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2088
2089 if disk.get(key_id):
2090 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2091 existing_vim_volumes.append({"id": disk[key_id]})
2092
2093 else:
2094 # Create persistent root volume
2095 volume = self.cinder.volumes.create(
2096 size=disk["size"],
2097 name=name + "vd" + chr(base_disk_index),
2098 imageRef=disk["image_id"],
2099 # Make sure volume is in the same AZ as the VM to be attached to
2100 availability_zone=vm_av_zone,
2101 )
2102 boot_volume_id = volume.id
2103 self.update_block_device_mapping(
2104 volume=volume,
2105 block_device_mapping=block_device_mapping,
2106 base_disk_index=base_disk_index,
2107 disk=disk,
2108 created_items=created_items,
2109 )
2110
2111 return boot_volume_id
2112
2113 @staticmethod
2114 def update_block_device_mapping(
2115 volume: object,
2116 block_device_mapping: dict,
2117 base_disk_index: int,
2118 disk: dict,
2119 created_items: dict,
2120 ) -> None:
2121 """Add volume information to block device mapping dict.
2122 Args:
2123 volume (object): Created volume object
2124 block_device_mapping (dict): Block device details
2125 base_disk_index (int): Disk index
2126 disk (dict): Disk details
2127 created_items (dict): All created items belongs to VM
2128 """
2129 if not volume:
2130 raise vimconn.VimConnException("Volume is empty.")
2131
2132 if not hasattr(volume, "id"):
2133 raise vimconn.VimConnException(
2134 "Created volume is not valid, does not have id attribute."
2135 )
2136
2137 volume_txt = "volume:" + str(volume.id)
2138 if disk.get("keep"):
2139 volume_txt += ":keep"
2140 created_items[volume_txt] = True
2141 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2142
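    # Illustrative state after this call (volume id hypothetical): with
    # base_disk_index = ord("b") and disk = {"size": 10, "keep": True},
    # created_items gains {"volume:3f9c..:keep": True} and
    # block_device_mapping gains {"vdb": "3f9c.."}; the ":keep" suffix is what
    # later shields the volume from deletion in delete_vminstance.
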
2143 def _prepare_non_root_persistent_volumes(
2144 self,
2145 name: str,
2146 disk: dict,
2147 vm_av_zone: list,
2148 block_device_mapping: dict,
2149 base_disk_index: int,
2150 existing_vim_volumes: list,
2151 created_items: dict,
2152 ) -> None:
2153 """Prepare persistent volumes for new VM instance.
2154
2155 Args:
2156 name (str): Name of VM instance
2157 disk (dict): Disk details
2158 vm_av_zone (list): List of availability zones
2159 block_device_mapping (dict): Block device details
2160 base_disk_index (int): Disk index
2161 existing_vim_volumes (list): Existing disk details
2162 created_items (dict): All created items belongs to VM
2163 """
2164 # Non-root persistent volumes
2165         # Disk may include only vim_volume_id or only vim_id.
2166 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2167
2168 if disk.get(key_id):
2169 # Use existing persistent volume
2170 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2171 existing_vim_volumes.append({"id": disk[key_id]})
2172
2173 else:
2174 # Create persistent volume
2175 volume = self.cinder.volumes.create(
2176 size=disk["size"],
2177 name=name + "vd" + chr(base_disk_index),
2178 # Make sure volume is in the same AZ as the VM to be attached to
2179 availability_zone=vm_av_zone,
2180 )
2181 self.update_block_device_mapping(
2182 volume=volume,
2183 block_device_mapping=block_device_mapping,
2184 base_disk_index=base_disk_index,
2185 disk=disk,
2186 created_items=created_items,
2187 )
2188
2189 def _wait_for_created_volumes_availability(
2190 self, elapsed_time: int, created_items: dict
2191 ) -> Optional[int]:
2192 """Wait till created volumes become available.
2193
2194 Args:
2195 elapsed_time (int): Passed time while waiting
2196 created_items (dict): All created items belongs to VM
2197
2198 Returns:
2199 elapsed_time (int): Time spent while waiting
2200
2201 """
2202
2203 while elapsed_time < volume_timeout:
2204 for created_item in created_items:
2205 v, volume_id = (
2206 created_item.split(":")[0],
2207 created_item.split(":")[1],
2208 )
2209 if v == "volume":
2210 if self.cinder.volumes.get(volume_id).status != "available":
2211 break
2212 else:
2213 # All ready: break from while
2214 break
2215
2216 time.sleep(5)
2217 elapsed_time += 5
2218
2219 return elapsed_time
2220
2221 def _wait_for_existing_volumes_availability(
2222 self, elapsed_time: int, existing_vim_volumes: list
2223 ) -> Optional[int]:
2224 """Wait till existing volumes become available.
2225
2226 Args:
2227 elapsed_time (int): Passed time while waiting
2228 existing_vim_volumes (list): Existing volume details
2229
2230 Returns:
2231 elapsed_time (int): Time spent while waiting
2232
2233 """
2234
2235 while elapsed_time < volume_timeout:
2236 for volume in existing_vim_volumes:
2237 if self.cinder.volumes.get(volume["id"]).status != "available":
2238 break
2239 else: # all ready: break from while
2240 break
2241
2242 time.sleep(5)
2243 elapsed_time += 5
2244
2245 return elapsed_time
2246
2247 def _prepare_disk_for_vminstance(
2248 self,
2249 name: str,
2250 existing_vim_volumes: list,
2251 created_items: dict,
2252 vm_av_zone: list,
2253 block_device_mapping: dict,
2254 disk_list: list = None,
2255 ) -> None:
2256 """Prepare all volumes for new VM instance.
2257
2258 Args:
2259 name (str): Name of Instance
2260 existing_vim_volumes (list): List of existing volumes
2261 created_items (dict): All created items belongs to VM
2262 vm_av_zone (list): VM availability zone
2263 block_device_mapping (dict): Block devices to be attached to VM
2264 disk_list (list): List of disks
2265
2266 """
2267 # Create additional volumes in case these are present in disk_list
2268 base_disk_index = ord("b")
2269 boot_volume_id = None
2270 elapsed_time = 0
2271
2272 for disk in disk_list:
2273 if "image_id" in disk:
2274 # Root persistent volume
2275 base_disk_index = ord("a")
2276 boot_volume_id = self._prepare_persistent_root_volumes(
2277 name=name,
2278 vm_av_zone=vm_av_zone,
2279 disk=disk,
2280 base_disk_index=base_disk_index,
2281 block_device_mapping=block_device_mapping,
2282 existing_vim_volumes=existing_vim_volumes,
2283 created_items=created_items,
2284 )
2285 else:
2286 # Non-root persistent volume
2287 self._prepare_non_root_persistent_volumes(
2288 name=name,
2289 disk=disk,
2290 vm_av_zone=vm_av_zone,
2291 block_device_mapping=block_device_mapping,
2292 base_disk_index=base_disk_index,
2293 existing_vim_volumes=existing_vim_volumes,
2294 created_items=created_items,
2295 )
2296 base_disk_index += 1
2297
2298         # Wait until created volumes reach "available" status
2299 elapsed_time = self._wait_for_created_volumes_availability(
2300 elapsed_time, created_items
2301 )
2302         # Wait until existing VIM volumes reach "available" status
2303 elapsed_time = self._wait_for_existing_volumes_availability(
2304 elapsed_time, existing_vim_volumes
2305 )
2306 # If we exceeded the timeout rollback
2307 if elapsed_time >= volume_timeout:
2308 raise vimconn.VimConnException(
2309 "Timeout creating volumes for instance " + name,
2310 http_code=vimconn.HTTP_Request_Timeout,
2311 )
2312 if boot_volume_id:
2313 self.cinder.volumes.set_bootable(boot_volume_id, True)
2314
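    # Illustrative sketch of the mapping built above (ids hypothetical):
    # disk_list = [{"image_id": "img-1", "size": 20}, {"size": 5}] produces a
    # persistent root volume at "vda" (marked bootable at the end) and a data
    # volume at "vdb", i.e.
    # block_device_mapping = {"vda": "<root-vol-id>", "vdb": "<data-vol-id>"}.
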
2315 def _find_the_external_network_for_floating_ip(self):
2316 """Get the external network ip in order to create floating IP.
2317
2318 Returns:
2319 pool_id (str): External network pool ID
2320
2321 """
2322
2323 # Find the external network
2324 external_nets = list()
2325
2326 for net in self.neutron.list_networks()["networks"]:
2327 if net["router:external"]:
2328 external_nets.append(net)
2329
2330 if len(external_nets) == 0:
2331 raise vimconn.VimConnException(
2332 "Cannot create floating_ip automatically since "
2333 "no external network is present",
2334 http_code=vimconn.HTTP_Conflict,
2335 )
2336
2337 if len(external_nets) > 1:
2338 raise vimconn.VimConnException(
2339 "Cannot create floating_ip automatically since "
2340 "multiple external networks are present",
2341 http_code=vimconn.HTTP_Conflict,
2342 )
2343
2344 # Pool ID
2345 return external_nets[0].get("id")
2346
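    # Note: the same lookup could be filtered server side; a minimal sketch,
    # assuming the deployed neutronclient accepts filter kwargs (recent
    # versions do):
    #
    #   external_nets = self.neutron.list_networks(
    #       **{"router:external": True}
    #   )["networks"]
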
2347 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2348 """Trigger neutron to create a new floating IP using external network ID.
2349
2350 Args:
2351 param (dict): Input parameters to create a floating IP
2352 created_items (dict): All created items belongs to new VM instance
2353
2354         Raises:
2355             VimConnException
2356 
2357 """
2358 try:
2359 self.logger.debug("Creating floating IP")
2360 new_floating_ip = self.neutron.create_floatingip(param)
2361 free_floating_ip = new_floating_ip["floatingip"]["id"]
2362 created_items["floating_ip:" + str(free_floating_ip)] = True
2363
2364 except Exception as e:
2365 raise vimconn.VimConnException(
2366 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2367 http_code=vimconn.HTTP_Conflict,
2368 )
2369
2370 def _create_floating_ip(
2371 self, floating_network: dict, server: object, created_items: dict
2372 ) -> None:
2373 """Get the available Pool ID and create a new floating IP.
2374
2375 Args:
2376 floating_network (dict): Dict including external network ID
2377 server (object): Server object
2378 created_items (dict): All created items belongs to new VM instance
2379
2380 """
2381
2382 # Pool_id is available
2383 if (
2384 isinstance(floating_network["floating_ip"], str)
2385 and floating_network["floating_ip"].lower() != "true"
2386 ):
2387 pool_id = floating_network["floating_ip"]
2388
2389 # Find the Pool_id
2390 else:
2391 pool_id = self._find_the_external_network_for_floating_ip()
2392
2393 param = {
2394 "floatingip": {
2395 "floating_network_id": pool_id,
2396 "tenant_id": server.tenant_id,
2397 }
2398 }
2399
2400 self._neutron_create_float_ip(param, created_items)
2401
2402 def _find_floating_ip(
2403 self,
2404 server: object,
2405 floating_ips: list,
2406 floating_network: dict,
2407 ) -> Optional[str]:
2408 """Find the available free floating IPs if there are.
2409
2410 Args:
2411 server (object): Server object
2412 floating_ips (list): List of floating IPs
2413 floating_network (dict): Details of floating network such as ID
2414
2415 Returns:
2416 free_floating_ip (str): Free floating ip address
2417
2418 """
2419 for fip in floating_ips:
2420 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2421 continue
2422
2423 if isinstance(floating_network["floating_ip"], str):
2424 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2425 continue
2426
2427 return fip["id"]
2428
2429 def _assign_floating_ip(
2430 self, free_floating_ip: str, floating_network: dict
2431 ) -> Dict:
2432 """Assign the free floating ip address to port.
2433
2434 Args:
2435 free_floating_ip (str): Floating IP to be assigned
2436 floating_network (dict): ID of floating network
2437
2438 Returns:
2439             fip (dict): Floating ip details
2440
2441 """
2442 # The vim_id key contains the neutron.port_id
2443 self.neutron.update_floatingip(
2444 free_floating_ip,
2445 {"floatingip": {"port_id": floating_network["vim_id"]}},
2446 )
2447 # For race condition ensure not re-assigned to other VM after 5 seconds
2448 time.sleep(5)
2449
2450 return self.neutron.show_floatingip(free_floating_ip)
2451
2452 def _get_free_floating_ip(
2453 self, server: object, floating_network: dict
2454 ) -> Optional[str]:
2455 """Get the free floating IP address.
2456
2457 Args:
2458 server (object): Server Object
2459 floating_network (dict): Floating network details
2460
2461 Returns:
2462 free_floating_ip (str): Free floating ip addr
2463
2464 """
2465
2466         floating_ips = self.neutron.list_floatingips().get("floatingips", [])
2467
2468 # Randomize
2469 random.shuffle(floating_ips)
2470
2471 return self._find_floating_ip(server, floating_ips, floating_network)
2472
2473 def _prepare_external_network_for_vminstance(
2474 self,
2475 external_network: list,
2476 server: object,
2477 created_items: dict,
2478 vm_start_time: float,
2479 ) -> None:
2480 """Assign floating IP address for VM instance.
2481
2482 Args:
2483 external_network (list): ID of External network
2484 server (object): Server Object
2485 created_items (dict): All created items belongs to new VM instance
2486 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2487
2488 Raises:
2489 VimConnException
2490
2491 """
2492 for floating_network in external_network:
2493 try:
2494 assigned = False
2495 floating_ip_retries = 3
2496                 # In case of RO in HA there can be conflicts (two ROs trying to assign the
2497                 # same floating IP), so retry several times
2498 while not assigned:
2499 free_floating_ip = self._get_free_floating_ip(
2500 server, floating_network
2501 )
2502
2503 if not free_floating_ip:
2504 self._create_floating_ip(
2505 floating_network, server, created_items
2506 )
2507
2508 try:
2509 # For race condition ensure not already assigned
2510 fip = self.neutron.show_floatingip(free_floating_ip)
2511
2512 if fip["floatingip"].get("port_id"):
2513 continue
2514
2515 # Assign floating ip
2516 fip = self._assign_floating_ip(
2517 free_floating_ip, floating_network
2518 )
2519
2520 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2521 self.logger.warning(
2522 "floating_ip {} re-assigned to other port".format(
2523 free_floating_ip
2524 )
2525 )
2526 continue
2527
2528 self.logger.debug(
2529 "Assigned floating_ip {} to VM {}".format(
2530 free_floating_ip, server.id
2531 )
2532 )
2533
2534 assigned = True
2535
2536 except Exception as e:
2537                         # OpenStack needs some time after VM creation to assign an IP, so retry if it fails
2538 vm_status = self.nova.servers.get(server.id).status
2539
2540 if vm_status not in ("ACTIVE", "ERROR"):
2541 if time.time() - vm_start_time < server_timeout:
2542 time.sleep(5)
2543 continue
2544 elif floating_ip_retries > 0:
2545 floating_ip_retries -= 1
2546 continue
2547
2548 raise vimconn.VimConnException(
2549 "Cannot create floating_ip: {} {}".format(
2550 type(e).__name__, e
2551 ),
2552 http_code=vimconn.HTTP_Conflict,
2553 )
2554
2555 except Exception as e:
2556 if not floating_network["exit_on_floating_ip_error"]:
2557 self.logger.error("Cannot create floating_ip. %s", str(e))
2558 continue
2559
2560 raise
2561
2562 def _update_port_security_for_vminstance(
2563 self,
2564 no_secured_ports: list,
2565 server: object,
2566 ) -> None:
2567 """Updates the port security according to no_secured_ports list.
2568
2569 Args:
2570 no_secured_ports (list): List of ports that security will be disabled
2571 server (object): Server Object
2572
2573 Raises:
2574 VimConnException
2575
2576 """
2577 # Wait until the VM is active and then disable the port-security
2578 if no_secured_ports:
2579 self.__wait_for_vm(server.id, "ACTIVE")
2580
2581 for port in no_secured_ports:
2582 port_update = {
2583 "port": {"port_security_enabled": False, "security_groups": None}
2584 }
2585
2586 if port[1] == "allow-address-pairs":
2587 port_update = {
2588 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2589 }
2590
2591 try:
2592 self.neutron.update_port(port[0], port_update)
2593
2594 except Exception:
2595 raise vimconn.VimConnException(
2596 "It was not possible to disable port security for port {}".format(
2597 port[0]
2598 )
2599 )
2600
2601 def new_vminstance(
2602 self,
2603 name: str,
2604 description: str,
2605 start: bool,
2606 image_id: str,
2607 flavor_id: str,
2608 affinity_group_list: list,
2609 net_list: list,
2610 cloud_config=None,
2611 disk_list=None,
2612 availability_zone_index=None,
2613 availability_zone_list=None,
2614 ) -> tuple:
2615 """Adds a VM instance to VIM.
2616
2617 Args:
2618 name (str): name of VM
2619 description (str): description
2620 start (bool): indicates if VM must start or boot in pause mode. Ignored
2621             image_id (str): image uuid
2622             flavor_id (str): flavor uuid
2623             affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignored if empty.
2624 net_list (list): list of interfaces, each one is a dictionary with:
2625 name: name of network
2626 net_id: network uuid to connect
2627                     vpci: virtual PCI address to assign, ignored because OpenStack lacks support #TODO
2628 model: interface model, ignored #TODO
2629 mac_address: used for SR-IOV ifaces #TODO for other types
2630 use: 'data', 'bridge', 'mgmt'
2631 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2632 vim_id: filled/added by this function
2633 floating_ip: True/False (or it can be None)
2634 port_security: True/False
2635 cloud_config (dict): (optional) dictionary with:
2636 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2637 users: (optional) list of users to be inserted, each item is a dict with:
2638 name: (mandatory) user name,
2639 key-pairs: (optional) list of strings with the public key to be inserted to the user
2640 user-data: (optional) string is a text script to be passed directly to cloud-init
2641 config-files: (optional). List of files to be transferred. Each item is a dict with:
2642 dest: (mandatory) string with the destination absolute path
2643 encoding: (optional, by default text). Can be one of:
2644 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2645 content : (mandatory) string with the content of the file
2646 permissions: (optional) string with file permissions, typically octal notation '0644'
2647 owner: (optional) file owner, string with the format 'owner:group'
2648 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2649 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2650 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2651 size: (mandatory) string with the size of the disk in GB
2652 vim_id: (optional) should use this existing volume id
2653             availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
2654 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2655 availability_zone_index is None
2656 #TODO ip, security groups
2657
2658 Returns:
2659 A tuple with the instance identifier and created_items or raises an exception on error
2660 created_items can be None or a dictionary where this method can include key-values that will be passed to
2661 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2662 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2663 as not present.
2664
2665 """
2666 self.logger.debug(
2667 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2668 image_id,
2669 flavor_id,
2670 str(net_list),
2671 )
2672
2673 try:
2674 server = None
2675 created_items = {}
2676 net_list_vim = []
2677 # list of external networks to be connected to instance, later on used to create floating_ip
2678 external_network = []
2679 # List of ports with port-security disabled
2680 no_secured_ports = []
2681 block_device_mapping = {}
2682 existing_vim_volumes = []
2683 server_group_id = None
2684             scheduler_hints = {}
2685
2686 # Check the Openstack Connection
2687 self._reload_connection()
2688
2689 # Prepare network list
2690 self._prepare_network_for_vminstance(
2691 name=name,
2692 net_list=net_list,
2693 created_items=created_items,
2694 net_list_vim=net_list_vim,
2695 external_network=external_network,
2696 no_secured_ports=no_secured_ports,
2697 )
2698
2699 # Cloud config
2700 config_drive, userdata = self._create_user_data(cloud_config)
2701
2702 # Get availability Zone
2703 vm_av_zone = self._get_vm_availability_zone(
2704 availability_zone_index, availability_zone_list
2705 )
2706
2707 if disk_list:
2708 # Prepare disks
2709 self._prepare_disk_for_vminstance(
2710 name=name,
2711 existing_vim_volumes=existing_vim_volumes,
2712 created_items=created_items,
2713 vm_av_zone=vm_av_zone,
2714 block_device_mapping=block_device_mapping,
2715 disk_list=disk_list,
2716 )
2717
2718 if affinity_group_list:
2719 # Only first id on the list will be used. Openstack restriction
2720 server_group_id = affinity_group_list[0]["affinity_group_id"]
2721                 scheduler_hints["group"] = server_group_id
2722
2723 self.logger.debug(
2724 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2725 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2726 "block_device_mapping={}, server_group={})".format(
2727 name,
2728 image_id,
2729 flavor_id,
2730 net_list_vim,
2731 self.config.get("security_groups"),
2732 vm_av_zone,
2733 self.config.get("keypair"),
2734 userdata,
2735 config_drive,
2736 block_device_mapping,
2737 server_group_id,
2738 )
2739 )
2740
2741 # Create VM
2742 server = self.nova.servers.create(
2743 name=name,
2744 image=image_id,
2745 flavor=flavor_id,
2746 nics=net_list_vim,
2747 security_groups=self.config.get("security_groups"),
2748 # TODO remove security_groups in future versions. Already at neutron port
2749 availability_zone=vm_av_zone,
2750 key_name=self.config.get("keypair"),
2751 userdata=userdata,
2752 config_drive=config_drive,
2753 block_device_mapping=block_device_mapping,
2754                 scheduler_hints=scheduler_hints,
2755 )
2756
2757 vm_start_time = time.time()
2758
2759 self._update_port_security_for_vminstance(no_secured_ports, server)
2760
2761 self._prepare_external_network_for_vminstance(
2762 external_network=external_network,
2763 server=server,
2764 created_items=created_items,
2765 vm_start_time=vm_start_time,
2766 )
2767
2768 return server.id, created_items
2769
2770 except Exception as e:
2771 server_id = None
2772 if server:
2773 server_id = server.id
2774
2775 try:
2776 created_items = self.remove_keep_tag_from_persistent_volumes(
2777 created_items
2778 )
2779
2780 self.delete_vminstance(server_id, created_items)
2781
2782 except Exception as e2:
2783 self.logger.error("new_vminstance rollback fail {}".format(e2))
2784
2785 self._format_exception(e)
2786
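    # Minimal call sketch (illustrative only; "conn", ids and names are
    # hypothetical):
    #
    #   vm_id, created_items = conn.new_vminstance(
    #       name="vnf-vm-1",
    #       description="test VM",
    #       start=True,
    #       image_id="<image-uuid>",
    #       flavor_id="<flavor-uuid>",
    #       affinity_group_list=[],
    #       net_list=[{"name": "eth0", "net_id": "<net-uuid>",
    #                  "type": "virtual", "use": "mgmt"}],
    #   )
    #
    # On failure the method rolls back: keep tags are stripped from
    # created_items and delete_vminstance removes the recorded ports, volumes
    # and floating IPs before the exception is re-raised.
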
2787 @staticmethod
2788 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2789 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2790
2791 Args:
2792 created_items (dict): All created items belongs to VM
2793
2794 Returns:
2795 updated_created_items (dict): Dict which does not include keep flag for volumes.
2796
2797 """
2798 return {
2799 key.replace(":keep", ""): value for (key, value) in created_items.items()
2800 }
2801
2802 def get_vminstance(self, vm_id):
2803 """Returns the VM instance information from VIM"""
2804 # self.logger.debug("Getting VM from VIM")
2805 try:
2806 self._reload_connection()
2807 server = self.nova.servers.find(id=vm_id)
2808 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2809
2810 return server.to_dict()
2811 except (
2812 ksExceptions.ClientException,
2813 nvExceptions.ClientException,
2814 nvExceptions.NotFound,
2815 ConnectionError,
2816 ) as e:
2817 self._format_exception(e)
2818
2819 def get_vminstance_console(self, vm_id, console_type="vnc"):
2820 """
2821 Get a console for the virtual machine
2822 Params:
2823 vm_id: uuid of the VM
2824 console_type, can be:
2825 "novnc" (by default), "xvpvnc" for VNC types,
2826 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2827 Returns dict with the console parameters:
2828 protocol: ssh, ftp, http, https, ...
2829 server: usually ip address
2830 port: the http, ssh, ... port
2831 suffix: extra text, e.g. the http path and query string
2832 """
2833 self.logger.debug("Getting VM CONSOLE from VIM")
2834
2835 try:
2836 self._reload_connection()
2837 server = self.nova.servers.find(id=vm_id)
2838
2839 if console_type is None or console_type == "novnc":
2840 console_dict = server.get_vnc_console("novnc")
2841 elif console_type == "xvpvnc":
2842 console_dict = server.get_vnc_console(console_type)
2843 elif console_type == "rdp-html5":
2844 console_dict = server.get_rdp_console(console_type)
2845 elif console_type == "spice-html5":
2846 console_dict = server.get_spice_console(console_type)
2847 else:
2848 raise vimconn.VimConnException(
2849 "console type '{}' not allowed".format(console_type),
2850 http_code=vimconn.HTTP_Bad_Request,
2851 )
2852
2853 console_dict1 = console_dict.get("console")
2854
2855 if console_dict1:
2856 console_url = console_dict1.get("url")
2857
2858 if console_url:
2859 # parse console_url
2860 protocol_index = console_url.find("//")
2861 suffix_index = (
2862 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2863 )
2864 port_index = (
2865 console_url[protocol_index + 2 : suffix_index].find(":")
2866 + protocol_index
2867 + 2
2868 )
2869
2870 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2871 return (
2872 -vimconn.HTTP_Internal_Server_Error,
2873 "Unexpected response from VIM",
2874 )
2875
2876 console_dict = {
2877 "protocol": console_url[0:protocol_index],
2878 "server": console_url[protocol_index + 2 : port_index],
2879                         "port": console_url[port_index + 1 : suffix_index],
2880 "suffix": console_url[suffix_index + 1 :],
2881 }
2883
2884 return console_dict
2885 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2886 except (
2887 nvExceptions.NotFound,
2888 ksExceptions.ClientException,
2889 nvExceptions.ClientException,
2890 nvExceptions.BadRequest,
2891 ConnectionError,
2892 ) as e:
2893 self._format_exception(e)
2894
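    # Worked example of the console URL parsing above (URL hypothetical):
    #   console_url = "http://10.0.0.5:6080/vnc_auto.html?token=abc"
    #   protocol_index = 5  -> protocol = "http:" (the colon is kept)
    #   port_index = 15     -> server = "10.0.0.5", port = "6080"
    #   suffix_index = 20   -> suffix = "vnc_auto.html?token=abc"
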
2895 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2896 """Neutron delete ports by id.
2897 Args:
2898 k_id (str): Port id in the VIM
2899 """
2900 try:
2901 port_dict = self.neutron.list_ports()
2902             existing_ports = [port["id"] for port in port_dict.get("ports", [])]
2903
2904 if k_id in existing_ports:
2905 self.neutron.delete_port(k_id)
2906
2907 except Exception as e:
2908 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2909
2910 def _delete_volumes_by_id_wth_cinder(
2911 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2912 ) -> bool:
2913 """Cinder delete volume by id.
2914 Args:
2915 k (str): Full item name in created_items
2916             k_id (str): ID of the volume in VIM
2917 volumes_to_hold (list): Volumes not to delete
2918 created_items (dict): All created items belongs to VM
2919 """
2920 try:
2921 if k_id in volumes_to_hold:
2922 return
2923
2924 if self.cinder.volumes.get(k_id).status != "available":
2925 return True
2926
2927 else:
2928 self.cinder.volumes.delete(k_id)
2929 created_items[k] = None
2930
2931 except Exception as e:
2932 self.logger.error(
2933 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2934 )
2935
2936 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2937 """Neutron delete floating ip by id.
2938 Args:
2939 k (str): Full item name in created_items
2940 k_id (str): ID of floating ip in VIM
2941 created_items (dict): All created items belongs to VM
2942 """
2943 try:
2944 self.neutron.delete_floatingip(k_id)
2945 created_items[k] = None
2946
2947 except Exception as e:
2948 self.logger.error(
2949 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2950 )
2951
2952 @staticmethod
2953 def _get_item_name_id(k: str) -> Tuple[str, str]:
2954 k_item, _, k_id = k.partition(":")
2955 return k_item, k_id
2956
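    # Illustrative examples of the created_items key formats handled here
    # (ids hypothetical):
    #   "port:0aa1.."        -> ("port", "0aa1..")
    #   "floating_ip:7cc3.." -> ("floating_ip", "7cc3..")
    #   "volume:3f9c..:keep" -> ("volume", "3f9c..:keep"); such keep-tagged
    #   keys are filtered out beforehand by
    #   _extract_items_wth_keep_flag_from_created_items, so they are not seen
    #   during deletion.
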
2957 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2958 """Delete VM ports attached to the networks before deleting virtual machine.
2959 Args:
2960 created_items (dict): All created items belongs to VM
2961 """
2962
2963 for k, v in created_items.items():
2964 if not v: # skip already deleted
2965 continue
2966
2967 try:
2968 k_item, k_id = self._get_item_name_id(k)
2969 if k_item == "port":
2970 self._delete_ports_by_id_wth_neutron(k_id)
2971
2972 except Exception as e:
2973 self.logger.error(
2974 "Error deleting port: {}: {}".format(type(e).__name__, e)
2975 )
2976
2977 def _delete_created_items(
2978 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2979 ) -> bool:
2980 """Delete Volumes and floating ip if they exist in created_items."""
2981 for k, v in created_items.items():
2982 if not v: # skip already deleted
2983 continue
2984
2985 try:
2986 k_item, k_id = self._get_item_name_id(k)
2987
2988 if k_item == "volume":
2989 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2990 k, k_id, volumes_to_hold, created_items
2991 )
2992
2993 if unavailable_vol:
2994 keep_waiting = True
2995
2996 elif k_item == "floating_ip":
2997 self._delete_floating_ip_by_id(k, k_id, created_items)
2998
2999 except Exception as e:
3000 self.logger.error("Error deleting {}: {}".format(k, e))
3001
3002 return keep_waiting
3003
3004 @staticmethod
3005 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3006         """Remove the volumes which have the keep flag from created_items
3007
3008 Args:
3009 created_items (dict): All created items belongs to VM
3010
3011 Returns:
3012             created_items (dict): created_items without the keep-flagged volumes
3013 """
3014 return {
3015 key: value
3016 for (key, value) in created_items.items()
3017 if len(key.split(":")) == 2
3018 }
3019
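
    # Illustrative sketch of the two keep-flag helpers (ids hypothetical):
    #   created_items = {"volume:3f9c:keep": True, "volume:77ab": True}
    #   remove_keep_tag_from_persistent_volumes(created_items)
    #       -> {"volume:3f9c": True, "volume:77ab": True}  (rollback path:
    #          every volume becomes deletable)
    #   _extract_items_wth_keep_flag_from_created_items(created_items)
    #       -> {"volume:77ab": True}  (normal delete path: keep-tagged
    #          volumes are preserved)
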
3020 def delete_vminstance(
3021 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3022 ) -> None:
3023 """Removes a VM instance from VIM. Returns the old identifier.
3024 Args:
3025 vm_id (str): Identifier of VM instance
3026 created_items (dict): All created items belongs to VM
3027 volumes_to_hold (list): Volumes_to_hold
3028 """
3029 if created_items is None:
3030 created_items = {}
3031 if volumes_to_hold is None:
3032 volumes_to_hold = []
3033
3034 try:
3035 created_items = self._extract_items_wth_keep_flag_from_created_items(
3036 created_items
3037 )
3038
3039 self._reload_connection()
3040
3041 # Delete VM ports attached to the networks before the virtual machine
3042 if created_items:
3043 self._delete_vm_ports_attached_to_network(created_items)
3044
3045 if vm_id:
3046 self.nova.servers.delete(vm_id)
3047
3048             # Although detached, volumes should be in "available" status before deleting.
3049             # We ensure this in the loop below
3050 keep_waiting = True
3051 elapsed_time = 0
3052
3053 while keep_waiting and elapsed_time < volume_timeout:
3054 keep_waiting = False
3055
3056 # Delete volumes and floating IP.
3057 keep_waiting = self._delete_created_items(
3058 created_items, volumes_to_hold, keep_waiting
3059 )
3060
3061 if keep_waiting:
3062 time.sleep(1)
3063 elapsed_time += 1
3064
3065 except (
3066 nvExceptions.NotFound,
3067 ksExceptions.ClientException,
3068 nvExceptions.ClientException,
3069 ConnectionError,
3070 ) as e:
3071 self._format_exception(e)
3072
3073 def refresh_vms_status(self, vm_list):
3074 """Get the status of the virtual machines and their interfaces/ports
3075 Params: the list of VM identifiers
3076 Returns a dictionary with:
3077 vm_id: #VIM id of this Virtual Machine
3078 status: #Mandatory. Text with one of:
3079 # DELETED (not found at vim)
3080 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3081 # OTHER (Vim reported other status not understood)
3082 # ERROR (VIM indicates an ERROR status)
3083 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3084 # CREATING (on building process), ERROR
3085                 # ACTIVE:NoMgmtIP (active but none of its interfaces has an IP address)
3086 #
3087 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3088 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3089 interfaces:
3090 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3091 mac_address: #Text format XX:XX:XX:XX:XX:XX
3092 vim_net_id: #network id where this interface is connected
3093 vim_interface_id: #interface/port VIM id
3094 ip_address: #null, or text with IPv4, IPv6 address
3095 compute_node: #identification of compute node where PF,VF interface is allocated
3096 pci: #PCI address of the NIC that hosts the PF,VF
3097 vlan: #physical VLAN used for VF
3098 """
3099 vm_dict = {}
3100 self.logger.debug(
3101 "refresh_vms status: Getting tenant VM instance information from VIM"
3102 )
3103
3104 for vm_id in vm_list:
3105 vm = {}
3106
3107 try:
3108 vm_vim = self.get_vminstance(vm_id)
3109
3110 if vm_vim["status"] in vmStatus2manoFormat:
3111 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3112 else:
3113 vm["status"] = "OTHER"
3114 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3115
3116 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3117 vm_vim.pop("user_data", None)
3118 vm["vim_info"] = self.serialize(vm_vim)
3119
3120 vm["interfaces"] = []
3121 if vm_vim.get("fault"):
3122 vm["error_msg"] = str(vm_vim["fault"])
3123
3124 # get interfaces
3125 try:
3126 self._reload_connection()
3127 port_dict = self.neutron.list_ports(device_id=vm_id)
3128
3129 for port in port_dict["ports"]:
3130 interface = {}
3131 interface["vim_info"] = self.serialize(port)
3132 interface["mac_address"] = port.get("mac_address")
3133 interface["vim_net_id"] = port["network_id"]
3134 interface["vim_interface_id"] = port["id"]
3135 # check if OS-EXT-SRV-ATTR:host is there,
3136 # in case of non-admin credentials, it will be missing
3137
3138 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3139 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3140
3141 interface["pci"] = None
3142
3143 # check if binding:profile is there,
3144 # in case of non-admin credentials, it will be missing
3145 if port.get("binding:profile"):
3146 if port["binding:profile"].get("pci_slot"):
3147 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3148 # the slot to 0x00
3149                                 # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
3150                                 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per NIC
3151 pci = port["binding:profile"]["pci_slot"]
3152 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3153 interface["pci"] = pci
3154
3155 interface["vlan"] = None
3156
3157 if port.get("binding:vif_details"):
3158 interface["vlan"] = port["binding:vif_details"].get("vlan")
3159
3160                         # Get the VLAN from the network in case it is not present in the port, for old
3161                         # OpenStack versions and cases where the VLAN is needed at PT
3162 if not interface["vlan"]:
3163 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3164 network = self.neutron.show_network(port["network_id"])
3165
3166 if (
3167 network["network"].get("provider:network_type")
3168 == "vlan"
3169 ):
3170 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3171 interface["vlan"] = network["network"].get(
3172 "provider:segmentation_id"
3173 )
3174
3175 ips = []
3176 # look for floating ip address
3177 try:
3178 floating_ip_dict = self.neutron.list_floatingips(
3179 port_id=port["id"]
3180 )
3181
3182 if floating_ip_dict.get("floatingips"):
3183 ips.append(
3184 floating_ip_dict["floatingips"][0].get(
3185 "floating_ip_address"
3186 )
3187 )
3188 except Exception:
3189 pass
3190
3191 for subnet in port["fixed_ips"]:
3192 ips.append(subnet["ip_address"])
3193
3194 interface["ip_address"] = ";".join(ips)
3195 vm["interfaces"].append(interface)
3196 except Exception as e:
3197 self.logger.error(
3198 "Error getting vm interface information {}: {}".format(
3199 type(e).__name__, e
3200 ),
3201 exc_info=True,
3202 )
3203 except vimconn.VimConnNotFoundException as e:
3204 self.logger.error("Exception getting vm status: %s", str(e))
3205 vm["status"] = "DELETED"
3206 vm["error_msg"] = str(e)
3207 except vimconn.VimConnException as e:
3208 self.logger.error("Exception getting vm status: %s", str(e))
3209 vm["status"] = "VIM_ERROR"
3210 vm["error_msg"] = str(e)
3211
3212 vm_dict[vm_id] = vm
3213
3214 return vm_dict
3215
3216 def action_vminstance(self, vm_id, action_dict, created_items={}):
3217         """Send an action over a VM instance from VIM
3218 Returns None or the console dict if the action was successfully sent to the VIM
3219 """
3220 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3221
3222 try:
3223 self._reload_connection()
3224 server = self.nova.servers.find(id=vm_id)
3225
3226 if "start" in action_dict:
3227 if action_dict["start"] == "rebuild":
3228 server.rebuild()
3229 else:
3230 if server.status == "PAUSED":
3231 server.unpause()
3232 elif server.status == "SUSPENDED":
3233 server.resume()
3234 elif server.status == "SHUTOFF":
3235 server.start()
3236 else:
3237 self.logger.debug(
3238                             "ERROR: Instance is not in SHUTOFF/PAUSED/SUSPENDED state"
3239 )
3240 raise vimconn.VimConnException(
3241 "Cannot 'start' instance while it is in active state",
3242 http_code=vimconn.HTTP_Bad_Request,
3243 )
3244
3245 elif "pause" in action_dict:
3246 server.pause()
3247 elif "resume" in action_dict:
3248 server.resume()
3249 elif "shutoff" in action_dict or "shutdown" in action_dict:
3250 self.logger.debug("server status %s", server.status)
3251 if server.status == "ACTIVE":
3252 server.stop()
3253 else:
3254 self.logger.debug("ERROR: VM is not in Active state")
3255 raise vimconn.VimConnException(
3256 "VM is not in active state, stop operation is not allowed",
3257 http_code=vimconn.HTTP_Bad_Request,
3258 )
3259 elif "forceOff" in action_dict:
3260 server.stop() # TODO
3261 elif "terminate" in action_dict:
3262 server.delete()
3263 elif "createImage" in action_dict:
3264 server.create_image()
3265 # "path":path_schema,
3266 # "description":description_schema,
3267 # "name":name_schema,
3268 # "metadata":metadata_schema,
3269 # "imageRef": id_schema,
3270 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3271 elif "rebuild" in action_dict:
3272 server.rebuild(server.image["id"])
3273 elif "reboot" in action_dict:
3274 server.reboot() # reboot_type="SOFT"
3275 elif "console" in action_dict:
3276 console_type = action_dict["console"]
3277
3278 if console_type is None or console_type == "novnc":
3279 console_dict = server.get_vnc_console("novnc")
3280 elif console_type == "xvpvnc":
3281 console_dict = server.get_vnc_console(console_type)
3282 elif console_type == "rdp-html5":
3283 console_dict = server.get_rdp_console(console_type)
3284 elif console_type == "spice-html5":
3285 console_dict = server.get_spice_console(console_type)
3286 else:
3287 raise vimconn.VimConnException(
3288 "console type '{}' not allowed".format(console_type),
3289 http_code=vimconn.HTTP_Bad_Request,
3290 )
3291
3292 try:
3293 console_url = console_dict["console"]["url"]
3294 # parse console_url
3295 protocol_index = console_url.find("//")
3296 suffix_index = (
3297 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3298 )
3299 port_index = (
3300 console_url[protocol_index + 2 : suffix_index].find(":")
3301 + protocol_index
3302 + 2
3303 )
3304
3305 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3306 raise vimconn.VimConnException(
3307 "Unexpected response from VIM " + str(console_dict)
3308 )
3309
3310 console_dict2 = {
3311 "protocol": console_url[0:protocol_index],
3312 "server": console_url[protocol_index + 2 : port_index],
3313 "port": int(console_url[port_index + 1 : suffix_index]),
3314 "suffix": console_url[suffix_index + 1 :],
3315 }
3316
3317 return console_dict2
3318 except Exception:
3319 raise vimconn.VimConnException(
3320 "Unexpected response from VIM " + str(console_dict)
3321 )
3322
3323 return None
3324 except (
3325 ksExceptions.ClientException,
3326 nvExceptions.ClientException,
3327 nvExceptions.NotFound,
3328 ConnectionError,
3329 ) as e:
3330 self._format_exception(e)
3331 # TODO insert exception vimconn.HTTP_Unauthorized
3332
3333 # ###### VIO Specific Changes #########
3334 def _generate_vlanID(self):
3335 """
3336 Method to get unused vlanID
3337 Args:
3338 None
3339 Returns:
3340 vlanID
3341 """
3342 # Get used VLAN IDs
3343 usedVlanIDs = []
3344 networks = self.get_network_list()
3345
3346 for net in networks:
3347 if net.get("provider:segmentation_id"):
3348 usedVlanIDs.append(net.get("provider:segmentation_id"))
3349
3350 used_vlanIDs = set(usedVlanIDs)
3351
3352 # find unused VLAN ID
3353 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3354 try:
3355 start_vlanid, end_vlanid = map(
3356 int, vlanID_range.replace(" ", "").split("-")
3357 )
3358
3359 for vlanID in range(start_vlanid, end_vlanid + 1):
3360 if vlanID not in used_vlanIDs:
3361 return vlanID
3362 except Exception as exp:
3363 raise vimconn.VimConnException(
3364 "Exception {} occurred while generating VLAN ID.".format(exp)
3365 )
3366 else:
3367 raise vimconn.VimConnConflictException(
3368 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3369 self.config.get("dataplane_net_vlan_range")
3370 )
3371 )
3372
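    # Illustrative sketch (config values hypothetical): with
    # config["dataplane_net_vlan_range"] = ["3000-3100", "3200 - 3210"] and
    # VLAN 3000 already in use, the first free ID returned is 3001; spaces
    # are stripped before the "start-end" split, so both formats are valid.
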
3373 def _generate_multisegment_vlanID(self):
3374 """
3375 Method to get unused vlanID
3376 Args:
3377 None
3378 Returns:
3379 vlanID
3380 """
3381 # Get used VLAN IDs
3382 usedVlanIDs = []
3383 networks = self.get_network_list()
3384 for net in networks:
3385 if net.get("provider:network_type") == "vlan" and net.get(
3386 "provider:segmentation_id"
3387 ):
3388 usedVlanIDs.append(net.get("provider:segmentation_id"))
3389 elif net.get("segments"):
3390 for segment in net.get("segments"):
3391 if segment.get("provider:network_type") == "vlan" and segment.get(
3392 "provider:segmentation_id"
3393 ):
3394 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3395
3396 used_vlanIDs = set(usedVlanIDs)
3397
3398 # find unused VLAN ID
3399 for vlanID_range in self.config.get("multisegment_vlan_range"):
3400 try:
3401 start_vlanid, end_vlanid = map(
3402 int, vlanID_range.replace(" ", "").split("-")
3403 )
3404
3405 for vlanID in range(start_vlanid, end_vlanid + 1):
3406 if vlanID not in used_vlanIDs:
3407 return vlanID
3408 except Exception as exp:
3409 raise vimconn.VimConnException(
3410 "Exception {} occurred while generating VLAN ID.".format(exp)
3411 )
3412 else:
3413 raise vimconn.VimConnConflictException(
3414 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3415 self.config.get("multisegment_vlan_range")
3416 )
3417 )
3418
3419 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3420 """
3421 Method to validate user given vlanID ranges
3422 Args: None
3423 Returns: None
3424 """
3425 for vlanID_range in input_vlan_range:
3426 vlan_range = vlanID_range.replace(" ", "")
3427 # validate format
3428             vlanID_pattern = r"(\d+)-(\d+)$"
3429 match_obj = re.match(vlanID_pattern, vlan_range)
3430 if not match_obj:
3431 raise vimconn.VimConnConflictException(
3432                     "Invalid VLAN range for {}: {}. You must provide "
3433 "'{}' in format [start_ID - end_ID].".format(
3434 text_vlan_range, vlanID_range, text_vlan_range
3435 )
3436 )
3437
3438 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3439 if start_vlanid <= 0:
3440 raise vimconn.VimConnConflictException(
3441                     "Invalid VLAN range for {}: {}. Start ID must be greater than zero. For VLAN "
3442 "networks valid IDs are 1 to 4094 ".format(
3443 text_vlan_range, vlanID_range
3444 )
3445 )
3446
3447 if end_vlanid > 4094:
3448 raise vimconn.VimConnConflictException(
3449                     "Invalid VLAN range for {}: {}. End VLAN ID cannot be "
3450 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3451 text_vlan_range, vlanID_range
3452 )
3453 )
3454
3455 if start_vlanid > end_vlanid:
3456 raise vimconn.VimConnConflictException(
3457 "Invalid VLAN range for {}: {}. You must provide '{}'"
3458 " in format start_ID - end_ID and start_ID < end_ID ".format(
3459 text_vlan_range, vlanID_range, text_vlan_range
3460 )
3461 )
3462
3463 def get_hosts_info(self):
3464 """Get the information of deployed hosts
3465 Returns the hosts content"""
3466 if self.debug:
3467 print("osconnector: Getting Host info from VIM")
3468
3469 try:
3470 h_list = []
3471 self._reload_connection()
3472 hypervisors = self.nova.hypervisors.list()
3473
3474 for hype in hypervisors:
3475 h_list.append(hype.to_dict())
3476
3477 return 1, {"hosts": h_list}
3478 except nvExceptions.NotFound as e:
3479 error_value = -vimconn.HTTP_Not_Found
3480 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3481 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3482 error_value = -vimconn.HTTP_Bad_Request
3483 error_text = (
3484 type(e).__name__
3485 + ": "
3486 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3487 )
3488
3489 # TODO insert exception vimconn.HTTP_Unauthorized
3490 # if reaching here is because an exception
3491 self.logger.debug("get_hosts_info " + error_text)
3492
3493 return error_value, error_text
3494
3495 def get_hosts(self, vim_tenant):
3496 """Get the hosts and deployed instances
3497 Returns the hosts content"""
3498 r, hype_dict = self.get_hosts_info()
3499
3500 if r < 0:
3501 return r, hype_dict
3502
3503 hypervisors = hype_dict["hosts"]
3504
3505 try:
3506 servers = self.nova.servers.list()
3507 for hype in hypervisors:
3508 for server in servers:
3509 if (
3510 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3511 == hype["hypervisor_hostname"]
3512 ):
3513 if "vm" in hype:
3514 hype["vm"].append(server.id)
3515 else:
3516 hype["vm"] = [server.id]
3517
3518 return 1, hype_dict
3519 except nvExceptions.NotFound as e:
3520 error_value = -vimconn.HTTP_Not_Found
3521 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3522 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3523 error_value = -vimconn.HTTP_Bad_Request
3524 error_text = (
3525 type(e).__name__
3526 + ": "
3527 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3528 )
3529
3530 # TODO insert exception vimconn.HTTP_Unauthorized
3531 # if reaching here is because an exception
3532 self.logger.debug("get_hosts " + error_text)
3533
3534 return error_value, error_text
3535
3536 def new_affinity_group(self, affinity_group_data):
3537 """Adds a server group to VIM
3538 affinity_group_data contains a dictionary with information, keys:
3539 name: name in VIM for the server group
3540 type: affinity or anti-affinity
3541 scope: Only nfvi-node allowed
3542 Returns the server group identifier"""
3543 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3544
3545 try:
3546 name = affinity_group_data["name"]
3547 policy = affinity_group_data["type"]
3548
3549 self._reload_connection()
3550 new_server_group = self.nova.server_groups.create(name, policy)
3551
3552 return new_server_group.id
3553 except (
3554 ksExceptions.ClientException,
3555 nvExceptions.ClientException,
3556 ConnectionError,
3557 KeyError,
3558 ) as e:
3559 self._format_exception(e)
3560
3561 def get_affinity_group(self, affinity_group_id):
3562         """Obtain server group details from the VIM. Returns the server group details as a dict"""
3563         self.logger.debug("Getting server group '%s'", affinity_group_id)
3564 try:
3565 self._reload_connection()
3566 server_group = self.nova.server_groups.find(id=affinity_group_id)
3567
3568 return server_group.to_dict()
3569 except (
3570 nvExceptions.NotFound,
3571 nvExceptions.ClientException,
3572 ksExceptions.ClientException,
3573 ConnectionError,
3574 ) as e:
3575 self._format_exception(e)
3576
3577 def delete_affinity_group(self, affinity_group_id):
3578 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3579         self.logger.debug("Deleting server group '%s'", affinity_group_id)
3580 try:
3581 self._reload_connection()
3582 self.nova.server_groups.delete(affinity_group_id)
3583
3584 return affinity_group_id
3585 except (
3586 nvExceptions.NotFound,
3587 ksExceptions.ClientException,
3588 nvExceptions.ClientException,
3589 ConnectionError,
3590 ) as e:
3591 self._format_exception(e)
3592
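    # get_vdu_state() below returns a plain 4-item list, i.e.
    # [status, flavor id, compute host, availability zone]. An illustrative
    # (hypothetical) result for a running instance:
    #
    #   ["ACTIVE", "<flavor-uuid>", "compute-0", "nova"]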
    def get_vdu_state(self, vm_id):
        """
        Get the state of a vdu
        param:
            vm_id: ID of an instance
        """
        self.logger.debug("Getting the status of VM with VIM VM ID %s", vm_id)
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)
        server_dict = server.to_dict()
        vdu_data = [
            server_dict["status"],
            server_dict["flavor"]["id"],
            server_dict["OS-EXT-SRV-ATTR:host"],
            server_dict["OS-EXT-AZ:availability_zone"],
        ]
        self.logger.debug("vdu_data %s", vdu_data)
        return vdu_data

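    # check_compute_availability() below relies on a positional contract: the
    # triple [free RAM (MB), least available disk (GB), free vCPUs] is zipped
    # against server_flavor_details, which callers must build in the matching
    # order [ram, disk, vcpus] (see migrate_instance()). The host name is
    # returned only when every resource strictly exceeds the flavor's demand;
    # otherwise the method implicitly returns None.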
    def check_compute_availability(self, host, server_flavor_details):
        self._reload_connection()
        hypervisor_search = self.nova.hypervisors.search(
            hypervisor_match=host, servers=True
        )
        for hypervisor in hypervisor_search:
            hypervisor_id = hypervisor.to_dict()["id"]
            hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
            hypervisor_dict = hypervisor_details.to_dict()
            resources_available = [
                hypervisor_dict["free_ram_mb"],
                hypervisor_dict["disk_available_least"],
                hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
            ]
            compute_available = all(
                x > y for x, y in zip(resources_available, server_flavor_details)
            )
            if compute_available:
                return host

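    # check_availability_zone() below reports two independent facts:
    # "zone_check" records that the original availability zone was found and
    # the requested host belongs to it (or, with no explicit host, that the
    # zone's other hosts were scanned), while "compute_availability" carries
    # the first host with enough capacity, or None. An illustrative
    # (hypothetical) result:
    #
    #   {"zone_check": True, "compute_availability": "compute-1"}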
    def check_availability_zone(
        self, old_az, server_flavor_details, old_host, host=None
    ):
        self._reload_connection()
        az_check = {"zone_check": False, "compute_availability": None}
        aggregates_list = self.nova.aggregates.list()
        for aggregate in aggregates_list:
            aggregate_details = aggregate.to_dict()
            if aggregate_details["availability_zone"] == old_az:
                hosts_list = aggregate_details["hosts"]
                if host is not None:
                    if host in hosts_list:
                        az_check["zone_check"] = True
                        available_compute_id = self.check_compute_availability(
                            host, server_flavor_details
                        )
                        if available_compute_id is not None:
                            az_check["compute_availability"] = available_compute_id
                else:
                    for check_host in hosts_list:
                        if check_host != old_host:
                            available_compute_id = self.check_compute_availability(
                                check_host, server_flavor_details
                            )
                            if available_compute_id is not None:
                                az_check["zone_check"] = True
                                az_check["compute_availability"] = available_compute_id
                                break
                    else:
                        az_check["zone_check"] = True
        return az_check

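    # Minimal usage sketch for migrate_instance() below (hypothetical values;
    # `vim` stands for a vimconnector instance). compute_host may be omitted
    # to let the connector pick any host with enough capacity in the same
    # availability zone:
    #
    #   state, target = vim.migrate_instance("<vm-uuid>", "compute-1")
    #   # -> ("MIGRATING", "compute-1") after the VM is ACTIVE on the new host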
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu
        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to
        """
        self._reload_connection()
        vm_state = False
        instance_state = self.get_vdu_state(vm_id)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]
        try:
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]
            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            if available_compute_id is not None:
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )
                state = "MIGRATING"
                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                changed_compute_host = self.get_vdu_state(vm_id)[2]
                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )
                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                raise vimconn.VimConnException(
                    "No compute host with enough resources available to migrate instance '{}'".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)

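    # resize_instance() below drives the standard Nova resize flow: request
    # the resize, wait for the server to reach VERIFY_RESIZE, then confirm it
    # through confirm_resize(). Resizing to a flavor with a smaller disk is
    # rejected up front, as Nova cannot shrink an instance's disk.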
    def resize_instance(self, vm_id, new_flavor_id):
        """
        Resize an instance to the given flavor
        param:
            vm_id: ID of an instance
            new_flavor_id: ID of the flavor to resize to
        Returns the status of the resized instance
        """
        self._reload_connection()
        self.logger.debug("Resizing instance %s to flavor %s", vm_id, new_flavor_id)
        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
        try:
            if instance_status in ("ACTIVE", "SHUTOFF"):
                if old_flavor_disk > new_flavor_disk:
                    raise nvExceptions.BadRequest(
                        400,
                        message="Server disk resize failed. Resize to a flavor with a smaller disk is not allowed",
                    )
                else:
                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
                    if vm_state:
                        instance_resized_status = self.confirm_resize(vm_id)
                        return instance_resized_status
                    else:
                        raise nvExceptions.BadRequest(
                            409,
                            message="Cannot 'resize': the instance entered ERROR state while waiting for VERIFY_RESIZE",
                        )
            else:
                self.logger.debug("ERROR: Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.BadRequest(
                    409,
                    message="Cannot 'resize' an instance that is not in ACTIVE or SHUTOFF state",
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)

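    # confirm_resize() finalizes a resize left pending in VERIFY_RESIZE and
    # returns the resulting server status (normally "ACTIVE"). It is invoked
    # by resize_instance() but can also be called on its own.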
    def confirm_resize(self, vm_id):
        """
        Confirm the resize of an instance
        param:
            vm_id: ID of an instance
        """
        self._reload_connection()
        self.nova.servers.confirm_resize(server=vm_id)
        if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
            self.__wait_for_vm(vm_id, "ACTIVE")
        instance_status = self.get_vdu_state(vm_id)[0]
        return instance_status
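
    # End-to-end sketch of the resize flow (hypothetical ids; `vim` stands for
    # a vimconnector instance):
    #
    #   status = vim.resize_instance("<vm-uuid>", "<new-flavor-uuid>")
    #   # waits for VERIFY_RESIZE, confirms, and returns e.g. "ACTIVE"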