# -*- coding: utf-8 -*-

##
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##

"""
osconnector implements all the methods to interact with openstack using the python-neutronclient.

For the VNF forwarding graph, the OpenStack VIM connector calls the
networking-sfc Neutron extension methods, whose resources are mapped
to the VIM connector's SFC resources as follows:
- Classification (OSM) -> Flow Classifier (Neutron)
- Service Function Instance (OSM) -> Port Pair (Neutron)
- Service Function (OSM) -> Port Pair Group (Neutron)
- Service Function Path (OSM) -> Port Chain (Neutron)
"""

import copy
from http.client import HTTPException
import json
import logging
from pprint import pformat
import random
import re
import time
from typing import Dict, List, Optional, Tuple

from cinderclient import client as cClient
from glanceclient import client as glClient
import glanceclient.exc as gl1Exceptions
from keystoneauth1 import session
from keystoneauth1.identity import v2, v3
import keystoneclient.exceptions as ksExceptions
import keystoneclient.v2_0.client as ksClient_v2
import keystoneclient.v3.client as ksClient_v3
import netaddr
from neutronclient.common import exceptions as neExceptions
from neutronclient.neutron import client as neClient
from novaclient import client as nClient, exceptions as nvExceptions
from osm_ro_plugin import vimconn
from requests.exceptions import ConnectionError
import yaml

__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""Mapping from openstack virtual machine status to openmano status"""
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
server_timeout = 1800


class SafeDumper(yaml.SafeDumper):
    def represent_data(self, data):
        # Openstack APIs use custom subclasses of dict and the YAML safe
        # dumper is not designed to handle that (see pyyaml issue 142)
        if isinstance(data, dict) and data.__class__ != dict:
            # A simple solution is to convert those items back to dicts
            data = dict(data.items())

        return super(SafeDumper, self).represent_data(data)


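# Illustrative sketch (not part of the original module): SafeDumper makes
# yaml.dump work with the dict subclasses returned by the OpenStack clients.
# A hypothetical munch-like subclass serializes as a plain mapping:
#
#   class AttrDict(dict):  # stand-in for an OpenStack client return type
#       pass
#
#   yaml.dump(AttrDict(name="net0"), Dumper=SafeDumper,
#             default_flow_style=True, width=256)
#   # -> '{name: net0}\n'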
class vimconnector(vimconn.VimConnector):
    def __init__(
        self,
        uuid,
        name,
        tenant_id,
        tenant_name,
        url,
        url_admin=None,
        user=None,
        passwd=None,
        log_level=None,
        config={},
        persistent_info={},
    ):
        """using common constructor parameters. In this case
        'url' is the keystone authorization url,
        'url_admin' is not used
        """
        api_version = config.get("APIversion")

        if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:APIversion. "
                "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
            )

        vim_type = config.get("vim_type")

        if vim_type and vim_type not in ("vio", "VIO"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:vim_type. "
                "Allowed values are 'vio' or 'VIO'".format(vim_type)
            )

        if config.get("dataplane_net_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
            )

        if config.get("multisegment_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("multisegment_vlan_range"), "multisegment_vlan_range"
            )

        vimconn.VimConnector.__init__(
            self,
            uuid,
            name,
            tenant_id,
            tenant_name,
            url,
            url_admin,
            user,
            passwd,
            log_level,
            config,
        )

        if self.config.get("insecure") and self.config.get("ca_cert"):
            raise vimconn.VimConnException(
                "options insecure and ca_cert are mutually exclusive"
            )

        self.verify = True

        if self.config.get("insecure"):
            self.verify = False

        if self.config.get("ca_cert"):
            self.verify = self.config.get("ca_cert")

        if not url:
            raise TypeError("url param can not be NoneType")

        self.persistent_info = persistent_info
        self.availability_zone = persistent_info.get("availability_zone", None)
        self.session = persistent_info.get("session", {"reload_client": True})
        self.my_tenant_id = self.session.get("my_tenant_id")
        self.nova = self.session.get("nova")
        self.neutron = self.session.get("neutron")
        self.cinder = self.session.get("cinder")
        self.glance = self.session.get("glance")
        # self.glancev1 = self.session.get("glancev1")
        self.keystone = self.session.get("keystone")
        self.api_version3 = self.session.get("api_version3")
        self.vim_type = self.config.get("vim_type")

        if self.vim_type:
            self.vim_type = self.vim_type.upper()

        if self.config.get("use_internal_endpoint"):
            self.endpoint_type = "internalURL"
        else:
            self.endpoint_type = None

        logging.getLogger("urllib3").setLevel(logging.WARNING)
        logging.getLogger("keystoneauth").setLevel(logging.WARNING)
        logging.getLogger("novaclient").setLevel(logging.WARNING)
        self.logger = logging.getLogger("ro.vim.openstack")

        # allow security_groups to be a list or a single string
        if isinstance(self.config.get("security_groups"), str):
            self.config["security_groups"] = [self.config["security_groups"]]

        self.security_groups_id = None

        # ###### VIO Specific Changes #########
        if self.vim_type == "VIO":
            self.logger = logging.getLogger("ro.vim.vio")

        if log_level:
            self.logger.setLevel(getattr(logging, log_level))

    def __getitem__(self, index):
        """Get individual parameters.
        Throw KeyError"""
        if index == "project_domain_id":
            return self.config.get("project_domain_id")
        elif index == "user_domain_id":
            return self.config.get("user_domain_id")
        else:
            return vimconn.VimConnector.__getitem__(self, index)

    def __setitem__(self, index, value):
        """Set individual parameters and mark the session as dirty to force a connection reload.
        Throw KeyError"""
        if index == "project_domain_id":
            self.config["project_domain_id"] = value
        elif index == "user_domain_id":
            self.config["user_domain_id"] = value
        else:
            vimconn.VimConnector.__setitem__(self, index, value)

        self.session["reload_client"] = True

    def serialize(self, value):
        """Serialization of python basic types.

        In the case value is not serializable a message will be logged and a
        simple representation of the data that cannot be converted back to
        python is returned.
        """
        if isinstance(value, str):
            return value

        try:
            return yaml.dump(
                value, Dumper=SafeDumper, default_flow_style=True, width=256
            )
        except yaml.representer.RepresenterError:
            self.logger.debug(
                "The following entity cannot be serialized in YAML:\n\n%s\n\n",
                pformat(value),
                exc_info=True,
            )

            return str(value)

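    # Hedged usage note (added for illustration): serialize() is what the
    # refresh/status methods below use to build "vim_info" strings, e.g.:
    #
    #   conn.serialize({"status": "ACTIVE"})   # -> '{status: ACTIVE}\n'
    #   conn.serialize("already-a-string")     # returned unchanged
    #
    # where "conn" is a hypothetical vimconnector instance.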
    def _reload_connection(self):
        """Called before any operation, it checks whether credentials have changed.
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seems that python client does this task for us :-)
        if self.session["reload_client"]:
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that ends with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require a specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availability zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None

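    # Illustrative sketch of the VIM "config" keys consumed above; the keys
    # are taken from this method, but the values below are assumptions for a
    # fictitious datacenter:
    #
    #   config = {
    #       "APIversion": "3",            # force keystone v3
    #       "region_name": "RegionOne",   # distributed cloud support
    #       "microversion": "2.32",       # enable device role tagging
    #       "insecure": True,             # or "ca_cert": "/path/ca.pem"
    #       "use_internal_endpoint": True,
    #   }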
    def __net_os2mano(self, net_list_dict):
        """Transform the net openstack format to mano format
        net_list_dict can be a list of dict or a single dict"""
        if type(net_list_dict) is dict:
            net_list_ = (net_list_dict,)
        elif type(net_list_dict) is list:
            net_list_ = net_list_dict
        else:
            raise TypeError("param net_list_dict must be a list or a dictionary")
        for net in net_list_:
            if net.get("provider:network_type") == "vlan":
                net["type"] = "data"
            else:
                net["type"] = "bridge"

    def __classification_os2mano(self, class_list_dict):
        """Transform the openstack format (Flow Classifier) to mano format
        (Classification) class_list_dict can be a list of dict or a single dict
        """
        if isinstance(class_list_dict, dict):
            class_list_ = [class_list_dict]
        elif isinstance(class_list_dict, list):
            class_list_ = class_list_dict
        else:
            raise TypeError("param class_list_dict must be a list or a dictionary")
        for classification in class_list_:
            id = classification.pop("id")
            name = classification.pop("name")
            description = classification.pop("description")
            project_id = classification.pop("project_id")
            tenant_id = classification.pop("tenant_id")
            original_classification = copy.deepcopy(classification)
            classification.clear()
            classification["ctype"] = "legacy_flow_classifier"
            classification["definition"] = original_classification
            classification["id"] = id
            classification["name"] = name
            classification["description"] = description
            classification["project_id"] = project_id
            classification["tenant_id"] = tenant_id

    def __sfi_os2mano(self, sfi_list_dict):
        """Transform the openstack format (Port Pair) to mano format (SFI)
        sfi_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfi_list_dict, dict):
            sfi_list_ = [sfi_list_dict]
        elif isinstance(sfi_list_dict, list):
            sfi_list_ = sfi_list_dict
        else:
            raise TypeError("param sfi_list_dict must be a list or a dictionary")

        for sfi in sfi_list_:
            sfi["ingress_ports"] = []
            sfi["egress_ports"] = []

            if sfi.get("ingress"):
                sfi["ingress_ports"].append(sfi["ingress"])

            if sfi.get("egress"):
                sfi["egress_ports"].append(sfi["egress"])

            del sfi["ingress"]
            del sfi["egress"]
            params = sfi.get("service_function_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfi["sfc_encap"] = sfc_encap
            del sfi["service_function_parameters"]

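    # Illustrative example (assumed values): a Neutron port pair such as
    #
    #   {"id": "pp1", "ingress": "port-a", "egress": "port-b",
    #    "service_function_parameters": {"correlation": "mpls"}}
    #
    # is rewritten in place by __sfi_os2mano to the mano SFI form:
    #
    #   {"id": "pp1", "ingress_ports": ["port-a"],
    #    "egress_ports": ["port-b"], "sfc_encap": True}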
    def __sf_os2mano(self, sf_list_dict):
        """Transform the openstack format (Port Pair Group) to mano format (SF)
        sf_list_dict can be a list of dict or a single dict
        """
        if isinstance(sf_list_dict, dict):
            sf_list_ = [sf_list_dict]
        elif isinstance(sf_list_dict, list):
            sf_list_ = sf_list_dict
        else:
            raise TypeError("param sf_list_dict must be a list or a dictionary")

        for sf in sf_list_:
            del sf["port_pair_group_parameters"]
            sf["sfis"] = sf["port_pairs"]
            del sf["port_pairs"]

    def __sfp_os2mano(self, sfp_list_dict):
        """Transform the openstack format (Port Chain) to mano format (SFP)
        sfp_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfp_list_dict, dict):
            sfp_list_ = [sfp_list_dict]
        elif isinstance(sfp_list_dict, list):
            sfp_list_ = sfp_list_dict
        else:
            raise TypeError("param sfp_list_dict must be a list or a dictionary")

        for sfp in sfp_list_:
            params = sfp.pop("chain_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfp["sfc_encap"] = sfc_encap
            sfp["spi"] = sfp.pop("chain_id")
            sfp["classifications"] = sfp.pop("flow_classifiers")
            sfp["service_functions"] = sfp.pop("port_pair_groups")

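    # Illustrative example (assumed values): a Neutron port chain such as
    #
    #   {"id": "pc1", "chain_id": 7, "chain_parameters": {},
    #    "flow_classifiers": ["fc1"], "port_pair_groups": ["ppg1"]}
    #
    # becomes, after __sfp_os2mano:
    #
    #   {"id": "pc1", "spi": 7, "sfc_encap": False,
    #    "classifications": ["fc1"], "service_functions": ["ppg1"]}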
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.

    def _format_exception(self, exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
        message_error = str(exception)
        tip = ""

        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
            ),
        ):
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
            ),
        ):
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
            ),
        ):
            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, vimconn.VimConnException):
            raise exception
        else:  # ()
            self.logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnConnectionException(
                type(exception).__name__ + ": " + message_error
            )

    def _get_ids_from_name(self):
        """
        Obtain ids from name of tenant and security_groups. Store them in self.security_groups_id.
        :return: None
        """
        # get tenant_id if only tenant_name is supplied
        self._reload_connection()

        if not self.my_tenant_id:
            raise vimconn.VimConnConnectionException(
                "Error getting tenant information from name={} id={}".format(
                    self.tenant_name, self.tenant_id
                )
            )

        if self.config.get("security_groups") and not self.security_groups_id:
            # convert from name to id
            neutron_sg_list = self.neutron.list_security_groups(
                tenant_id=self.my_tenant_id
            )["security_groups"]

            self.security_groups_id = []
            for sg in self.config.get("security_groups"):
                for neutron_sg in neutron_sg_list:
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )

    def check_vim_connectivity(self):
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})

    def get_tenant_list(self, filter_dict={}):
        """Obtain tenants of VIM
        filter_dict can contain the following keys:
            name: filter by tenant name
            id: filter by tenant uuid/id
            <other VIM specific>
        Returns the tenant list of dictionaries: [{'name':'<name>', 'id':'<id>', ...}, ...]
        """
        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()

            if self.api_version3:
                project_class_list = self.keystone.projects.list(
                    name=filter_dict.get("name")
                )
            else:
                project_class_list = self.keystone.tenants.findall(**filter_dict)

            project_list = []

            for project in project_class_list:
                if filter_dict.get("id") and filter_dict["id"] != project.id:
                    continue

                project_list.append(project.to_dict())

            return project_list
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_tenant(self, tenant_name, tenant_description):
        """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
        self.logger.debug("Adding a new tenant name: %s", tenant_name)

        try:
            self._reload_connection()

            if self.api_version3:
                project = self.keystone.projects.create(
                    tenant_name,
                    self.config.get("project_domain_id", "default"),
                    description=tenant_description,
                    is_domain=False,
                )
            else:
                project = self.keystone.tenants.create(tenant_name, tenant_description)

            return project.id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_tenant(self, tenant_id):
        """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
        self.logger.debug("Deleting tenant %s from VIM", tenant_id)

        try:
            self._reload_connection()

            if self.api_version3:
                self.keystone.projects.delete(tenant_id)
            else:
                self.keystone.tenants.delete(tenant_id)

            return tenant_id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_network(
        self,
        net_name,
        net_type,
        ip_profile=None,
        shared=False,
        provider_network_profile=None,
    ):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data': underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) semicolon separated list of ip_schema, e.g. X.X.X.X[;X.X.X.X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/used by other tenants/organizations
            'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
                                                             physical-network: physnet-label}
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
        )
        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))

        try:
            vlan = None

            if provider_network_profile:
                vlan = provider_network_profile.get("segmentation-id")

            new_net = None
            created_items = {}
            self._reload_connection()
            network_dict = {"name": net_name, "admin_state_up": True}

            if net_type in ("data", "ptp") or provider_network_profile:
                provider_physical_network = None

                if provider_network_profile and provider_network_profile.get(
                    "physical-network"
                ):
                    provider_physical_network = provider_network_profile.get(
                        "physical-network"
                    )

                # provider-network must be one of the dataplane_physical_net if this is a list. If it is a string
                # or not declared, just skip the check
                if (
                    isinstance(
                        self.config.get("dataplane_physical_net"), (tuple, list)
                    )
                    and provider_physical_network
                    not in self.config["dataplane_physical_net"]
                ):
                    raise vimconn.VimConnConflictException(
                        "Invalid parameter 'provider-network:physical-network' "
                        "for network creation. '{}' is not one of the declared "
                        "list at VIM_config:dataplane_physical_net".format(
                            provider_physical_network
                        )
                    )

                # use the default dataplane_physical_net
                if not provider_physical_network:
                    provider_physical_network = self.config.get(
                        "dataplane_physical_net"
                    )

                # if it is a non-empty list, use the first value. If it is a string use the value directly
                if (
                    isinstance(provider_physical_network, (tuple, list))
                    and provider_physical_network
                ):
                    provider_physical_network = provider_physical_network[0]

                if not provider_physical_network:
                    raise vimconn.VimConnConflictException(
                        "missing information needed for underlay networks. Provide "
                        "'dataplane_physical_net' configuration at VIM or use the NS "
                        "instantiation parameter 'provider-network.physical-network'"
                        " for the VLD"
                    )

                if not self.config.get("multisegment_support"):
                    network_dict[
                        "provider:physical_network"
                    ] = provider_physical_network

                    if (
                        provider_network_profile
                        and "network-type" in provider_network_profile
                    ):
                        network_dict[
                            "provider:network_type"
                        ] = provider_network_profile["network-type"]
                    else:
                        network_dict["provider:network_type"] = self.config.get(
                            "dataplane_network_type", "vlan"
                        )

                    if vlan:
                        network_dict["provider:segmentation_id"] = vlan
                else:
                    # Multi-segment case
                    segment_list = []
                    segment1_dict = {
                        "provider:physical_network": "",
                        "provider:network_type": "vxlan",
                    }
                    segment_list.append(segment1_dict)
                    segment2_dict = {
                        "provider:physical_network": provider_physical_network,
                        "provider:network_type": "vlan",
                    }

                    if vlan:
                        # keep vlanID defined: it is also used below when creating l2gw connections
                        vlanID = vlan
                        segment2_dict["provider:segmentation_id"] = vlanID
                    elif self.config.get("multisegment_vlan_range"):
                        vlanID = self._generate_multisegment_vlanID()
                        segment2_dict["provider:segmentation_id"] = vlanID

                    # else
                    #     raise vimconn.VimConnConflictException(
                    #         "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
                    #         network")
                    segment_list.append(segment2_dict)
                    network_dict["segments"] = segment_list

                # VIO Specific Changes. It needs a concrete VLAN
                if self.vim_type == "VIO" and vlan is None:
                    if self.config.get("dataplane_net_vlan_range") is None:
                        raise vimconn.VimConnConflictException(
                            "You must provide 'dataplane_net_vlan_range' in format "
                            "[start_ID - end_ID] at VIM_config for creating underlay "
                            "networks"
                        )

                    network_dict["provider:segmentation_id"] = self._generate_vlanID()

            network_dict["shared"] = shared

            if self.config.get("disable_network_port_security"):
                network_dict["port_security_enabled"] = False

            if self.config.get("neutron_availability_zone_hints"):
                hints = self.config.get("neutron_availability_zone_hints")

                if isinstance(hints, str):
                    hints = [hints]

                network_dict["availability_zone_hints"] = hints

            new_net = self.neutron.create_network({"network": network_dict})
            # print new_net
            # create subnetwork, even if there is no profile

            if not ip_profile:
                ip_profile = {}

            if not ip_profile.get("subnet_address"):
                # Fake subnet is required
                subnet_rand = random.randint(0, 255)
                ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)

            if "ip_version" not in ip_profile:
                ip_profile["ip_version"] = "IPv4"

            subnet = {
                "name": net_name + "-subnet",
                "network_id": new_net["network"]["id"],
                "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
                "cidr": ip_profile["subnet_address"],
            }

            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
            if ip_profile.get("gateway_address"):
                subnet["gateway_ip"] = ip_profile["gateway_address"]
            else:
                subnet["gateway_ip"] = None

            if ip_profile.get("dns_address"):
                subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")

            if "dhcp_enabled" in ip_profile:
                subnet["enable_dhcp"] = (
                    False
                    if ip_profile["dhcp_enabled"] == "false"
                    or ip_profile["dhcp_enabled"] is False
                    else True
                )

            if ip_profile.get("dhcp_start_address"):
                subnet["allocation_pools"] = []
                subnet["allocation_pools"].append(dict())
                subnet["allocation_pools"][0]["start"] = ip_profile[
                    "dhcp_start_address"
                ]

            if ip_profile.get("dhcp_count"):
                # parts = ip_profile["dhcp_start_address"].split(".")
                # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
                ip_int += ip_profile["dhcp_count"] - 1
                ip_str = str(netaddr.IPAddress(ip_int))
                subnet["allocation_pools"][0]["end"] = ip_str

            # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
            self.neutron.create_subnet({"subnet": subnet})

            if net_type == "data" and self.config.get("multisegment_support"):
                if self.config.get("l2gw_support"):
                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
                    for l2gw in l2gw_list:
                        l2gw_conn = {
                            "l2_gateway_id": l2gw["id"],
                            "network_id": new_net["network"]["id"],
                            "segmentation_id": str(vlanID),
                        }
                        new_l2gw_conn = self.neutron.create_l2_gateway_connection(
                            {"l2_gateway_connection": l2gw_conn}
                        )
                        created_items[
                            "l2gwconn:"
                            + str(new_l2gw_conn["l2_gateway_connection"]["id"])
                        ] = True

            return new_net["network"]["id"], created_items
        except Exception as e:
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )

            if new_net:
                self.neutron.delete_network(new_net["network"]["id"])

            self._format_exception(e)

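    # Hedged usage sketch (names and addresses are assumptions, not taken from
    # the original source): creating an overlay network with a DHCP pool of
    # 100 addresses starting at .10 would look like
    #
    #   net_id, created_items = conn.new_network(
    #       "mgmt-net",
    #       "bridge",
    #       ip_profile={
    #           "ip_version": "IPv4",
    #           "subnet_address": "10.0.0.0/24",
    #           "gateway_address": "10.0.0.1",
    #           "dhcp_enabled": True,
    #           "dhcp_start_address": "10.0.0.10",
    #           "dhcp_count": 100,
    #       },
    #   )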
    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM
        Filter_dict can be:
            name: network name
            id: network uuid
            shared: boolean
            tenant_id: tenant
            admin_state_up: boolean
            status: 'ACTIVE'
        Returns the network list of dictionaries
        """
        self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()
            filter_dict_os = filter_dict.copy()

            if self.api_version3 and "tenant_id" in filter_dict_os:
                # TODO check
                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

            net_dict = self.neutron.list_networks(**filter_dict_os)
            net_list = net_dict["networks"]
            self.__net_os2mano(net_list)

            return net_list
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_network(self, net_id):
        """Obtain details of network from VIM
        Returns the network information from a network id"""
        self.logger.debug(" Getting tenant network %s from VIM", net_id)
        filter_dict = {"id": net_id}
        net_list = self.get_network_list(filter_dict)

        if len(net_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Network '{}' not found".format(net_id)
            )
        elif len(net_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one network with this criteria"
            )

        net = net_list[0]
        subnets = []
        for subnet_id in net.get("subnets", ()):
            try:
                subnet = self.neutron.show_subnet(subnet_id)
            except Exception as e:
                self.logger.error(
                    "osconnector.get_network(): Error getting subnet %s %s"
                    % (net_id, str(e))
                )
                subnet = {"id": subnet_id, "fault": str(e)}

            subnets.append(subnet)

        net["subnets"] = subnets
        net["encapsulation"] = net.get("provider:network_type")
        net["encapsulation_type"] = net.get("provider:network_type")
        net["segmentation_id"] = net.get("provider:segmentation_id")
        net["encapsulation_id"] = net.get("provider:segmentation_id")

        return net

    def delete_network(self, net_id, created_items=None):
        """
        Removes a tenant network from VIM and its associated elements
        :param net_id: VIM identifier of the network, provided by method new_network
        :param created_items: dictionary with extra items to be deleted, provided by method new_network
        Returns the network identifier or raises an exception upon error or when network is not found
        """
        self.logger.debug("Deleting network '%s' from VIM", net_id)

        if created_items is None:
            created_items = {}

        try:
            self._reload_connection()
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")
                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e).__name__, e
                        )
                    )

            # delete VM ports attached to this network before the network
            ports = self.neutron.list_ports(network_id=net_id)
            for p in ports["ports"]:
                try:
                    self.neutron.delete_port(p["id"])
                except Exception as e:
                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))

            self.neutron.delete_network(net_id)

            return net_id
        except (
            neExceptions.ConnectionFailed,
            neExceptions.NetworkNotFoundClient,
            neExceptions.NeutronException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def refresh_nets_status(self, net_list):
        """Get the status of the networks
        Params: the list of network identifiers
        Returns a dictionary with:
            net_id:         #VIM id of this network
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, INACTIVE, DOWN (admin down),
                            #  BUILD (on building process)
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
        """
        net_dict = {}

        for net_id in net_list:
            net = {}

            try:
                net_vim = self.get_network(net_id)

                if net_vim["status"] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[net_vim["status"]]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim["status"]

                if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                    net["status"] = "DOWN"

                net["vim_info"] = self.serialize(net_vim)

                if net_vim.get("fault"):  # TODO
                    net["error_msg"] = str(net_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "DELETED"
                net["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "VIM_ERROR"
                net["error_msg"] = str(e)
            net_dict[net_id] = net
        return net_dict

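    # Illustrative return value (ids and statuses assumed): for
    # net_list = ["uuid-1", "uuid-2"], refresh_nets_status could return
    #
    #   {"uuid-1": {"status": "ACTIVE", "vim_info": "..."},
    #    "uuid-2": {"status": "DELETED", "error_msg": "..."}}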
    def get_flavor(self, flavor_id):
        """Obtain flavor details from the VIM. Returns the flavor dict details"""
        self.logger.debug("Getting flavor '%s'", flavor_id)

        try:
            self._reload_connection()
            flavor = self.nova.flavors.find(id=flavor_id)
            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)

            return flavor.to_dict()
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that matches the flavor description
        Returns the flavor_id or raises a VimConnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closest flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a
        VimConnNotFoundException is raised
        """
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
                # if len(numas) > 1:
                #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
                # numa=numas[0]
                # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                if epa:
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    @staticmethod
    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
        """Process resource quota and fill up extra_specs.
        Args:
            quota (dict): Keeping the quota of resources
            prefix (str) Prefix
            extra_specs (dict) Dict to be filled to be used during flavor creation

        """
        if "limit" in quota:
            extra_specs["quota:" + prefix + "_limit"] = quota["limit"]

        if "reserve" in quota:
            extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]

        if "shares" in quota:
            extra_specs["quota:" + prefix + "_shares_level"] = "custom"
            extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
    @staticmethod
    def process_numa_memory(
        numa: dict, node_id: Optional[int], extra_specs: dict
    ) -> None:
        """Set the memory in extra_specs.
        Args:
            numa (dict): A dictionary which includes numa information
            node_id (int): ID of numa node
            extra_specs (dict): To be filled.

        """
        if not numa.get("memory"):
            return
        memory_mb = numa["memory"] * 1024
        memory = "hw:numa_mem.{}".format(node_id)
        extra_specs[memory] = int(memory_mb)

    @staticmethod
    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
        """Set the cpu in extra_specs.
        Args:
            numa (dict): A dictionary which includes numa information
            node_id (int): ID of numa node
            extra_specs (dict): To be filled.

        """
        if not numa.get("vcpu"):
            return
        vcpu = numa["vcpu"]
        cpu = "hw:numa_cpus.{}".format(node_id)
        vcpu = ",".join(map(str, vcpu))
        extra_specs[cpu] = vcpu

    @staticmethod
    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has paired-threads.
        Args:
            numa (dict): A dictionary which includes numa information
            extra_specs (dict): To be filled.

        Returns:
            threads (int) Number of virtual cpus

        """
        if not numa.get("paired-threads"):
            return

        # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
        threads = numa["paired-threads"] * 2
        extra_specs["hw:cpu_thread_policy"] = "require"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return threads

    @staticmethod
    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has cores.
        Args:
            numa (dict): A dictionary which includes numa information
            extra_specs (dict): To be filled.

        Returns:
            cores (int) Number of virtual cpus

        """
        # cpu_thread_policy "isolate" implies that the host must not have an SMT
        # architecture, or a non-SMT architecture will be emulated
        if not numa.get("cores"):
            return
        cores = numa["cores"]
        extra_specs["hw:cpu_thread_policy"] = "isolate"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return cores

    @staticmethod
    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has threads.
        Args:
            numa (dict): A dictionary which includes numa information
            extra_specs (dict): To be filled.

        Returns:
            threads (int) Number of virtual cpus

        """
        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
        if not numa.get("threads"):
            return
        threads = numa["threads"]
        extra_specs["hw:cpu_thread_policy"] = "prefer"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return threads

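    # Illustrative example (values assumed): a numa dict like
    # {"id": 0, "paired-threads": 2} drives process_numa_paired_threads to
    # return 4 virtual cpus and to set
    #
    #   extra_specs["hw:cpu_thread_policy"] = "require"
    #   extra_specs["hw:cpu_policy"] = "dedicated"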
    def _process_numa_parameters_of_flavor(
        self, numas: List, extra_specs: Dict
    ) -> None:
        """Process numa parameters and fill up extra_specs.

        Args:
            numas (list): List of dictionaries which include numa information
            extra_specs (dict): To be filled.

        """
        numa_nodes = len(numas)
        extra_specs["hw:numa_nodes"] = str(numa_nodes)
        cpu_cores, cpu_threads = 0, 0

        if self.vim_type == "VIO":
            self.process_vio_numa_nodes(numa_nodes, extra_specs)

        for numa in numas:
            if "id" in numa:
                node_id = numa["id"]
                # overwrite ram and vcpus
                # check if key "memory" is present in numa else use ram value at flavor
                self.process_numa_memory(numa, node_id, extra_specs)
                self.process_numa_vcpu(numa, node_id, extra_specs)

            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
            extra_specs["hw:cpu_sockets"] = str(numa_nodes)

            if "paired-threads" in numa:
                threads = self.process_numa_paired_threads(numa, extra_specs)
                cpu_threads += threads

            elif "cores" in numa:
                cores = self.process_numa_cores(numa, extra_specs)
                cpu_cores += cores

            elif "threads" in numa:
                threads = self.process_numa_threads(numa, extra_specs)
                cpu_threads += threads

        if cpu_cores:
            extra_specs["hw:cpu_cores"] = str(cpu_cores)
        if cpu_threads:
            extra_specs["hw:cpu_threads"] = str(cpu_threads)

    @staticmethod
    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
        """According to the number of numa nodes, update the extra_specs for VIO.

        Args:

            numa_nodes (int): Number of numa nodes
            extra_specs (dict): Extra specs dict to be updated

        """
        # If there is not any numa, numa_nodes equals 0.
        if not numa_nodes:
            extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'

        # If there are several numas, we do not define specific affinity.
        extra_specs["vmware:latency_sensitivity_level"] = "high"

    def _change_flavor_name(
        self, name: str, name_suffix: int, flavor_data: dict
    ) -> str:
        """Change the flavor name if the name already exists.

        Args:
            name (str): Flavor name to be checked
            name_suffix (int): Suffix to be appended to name
            flavor_data (dict): Flavor dict

        Returns:
            name (str): New flavor name to be used

        """
        # Get used names
        fl = self.nova.flavors.list()
        fl_names = [f.name for f in fl]

        while name in fl_names:
            name_suffix += 1
            name = flavor_data["name"] + "-" + str(name_suffix)

        return name

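    # Illustrative example (existing names assumed): with flavors "small" and
    # "small-1" already present at the VIM, _change_flavor_name("small", 0,
    # {"name": "small"}) walks the suffixes and returns "small-2".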
    def _process_extended_config_of_flavor(
        self, extended: dict, extra_specs: dict
    ) -> None:
        """Process the extended dict to fill up extra_specs.
        Args:

            extended (dict): Keeping the extra specification of flavor
            extra_specs (dict): Dict to be filled to be used during flavor creation

        """
        quotas = {
            "cpu-quota": "cpu",
            "mem-quota": "memory",
            "vif-quota": "vif",
            "disk-io-quota": "disk_io",
        }

        page_sizes = {
            "LARGE": "large",
            "SMALL": "small",
            "SIZE_2MB": "2MB",
            "SIZE_1GB": "1GB",
            "PREFER_LARGE": "any",
        }

        policies = {
            "cpu-pinning-policy": "hw:cpu_policy",
            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
            "mem-policy": "hw:numa_mempolicy",
        }

        numas = extended.get("numas")
        if numas:
            self._process_numa_parameters_of_flavor(numas, extra_specs)

        for quota, item in quotas.items():
            if quota in extended.keys():
                self.process_resource_quota(extended.get(quota), item, extra_specs)

        # Set the mempage size as specified in the descriptor
        if extended.get("mempage-size"):
            if extended["mempage-size"] in page_sizes.keys():
                extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
            else:
                # Normally, validations in NBI should not allow this condition.
                self.logger.debug(
                    "Invalid mempage-size %s. Will be ignored",
                    extended.get("mempage-size"),
                )

        for policy, hw_policy in policies.items():
            if extended.get(policy):
                extra_specs[hw_policy] = extended[policy].lower()

    @staticmethod
    def _get_flavor_details(flavor_data: dict) -> Tuple:
        """Returns the details of flavor
        Args:
            flavor_data (dict): Dictionary that includes required flavor details

        Returns:
            ram, vcpus, extra_specs, extended (tuple): Main items of required flavor

        """
        return (
            flavor_data.get("ram", 64),
            flavor_data.get("vcpus", 1),
            {},
            flavor_data.get("extended"),
        )

    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        If change_name_if_used is True, it will change the name in case of conflict,
        because name repetition is not supported.

        Args:
            flavor_data (dict): Flavor details to be processed
            change_name_if_used (bool): Change name in case of conflict

        Returns:
            flavor_id (str): flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )
                    if extended:
                        self._process_extended_config_of_flavor(extended, extra_specs)

                    # Create flavor

                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)

        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)

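    # Hedged usage sketch (values are assumptions, not taken from the original
    # source): a flavor with pinned CPUs on one numa node could be requested as
    #
    #   flavor_id = conn.new_flavor(
    #       {
    #           "name": "vnf-flavor",
    #           "ram": 4096,
    #           "vcpus": 4,
    #           "disk": 10,
    #           "extended": {"numas": [{"id": 0, "cores": 4, "memory": 4}]},
    #       }
    #   )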
    def delete_flavor(self, flavor_id):
        """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
        try:
            self._reload_connection()
            self.nova.flavors.delete(flavor_id)

            return flavor_id
        # except nvExceptions.BadRequest as e:
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_image(self, image_dict):
        """
        Adds a tenant image to VIM. image_dict is a dictionary with:
            name: name
            disk_format: qcow2, vhd, vmdk, raw (by default), ...
            location: path or URI
            public: "yes" or "no"
            metadata: metadata of the image
        Returns the image_id
        """
        retry = 0
        max_retries = 3

        while retry < max_retries:
            retry += 1
            try:
                self._reload_connection()

                # determine format http://docs.openstack.org/developer/glance/formats.html
                if "disk_format" in image_dict:
                    disk_format = image_dict["disk_format"]
                else:  # autodiscover based on extension
                    if image_dict["location"].endswith(".qcow2"):
                        disk_format = "qcow2"
                    elif image_dict["location"].endswith(".vhd"):
                        disk_format = "vhd"
                    elif image_dict["location"].endswith(".vmdk"):
                        disk_format = "vmdk"
                    elif image_dict["location"].endswith(".vdi"):
                        disk_format = "vdi"
                    elif image_dict["location"].endswith(".iso"):
                        disk_format = "iso"
                    elif image_dict["location"].endswith(".aki"):
                        disk_format = "aki"
                    elif image_dict["location"].endswith(".ari"):
                        disk_format = "ari"
                    elif image_dict["location"].endswith(".ami"):
                        disk_format = "ami"
                    else:
                        disk_format = "raw"

                self.logger.debug(
                    "new_image: '%s' loading from '%s'",
                    image_dict["name"],
                    image_dict["location"],
                )
                if self.vim_type == "VIO":
                    container_format = "bare"
                    if "container_format" in image_dict:
                        container_format = image_dict["container_format"]

                    new_image = self.glance.images.create(
                        name=image_dict["name"],
                        container_format=container_format,
                        disk_format=disk_format,
                    )
                else:
                    new_image = self.glance.images.create(name=image_dict["name"])

                if image_dict["location"].startswith("http"):
                    # TODO there is not a method to direct download. It must be downloaded locally with requests
                    raise vimconn.VimConnNotImplemented("Cannot create image from URL")
                else:  # local path
                    with open(image_dict["location"]) as fimage:
                        self.glance.images.upload(new_image.id, fimage)
                        # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                        # image_dict.get("public","yes")=="yes",
                        # container_format="bare", data=fimage, disk_format=disk_format)

                metadata_to_load = image_dict.get("metadata")

                # TODO location is a reserved word for current openstack versions. fixed for VIO please check
                # for openstack
                if self.vim_type == "VIO":
                    metadata_to_load["upload_location"] = image_dict["location"]
                else:
                    metadata_to_load["location"] = image_dict["location"]

                self.glance.images.update(new_image.id, **metadata_to_load)

                return new_image.id
            except (
                nvExceptions.Conflict,
                ksExceptions.ClientException,
                nvExceptions.ClientException,
            ) as e:
                self._format_exception(e)
            except (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
            ) as e:
                # retry on transient connection errors; raise once retries are exhausted
                if retry < max_retries:
                    continue

                self._format_exception(e)
            except IOError as e:  # can not open the file
                raise vimconn.VimConnConnectionException(
                    "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                    http_code=vimconn.HTTP_Bad_Request,
                )

    def delete_image(self, image_id):
        """Deletes a tenant image from openstack VIM. Returns the old id"""
        try:
            self._reload_connection()
            self.glance.images.delete(image_id)

            return image_id
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            gl1Exceptions.HTTPNotFound,
            ConnectionError,
        ) as e:  # TODO remove
            self._format_exception(e)

    def get_image_id_from_path(self, path):
        """Get the image id from image path in the VIM database. Returns the image_id"""
        try:
            self._reload_connection()
            images = self.glance.images.list()

            for image in images:
                if image.metadata.get("location") == path:
                    return image.id

            raise vimconn.VimConnNotFoundException(
                "image with location '{}' not found".format(path)
            )
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_image_list(self, filter_dict={}):
        """Obtain tenant images from VIM
        Filter_dict can be:
            id: image id
            name: image name
            checksum: image checksum
        Returns the image list of dictionaries:
            [{<the fields at Filter_dict plus some VIM specific>}, ...]
            List can be empty
        """
        self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()
            # filter_dict_os = filter_dict.copy()
            # First we filter by the available filter fields: name, id. The others are removed.
            image_list = self.glance.images.list()
            filtered_list = []

            for image in image_list:
                try:
                    if filter_dict.get("name") and image["name"] != filter_dict["name"]:
                        continue

                    if filter_dict.get("id") and image["id"] != filter_dict["id"]:
                        continue

                    if (
                        filter_dict.get("checksum")
                        and image["checksum"] != filter_dict["checksum"]
                    ):
                        continue

                    filtered_list.append(image.copy())
                except gl1Exceptions.HTTPNotFound:
                    pass

            return filtered_list
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            self._format_exception(e)

1768 def __wait_for_vm(self, vm_id, status):
1769 """wait until vm is in the desired status and return True.
1770 If the VM gets in ERROR status, return false.
1771 If the timeout is reached generate an exception"""
1772 elapsed_time = 0
1773 while elapsed_time < server_timeout:
1774 vm_status = self.nova.servers.get(vm_id).status
1775
1776 if vm_status == status:
1777 return True
1778
1779 if vm_status == "ERROR":
1780 return False
1781
1782 time.sleep(5)
1783 elapsed_time += 5
1784
1785 # if we exceeded the timeout rollback
1786 if elapsed_time >= server_timeout:
1787 raise vimconn.VimConnException(
1788 "Timeout waiting for instance " + vm_id + " to get " + status,
1789 http_code=vimconn.HTTP_Request_Timeout,
1790 )
1791
1792 def _get_openstack_availablity_zones(self):
1793 """
1794 Get the availability zones available in OpenStack
1795 :return: list of zone names (excluding 'internal'), or None on error
1796 """
1797 try:
1798 openstack_availability_zone = self.nova.availability_zones.list()
1799 openstack_availability_zone = [
1800 str(zone.zoneName)
1801 for zone in openstack_availability_zone
1802 if zone.zoneName != "internal"
1803 ]
1804
1805 return openstack_availability_zone
1806 except Exception:
1807 return None
1808
1809 def _set_availablity_zones(self):
1810 """
1811 Set the VIM availability zones
1812 :return:
1813 """
1814 if "availability_zone" in self.config:
1815 vim_availability_zones = self.config.get("availability_zone")
1816
1817 if isinstance(vim_availability_zones, str):
1818 self.availability_zone = [vim_availability_zones]
1819 elif isinstance(vim_availability_zones, list):
1820 self.availability_zone = vim_availability_zones
1821 else:
1822 self.availability_zone = self._get_openstack_availablity_zones()
1823
1824 def _get_vm_availability_zone(
1825 self, availability_zone_index, availability_zone_list
1826 ):
1827 """
1828 Return the availability zone to be used by the created VM.
1829 :return: The VIM availability zone to be used, or None
1830 """
1831 if availability_zone_index is None:
1832 if not self.config.get("availability_zone"):
1833 return None
1834 elif isinstance(self.config.get("availability_zone"), str):
1835 return self.config["availability_zone"]
1836 else:
1837 # TODO consider using a different parameter at config for default AV and AV list match
1838 return self.config["availability_zone"][0]
1839
1840 vim_availability_zones = self.availability_zone
1841 # check that the VIM offers enough availability zones for those described in the VNFD
1842 if vim_availability_zones and len(availability_zone_list) <= len(
1843 vim_availability_zones
1844 ):
1845 # if any VNFD AZ name is not present at the VIM, match by index instead of by name
1846 match_by_index = False
1847 for av in availability_zone_list:
1848 if av not in vim_availability_zones:
1849 match_by_index = True
1850 break
1851
1852 if match_by_index:
1853 return vim_availability_zones[availability_zone_index]
1854 else:
1855 return availability_zone_list[availability_zone_index]
1856 else:
1857 raise vimconn.VimConnConflictException(
1858 "No enough availability zones at VIM for this deployment"
1859 )
1860
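# Worked example of _get_vm_availability_zone() above (zone names illustrative):
#   VIM zones ["nova", "zone-a"], VNFD zones ["zone-a", "nova"]: every VNFD name
#   exists at the VIM, so availability_zone_list[index] is returned.
#   VNFD zones ["az1", "az2"]: "az1" is unknown at the VIM, so matching falls
#   back to position and vim_availability_zones[index] is returned instead.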
1861 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1862 """Fill up the security_groups in the port_dict.
1863
1864 Args:
1865 net (dict): Network details
1866 port_dict (dict): Port details
1867
1868 """
1869 if (
1870 self.config.get("security_groups")
1871 and net.get("port_security") is not False
1872 and not self.config.get("no_port_security_extension")
1873 ):
1874 if not self.security_groups_id:
1875 self._get_ids_from_name()
1876
1877 port_dict["security_groups"] = self.security_groups_id
1878
1879 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1880 """Fill up the network binding depending on network type in the port_dict.
1881
1882 Args:
1883 net (dict): Network details
1884 port_dict (dict): Port details
1885
1886 """
1887 if not net.get("type"):
1888 raise vimconn.VimConnException("Type is missing in the network details.")
1889
1890 if net["type"] == "virtual":
1891 pass
1892
1893 # For VF
1894 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1895
1896 port_dict["binding:vnic_type"] = "direct"
1897
1898 # VIO specific Changes
1899 if self.vim_type == "VIO":
1900 # Need to create port with port_security_enabled = False and no-security-groups
1901 port_dict["port_security_enabled"] = False
1902 port_dict["provider_security_groups"] = []
1903 port_dict["security_groups"] = []
1904
1905 else:
1906 # For PT PCI-PASSTHROUGH
1907 port_dict["binding:vnic_type"] = "direct-physical"
1908
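# For reference, an SR-IOV ("VF") interface on a VIO VIM leaves
# _prepare_port_dict_binding() with a port_dict similar to (values illustrative):
#   {"network_id": "<net-uuid>", "name": "eth1", "admin_state_up": True,
#    "binding:vnic_type": "direct", "port_security_enabled": False,
#    "provider_security_groups": [], "security_groups": []}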
1909 @staticmethod
1910 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1911 """Set the "ip" parameter in net dictionary.
1912
1913 Args:
1914 new_port (dict): New created port
1915 net (dict): Network details
1916
1917 """
1918 fixed_ips = new_port["port"].get("fixed_ips")
1919
1920 if fixed_ips:
1921 net["ip"] = fixed_ips[0].get("ip_address")
1922 else:
1923 net["ip"] = None
1924
1925 @staticmethod
1926 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1927 """Fill up the mac_address and fixed_ips in port_dict.
1928
1929 Args:
1930 net (dict): Network details
1931 port_dict (dict): Port details
1932
1933 """
1934 if net.get("mac_address"):
1935 port_dict["mac_address"] = net["mac_address"]
1936
1937 if net.get("ip_address"):
1938 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1939 # TODO add "subnet_id": <subnet_id>
1940
1941 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1942 """Create new port using neutron.
1943
1944 Args:
1945 port_dict (dict): Port details
1946 created_items (dict): All created items
1947 net (dict): Network details
1948
1949 Returns:
1950 new_port (dict): New created port
1951
1952 """
1953 new_port = self.neutron.create_port({"port": port_dict})
1954 created_items["port:" + str(new_port["port"]["id"])] = True
1955 net["mac_adress"] = new_port["port"]["mac_address"]
1956 net["vim_id"] = new_port["port"]["id"]
1957
1958 return new_port
1959
1960 def _create_port(
1961 self, net: dict, name: str, created_items: dict
1962 ) -> Tuple[dict, dict]:
1963 """Create port using net details.
1964
1965 Args:
1966 net (dict): Network details
1967 name (str): Name to be used as network name if net dict does not include name
1968 created_items (dict): All created items
1969
1970 Returns:
1971 new_port, port: The newly created port and the port dict used for the nics list
1972
1973 """
1974
1975 port_dict = {
1976 "network_id": net["net_id"],
1977 "name": net.get("name"),
1978 "admin_state_up": True,
1979 }
1980
1981 if not port_dict["name"]:
1982 port_dict["name"] = name
1983
1984 self._prepare_port_dict_security_groups(net, port_dict)
1985
1986 self._prepare_port_dict_binding(net, port_dict)
1987
1988 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1989
1990 new_port = self._create_new_port(port_dict, created_items, net)
1991
1992 vimconnector._set_fixed_ip(new_port, net)
1993
1994 port = {"port-id": new_port["port"]["id"]}
1995
1996 if float(self.nova.api_version.get_string()) >= 2.32:
1997 port["tag"] = new_port["port"]["name"]
1998
1999 return new_port, port
2000
2001 def _prepare_network_for_vminstance(
2002 self,
2003 name: str,
2004 net_list: list,
2005 created_items: dict,
2006 net_list_vim: list,
2007 external_network: list,
2008 no_secured_ports: list,
2009 ) -> None:
2010 """Create port and fill up net dictionary for new VM instance creation.
2011
2012 Args:
2013 name (str): Name of network
2014 net_list (list): List of networks
2015 created_items (dict): All created items belonging to the VM
2016 net_list_vim (list): List of ports
2017 external_network (list): List of external-networks
2018 no_secured_ports (list): Port security disabled ports
2019 """
2020
2021 self._reload_connection()
2022
2023 for net in net_list:
2024 # Skip non-connected iface
2025 if not net.get("net_id"):
2026 continue
2027
2028 new_port, port = self._create_port(net, name, created_items)
2029
2030 net_list_vim.append(port)
2031
2032 if net.get("floating_ip", False):
2033 net["exit_on_floating_ip_error"] = True
2034 external_network.append(net)
2035
2036 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2037 net["exit_on_floating_ip_error"] = False
2038 external_network.append(net)
2039 net["floating_ip"] = self.config.get("use_floating_ip")
2040
2041 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2042 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2043 if net.get("port_security") is False and not self.config.get(
2044 "no_port_security_extension"
2045 ):
2046 no_secured_ports.append(
2047 (
2048 new_port["port"]["id"],
2049 net.get("port_security_disable_strategy"),
2050 )
2051 )
2052
2053 def _prepare_persistent_root_volumes(
2054 self,
2055 name: str,
2056 vm_av_zone: str,
2057 disk: dict,
2058 base_disk_index: int,
2059 block_device_mapping: dict,
2060 existing_vim_volumes: list,
2061 created_items: dict,
2062 ) -> Optional[str]:
2063 """Prepare persistent root volumes for new VM instance.
2064
2065 Args:
2066 name (str): Name of VM instance
2067 vm_av_zone (str): Availability zone of the VM
2068 disk (dict): Disk details
2069 base_disk_index (int): Disk index
2070 block_device_mapping (dict): Block device details
2071 existing_vim_volumes (list): Existing disk details
2072 created_items (dict): All created items belonging to the VM
2073
2074 Returns:
2075 boot_volume_id (str): ID of boot volume
2076
2077 """
2078 # Disk may include only vim_volume_id or only vim_id.
2079 # Use existing persistent root volume finding with volume_id or vim_id
2080 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2081
2082 if disk.get(key_id):
2083
2084 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2085 existing_vim_volumes.append({"id": disk[key_id]})
2086
2087 else:
2088 # Create persistent root volume
2089 volume = self.cinder.volumes.create(
2090 size=disk["size"],
2091 name=name + "vd" + chr(base_disk_index),
2092 imageRef=disk["image_id"],
2093 # Make sure volume is in the same AZ as the VM to be attached to
2094 availability_zone=vm_av_zone,
2095 )
2096 boot_volume_id = volume.id
2097 self.update_block_device_mapping(
2098 volume=volume,
2099 block_device_mapping=block_device_mapping,
2100 base_disk_index=base_disk_index,
2101 disk=disk,
2102 created_items=created_items,
2103 )
2104
2105 return boot_volume_id
2106
2107 @staticmethod
2108 def update_block_device_mapping(
2109 volume: object,
2110 block_device_mapping: dict,
2111 base_disk_index: int,
2112 disk: dict,
2113 created_items: dict,
2114 ) -> None:
2115 """Add volume information to block device mapping dict.
2116 Args:
2117 volume (object): Created volume object
2118 block_device_mapping (dict): Block device details
2119 base_disk_index (int): Disk index
2120 disk (dict): Disk details
2121 created_items (dict): All created items belonging to the VM
2122 """
2123 if not volume:
2124 raise vimconn.VimConnException("Volume is empty.")
2125
2126 if not hasattr(volume, "id"):
2127 raise vimconn.VimConnException(
2128 "Created volume is not valid, does not have id attribute."
2129 )
2130
2131 volume_txt = "volume:" + str(volume.id)
2132 if disk.get("keep"):
2133 volume_txt += ":keep"
2134 created_items[volume_txt] = True
2135 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2136
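# Example outcome of update_block_device_mapping() (uuid illustrative): for the
# second disk (base_disk_index == ord("b")) of a volume created with disk["keep"]:
#   created_items["volume:<uuid>:keep"] = True
#   block_device_mapping["vdb"] = "<uuid>"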
2137 def _prepare_non_root_persistent_volumes(
2138 self,
2139 name: str,
2140 disk: dict,
2141 vm_av_zone: str,
2142 block_device_mapping: dict,
2143 base_disk_index: int,
2144 existing_vim_volumes: list,
2145 created_items: dict,
2146 ) -> None:
2147 """Prepare persistent volumes for new VM instance.
2148
2149 Args:
2150 name (str): Name of VM instance
2151 disk (dict): Disk details
2152 vm_av_zone (str): Availability zone of the VM
2153 block_device_mapping (dict): Block device details
2154 base_disk_index (int): Disk index
2155 existing_vim_volumes (list): Existing disk details
2156 created_items (dict): All created items belonging to the VM
2157 """
2158 # Non-root persistent volumes
2159 # Disk may include only vim_volume_id or only vim_id.
2160 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2161
2162 if disk.get(key_id):
2163
2164 # Use existing persistent volume
2165 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2166 existing_vim_volumes.append({"id": disk[key_id]})
2167
2168 else:
2169 # Create persistent volume
2170 volume = self.cinder.volumes.create(
2171 size=disk["size"],
2172 name=name + "vd" + chr(base_disk_index),
2173 # Make sure volume is in the same AZ as the VM to be attached to
2174 availability_zone=vm_av_zone,
2175 )
2176 self.update_block_device_mapping(
2177 volume=volume,
2178 block_device_mapping=block_device_mapping,
2179 base_disk_index=base_disk_index,
2180 disk=disk,
2181 created_items=created_items,
2182 )
2183
2184 def _wait_for_created_volumes_availability(
2185 self, elapsed_time: int, created_items: dict
2186 ) -> Optional[int]:
2187 """Wait till created volumes become available.
2188
2189 Args:
2190 elapsed_time (int): Passed time while waiting
2191 created_items (dict): All created items belonging to the VM
2192
2193 Returns:
2194 elapsed_time (int): Time spent while waiting
2195
2196 """
2197
2198 while elapsed_time < volume_timeout:
2199 for created_item in created_items:
2200 v, volume_id = (
2201 created_item.split(":")[0],
2202 created_item.split(":")[1],
2203 )
2204 if v == "volume":
2205 if self.cinder.volumes.get(volume_id).status != "available":
2206 break
2207 else:
2208 # All ready: break from while
2209 break
2210
2211 time.sleep(5)
2212 elapsed_time += 5
2213
2214 return elapsed_time
2215
2216 def _wait_for_existing_volumes_availability(
2217 self, elapsed_time: int, existing_vim_volumes: list
2218 ) -> Optional[int]:
2219 """Wait till existing volumes become available.
2220
2221 Args:
2222 elapsed_time (int): Passed time while waiting
2223 existing_vim_volumes (list): Existing volume details
2224
2225 Returns:
2226 elapsed_time (int): Time spent while waiting
2227
2228 """
2229
2230 while elapsed_time < volume_timeout:
2231 for volume in existing_vim_volumes:
2232 if self.cinder.volumes.get(volume["id"]).status != "available":
2233 break
2234 else: # all ready: break from while
2235 break
2236
2237 time.sleep(5)
2238 elapsed_time += 5
2239
2240 return elapsed_time
2241
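# Both waiters above rely on Python's for/else: the else branch runs only when
# the for loop completes without hitting break, i.e. every volume reported
# "available". A standalone sketch of the same idiom (get_statuses is a
# hypothetical helper):
#
#   while elapsed_time < volume_timeout:
#       for status in get_statuses():
#           if status != "available":
#               break
#       else:
#           break  # all available: leave the while loop
#       time.sleep(5)
#       elapsed_time += 5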
2242 def _prepare_disk_for_vminstance(
2243 self,
2244 name: str,
2245 existing_vim_volumes: list,
2246 created_items: dict,
2247 vm_av_zone: str,
2248 block_device_mapping: dict,
2249 disk_list: list = None,
2250 ) -> None:
2251 """Prepare all volumes for new VM instance.
2252
2253 Args:
2254 name (str): Name of Instance
2255 existing_vim_volumes (list): List of existing volumes
2256 created_items (dict): All created items belonging to the VM
2257 vm_av_zone (str): VM availability zone
2258 block_device_mapping (dict): Block devices to be attached to VM
2259 disk_list (list): List of disks
2260
2261 """
2262 # Create additional volumes in case these are present in disk_list
2263 base_disk_index = ord("b")
2264 boot_volume_id = None
2265 elapsed_time = 0
2266
2267 for disk in disk_list:
2268 if "image_id" in disk:
2269 # Root persistent volume
2270 base_disk_index = ord("a")
2271 boot_volume_id = self._prepare_persistent_root_volumes(
2272 name=name,
2273 vm_av_zone=vm_av_zone,
2274 disk=disk,
2275 base_disk_index=base_disk_index,
2276 block_device_mapping=block_device_mapping,
2277 existing_vim_volumes=existing_vim_volumes,
2278 created_items=created_items,
2279 )
2280 else:
2281 # Non-root persistent volume
2282 self._prepare_non_root_persistent_volumes(
2283 name=name,
2284 disk=disk,
2285 vm_av_zone=vm_av_zone,
2286 block_device_mapping=block_device_mapping,
2287 base_disk_index=base_disk_index,
2288 existing_vim_volumes=existing_vim_volumes,
2289 created_items=created_items,
2290 )
2291 base_disk_index += 1
2292
2293 # Wait until created volumes are with status available
2294 elapsed_time = self._wait_for_created_volumes_availability(
2295 elapsed_time, created_items
2296 )
2297 # Wait until existing volumes in vim are with status available
2298 elapsed_time = self._wait_for_existing_volumes_availability(
2299 elapsed_time, existing_vim_volumes
2300 )
2301 # If we exceeded the timeout rollback
2302 if elapsed_time >= volume_timeout:
2303 raise vimconn.VimConnException(
2304 "Timeout creating volumes for instance " + name,
2305 http_code=vimconn.HTTP_Request_Timeout,
2306 )
2307 if boot_volume_id:
2308 self.cinder.volumes.set_bootable(boot_volume_id, True)
2309
2310 def _find_the_external_network_for_floating_ip(self):
2311 """Get the external network ip in order to create floating IP.
2312
2313 Returns:
2314 pool_id (str): External network pool ID
2315
2316 """
2317
2318 # Find the external network
2319 external_nets = list()
2320
2321 for net in self.neutron.list_networks()["networks"]:
2322 if net["router:external"]:
2323 external_nets.append(net)
2324
2325 if len(external_nets) == 0:
2326 raise vimconn.VimConnException(
2327 "Cannot create floating_ip automatically since "
2328 "no external network is present",
2329 http_code=vimconn.HTTP_Conflict,
2330 )
2331
2332 if len(external_nets) > 1:
2333 raise vimconn.VimConnException(
2334 "Cannot create floating_ip automatically since "
2335 "multiple external networks are present",
2336 http_code=vimconn.HTTP_Conflict,
2337 )
2338
2339 # Pool ID
2340 return external_nets[0].get("id")
2341
2342 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2343 """Trigger neutron to create a new floating IP using external network ID.
2344
2345 Args:
2346 param (dict): Input parameters to create a floating IP
2347 created_items (dict): All created items belonging to the new VM instance
2348
2349 Raises:
2350
2351 VimConnException
2352 """
2353 try:
2354 self.logger.debug("Creating floating IP")
2355 new_floating_ip = self.neutron.create_floatingip(param)
2356 free_floating_ip = new_floating_ip["floatingip"]["id"]
2357 created_items["floating_ip:" + str(free_floating_ip)] = True
2358
2359 except Exception as e:
2360 raise vimconn.VimConnException(
2361 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2362 http_code=vimconn.HTTP_Conflict,
2363 )
2364
2365 def _create_floating_ip(
2366 self, floating_network: dict, server: object, created_items: dict
2367 ) -> None:
2368 """Get the available Pool ID and create a new floating IP.
2369
2370 Args:
2371 floating_network (dict): Dict including external network ID
2372 server (object): Server object
2373 created_items (dict): All created items belonging to the new VM instance
2374
2375 """
2376
2377 # Pool_id is available
2378 if (
2379 isinstance(floating_network["floating_ip"], str)
2380 and floating_network["floating_ip"].lower() != "true"
2381 ):
2382 pool_id = floating_network["floating_ip"]
2383
2384 # Find the Pool_id
2385 else:
2386 pool_id = self._find_the_external_network_for_floating_ip()
2387
2388 param = {
2389 "floatingip": {
2390 "floating_network_id": pool_id,
2391 "tenant_id": server.tenant_id,
2392 }
2393 }
2394
2395 self._neutron_create_float_ip(param, created_items)
2396
2397 def _find_floating_ip(
2398 self,
2399 server: object,
2400 floating_ips: list,
2401 floating_network: dict,
2402 ) -> Optional[str]:
2403 """Find the available free floating IPs if there are.
2404
2405 Args:
2406 server (object): Server object
2407 floating_ips (list): List of floating IPs
2408 floating_network (dict): Details of floating network such as ID
2409
2410 Returns:
2411 free_floating_ip (str): ID of a free floating IP, if found
2412
2413 """
2414 for fip in floating_ips:
2415 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2416 continue
2417
2418 if isinstance(floating_network["floating_ip"], str):
2419 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2420 continue
2421
2422 return fip["id"]
2423
2424 def _assign_floating_ip(
2425 self, free_floating_ip: str, floating_network: dict
2426 ) -> Dict:
2427 """Assign the free floating ip address to port.
2428
2429 Args:
2430 free_floating_ip (str): Floating IP to be assigned
2431 floating_network (dict): ID of floating network
2432
2433 Returns:
2434 fip (dict): Floating IP details
2435
2436 """
2437 # The vim_id key contains the neutron.port_id
2438 self.neutron.update_floatingip(
2439 free_floating_ip,
2440 {"floatingip": {"port_id": floating_network["vim_id"]}},
2441 )
2442 # Guard against race conditions: wait 5 seconds and then re-read to verify the IP was not re-assigned
2443 time.sleep(5)
2444
2445 return self.neutron.show_floatingip(free_floating_ip)
2446
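# The Neutron payload sent by _assign_floating_ip() above, for reference
# (port id illustrative):
#   {"floatingip": {"port_id": "d0b2..."}}
# Updating with {"floatingip": {"port_id": None}} would instead disassociate
# the floating IP from its port.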
2447 def _get_free_floating_ip(
2448 self, server: object, floating_network: dict
2449 ) -> Optional[str]:
2450 """Get the free floating IP address.
2451
2452 Args:
2453 server (object): Server Object
2454 floating_network (dict): Floating network details
2455
2456 Returns:
2457 free_floating_ip (str): ID of a free floating IP, if any
2458
2459 """
2460
2461 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2462
2463 # Randomize
2464 random.shuffle(floating_ips)
2465
2466 return self._find_floating_ip(server, floating_ips, floating_network)
2467
2468 def _prepare_external_network_for_vminstance(
2469 self,
2470 external_network: list,
2471 server: object,
2472 created_items: dict,
2473 vm_start_time: float,
2474 ) -> None:
2475 """Assign floating IP address for VM instance.
2476
2477 Args:
2478 external_network (list): ID of External network
2479 server (object): Server Object
2480 created_items (dict): All created items belonging to the new VM instance
2481 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2482
2483 Raises:
2484 VimConnException
2485
2486 """
2487 for floating_network in external_network:
2488 try:
2489 assigned = False
2490 floating_ip_retries = 3
2491 # When RO runs in HA, two RO instances may try to assign the same floating IP,
2492 # so retry several times
2493 while not assigned:
2494
2495 free_floating_ip = self._get_free_floating_ip(
2496 server, floating_network
2497 )
2498
2499 if not free_floating_ip:
2500 self._create_floating_ip(floating_network, server, created_items)
2501 # pick up the newly created IP on the next loop iteration
2502 continue
2503
2504 try:
2505 # For race condition ensure not already assigned
2506 fip = self.neutron.show_floatingip(free_floating_ip)
2507
2508 if fip["floatingip"].get("port_id"):
2509 continue
2510
2511 # Assign floating ip
2512 fip = self._assign_floating_ip(
2513 free_floating_ip, floating_network
2514 )
2515
2516 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2517 self.logger.warning(
2518 "floating_ip {} re-assigned to other port".format(
2519 free_floating_ip
2520 )
2521 )
2522 continue
2523
2524 self.logger.debug(
2525 "Assigned floating_ip {} to VM {}".format(
2526 free_floating_ip, server.id
2527 )
2528 )
2529
2530 assigned = True
2531
2532 except Exception as e:
2533 # OpenStack needs some time after VM creation to assign an IP, so retry on failure
2534 vm_status = self.nova.servers.get(server.id).status
2535
2536 if vm_status not in ("ACTIVE", "ERROR"):
2537 if time.time() - vm_start_time < server_timeout:
2538 time.sleep(5)
2539 continue
2540 elif floating_ip_retries > 0:
2541 floating_ip_retries -= 1
2542 continue
2543
2544 raise vimconn.VimConnException(
2545 "Cannot create floating_ip: {} {}".format(
2546 type(e).__name__, e
2547 ),
2548 http_code=vimconn.HTTP_Conflict,
2549 )
2550
2551 except Exception as e:
2552 if not floating_network["exit_on_floating_ip_error"]:
2553 self.logger.error("Cannot create floating_ip. %s", str(e))
2554 continue
2555
2556 raise
2557
2558 def _update_port_security_for_vminstance(
2559 self,
2560 no_secured_ports: list,
2561 server: object,
2562 ) -> None:
2563 """Updates the port security according to no_secured_ports list.
2564
2565 Args:
2566 no_secured_ports (list): List of ports that security will be disabled
2567 server (object): Server Object
2568
2569 Raises:
2570 VimConnException
2571
2572 """
2573 # Wait until the VM is active and then disable the port-security
2574 if no_secured_ports:
2575 self.__wait_for_vm(server.id, "ACTIVE")
2576
2577 for port in no_secured_ports:
2578 port_update = {
2579 "port": {"port_security_enabled": False, "security_groups": None}
2580 }
2581
2582 if port[1] == "allow-address-pairs":
2583 port_update = {
2584 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2585 }
2586
2587 try:
2588 self.neutron.update_port(port[0], port_update)
2589
2590 except Exception:
2591
2592 raise vimconn.VimConnException(
2593 "It was not possible to disable port security for port {}".format(
2594 port[0]
2595 )
2596 )
2597
2598 def new_vminstance(
2599 self,
2600 name: str,
2601 description: str,
2602 start: bool,
2603 image_id: str,
2604 flavor_id: str,
2605 affinity_group_list: list,
2606 net_list: list,
2607 cloud_config=None,
2608 disk_list=None,
2609 availability_zone_index=None,
2610 availability_zone_list=None,
2611 ) -> tuple:
2612 """Adds a VM instance to VIM.
2613
2614 Args:
2615 name (str): name of VM
2616 description (str): description
2617 start (bool): indicates if VM must start or boot in pause mode. Ignored
2618 image_id (str): image uuid
2619 flavor_id (str): flavor uuid
2620 affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignored if empty.
2621 net_list (list): list of interfaces, each one is a dictionary with:
2622 name: name of network
2623 net_id: network uuid to connect
2624 vpci: virtual PCI address to assign, ignored because OpenStack lacks support #TODO
2625 model: interface model, ignored #TODO
2626 mac_address: used for SR-IOV ifaces #TODO for other types
2627 use: 'data', 'bridge', 'mgmt'
2628 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2629 vim_id: filled/added by this function
2630 floating_ip: True/False (or it can be None)
2631 port_security: True/False
2632 cloud_config (dict): (optional) dictionary with:
2633 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2634 users: (optional) list of users to be inserted, each item is a dict with:
2635 name: (mandatory) user name,
2636 key-pairs: (optional) list of strings with the public key to be inserted to the user
2637 user-data: (optional) string is a text script to be passed directly to cloud-init
2638 config-files: (optional). List of files to be transferred. Each item is a dict with:
2639 dest: (mandatory) string with the destination absolute path
2640 encoding: (optional, by default text). Can be one of:
2641 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2642 content : (mandatory) string with the content of the file
2643 permissions: (optional) string with file permissions, typically octal notation '0644'
2644 owner: (optional) file owner, string with the format 'owner:group'
2645 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2646 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2647 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2648 size: (mandatory) string with the size of the disk in GB
2649 vim_id: (optional) should use this existing volume id
2650 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
2651 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2652 availability_zone_index is None
2653 #TODO ip, security groups
2654
2655 Returns:
2656 A tuple with the instance identifier and created_items or raises an exception on error
2657 created_items can be None or a dictionary where this method can include key-values that will be passed to
2658 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2659 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2660 as not present.
2661
2662 """
2663 self.logger.debug(
2664 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2665 image_id,
2666 flavor_id,
2667 str(net_list),
2668 )
2669
2670 try:
2671 server = None
2672 created_items = {}
2673 net_list_vim = []
2674 # list of external networks to be connected to instance, later on used to create floating_ip
2675 external_network = []
2676 # List of ports with port-security disabled
2677 no_secured_ports = []
2678 block_device_mapping = {}
2679 existing_vim_volumes = []
2680 server_group_id = None
2681 scheduler_hints = {}
2682
2683 # Check the Openstack Connection
2684 self._reload_connection()
2685
2686 # Prepare network list
2687 self._prepare_network_for_vminstance(
2688 name=name,
2689 net_list=net_list,
2690 created_items=created_items,
2691 net_list_vim=net_list_vim,
2692 external_network=external_network,
2693 no_secured_ports=no_secured_ports,
2694 )
2695
2696 # Cloud config
2697 config_drive, userdata = self._create_user_data(cloud_config)
2698
2699 # Get availability Zone
2700 vm_av_zone = self._get_vm_availability_zone(
2701 availability_zone_index, availability_zone_list
2702 )
2703
2704 if disk_list:
2705 # Prepare disks
2706 self._prepare_disk_for_vminstance(
2707 name=name,
2708 existing_vim_volumes=existing_vim_volumes,
2709 created_items=created_items,
2710 vm_av_zone=vm_av_zone,
2711 block_device_mapping=block_device_mapping,
2712 disk_list=disk_list,
2713 )
2714
2715 if affinity_group_list:
2716 # Only first id on the list will be used. Openstack restriction
2717 server_group_id = affinity_group_list[0]["affinity_group_id"]
2718 scheduler_hints["group"] = server_group_id
2719
2720 self.logger.debug(
2721 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2722 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2723 "block_device_mapping={}, server_group={})".format(
2724 name,
2725 image_id,
2726 flavor_id,
2727 net_list_vim,
2728 self.config.get("security_groups"),
2729 vm_av_zone,
2730 self.config.get("keypair"),
2731 userdata,
2732 config_drive,
2733 block_device_mapping,
2734 server_group_id,
2735 )
2736 )
2737
2738 # Create VM
2739 server = self.nova.servers.create(
2740 name=name,
2741 image=image_id,
2742 flavor=flavor_id,
2743 nics=net_list_vim,
2744 security_groups=self.config.get("security_groups"),
2745 # TODO remove security_groups in future versions. Already at neutron port
2746 availability_zone=vm_av_zone,
2747 key_name=self.config.get("keypair"),
2748 userdata=userdata,
2749 config_drive=config_drive,
2750 block_device_mapping=block_device_mapping,
2751 scheduler_hints=scheduler_hints,
2752 )
2753
2754 vm_start_time = time.time()
2755
2756 self._update_port_security_for_vminstance(no_secured_ports, server)
2757
2758 self._prepare_external_network_for_vminstance(
2759 external_network=external_network,
2760 server=server,
2761 created_items=created_items,
2762 vm_start_time=vm_start_time,
2763 )
2764
2765 return server.id, created_items
2766
2767 except Exception as e:
2768 server_id = None
2769 if server:
2770 server_id = server.id
2771
2772 try:
2773 created_items = self.remove_keep_tag_from_persistent_volumes(
2774 created_items
2775 )
2776
2777 self.delete_vminstance(server_id, created_items)
2778
2779 except Exception as e2:
2780 self.logger.error("new_vminstance rollback fail {}".format(e2))
2781
2782 self._format_exception(e)
2783
2784 @staticmethod
2785 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2786 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2787
2788 Args:
2789 created_items (dict): All created items belonging to the VM
2790
2791 Returns:
2792 updated_created_items (dict): Dict which does not include keep flag for volumes.
2793
2794 """
2795 return {
2796 key.replace(":keep", ""): value for (key, value) in created_items.items()
2797 }
2798
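# Example for remove_keep_tag_from_persistent_volumes() (keys illustrative):
#   {"volume:uuid1:keep": True, "port:uuid2": True}
#   -> {"volume:uuid1": True, "port:uuid2": True}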
2799 def get_vminstance(self, vm_id):
2800 """Returns the VM instance information from VIM"""
2801 # self.logger.debug("Getting VM from VIM")
2802 try:
2803 self._reload_connection()
2804 server = self.nova.servers.find(id=vm_id)
2805 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2806
2807 return server.to_dict()
2808 except (
2809 ksExceptions.ClientException,
2810 nvExceptions.ClientException,
2811 nvExceptions.NotFound,
2812 ConnectionError,
2813 ) as e:
2814 self._format_exception(e)
2815
2816 def get_vminstance_console(self, vm_id, console_type="novnc"):
2817 """
2818 Get a console for the virtual machine
2819 Params:
2820 vm_id: uuid of the VM
2821 console_type, can be:
2822 "novnc" (by default), "xvpvnc" for VNC types,
2823 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2824 Returns dict with the console parameters:
2825 protocol: ssh, ftp, http, https, ...
2826 server: usually ip address
2827 port: the http, ssh, ... port
2828 suffix: extra text, e.g. the http path and query string
2829 """
2830 self.logger.debug("Getting VM CONSOLE from VIM")
2831
2832 try:
2833 self._reload_connection()
2834 server = self.nova.servers.find(id=vm_id)
2835
2836 if console_type is None or console_type == "novnc":
2837 console_dict = server.get_vnc_console("novnc")
2838 elif console_type == "xvpvnc":
2839 console_dict = server.get_vnc_console(console_type)
2840 elif console_type == "rdp-html5":
2841 console_dict = server.get_rdp_console(console_type)
2842 elif console_type == "spice-html5":
2843 console_dict = server.get_spice_console(console_type)
2844 else:
2845 raise vimconn.VimConnException(
2846 "console type '{}' not allowed".format(console_type),
2847 http_code=vimconn.HTTP_Bad_Request,
2848 )
2849
2850 console_dict1 = console_dict.get("console")
2851
2852 if console_dict1:
2853 console_url = console_dict1.get("url")
2854
2855 if console_url:
2856 # parse console_url
2857 protocol_index = console_url.find("//")
2858 suffix_index = (
2859 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2860 )
2861 port_index = (
2862 console_url[protocol_index + 2 : suffix_index].find(":")
2863 + protocol_index
2864 + 2
2865 )
2866
2867 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2868 return (
2869 -vimconn.HTTP_Internal_Server_Error,
2870 "Unexpected response from VIM",
2871 )
2872
2873 console_dict = {
2874 "protocol": console_url[0:protocol_index],
2875 "server": console_url[protocol_index + 2 : port_index],
2876 "port": console_url[port_index:suffix_index],
2877 "suffix": console_url[suffix_index + 1 :],
2878 }
2879 protocol_index += 2
2880
2881 return console_dict
2882 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2883 except (
2884 nvExceptions.NotFound,
2885 ksExceptions.ClientException,
2886 nvExceptions.ClientException,
2887 nvExceptions.BadRequest,
2888 ConnectionError,
2889 ) as e:
2890 self._format_exception(e)
2891
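# Parsing example for the index arithmetic in get_vminstance_console() above
# (URL illustrative):
#   "http://vim.example:6080/vnc_auto.html?token=abc"
#   -> {"protocol": "http", "server": "vim.example", "port": 6080,
#       "suffix": "vnc_auto.html?token=abc"}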
2892 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2893 """Neutron delete ports by id.
2894 Args:
2895 k_id (str): Port id in the VIM
2896 """
2897 try:
2898
2899 port_dict = self.neutron.list_ports()
2900 existing_ports = [port["id"] for port in port_dict.get("ports", [])]
2901
2902 if k_id in existing_ports:
2903 self.neutron.delete_port(k_id)
2904
2905 except Exception as e:
2906
2907 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2908
2909 def _delete_volumes_by_id_wth_cinder(
2910 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2911 ) -> bool:
2912 """Cinder delete volume by id.
2913 Args:
2914 k (str): Full item name in created_items
2915 k_id (str): ID of the volume in the VIM
2916 volumes_to_hold (list): Volumes not to delete
2917 created_items (dict): All created items belonging to the VM
2918 """
2919 try:
2920 if k_id in volumes_to_hold:
2921 return
2922
2923 if self.cinder.volumes.get(k_id).status != "available":
2924 return True
2925
2926 else:
2927 self.cinder.volumes.delete(k_id)
2928 created_items[k] = None
2929
2930 except Exception as e:
2931 self.logger.error(
2932 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2933 )
2934
2935 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2936 """Neutron delete floating ip by id.
2937 Args:
2938 k (str): Full item name in created_items
2939 k_id (str): ID of floating ip in VIM
2940 created_items (dict): All created items belonging to the VM
2941 """
2942 try:
2943 self.neutron.delete_floatingip(k_id)
2944 created_items[k] = None
2945
2946 except Exception as e:
2947 self.logger.error(
2948 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2949 )
2950
2951 @staticmethod
2952 def _get_item_name_id(k: str) -> Tuple[str, str]:
2953 k_item, _, k_id = k.partition(":")
2954 return k_item, k_id
2955
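# e.g. _get_item_name_id("floating_ip:<uuid>") -> ("floating_ip", "<uuid>").
# For keep-flagged volumes ("volume:<uuid>:keep") k_id keeps the ":keep"
# suffix, which is why delete_vminstance() strips such entries first via
# _extract_items_wth_keep_flag_from_created_items().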
2956 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2957 """Delete VM ports attached to the networks before deleting virtual machine.
2958 Args:
2959 created_items (dict): All created items belonging to the VM
2960 """
2961
2962 for k, v in created_items.items():
2963 if not v: # skip already deleted
2964 continue
2965
2966 try:
2967 k_item, k_id = self._get_item_name_id(k)
2968 if k_item == "port":
2969 self._delete_ports_by_id_wth_neutron(k_id)
2970
2971 except Exception as e:
2972 self.logger.error(
2973 "Error deleting port: {}: {}".format(type(e).__name__, e)
2974 )
2975
2976 def _delete_created_items(
2977 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2978 ) -> bool:
2979 """Delete Volumes and floating ip if they exist in created_items."""
2980 for k, v in created_items.items():
2981 if not v: # skip already deleted
2982 continue
2983
2984 try:
2985 k_item, k_id = self._get_item_name_id(k)
2986
2987 if k_item == "volume":
2988
2989 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2990 k, k_id, volumes_to_hold, created_items
2991 )
2992
2993 if unavailable_vol:
2994 keep_waiting = True
2995
2996 elif k_item == "floating_ip":
2997
2998 self._delete_floating_ip_by_id(k, k_id, created_items)
2999
3000 except Exception as e:
3001 self.logger.error("Error deleting {}: {}".format(k, e))
3002
3003 return keep_waiting
3004
3005 @staticmethod
3006 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3007 """Remove the volumes which has key flag from created_items
3008
3009 Args:
3010 created_items (dict): All created items belonging to the VM
3011
3012 Returns:
3013 created_items (dict): created_items without the keep-flagged persistent volumes
3014 """
3015 return {
3016 key: value
3017 for (key, value) in created_items.items()
3018 if len(key.split(":")) == 2
3019 }
3020
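# Example (keys illustrative):
#   {"volume:uuid1:keep": True, "volume:uuid2": True} -> {"volume:uuid2": True}
# so the keep-flagged volume survives the instance deletion.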
3021 def delete_vminstance(
3022 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3023 ) -> None:
3024 """Removes a VM instance from VIM. Returns the old identifier.
3025 Args:
3026 vm_id (str): Identifier of VM instance
3027 created_items (dict): All created items belonging to the VM
3028 volumes_to_hold (list): IDs of volumes that must not be deleted
3029 """
3030 if created_items is None:
3031 created_items = {}
3032 if volumes_to_hold is None:
3033 volumes_to_hold = []
3034
3035 try:
3036 created_items = self._extract_items_wth_keep_flag_from_created_items(
3037 created_items
3038 )
3039
3040 self._reload_connection()
3041
3042 # Delete VM ports attached to the networks before the virtual machine
3043 if created_items:
3044 self._delete_vm_ports_attached_to_network(created_items)
3045
3046 if vm_id:
3047 self.nova.servers.delete(vm_id)
3048
3049 # Although already detached, volumes must be in 'available' status before deletion.
3050 # This loop ensures that
3051 keep_waiting = True
3052 elapsed_time = 0
3053
3054 while keep_waiting and elapsed_time < volume_timeout:
3055 keep_waiting = False
3056
3057 # Delete volumes and floating IP.
3058 keep_waiting = self._delete_created_items(
3059 created_items, volumes_to_hold, keep_waiting
3060 )
3061
3062 if keep_waiting:
3063 time.sleep(1)
3064 elapsed_time += 1
3065
3066 except (
3067 nvExceptions.NotFound,
3068 ksExceptions.ClientException,
3069 nvExceptions.ClientException,
3070 ConnectionError,
3071 ) as e:
3072 self._format_exception(e)
3073
3074 def refresh_vms_status(self, vm_list):
3075 """Get the status of the virtual machines and their interfaces/ports
3076 Params: the list of VM identifiers
3077 Returns a dictionary with:
3078 vm_id: #VIM id of this Virtual Machine
3079 status: #Mandatory. Text with one of:
3080 # DELETED (not found at vim)
3081 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3082 # OTHER (Vim reported other status not understood)
3083 # ERROR (VIM indicates an ERROR status)
3084 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3085 # CREATING (on building process), ERROR
3086 # ACTIVE:NoMgmtIP (active, but none of its interfaces has an IP address)
3087 #
3088 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3089 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3090 interfaces:
3091 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3092 mac_address: #Text format XX:XX:XX:XX:XX:XX
3093 vim_net_id: #network id where this interface is connected
3094 vim_interface_id: #interface/port VIM id
3095 ip_address: #null, or text with IPv4, IPv6 address
3096 compute_node: #identification of compute node where PF,VF interface is allocated
3097 pci: #PCI address of the NIC that hosts the PF,VF
3098 vlan: #physical VLAN used for VF
3099 """
3100 vm_dict = {}
3101 self.logger.debug(
3102 "refresh_vms status: Getting tenant VM instance information from VIM"
3103 )
3104
3105 for vm_id in vm_list:
3106 vm = {}
3107
3108 try:
3109 vm_vim = self.get_vminstance(vm_id)
3110
3111 if vm_vim["status"] in vmStatus2manoFormat:
3112 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3113 else:
3114 vm["status"] = "OTHER"
3115 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3116
3117 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3118 vm_vim.pop("user_data", None)
3119 vm["vim_info"] = self.serialize(vm_vim)
3120
3121 vm["interfaces"] = []
3122 if vm_vim.get("fault"):
3123 vm["error_msg"] = str(vm_vim["fault"])
3124
3125 # get interfaces
3126 try:
3127 self._reload_connection()
3128 port_dict = self.neutron.list_ports(device_id=vm_id)
3129
3130 for port in port_dict["ports"]:
3131 interface = {}
3132 interface["vim_info"] = self.serialize(port)
3133 interface["mac_address"] = port.get("mac_address")
3134 interface["vim_net_id"] = port["network_id"]
3135 interface["vim_interface_id"] = port["id"]
3136 # check if OS-EXT-SRV-ATTR:host is there,
3137 # in case of non-admin credentials, it will be missing
3138
3139 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3140 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3141
3142 interface["pci"] = None
3143
3144 # check if binding:profile is there,
3145 # in case of non-admin credentials, it will be missing
3146 if port.get("binding:profile"):
3147 if port["binding:profile"].get("pci_slot"):
3148 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3149 # the slot to 0x00
3150 # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
3151 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3152 pci = port["binding:profile"]["pci_slot"]
3153 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3154 interface["pci"] = pci
3155
3156 interface["vlan"] = None
3157
3158 if port.get("binding:vif_details"):
3159 interface["vlan"] = port["binding:vif_details"].get("vlan")
3160
3161 # Get the VLAN from the network when it is not present in the port, for old
3162 # OpenStack versions and for cases where the VLAN is needed at PT
3163 if not interface["vlan"]:
3164 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3165 network = self.neutron.show_network(port["network_id"])
3166
3167 if (
3168 network["network"].get("provider:network_type")
3169 == "vlan"
3170 ):
3171 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3172 interface["vlan"] = network["network"].get(
3173 "provider:segmentation_id"
3174 )
3175
3176 ips = []
3177 # look for floating ip address
3178 try:
3179 floating_ip_dict = self.neutron.list_floatingips(
3180 port_id=port["id"]
3181 )
3182
3183 if floating_ip_dict.get("floatingips"):
3184 ips.append(
3185 floating_ip_dict["floatingips"][0].get(
3186 "floating_ip_address"
3187 )
3188 )
3189 except Exception:
3190 pass
3191
3192 for subnet in port["fixed_ips"]:
3193 ips.append(subnet["ip_address"])
3194
3195 interface["ip_address"] = ";".join(ips)
3196 vm["interfaces"].append(interface)
3197 except Exception as e:
3198 self.logger.error(
3199 "Error getting vm interface information {}: {}".format(
3200 type(e).__name__, e
3201 ),
3202 exc_info=True,
3203 )
3204 except vimconn.VimConnNotFoundException as e:
3205 self.logger.error("Exception getting vm status: %s", str(e))
3206 vm["status"] = "DELETED"
3207 vm["error_msg"] = str(e)
3208 except vimconn.VimConnException as e:
3209 self.logger.error("Exception getting vm status: %s", str(e))
3210 vm["status"] = "VIM_ERROR"
3211 vm["error_msg"] = str(e)
3212
3213 vm_dict[vm_id] = vm
3214
3215 return vm_dict
3216
3217 def action_vminstance(self, vm_id, action_dict, created_items={}):
3218 """Send and action over a VM instance from VIM
3219 Returns None or the console dict if the action was successfully sent to the VIM"""
3220 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3221
3222 try:
3223 self._reload_connection()
3224 server = self.nova.servers.find(id=vm_id)
3225
3226 if "start" in action_dict:
3227 if action_dict["start"] == "rebuild":
3228 server.rebuild(server.image["id"])
3229 else:
3230 if server.status == "PAUSED":
3231 server.unpause()
3232 elif server.status == "SUSPENDED":
3233 server.resume()
3234 elif server.status == "SHUTOFF":
3235 server.start()
3236 else:
3237 self.logger.debug(
3238 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3239 )
3240 raise vimconn.VimConnException(
3241 "Cannot 'start' instance while it is in active state",
3242 http_code=vimconn.HTTP_Bad_Request,
3243 )
3244
3245 elif "pause" in action_dict:
3246 server.pause()
3247 elif "resume" in action_dict:
3248 server.resume()
3249 elif "shutoff" in action_dict or "shutdown" in action_dict:
3250 self.logger.debug("server status %s", server.status)
3251 if server.status == "ACTIVE":
3252 server.stop()
3253 else:
3254 self.logger.debug("ERROR: VM is not in Active state")
3255 raise vimconn.VimConnException(
3256 "VM is not in active state, stop operation is not allowed",
3257 http_code=vimconn.HTTP_Bad_Request,
3258 )
3259 elif "forceOff" in action_dict:
3260 server.stop() # TODO
3261 elif "terminate" in action_dict:
3262 server.delete()
3263 elif "createImage" in action_dict:
3264 server.create_image()
3265 # "path":path_schema,
3266 # "description":description_schema,
3267 # "name":name_schema,
3268 # "metadata":metadata_schema,
3269 # "imageRef": id_schema,
3270 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3271 elif "rebuild" in action_dict:
3272 server.rebuild(server.image["id"])
3273 elif "reboot" in action_dict:
3274 server.reboot() # reboot_type="SOFT"
3275 elif "console" in action_dict:
3276 console_type = action_dict["console"]
3277
3278 if console_type is None or console_type == "novnc":
3279 console_dict = server.get_vnc_console("novnc")
3280 elif console_type == "xvpvnc":
3281 console_dict = server.get_vnc_console(console_type)
3282 elif console_type == "rdp-html5":
3283 console_dict = server.get_rdp_console(console_type)
3284 elif console_type == "spice-html5":
3285 console_dict = server.get_spice_console(console_type)
3286 else:
3287 raise vimconn.VimConnException(
3288 "console type '{}' not allowed".format(console_type),
3289 http_code=vimconn.HTTP_Bad_Request,
3290 )
3291
3292 try:
3293 console_url = console_dict["console"]["url"]
3294 # parse console_url
3295 protocol_index = console_url.find("//")
3296 suffix_index = (
3297 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3298 )
3299 port_index = (
3300 console_url[protocol_index + 2 : suffix_index].find(":")
3301 + protocol_index
3302 + 2
3303 )
3304
3305 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3306 raise vimconn.VimConnException(
3307 "Unexpected response from VIM " + str(console_dict)
3308 )
3309
3310 console_dict2 = {
3311 "protocol": console_url[0:protocol_index],
3312 "server": console_url[protocol_index + 2 : port_index],
3313 "port": int(console_url[port_index + 1 : suffix_index]),
3314 "suffix": console_url[suffix_index + 1 :],
3315 }
3316
3317 return console_dict2
3318 except Exception:
3319 raise vimconn.VimConnException(
3320 "Unexpected response from VIM " + str(console_dict)
3321 )
3322
3323 return None
3324 except (
3325 ksExceptions.ClientException,
3326 nvExceptions.ClientException,
3327 nvExceptions.NotFound,
3328 ConnectionError,
3329 ) as e:
3330 self._format_exception(e)
3331 # TODO insert exception vimconn.HTTP_Unauthorized
3332
3333 # ###### VIO Specific Changes #########
3334 def _generate_vlanID(self):
3335 """
3336 Method to get unused vlanID
3337 Args:
3338 None
3339 Returns:
3340 vlanID
3341 """
3342 # Get used VLAN IDs
3343 usedVlanIDs = []
3344 networks = self.get_network_list()
3345
3346 for net in networks:
3347 if net.get("provider:segmentation_id"):
3348 usedVlanIDs.append(net.get("provider:segmentation_id"))
3349
3350 used_vlanIDs = set(usedVlanIDs)
3351
3352 # find unused VLAN ID
3353 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3354 try:
3355 start_vlanid, end_vlanid = map(
3356 int, vlanID_range.replace(" ", "").split("-")
3357 )
3358
3359 for vlanID in range(start_vlanid, end_vlanid + 1):
3360 if vlanID not in used_vlanIDs:
3361 return vlanID
3362 except Exception as exp:
3363 raise vimconn.VimConnException(
3364 "Exception {} occurred while generating VLAN ID.".format(exp)
3365 )
3366 else:
3367 raise vimconn.VimConnConflictException(
3368 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3369 self.config.get("dataplane_net_vlan_range")
3370 )
3371 )
3372
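# Illustrative configuration driving the search in _generate_vlanID() above:
#   config["dataplane_net_vlan_range"] = ["2000-2099", "2150-2199"]
# If segmentation ids 2000 and 2001 are already in use, the method returns 2002.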
3373 def _generate_multisegment_vlanID(self):
3374 """
3375 Method to get unused vlanID
3376 Args:
3377 None
3378 Returns:
3379 vlanID
3380 """
3381 # Get used VLAN IDs
3382 usedVlanIDs = []
3383 networks = self.get_network_list()
3384 for net in networks:
3385 if net.get("provider:network_type") == "vlan" and net.get(
3386 "provider:segmentation_id"
3387 ):
3388 usedVlanIDs.append(net.get("provider:segmentation_id"))
3389 elif net.get("segments"):
3390 for segment in net.get("segments"):
3391 if segment.get("provider:network_type") == "vlan" and segment.get(
3392 "provider:segmentation_id"
3393 ):
3394 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3395
3396 used_vlanIDs = set(usedVlanIDs)
3397
3398 # find unused VLAN ID
3399 for vlanID_range in self.config.get("multisegment_vlan_range"):
3400 try:
3401 start_vlanid, end_vlanid = map(
3402 int, vlanID_range.replace(" ", "").split("-")
3403 )
3404
3405 for vlanID in range(start_vlanid, end_vlanid + 1):
3406 if vlanID not in used_vlanIDs:
3407 return vlanID
3408 except Exception as exp:
3409 raise vimconn.VimConnException(
3410 "Exception {} occurred while generating VLAN ID.".format(exp)
3411 )
3412 else:
3413 raise vimconn.VimConnConflictException(
3414 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3415 self.config.get("multisegment_vlan_range")
3416 )
3417 )
3418
3419 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3420 """
3421 Method to validate user given vlanID ranges
3422 Args: None
3423 Returns: None
3424 """
3425 for vlanID_range in input_vlan_range:
3426 vlan_range = vlanID_range.replace(" ", "")
3427 # validate format
3428 vlanID_pattern = r"\d+-\d+$"
3429 match_obj = re.match(vlanID_pattern, vlan_range)
3430 if not match_obj:
3431 raise vimconn.VimConnConflictException(
3432 "Invalid VLAN range for {}: {}.You must provide "
3433 "'{}' in format [start_ID - end_ID].".format(
3434 text_vlan_range, vlanID_range, text_vlan_range
3435 )
3436 )
3437
3438 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3439 if start_vlanid <= 0:
3440 raise vimconn.VimConnConflictException(
3441 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3442 "networks valid IDs are 1 to 4094 ".format(
3443 text_vlan_range, vlanID_range
3444 )
3445 )
3446
3447 if end_vlanid > 4094:
3448 raise vimconn.VimConnConflictException(
3449 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3450 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3451 text_vlan_range, vlanID_range
3452 )
3453 )
3454
3455 if start_vlanid > end_vlanid:
3456 raise vimconn.VimConnConflictException(
3457 "Invalid VLAN range for {}: {}. You must provide '{}'"
3458 " in format start_ID - end_ID and start_ID < end_ID ".format(
3459 text_vlan_range, vlanID_range, text_vlan_range
3460 )
3461 )
3462
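# Examples for _validate_vlan_ranges(): "1-4094" and "100-200" pass, while
# "0-100" (start is zero), "4000-5000" (end above 4094) and "300-200"
# (start greater than end) raise VimConnConflictException.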
3463 def get_hosts_info(self):
3464 """Get the information of deployed hosts
3465 Returns the hosts content"""
3466 if self.debug:
3467 print("osconnector: Getting Host info from VIM")
3468
3469 try:
3470 h_list = []
3471 self._reload_connection()
3472 hypervisors = self.nova.hypervisors.list()
3473
3474 for hype in hypervisors:
3475 h_list.append(hype.to_dict())
3476
3477 return 1, {"hosts": h_list}
3478 except nvExceptions.NotFound as e:
3479 error_value = -vimconn.HTTP_Not_Found
3480 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3481 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3482 error_value = -vimconn.HTTP_Bad_Request
3483 error_text = (
3484 type(e).__name__
3485 + ": "
3486 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3487 )
3488
3489 # TODO insert exception vimconn.HTTP_Unauthorized
3490 # if reaching here is because an exception
3491 self.logger.debug("get_hosts_info " + error_text)
3492
3493 return error_value, error_text
3494
3495 def get_hosts(self, vim_tenant):
3496 """Get the hosts and deployed instances
3497 Returns the hosts content"""
3498 r, hype_dict = self.get_hosts_info()
3499
3500 if r < 0:
3501 return r, hype_dict
3502
3503 hypervisors = hype_dict["hosts"]
3504
3505 try:
3506 servers = self.nova.servers.list()
3507 for hype in hypervisors:
3508 for server in servers:
3509 if (
3510 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3511 == hype["hypervisor_hostname"]
3512 ):
3513 if "vm" in hype:
3514 hype["vm"].append(server.id)
3515 else:
3516 hype["vm"] = [server.id]
3517
3518 return 1, hype_dict
3519 except nvExceptions.NotFound as e:
3520 error_value = -vimconn.HTTP_Not_Found
3521 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3522 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3523 error_value = -vimconn.HTTP_Bad_Request
3524 error_text = (
3525 type(e).__name__
3526 + ": "
3527 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3528 )
3529
3530 # TODO insert exception vimconn.HTTP_Unauthorized
3531 # if reaching here is because an exception
3532 self.logger.debug("get_hosts " + error_text)
3533
3534 return error_value, error_text
3535
3536 def new_affinity_group(self, affinity_group_data):
3537 """Adds a server group to VIM
3538 affinity_group_data contains a dictionary with information, keys:
3539 name: name in VIM for the server group
3540 type: affinity or anti-affinity
3541 scope: Only nfvi-node allowed
3542 Returns the server group identifier"""
3543 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3544
3545 try:
3546 name = affinity_group_data["name"]
3547 policy = affinity_group_data["type"]
3548
3549 self._reload_connection()
3550 new_server_group = self.nova.server_groups.create(name, policy)
3551
3552 return new_server_group.id
3553 except (
3554 ksExceptions.ClientException,
3555 nvExceptions.ClientException,
3556 ConnectionError,
3557 KeyError,
3558 ) as e:
3559 self._format_exception(e)
3560
3561 def get_affinity_group(self, affinity_group_id):
3562 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3563 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3564 try:
3565 self._reload_connection()
3566 server_group = self.nova.server_groups.find(id=affinity_group_id)
3567
3568 return server_group.to_dict()
3569 except (
3570 nvExceptions.NotFound,
3571 nvExceptions.ClientException,
3572 ksExceptions.ClientException,
3573 ConnectionError,
3574 ) as e:
3575 self._format_exception(e)
3576
3577 def delete_affinity_group(self, affinity_group_id):
3578 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3579 self.logger.debug("Getting server group '%s'", affinity_group_id)
3580 try:
3581 self._reload_connection()
3582 self.nova.server_groups.delete(affinity_group_id)
3583
3584 return affinity_group_id
3585 except (
3586 nvExceptions.NotFound,
3587 ksExceptions.ClientException,
3588 nvExceptions.ClientException,
3589 ConnectionError,
3590 ) as e:
3591 self._format_exception(e)
3592
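    # Hedged lifecycle sketch for the two methods above: fetch the server
    # group details (a dict with at least the group name and policy), then
    # delete it; delete_affinity_group() echoes the ID back on success:
    #
    #     details = vim.get_affinity_group(group_id)
    #     assert vim.delete_affinity_group(group_id) == group_id
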
3593 def get_vdu_state(self, vm_id):
3594 """
        Get the state of a VDU
3596 param:
3597 vm_id: ID of an instance
3598 """
3599 self.logger.debug("Getting the status of VM")
3600 self.logger.debug("VIM VM ID %s", vm_id)
3601 self._reload_connection()
3602 server = self.nova.servers.find(id=vm_id)
3603 server_dict = server.to_dict()
3604 vdu_data = [
3605 server_dict["status"],
3606 server_dict["flavor"]["id"],
3607 server_dict["OS-EXT-SRV-ATTR:host"],
3608 server_dict["OS-EXT-AZ:availability_zone"],
3609 ]
3610 self.logger.debug("vdu_data %s", vdu_data)
3611 return vdu_data
3612
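    # Hedged usage sketch: the four-element list returned above unpacks as
    # status, flavor ID, compute host and availability zone:
    #
    #     status, flavor_id, host, az = vim.get_vdu_state(vm_id)
    #     # e.g. ["ACTIVE", "<flavor-uuid>", "compute-01", "nova"]
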
3613 def check_compute_availability(self, host, server_flavor_details):
3614 self._reload_connection()
3615 hypervisor_search = self.nova.hypervisors.search(
3616 hypervisor_match=host, servers=True
3617 )
3618 for hypervisor in hypervisor_search:
3619 hypervisor_id = hypervisor.to_dict()["id"]
3620 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
3621 hypervisor_dict = hypervisor_details.to_dict()
            # JSON round-trip coerces any custom mapping types from the client
            # into plain built-in dicts before the fields are read
            hypervisor_json = json.loads(json.dumps(hypervisor_dict))
3624 resources_available = [
3625 hypervisor_json["free_ram_mb"],
3626 hypervisor_json["disk_available_least"],
3627 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
3628 ]
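            # Element-wise strict comparison against server_flavor_details
            # ([ram, disk, vcpus]): free RAM (MB), least available disk (GB)
            # and free vCPUs must all exceed what the flavor requires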
3629 compute_available = all(
3630 x > y for x, y in zip(resources_available, server_flavor_details)
3631 )
3632 if compute_available:
3633 return host
3634
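    # Hedged example: with server_flavor_details = [ram_mb, disk_gb, vcpus],
    # a host reporting free_ram_mb=8192, disk_available_least=100 and four
    # free vCPUs satisfies a [4096, 40, 2] flavor and is returned; if no
    # hypervisor qualifies, the method falls through and returns None:
    #
    #     target = vim.check_compute_availability("compute-01", [4096, 40, 2])
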
3635 def check_availability_zone(
3636 self, old_az, server_flavor_details, old_host, host=None
3637 ):
3638 self._reload_connection()
3639 az_check = {"zone_check": False, "compute_availability": None}
3640 aggregates_list = self.nova.aggregates.list()
3641 for aggregate in aggregates_list:
3642 aggregate_details = aggregate.to_dict()
            # Same normalization as in check_compute_availability above
            aggregate_json = json.loads(json.dumps(aggregate_details))
3645 if aggregate_json["availability_zone"] == old_az:
3646 hosts_list = aggregate_json["hosts"]
3647 if host is not None:
3648 if host in hosts_list:
3649 az_check["zone_check"] = True
3650 available_compute_id = self.check_compute_availability(
3651 host, server_flavor_details
3652 )
3653 if available_compute_id is not None:
3654 az_check["compute_availability"] = available_compute_id
3655 else:
3656 for check_host in hosts_list:
3657 if check_host != old_host:
3658 available_compute_id = self.check_compute_availability(
3659 check_host, server_flavor_details
3660 )
3661 if available_compute_id is not None:
3662 az_check["zone_check"] = True
3663 az_check["compute_availability"] = available_compute_id
3664 break
3665 else:
3666 az_check["zone_check"] = True
3667 return az_check
3668
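    # Hedged sketch of the dict returned above ("compute-02" is a placeholder):
    #
    #     {"zone_check": True, "compute_availability": "compute-02"}
    #
    # zone_check False with compute_availability None means no suitable
    # placement was found in the requested availability zone.
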
3669 def migrate_instance(self, vm_id, compute_host=None):
3670 """
3671 Migrate a vdu
3672 param:
3673 vm_id: ID of an instance
3674 compute_host: Host to migrate the vdu to
3675 """
3676 self._reload_connection()
3677 vm_state = False
3678 instance_state = self.get_vdu_state(vm_id)
3679 server_flavor_id = instance_state[1]
3680 server_hypervisor_name = instance_state[2]
3681 server_availability_zone = instance_state[3]
3682 try:
3683 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3684 server_flavor_details = [
3685 server_flavor["ram"],
3686 server_flavor["disk"],
3687 server_flavor["vcpus"],
3688 ]
3689 if compute_host == server_hypervisor_name:
3690 raise vimconn.VimConnException(
3691 "Unable to migrate instance '{}' to the same host '{}'".format(
3692 vm_id, compute_host
3693 ),
3694 http_code=vimconn.HTTP_Bad_Request,
3695 )
3696 az_status = self.check_availability_zone(
3697 server_availability_zone,
3698 server_flavor_details,
3699 server_hypervisor_name,
3700 compute_host,
3701 )
3702 availability_zone_check = az_status["zone_check"]
3703 available_compute_id = az_status.get("compute_availability")
3704
3705 if availability_zone_check is False:
3706 raise vimconn.VimConnException(
3707 "Unable to migrate instance '{}' to a different availability zone".format(
3708 vm_id
3709 ),
3710 http_code=vimconn.HTTP_Bad_Request,
3711 )
3712 if available_compute_id is not None:
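                # Block live migration copies local disks over the network
                # (no shared storage assumed) and disallows disk overcommit
                # on the destination host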
3713 self.nova.servers.live_migrate(
3714 server=vm_id,
3715 host=available_compute_id,
3716 block_migration=True,
3717 disk_over_commit=False,
3718 )
3719 state = "MIGRATING"
3720 changed_compute_host = ""
3721 if state == "MIGRATING":
3722 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
3723 changed_compute_host = self.get_vdu_state(vm_id)[2]
3724 if vm_state and changed_compute_host == available_compute_id:
3725 self.logger.debug(
3726 "Instance '{}' migrated to the new compute host '{}'".format(
3727 vm_id, changed_compute_host
3728 )
3729 )
3730 return state, available_compute_id
3731 else:
3732 raise vimconn.VimConnException(
3733 "Migration Failed. Instance '{}' not moved to the new host {}".format(
3734 vm_id, available_compute_id
3735 ),
3736 http_code=vimconn.HTTP_Bad_Request,
3737 )
3738 else:
                raise vimconn.VimConnException(
                    "No compute host with enough resources is available to migrate instance '{}'".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
3745 except (
3746 nvExceptions.BadRequest,
3747 nvExceptions.ClientException,
3748 nvExceptions.NotFound,
3749 ) as e:
3750 self._format_exception(e)
3751
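    # Hedged usage sketch: request a live migration to a named compute host;
    # on success the method returns ("MIGRATING", <target-host>):
    #
    #     state, target = vim.migrate_instance(vm_id, compute_host="compute-02")
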
3752 def resize_instance(self, vm_id, new_flavor_id):
3753 """
3754 For resizing the vm based on the given
3755 flavor details
3756 param:
3757 vm_id : ID of an instance
3758 new_flavor_id : Flavor id to be resized
3759 Return the status of a resized instance
3760 """
3761 self._reload_connection()
3762 self.logger.debug("resize the flavor of an instance")
3763 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
3764 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
3765 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
3766 try:
            if instance_status in ("ACTIVE", "SHUTOFF"):
3768 if old_flavor_disk > new_flavor_disk:
3769 raise nvExceptions.BadRequest(
3770 400,
3771 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
3772 )
3773 else:
3774 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
3775 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
3776 if vm_state:
3777 instance_resized_status = self.confirm_resize(vm_id)
3778 return instance_resized_status
3779 else:
                        raise nvExceptions.BadRequest(
                            409,
                            message="Cannot 'resize': instance did not reach VERIFY_RESIZE state",
                        )
3784
            else:
                self.logger.debug("ERROR: Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.BadRequest(
                    409,
                    message="Cannot 'resize' instance unless it is in ACTIVE or SHUTOFF state",
                )
3791 except (
3792 nvExceptions.BadRequest,
3793 nvExceptions.ClientException,
3794 nvExceptions.NotFound,
3795 ) as e:
3796 self._format_exception(e)
3797
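    # Hedged usage sketch: resizing only proceeds from ACTIVE or SHUTOFF and
    # only to a flavor with an equal or larger disk; the returned value is the
    # instance status after the resize is confirmed:
    #
    #     status = vim.resize_instance(vm_id, new_flavor_id)
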
3798 def confirm_resize(self, vm_id):
3799 """
3800 Confirm the resize of an instance
3801 param:
3802 vm_id: ID of an instance
3803 """
3804 self._reload_connection()
3805 self.nova.servers.confirm_resize(server=vm_id)
3806 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3807 self.__wait_for_vm(vm_id, "ACTIVE")
3808 instance_status = self.get_vdu_state(vm_id)[0]
3809 return instance_status
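
    # Hedged note: confirm_resize() can also be called on its own when an
    # instance was left in VERIFY_RESIZE by an earlier resize attempt:
    #
    #     final_status = vim.confirm_resize(vm_id)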