d70e2ec2f479a774aebbd06db0d359588a7f9aa0
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Map of Nova server status values to OSM/openmano VM status values.
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Map of Neutron network status values to OSM/openmano network status values.
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# SFC classification types accepted by this connector (see _validate_classification)
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800  # seconds
server_timeout = 1800  # seconds
87
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that tolerates dict subclasses.

    OpenStack client libraries return custom subclasses of dict, and the
    stock YAML safe dumper is designed to refuse anything that is not a
    plain dict (reference issue 142 of pyyaml). Downcasting the data to a
    plain dict before representing it is a simple workaround.
    """

    def represent_data(self, data):
        # Convert dict subclasses back into plain dicts so the safe
        # representer accepts them.
        if isinstance(data, dict) and data.__class__ != dict:
            data = dict(data)

        return super().represent_data(data)
97
98
99 class vimconnector(vimconn.VimConnector):
100 def __init__(
101 self,
102 uuid,
103 name,
104 tenant_id,
105 tenant_name,
106 url,
107 url_admin=None,
108 user=None,
109 passwd=None,
110 log_level=None,
111 config={},
112 persistent_info={},
113 ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
117 """
118 api_version = config.get("APIversion")
119
120 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124 )
125
126 vim_type = config.get("vim_type")
127
128 if vim_type and vim_type not in ("vio", "VIO"):
129 raise vimconn.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132 )
133
134 if config.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self._validate_vlan_ranges(
137 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138 )
139
140 if config.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self._validate_vlan_ranges(
143 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144 )
145
146 vimconn.VimConnector.__init__(
147 self,
148 uuid,
149 name,
150 tenant_id,
151 tenant_name,
152 url,
153 url_admin,
154 user,
155 passwd,
156 log_level,
157 config,
158 )
159
160 if self.config.get("insecure") and self.config.get("ca_cert"):
161 raise vimconn.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
163 )
164
165 self.verify = True
166
167 if self.config.get("insecure"):
168 self.verify = False
169
170 if self.config.get("ca_cert"):
171 self.verify = self.config.get("ca_cert")
172
173 if not url:
174 raise TypeError("url param can not be NoneType")
175
176 self.persistent_info = persistent_info
177 self.availability_zone = persistent_info.get("availability_zone", None)
178 self.session = persistent_info.get("session", {"reload_client": True})
179 self.my_tenant_id = self.session.get("my_tenant_id")
180 self.nova = self.session.get("nova")
181 self.neutron = self.session.get("neutron")
182 self.cinder = self.session.get("cinder")
183 self.glance = self.session.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self.keystone = self.session.get("keystone")
186 self.api_version3 = self.session.get("api_version3")
187 self.vim_type = self.config.get("vim_type")
188
189 if self.vim_type:
190 self.vim_type = self.vim_type.upper()
191
192 if self.config.get("use_internal_endpoint"):
193 self.endpoint_type = "internalURL"
194 else:
195 self.endpoint_type = None
196
197 logging.getLogger("urllib3").setLevel(logging.WARNING)
198 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 logging.getLogger("novaclient").setLevel(logging.WARNING)
200 self.logger = logging.getLogger("ro.vim.openstack")
201
202 # allow security_groups to be a list or a single string
203 if isinstance(self.config.get("security_groups"), str):
204 self.config["security_groups"] = [self.config["security_groups"]]
205
206 self.security_groups_id = None
207
208 # ###### VIO Specific Changes #########
209 if self.vim_type == "VIO":
210 self.logger = logging.getLogger("ro.vim.vio")
211
212 if log_level:
213 self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individuals parameters.
217 Throw KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
227 Throw KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
    def _reload_connection(self):
        """Called before any operation, it check if credentials has changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        # Clients are rebuilt only when marked dirty (see __setitem__ / __init__)
        if self.session["reload_client"]:
            # Decide keystone API version: explicit config wins, otherwise
            # infer it from the auth_url suffix.
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # Domain ids default to "default" only when neither the id
                # nor the name was supplied in config.
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"

                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            # verify comes from __init__: True / False / CA bundle path
            sess = session.Session(auth=auth, verify=self.verify)
            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                # NOTE(review): the v2 keystone client is built without
                # region_name, unlike every other client here — TODO confirm
                # whether that is intentional.
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            # Best-effort: failure to resolve the project id is logged, not
            # raised; _get_ids_from_name raises later if it is still unset.
            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            # Glance does not take endpoint_type, so when internal endpoints
            # are requested the internal URL is looked up via keystone.
            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availablity zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
391
392 def __net_os2mano(self, net_list_dict):
393 """Transform the net openstack format to mano format
394 net_list_dict can be a list of dict or a single dict"""
395 if type(net_list_dict) is dict:
396 net_list_ = (net_list_dict,)
397 elif type(net_list_dict) is list:
398 net_list_ = net_list_dict
399 else:
400 raise TypeError("param net_list_dict must be a list or a dictionary")
401 for net in net_list_:
402 if net.get("provider:network_type") == "vlan":
403 net["type"] = "data"
404 else:
405 net["type"] = "bridge"
406
407 def __classification_os2mano(self, class_list_dict):
408 """Transform the openstack format (Flow Classifier) to mano format
409 (Classification) class_list_dict can be a list of dict or a single dict
410 """
411 if isinstance(class_list_dict, dict):
412 class_list_ = [class_list_dict]
413 elif isinstance(class_list_dict, list):
414 class_list_ = class_list_dict
415 else:
416 raise TypeError("param class_list_dict must be a list or a dictionary")
417 for classification in class_list_:
418 id = classification.pop("id")
419 name = classification.pop("name")
420 description = classification.pop("description")
421 project_id = classification.pop("project_id")
422 tenant_id = classification.pop("tenant_id")
423 original_classification = copy.deepcopy(classification)
424 classification.clear()
425 classification["ctype"] = "legacy_flow_classifier"
426 classification["definition"] = original_classification
427 classification["id"] = id
428 classification["name"] = name
429 classification["description"] = description
430 classification["project_id"] = project_id
431 classification["tenant_id"] = tenant_id
432
433 def __sfi_os2mano(self, sfi_list_dict):
434 """Transform the openstack format (Port Pair) to mano format (SFI)
435 sfi_list_dict can be a list of dict or a single dict
436 """
437 if isinstance(sfi_list_dict, dict):
438 sfi_list_ = [sfi_list_dict]
439 elif isinstance(sfi_list_dict, list):
440 sfi_list_ = sfi_list_dict
441 else:
442 raise TypeError("param sfi_list_dict must be a list or a dictionary")
443
444 for sfi in sfi_list_:
445 sfi["ingress_ports"] = []
446 sfi["egress_ports"] = []
447
448 if sfi.get("ingress"):
449 sfi["ingress_ports"].append(sfi["ingress"])
450
451 if sfi.get("egress"):
452 sfi["egress_ports"].append(sfi["egress"])
453
454 del sfi["ingress"]
455 del sfi["egress"]
456 params = sfi.get("service_function_parameters")
457 sfc_encap = False
458
459 if params:
460 correlation = params.get("correlation")
461
462 if correlation:
463 sfc_encap = True
464
465 sfi["sfc_encap"] = sfc_encap
466 del sfi["service_function_parameters"]
467
468 def __sf_os2mano(self, sf_list_dict):
469 """Transform the openstack format (Port Pair Group) to mano format (SF)
470 sf_list_dict can be a list of dict or a single dict
471 """
472 if isinstance(sf_list_dict, dict):
473 sf_list_ = [sf_list_dict]
474 elif isinstance(sf_list_dict, list):
475 sf_list_ = sf_list_dict
476 else:
477 raise TypeError("param sf_list_dict must be a list or a dictionary")
478
479 for sf in sf_list_:
480 del sf["port_pair_group_parameters"]
481 sf["sfis"] = sf["port_pairs"]
482 del sf["port_pairs"]
483
484 def __sfp_os2mano(self, sfp_list_dict):
485 """Transform the openstack format (Port Chain) to mano format (SFP)
486 sfp_list_dict can be a list of dict or a single dict
487 """
488 if isinstance(sfp_list_dict, dict):
489 sfp_list_ = [sfp_list_dict]
490 elif isinstance(sfp_list_dict, list):
491 sfp_list_ = sfp_list_dict
492 else:
493 raise TypeError("param sfp_list_dict must be a list or a dictionary")
494
495 for sfp in sfp_list_:
496 params = sfp.pop("chain_parameters")
497 sfc_encap = False
498
499 if params:
500 correlation = params.get("correlation")
501
502 if correlation:
503 sfc_encap = True
504
505 sfp["sfc_encap"] = sfc_encap
506 sfp["spi"] = sfp.pop("chain_id")
507 sfp["classifications"] = sfp.pop("flow_classifiers")
508 sfp["service_functions"] = sfp.pop("port_pair_groups")
509
510 # placeholder for now; read TODO note below
511 def _validate_classification(self, type, definition):
512 # only legacy_flow_classifier Type is supported at this point
513 return True
514 # TODO(igordcard): this method should be an abstract method of an
515 # abstract Classification class to be implemented by the specific
516 # Types. Also, abstract vimconnector should call the validation
517 # method before the implemented VIM connectors are called.
518
519 def _format_exception(self, exception):
520 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
521 message_error = str(exception)
522 tip = ""
523
524 if isinstance(
525 exception,
526 (
527 neExceptions.NetworkNotFoundClient,
528 nvExceptions.NotFound,
529 ksExceptions.NotFound,
530 gl1Exceptions.HTTPNotFound,
531 ),
532 ):
533 raise vimconn.VimConnNotFoundException(
534 type(exception).__name__ + ": " + message_error
535 )
536 elif isinstance(
537 exception,
538 (
539 HTTPException,
540 gl1Exceptions.HTTPException,
541 gl1Exceptions.CommunicationError,
542 ConnectionError,
543 ksExceptions.ConnectionError,
544 neExceptions.ConnectionFailed,
545 ),
546 ):
547 if type(exception).__name__ == "SSLError":
548 tip = " (maybe option 'insecure' must be added to the VIM)"
549
550 raise vimconn.VimConnConnectionException(
551 "Invalid URL or credentials{}: {}".format(tip, message_error)
552 )
553 elif isinstance(
554 exception,
555 (
556 KeyError,
557 nvExceptions.BadRequest,
558 ksExceptions.BadRequest,
559 ),
560 ):
561 raise vimconn.VimConnException(
562 type(exception).__name__ + ": " + message_error
563 )
564 elif isinstance(
565 exception,
566 (
567 nvExceptions.ClientException,
568 ksExceptions.ClientException,
569 neExceptions.NeutronException,
570 ),
571 ):
572 raise vimconn.VimConnUnexpectedResponse(
573 type(exception).__name__ + ": " + message_error
574 )
575 elif isinstance(exception, nvExceptions.Conflict):
576 raise vimconn.VimConnConflictException(
577 type(exception).__name__ + ": " + message_error
578 )
579 elif isinstance(exception, vimconn.VimConnException):
580 raise exception
581 else: # ()
582 self.logger.error("General Exception " + message_error, exc_info=True)
583
584 raise vimconn.VimConnConnectionException(
585 type(exception).__name__ + ": " + message_error
586 )
587
588 def _get_ids_from_name(self):
589 """
590 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
591 :return: None
592 """
593 # get tenant_id if only tenant_name is supplied
594 self._reload_connection()
595
596 if not self.my_tenant_id:
597 raise vimconn.VimConnConnectionException(
598 "Error getting tenant information from name={} id={}".format(
599 self.tenant_name, self.tenant_id
600 )
601 )
602
603 if self.config.get("security_groups") and not self.security_groups_id:
604 # convert from name to id
605 neutron_sg_list = self.neutron.list_security_groups(
606 tenant_id=self.my_tenant_id
607 )["security_groups"]
608
609 self.security_groups_id = []
610 for sg in self.config.get("security_groups"):
611 for neutron_sg in neutron_sg_list:
612 if sg in (neutron_sg["id"], neutron_sg["name"]):
613 self.security_groups_id.append(neutron_sg["id"])
614 break
615 else:
616 self.security_groups_id = None
617
618 raise vimconn.VimConnConnectionException(
619 "Not found security group {} for this tenant".format(sg)
620 )
621
    def check_vim_connectivity(self):
        """Verify VIM reachability and credentials.

        Any failure surfaces as a vimconn exception raised by the
        underlying network-list call.
        """
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})
625
626 def get_tenant_list(self, filter_dict={}):
627 """Obtain tenants of VIM
628 filter_dict can contain the following keys:
629 name: filter by tenant name
630 id: filter by tenant uuid/id
631 <other VIM specific>
632 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
633 """
634 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
635
636 try:
637 self._reload_connection()
638
639 if self.api_version3:
640 project_class_list = self.keystone.projects.list(
641 name=filter_dict.get("name")
642 )
643 else:
644 project_class_list = self.keystone.tenants.findall(**filter_dict)
645
646 project_list = []
647
648 for project in project_class_list:
649 if filter_dict.get("id") and filter_dict["id"] != project.id:
650 continue
651
652 project_list.append(project.to_dict())
653
654 return project_list
655 except (
656 ksExceptions.ConnectionError,
657 ksExceptions.ClientException,
658 ConnectionError,
659 ) as e:
660 self._format_exception(e)
661
662 def new_tenant(self, tenant_name, tenant_description):
663 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
664 self.logger.debug("Adding a new tenant name: %s", tenant_name)
665
666 try:
667 self._reload_connection()
668
669 if self.api_version3:
670 project = self.keystone.projects.create(
671 tenant_name,
672 self.config.get("project_domain_id", "default"),
673 description=tenant_description,
674 is_domain=False,
675 )
676 else:
677 project = self.keystone.tenants.create(tenant_name, tenant_description)
678
679 return project.id
680 except (
681 ksExceptions.ConnectionError,
682 ksExceptions.ClientException,
683 ksExceptions.BadRequest,
684 ConnectionError,
685 ) as e:
686 self._format_exception(e)
687
688 def delete_tenant(self, tenant_id):
689 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
690 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
691
692 try:
693 self._reload_connection()
694
695 if self.api_version3:
696 self.keystone.projects.delete(tenant_id)
697 else:
698 self.keystone.tenants.delete(tenant_id)
699
700 return tenant_id
701 except (
702 ksExceptions.ConnectionError,
703 ksExceptions.ClientException,
704 ksExceptions.NotFound,
705 ConnectionError,
706 ) as e:
707 self._format_exception(e)
708
    def new_network(
        self,
        net_name,
        net_type,
        ip_profile=None,
        shared=False,
        provider_network_profile=None,
    ):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/use by other tenants/organization
            'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
                                                             physical-network: physnet-label}
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
        )
        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))

        try:
            vlan = None

            if provider_network_profile:
                vlan = provider_network_profile.get("segmentation-id")

            # NOTE(review): new_net/created_items must stay bound before any
            # statement that can raise, because the except block below reads
            # both for rollback — keep this ordering when editing.
            new_net = None
            created_items = {}
            self._reload_connection()
            network_dict = {"name": net_name, "admin_state_up": True}

            if net_type in ("data", "ptp") or provider_network_profile:
                provider_physical_network = None

                if provider_network_profile and provider_network_profile.get(
                    "physical-network"
                ):
                    provider_physical_network = provider_network_profile.get(
                        "physical-network"
                    )

                # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
                # or not declared, just ignore the checking
                if (
                    isinstance(
                        self.config.get("dataplane_physical_net"), (tuple, list)
                    )
                    and provider_physical_network
                    not in self.config["dataplane_physical_net"]
                ):
                    raise vimconn.VimConnConflictException(
                        "Invalid parameter 'provider-network:physical-network' "
                        "for network creation. '{}' is not one of the declared "
                        "list at VIM_config:dataplane_physical_net".format(
                            provider_physical_network
                        )
                    )

                # use the default dataplane_physical_net
                if not provider_physical_network:
                    provider_physical_network = self.config.get(
                        "dataplane_physical_net"
                    )

                    # if it is non empty list, use the first value. If it is a string use the value directly
                    if (
                        isinstance(provider_physical_network, (tuple, list))
                        and provider_physical_network
                    ):
                        provider_physical_network = provider_physical_network[0]

                if not provider_physical_network:
                    raise vimconn.VimConnConflictException(
                        "missing information needed for underlay networks. Provide "
                        "'dataplane_physical_net' configuration at VIM or use the NS "
                        "instantiation parameter 'provider-network.physical-network'"
                        " for the VLD"
                    )

                if not self.config.get("multisegment_support"):
                    # single-segment provider network
                    network_dict[
                        "provider:physical_network"
                    ] = provider_physical_network

                    if (
                        provider_network_profile
                        and "network-type" in provider_network_profile
                    ):
                        network_dict[
                            "provider:network_type"
                        ] = provider_network_profile["network-type"]
                    else:
                        network_dict["provider:network_type"] = self.config.get(
                            "dataplane_network_type", "vlan"
                        )

                    if vlan:
                        network_dict["provider:segmentation_id"] = vlan
                else:
                    # Multi-segment case: one vxlan segment plus one vlan segment
                    segment_list = []
                    segment1_dict = {
                        "provider:physical_network": "",
                        "provider:network_type": "vxlan",
                    }
                    segment_list.append(segment1_dict)
                    segment2_dict = {
                        "provider:physical_network": provider_physical_network,
                        "provider:network_type": "vlan",
                    }

                    if vlan:
                        segment2_dict["provider:segmentation_id"] = vlan
                    elif self.config.get("multisegment_vlan_range"):
                        vlanID = self._generate_multisegment_vlanID()
                        segment2_dict["provider:segmentation_id"] = vlanID

                    # else
                    #     raise vimconn.VimConnConflictException(
                    #         "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
                    #         network")
                    segment_list.append(segment2_dict)
                    network_dict["segments"] = segment_list

                # VIO Specific Changes. It needs a concrete VLAN
                if self.vim_type == "VIO" and vlan is None:
                    if self.config.get("dataplane_net_vlan_range") is None:
                        raise vimconn.VimConnConflictException(
                            "You must provide 'dataplane_net_vlan_range' in format "
                            "[start_ID - end_ID] at VIM_config for creating underlay "
                            "networks"
                        )

                    network_dict["provider:segmentation_id"] = self._generate_vlanID()

            network_dict["shared"] = shared

            if self.config.get("disable_network_port_security"):
                network_dict["port_security_enabled"] = False

            if self.config.get("neutron_availability_zone_hints"):
                hints = self.config.get("neutron_availability_zone_hints")

                if isinstance(hints, str):
                    hints = [hints]

                network_dict["availability_zone_hints"] = hints

            new_net = self.neutron.create_network({"network": network_dict})
            # print new_net
            # create subnetwork, even if there is no profile

            if not ip_profile:
                ip_profile = {}

            if not ip_profile.get("subnet_address"):
                # Fake subnet is required
                subnet_rand = random.randint(0, 255)
                ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)

            if "ip_version" not in ip_profile:
                ip_profile["ip_version"] = "IPv4"

            subnet = {
                "name": net_name + "-subnet",
                "network_id": new_net["network"]["id"],
                "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
                "cidr": ip_profile["subnet_address"],
            }

            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
            if ip_profile.get("gateway_address"):
                subnet["gateway_ip"] = ip_profile["gateway_address"]
            else:
                subnet["gateway_ip"] = None

            if ip_profile.get("dns_address"):
                subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")

            if "dhcp_enabled" in ip_profile:
                # dhcp_enabled may arrive as the string "false" or as a bool
                subnet["enable_dhcp"] = (
                    False
                    if ip_profile["dhcp_enabled"] == "false"
                    or ip_profile["dhcp_enabled"] is False
                    else True
                )

            if ip_profile.get("dhcp_start_address"):
                subnet["allocation_pools"] = []
                subnet["allocation_pools"].append(dict())
                subnet["allocation_pools"][0]["start"] = ip_profile[
                    "dhcp_start_address"
                ]

            if ip_profile.get("dhcp_count"):
                # parts = ip_profile["dhcp_start_address"].split(".")
                # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
                ip_int += ip_profile["dhcp_count"] - 1
                ip_str = str(netaddr.IPAddress(ip_int))
                subnet["allocation_pools"][0]["end"] = ip_str

            # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
            self.neutron.create_subnet({"subnet": subnet})

            if net_type == "data" and self.config.get("multisegment_support"):
                if self.config.get("l2gw_support"):
                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
                    for l2gw in l2gw_list:
                        # NOTE(review): vlanID is only bound when it was
                        # generated from multisegment_vlan_range above; if a
                        # VLAN came via provider_network_profile this line
                        # raises NameError — TODO confirm and fix upstream.
                        l2gw_conn = {
                            "l2_gateway_id": l2gw["id"],
                            "network_id": new_net["network"]["id"],
                            "segmentation_id": str(vlanID),
                        }
                        new_l2gw_conn = self.neutron.create_l2_gateway_connection(
                            {"l2_gateway_connection": l2gw_conn}
                        )
                        created_items[
                            "l2gwconn:"
                            + str(new_l2gw_conn["l2_gateway_connection"]["id"])
                        ] = True

            return new_net["network"]["id"], created_items
        except Exception as e:
            # Rollback: delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )

            if new_net:
                self.neutron.delete_network(new_net["network"]["id"])

            # re-raise as the appropriate vimconn exception
            self._format_exception(e)
971
972 def get_network_list(self, filter_dict={}):
973 """Obtain tenant networks of VIM
974 Filter_dict can be:
975 name: network name
976 id: network uuid
977 shared: boolean
978 tenant_id: tenant
979 admin_state_up: boolean
980 status: 'ACTIVE'
981 Returns the network list of dictionaries
982 """
983 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
984
985 try:
986 self._reload_connection()
987 filter_dict_os = filter_dict.copy()
988
989 if self.api_version3 and "tenant_id" in filter_dict_os:
990 # TODO check
991 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
992
993 net_dict = self.neutron.list_networks(**filter_dict_os)
994 net_list = net_dict["networks"]
995 self.__net_os2mano(net_list)
996
997 return net_list
998 except (
999 neExceptions.ConnectionFailed,
1000 ksExceptions.ClientException,
1001 neExceptions.NeutronException,
1002 ConnectionError,
1003 ) as e:
1004 self._format_exception(e)
1005
1006 def get_network(self, net_id):
1007 """Obtain details of network from VIM
1008 Returns the network information from a network id"""
1009 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1010 filter_dict = {"id": net_id}
1011 net_list = self.get_network_list(filter_dict)
1012
1013 if len(net_list) == 0:
1014 raise vimconn.VimConnNotFoundException(
1015 "Network '{}' not found".format(net_id)
1016 )
1017 elif len(net_list) > 1:
1018 raise vimconn.VimConnConflictException(
1019 "Found more than one network with this criteria"
1020 )
1021
1022 net = net_list[0]
1023 subnets = []
1024 for subnet_id in net.get("subnets", ()):
1025 try:
1026 subnet = self.neutron.show_subnet(subnet_id)
1027 except Exception as e:
1028 self.logger.error(
1029 "osconnector.get_network(): Error getting subnet %s %s"
1030 % (net_id, str(e))
1031 )
1032 subnet = {"id": subnet_id, "fault": str(e)}
1033
1034 subnets.append(subnet)
1035
1036 net["subnets"] = subnets
1037 net["encapsulation"] = net.get("provider:network_type")
1038 net["encapsulation_type"] = net.get("provider:network_type")
1039 net["segmentation_id"] = net.get("provider:segmentation_id")
1040 net["encapsulation_id"] = net.get("provider:segmentation_id")
1041
1042 return net
1043
1044 def delete_network(self, net_id, created_items=None):
1045 """
1046 Removes a tenant network from VIM and its associated elements
1047 :param net_id: VIM identifier of the network, provided by method new_network
1048 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1049 Returns the network identifier or raises an exception upon error or when network is not found
1050 """
1051 self.logger.debug("Deleting network '%s' from VIM", net_id)
1052
1053 if created_items is None:
1054 created_items = {}
1055
1056 try:
1057 self._reload_connection()
1058 # delete l2gw connections (if any) before deleting the network
1059 for k, v in created_items.items():
1060 if not v: # skip already deleted
1061 continue
1062
1063 try:
1064 k_item, _, k_id = k.partition(":")
1065 if k_item == "l2gwconn":
1066 self.neutron.delete_l2_gateway_connection(k_id)
1067 except Exception as e:
1068 self.logger.error(
1069 "Error deleting l2 gateway connection: {}: {}".format(
1070 type(e).__name__, e
1071 )
1072 )
1073
1074 # delete VM ports attached to this networks before the network
1075 ports = self.neutron.list_ports(network_id=net_id)
1076 for p in ports["ports"]:
1077 try:
1078 self.neutron.delete_port(p["id"])
1079 except Exception as e:
1080 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1081
1082 self.neutron.delete_network(net_id)
1083
1084 return net_id
1085 except (
1086 neExceptions.ConnectionFailed,
1087 neExceptions.NetworkNotFoundClient,
1088 neExceptions.NeutronException,
1089 ksExceptions.ClientException,
1090 neExceptions.NeutronException,
1091 ConnectionError,
1092 ) as e:
1093 self._format_exception(e)
1094
1095 def refresh_nets_status(self, net_list):
1096 """Get the status of the networks
1097 Params: the list of network identifiers
1098 Returns a dictionary with:
1099 net_id: #VIM id of this network
1100 status: #Mandatory. Text with one of:
1101 # DELETED (not found at vim)
1102 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1103 # OTHER (Vim reported other status not understood)
1104 # ERROR (VIM indicates an ERROR status)
1105 # ACTIVE, INACTIVE, DOWN (admin down),
1106 # BUILD (on building process)
1107 #
1108 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1109 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1110 """
1111 net_dict = {}
1112
1113 for net_id in net_list:
1114 net = {}
1115
1116 try:
1117 net_vim = self.get_network(net_id)
1118
1119 if net_vim["status"] in netStatus2manoFormat:
1120 net["status"] = netStatus2manoFormat[net_vim["status"]]
1121 else:
1122 net["status"] = "OTHER"
1123 net["error_msg"] = "VIM status reported " + net_vim["status"]
1124
1125 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1126 net["status"] = "DOWN"
1127
1128 net["vim_info"] = self.serialize(net_vim)
1129
1130 if net_vim.get("fault"): # TODO
1131 net["error_msg"] = str(net_vim["fault"])
1132 except vimconn.VimConnNotFoundException as e:
1133 self.logger.error("Exception getting net status: %s", str(e))
1134 net["status"] = "DELETED"
1135 net["error_msg"] = str(e)
1136 except vimconn.VimConnException as e:
1137 self.logger.error("Exception getting net status: %s", str(e))
1138 net["status"] = "VIM_ERROR"
1139 net["error_msg"] = str(e)
1140 net_dict[net_id] = net
1141 return net_dict
1142
1143 def get_flavor(self, flavor_id):
1144 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1145 self.logger.debug("Getting flavor '%s'", flavor_id)
1146
1147 try:
1148 self._reload_connection()
1149 flavor = self.nova.flavors.find(id=flavor_id)
1150 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1151
1152 return flavor.to_dict()
1153 except (
1154 nvExceptions.NotFound,
1155 nvExceptions.ClientException,
1156 ksExceptions.ClientException,
1157 ConnectionError,
1158 ) as e:
1159 self._format_exception(e)
1160
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that match the flavor description
        Returns the flavor_id or raises a vimconnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
        vimconnNotFoundException is raised
        """
        # exact_match is the default; "use_existing_flavors" switches to best-candidate mode
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            # sentinel "worst" candidate; NOTE(review): a 3-tuple compared against
            # 5-tuples below — works only via lexicographic ordering, confirm intended
            flavor_candidate_data = (10000, 10000, 10000)
            # target is (ram, vcpus, disk, ephemeral, swap); ephemeral/swap default to 0
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
            # if len(numas) > 1:
            #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
            # numa=numas[0]
            # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                # flavors carrying extra specs (EPA) are skipped entirely
                if epa:
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    # swap may be "" when unset; normalize to 0
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    # NOTE(review): lexicographic tuple comparison — a flavor with
                    # more ram but fewer vcpus than the target can still qualify;
                    # confirm this heuristic is the intended "closer flavor" rule
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
1229
1230 @staticmethod
1231 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1232 """Process resource quota and fill up extra_specs.
1233 Args:
1234 quota (dict): Keeping the quota of resurces
1235 prefix (str) Prefix
1236 extra_specs (dict) Dict to be filled to be used during flavor creation
1237
1238 """
1239 if "limit" in quota:
1240 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1241
1242 if "reserve" in quota:
1243 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1244
1245 if "shares" in quota:
1246 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1247 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1248
1249 @staticmethod
1250 def process_numa_memory(
1251 numa: dict, node_id: Optional[int], extra_specs: dict
1252 ) -> None:
1253 """Set the memory in extra_specs.
1254 Args:
1255 numa (dict): A dictionary which includes numa information
1256 node_id (int): ID of numa node
1257 extra_specs (dict): To be filled.
1258
1259 """
1260 if not numa.get("memory"):
1261 return
1262 memory_mb = numa["memory"] * 1024
1263 memory = "hw:numa_mem.{}".format(node_id)
1264 extra_specs[memory] = int(memory_mb)
1265
1266 @staticmethod
1267 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1268 """Set the cpu in extra_specs.
1269 Args:
1270 numa (dict): A dictionary which includes numa information
1271 node_id (int): ID of numa node
1272 extra_specs (dict): To be filled.
1273
1274 """
1275 if not numa.get("vcpu"):
1276 return
1277 vcpu = numa["vcpu"]
1278 cpu = "hw:numa_cpus.{}".format(node_id)
1279 vcpu = ",".join(map(str, vcpu))
1280 extra_specs[cpu] = vcpu
1281
1282 @staticmethod
1283 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1284 """Fill up extra_specs if numa has paired-threads.
1285 Args:
1286 numa (dict): A dictionary which includes numa information
1287 extra_specs (dict): To be filled.
1288
1289 Returns:
1290 vcpus (int) Number of virtual cpus
1291
1292 """
1293 if not numa.get("paired-threads"):
1294 return
1295 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1296 vcpus = numa["paired-threads"] * 2
1297 extra_specs["hw:cpu_thread_policy"] = "require"
1298 extra_specs["hw:cpu_policy"] = "dedicated"
1299 return vcpus
1300
1301 @staticmethod
1302 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1303 """Fill up extra_specs if numa has cores.
1304 Args:
1305 numa (dict): A dictionary which includes numa information
1306 extra_specs (dict): To be filled.
1307
1308 Returns:
1309 vcpus (int) Number of virtual cpus
1310
1311 """
1312 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1313 # architecture, or a non-SMT architecture will be emulated
1314 if not numa.get("cores"):
1315 return
1316 vcpus = numa["cores"]
1317 extra_specs["hw:cpu_thread_policy"] = "isolate"
1318 extra_specs["hw:cpu_policy"] = "dedicated"
1319 return vcpus
1320
1321 @staticmethod
1322 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1323 """Fill up extra_specs if numa has threads.
1324 Args:
1325 numa (dict): A dictionary which includes numa information
1326 extra_specs (dict): To be filled.
1327
1328 Returns:
1329 vcpus (int) Number of virtual cpus
1330
1331 """
1332 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1333 if not numa.get("threads"):
1334 return
1335 vcpus = numa["threads"]
1336 extra_specs["hw:cpu_thread_policy"] = "prefer"
1337 extra_specs["hw:cpu_policy"] = "dedicated"
1338 return vcpus
1339
1340 def _process_numa_parameters_of_flavor(
1341 self, numas: List, extra_specs: Dict, vcpus: Optional[int]
1342 ) -> int:
1343 """Process numa parameters and fill up extra_specs.
1344
1345 Args:
1346 numas (list): List of dictionary which includes numa information
1347 extra_specs (dict): To be filled.
1348 vcpus (int) Number of virtual cpus
1349
1350 Returns:
1351 vcpus (int) Number of virtual cpus
1352
1353 """
1354 numa_nodes = len(numas)
1355 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1356
1357 if self.vim_type == "VIO":
1358 extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
1359 extra_specs["vmware:latency_sensitivity_level"] = "high"
1360
1361 for numa in numas:
1362 if "id" in numa:
1363 node_id = numa["id"]
1364 # overwrite ram and vcpus
1365 # check if key "memory" is present in numa else use ram value at flavor
1366 self.process_numa_memory(numa, node_id, extra_specs)
1367 self.process_numa_vcpu(numa, node_id, extra_specs)
1368
1369 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1370 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1371
1372 if "paired-threads" in numa:
1373 vcpus = self.process_numa_paired_threads(numa, extra_specs)
1374
1375 elif "cores" in numa:
1376 vcpus = self.process_numa_cores(numa, extra_specs)
1377
1378 elif "threads" in numa:
1379 vcpus = self.process_numa_threads(numa, extra_specs)
1380
1381 return vcpus
1382
1383 def _change_flavor_name(
1384 self, name: str, name_suffix: int, flavor_data: dict
1385 ) -> str:
1386 """Change the flavor name if the name already exists.
1387
1388 Args:
1389 name (str): Flavor name to be checked
1390 name_suffix (int): Suffix to be appended to name
1391 flavor_data (dict): Flavor dict
1392
1393 Returns:
1394 name (str): New flavor name to be used
1395
1396 """
1397 # Get used names
1398 fl = self.nova.flavors.list()
1399 fl_names = [f.name for f in fl]
1400
1401 while name in fl_names:
1402 name_suffix += 1
1403 name = flavor_data["name"] + "-" + str(name_suffix)
1404
1405 return name
1406
1407 def _process_extended_config_of_flavor(
1408 self, extended: dict, extra_specs: dict, vcpus: Optional[int]
1409 ) -> int:
1410 """Process the extended dict to fill up extra_specs.
1411 Args:
1412
1413 extended (dict): Keeping the extra specification of flavor
1414 extra_specs (dict) Dict to be filled to be used during flavor creation
1415 vcpus (int) Number of virtual cpus
1416
1417 Returns:
1418 vcpus (int) Number of virtual cpus
1419
1420 """
1421 quotas = {
1422 "cpu-quota": "cpu",
1423 "mem-quota": "memory",
1424 "vif-quota": "vif",
1425 "disk-io-quota": "disk_io",
1426 }
1427
1428 page_sizes = {
1429 "LARGE": "large",
1430 "SMALL": "small",
1431 "SIZE_2MB": "2MB",
1432 "SIZE_1GB": "1GB",
1433 "PREFER_LARGE": "any",
1434 }
1435
1436 policies = {
1437 "cpu-pinning-policy": "hw:cpu_policy",
1438 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1439 "mem-policy": "hw:numa_mempolicy",
1440 }
1441
1442 numas = extended.get("numas")
1443 if numas:
1444 vcpus = self._process_numa_parameters_of_flavor(numas, extra_specs, vcpus)
1445
1446 for quota, item in quotas.items():
1447 if quota in extended.keys():
1448 self.process_resource_quota(extended.get(quota), item, extra_specs)
1449
1450 # Set the mempage size as specified in the descriptor
1451 if extended.get("mempage-size"):
1452 if extended["mempage-size"] in page_sizes.keys():
1453 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1454 else:
1455 # Normally, validations in NBI should not allow to this condition.
1456 self.logger.debug(
1457 "Invalid mempage-size %s. Will be ignored",
1458 extended.get("mempage-size"),
1459 )
1460
1461 for policy, hw_policy in policies.items():
1462 if extended.get(policy):
1463 extra_specs[hw_policy] = extended[policy].lower()
1464
1465 return vcpus
1466
1467 @staticmethod
1468 def _get_flavor_details(flavor_data: dict) -> Tuple:
1469 """Returns the details of flavor
1470 Args:
1471 flavor_data (dict): Dictionary that includes required flavor details
1472
1473 Returns:
1474 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1475
1476 """
1477 return (
1478 flavor_data.get("ram", 64),
1479 flavor_data.get("vcpus", 1),
1480 {},
1481 flavor_data.get("extended"),
1482 )
1483
    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        if change_name_if_used is True, it will change name in case of conflict,
        because it is not supported name repetition.

        Args:
            flavor_data (dict): Flavor details to be processed
            change_name_if_used (bool): Change name in case of conflict

        Returns:
            flavor_id (str): flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            # KeyError here (missing "name") is handled by the outer except below
            name = flavor_data["name"]
            # retry loop: a name conflict triggers a re-check of used names and a new attempt
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        # append "-<n>" suffixes until the name is unused at the VIM
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )
                    if extended:
                        # numa/quota/policy settings may override the vcpu count
                        vcpus = self._process_extended_config_of_flavor(
                            extended, extra_specs, vcpus
                        )

                    # Create flavor

                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    # name collision despite the pre-check: retry with a new name,
                    # or give up and surface the error once retries are exhausted
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)

        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
1551
1552 def delete_flavor(self, flavor_id):
1553 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1554 try:
1555 self._reload_connection()
1556 self.nova.flavors.delete(flavor_id)
1557
1558 return flavor_id
1559 # except nvExceptions.BadRequest as e:
1560 except (
1561 nvExceptions.NotFound,
1562 ksExceptions.ClientException,
1563 nvExceptions.ClientException,
1564 ConnectionError,
1565 ) as e:
1566 self._format_exception(e)
1567
1568 def new_image(self, image_dict):
1569 """
1570 Adds a tenant image to VIM. imge_dict is a dictionary with:
1571 name: name
1572 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1573 location: path or URI
1574 public: "yes" or "no"
1575 metadata: metadata of the image
1576 Returns the image_id
1577 """
1578 retry = 0
1579 max_retries = 3
1580
1581 while retry < max_retries:
1582 retry += 1
1583 try:
1584 self._reload_connection()
1585
1586 # determine format http://docs.openstack.org/developer/glance/formats.html
1587 if "disk_format" in image_dict:
1588 disk_format = image_dict["disk_format"]
1589 else: # autodiscover based on extension
1590 if image_dict["location"].endswith(".qcow2"):
1591 disk_format = "qcow2"
1592 elif image_dict["location"].endswith(".vhd"):
1593 disk_format = "vhd"
1594 elif image_dict["location"].endswith(".vmdk"):
1595 disk_format = "vmdk"
1596 elif image_dict["location"].endswith(".vdi"):
1597 disk_format = "vdi"
1598 elif image_dict["location"].endswith(".iso"):
1599 disk_format = "iso"
1600 elif image_dict["location"].endswith(".aki"):
1601 disk_format = "aki"
1602 elif image_dict["location"].endswith(".ari"):
1603 disk_format = "ari"
1604 elif image_dict["location"].endswith(".ami"):
1605 disk_format = "ami"
1606 else:
1607 disk_format = "raw"
1608
1609 self.logger.debug(
1610 "new_image: '%s' loading from '%s'",
1611 image_dict["name"],
1612 image_dict["location"],
1613 )
1614 if self.vim_type == "VIO":
1615 container_format = "bare"
1616 if "container_format" in image_dict:
1617 container_format = image_dict["container_format"]
1618
1619 new_image = self.glance.images.create(
1620 name=image_dict["name"],
1621 container_format=container_format,
1622 disk_format=disk_format,
1623 )
1624 else:
1625 new_image = self.glance.images.create(name=image_dict["name"])
1626
1627 if image_dict["location"].startswith("http"):
1628 # TODO there is not a method to direct download. It must be downloaded locally with requests
1629 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1630 else: # local path
1631 with open(image_dict["location"]) as fimage:
1632 self.glance.images.upload(new_image.id, fimage)
1633 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1634 # image_dict.get("public","yes")=="yes",
1635 # container_format="bare", data=fimage, disk_format=disk_format)
1636
1637 metadata_to_load = image_dict.get("metadata")
1638
1639 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1640 # for openstack
1641 if self.vim_type == "VIO":
1642 metadata_to_load["upload_location"] = image_dict["location"]
1643 else:
1644 metadata_to_load["location"] = image_dict["location"]
1645
1646 self.glance.images.update(new_image.id, **metadata_to_load)
1647
1648 return new_image.id
1649 except (
1650 nvExceptions.Conflict,
1651 ksExceptions.ClientException,
1652 nvExceptions.ClientException,
1653 ) as e:
1654 self._format_exception(e)
1655 except (
1656 HTTPException,
1657 gl1Exceptions.HTTPException,
1658 gl1Exceptions.CommunicationError,
1659 ConnectionError,
1660 ) as e:
1661 if retry == max_retries:
1662 continue
1663
1664 self._format_exception(e)
1665 except IOError as e: # can not open the file
1666 raise vimconn.VimConnConnectionException(
1667 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1668 http_code=vimconn.HTTP_Bad_Request,
1669 )
1670
1671 def delete_image(self, image_id):
1672 """Deletes a tenant image from openstack VIM. Returns the old id"""
1673 try:
1674 self._reload_connection()
1675 self.glance.images.delete(image_id)
1676
1677 return image_id
1678 except (
1679 nvExceptions.NotFound,
1680 ksExceptions.ClientException,
1681 nvExceptions.ClientException,
1682 gl1Exceptions.CommunicationError,
1683 gl1Exceptions.HTTPNotFound,
1684 ConnectionError,
1685 ) as e: # TODO remove
1686 self._format_exception(e)
1687
1688 def get_image_id_from_path(self, path):
1689 """Get the image id from image path in the VIM database. Returns the image_id"""
1690 try:
1691 self._reload_connection()
1692 images = self.glance.images.list()
1693
1694 for image in images:
1695 if image.metadata.get("location") == path:
1696 return image.id
1697
1698 raise vimconn.VimConnNotFoundException(
1699 "image with location '{}' not found".format(path)
1700 )
1701 except (
1702 ksExceptions.ClientException,
1703 nvExceptions.ClientException,
1704 gl1Exceptions.CommunicationError,
1705 ConnectionError,
1706 ) as e:
1707 self._format_exception(e)
1708
1709 def get_image_list(self, filter_dict={}):
1710 """Obtain tenant images from VIM
1711 Filter_dict can be:
1712 id: image id
1713 name: image name
1714 checksum: image checksum
1715 Returns the image list of dictionaries:
1716 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1717 List can be empty
1718 """
1719 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1720
1721 try:
1722 self._reload_connection()
1723 # filter_dict_os = filter_dict.copy()
1724 # First we filter by the available filter fields: name, id. The others are removed.
1725 image_list = self.glance.images.list()
1726 filtered_list = []
1727
1728 for image in image_list:
1729 try:
1730 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1731 continue
1732
1733 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1734 continue
1735
1736 if (
1737 filter_dict.get("checksum")
1738 and image["checksum"] != filter_dict["checksum"]
1739 ):
1740 continue
1741
1742 filtered_list.append(image.copy())
1743 except gl1Exceptions.HTTPNotFound:
1744 pass
1745
1746 return filtered_list
1747 except (
1748 ksExceptions.ClientException,
1749 nvExceptions.ClientException,
1750 gl1Exceptions.CommunicationError,
1751 ConnectionError,
1752 ) as e:
1753 self._format_exception(e)
1754
1755 def __wait_for_vm(self, vm_id, status):
1756 """wait until vm is in the desired status and return True.
1757 If the VM gets in ERROR status, return false.
1758 If the timeout is reached generate an exception"""
1759 elapsed_time = 0
1760 while elapsed_time < server_timeout:
1761 vm_status = self.nova.servers.get(vm_id).status
1762
1763 if vm_status == status:
1764 return True
1765
1766 if vm_status == "ERROR":
1767 return False
1768
1769 time.sleep(5)
1770 elapsed_time += 5
1771
1772 # if we exceeded the timeout rollback
1773 if elapsed_time >= server_timeout:
1774 raise vimconn.VimConnException(
1775 "Timeout waiting for instance " + vm_id + " to get " + status,
1776 http_code=vimconn.HTTP_Request_Timeout,
1777 )
1778
1779 def _get_openstack_availablity_zones(self):
1780 """
1781 Get from openstack availability zones available
1782 :return:
1783 """
1784 try:
1785 openstack_availability_zone = self.nova.availability_zones.list()
1786 openstack_availability_zone = [
1787 str(zone.zoneName)
1788 for zone in openstack_availability_zone
1789 if zone.zoneName != "internal"
1790 ]
1791
1792 return openstack_availability_zone
1793 except Exception:
1794 return None
1795
1796 def _set_availablity_zones(self):
1797 """
1798 Set vim availablity zone
1799 :return:
1800 """
1801 if "availability_zone" in self.config:
1802 vim_availability_zones = self.config.get("availability_zone")
1803
1804 if isinstance(vim_availability_zones, str):
1805 self.availability_zone = [vim_availability_zones]
1806 elif isinstance(vim_availability_zones, list):
1807 self.availability_zone = vim_availability_zones
1808 else:
1809 self.availability_zone = self._get_openstack_availablity_zones()
1810
1811 def _get_vm_availability_zone(
1812 self, availability_zone_index, availability_zone_list
1813 ):
1814 """
1815 Return thge availability zone to be used by the created VM.
1816 :return: The VIM availability zone to be used or None
1817 """
1818 if availability_zone_index is None:
1819 if not self.config.get("availability_zone"):
1820 return None
1821 elif isinstance(self.config.get("availability_zone"), str):
1822 return self.config["availability_zone"]
1823 else:
1824 # TODO consider using a different parameter at config for default AV and AV list match
1825 return self.config["availability_zone"][0]
1826
1827 vim_availability_zones = self.availability_zone
1828 # check if VIM offer enough availability zones describe in the VNFD
1829 if vim_availability_zones and len(availability_zone_list) <= len(
1830 vim_availability_zones
1831 ):
1832 # check if all the names of NFV AV match VIM AV names
1833 match_by_index = False
1834 for av in availability_zone_list:
1835 if av not in vim_availability_zones:
1836 match_by_index = True
1837 break
1838
1839 if match_by_index:
1840 return vim_availability_zones[availability_zone_index]
1841 else:
1842 return availability_zone_list[availability_zone_index]
1843 else:
1844 raise vimconn.VimConnConflictException(
1845 "No enough availability zones at VIM for this deployment"
1846 )
1847
1848 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1849 """Fill up the security_groups in the port_dict.
1850
1851 Args:
1852 net (dict): Network details
1853 port_dict (dict): Port details
1854
1855 """
1856 if (
1857 self.config.get("security_groups")
1858 and net.get("port_security") is not False
1859 and not self.config.get("no_port_security_extension")
1860 ):
1861 if not self.security_groups_id:
1862 self._get_ids_from_name()
1863
1864 port_dict["security_groups"] = self.security_groups_id
1865
1866 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1867 """Fill up the network binding depending on network type in the port_dict.
1868
1869 Args:
1870 net (dict): Network details
1871 port_dict (dict): Port details
1872
1873 """
1874 if not net.get("type"):
1875 raise vimconn.VimConnException("Type is missing in the network details.")
1876
1877 if net["type"] == "virtual":
1878 pass
1879
1880 # For VF
1881 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1882 port_dict["binding:vnic_type"] = "direct"
1883
1884 # VIO specific Changes
1885 if self.vim_type == "VIO":
1886 # Need to create port with port_security_enabled = False and no-security-groups
1887 port_dict["port_security_enabled"] = False
1888 port_dict["provider_security_groups"] = []
1889 port_dict["security_groups"] = []
1890
1891 else:
1892 # For PT PCI-PASSTHROUGH
1893 port_dict["binding:vnic_type"] = "direct-physical"
1894
1895 @staticmethod
1896 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1897 """Set the "ip" parameter in net dictionary.
1898
1899 Args:
1900 new_port (dict): New created port
1901 net (dict): Network details
1902
1903 """
1904 fixed_ips = new_port["port"].get("fixed_ips")
1905
1906 if fixed_ips:
1907 net["ip"] = fixed_ips[0].get("ip_address")
1908 else:
1909 net["ip"] = None
1910
1911 @staticmethod
1912 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1913 """Fill up the mac_address and fixed_ips in port_dict.
1914
1915 Args:
1916 net (dict): Network details
1917 port_dict (dict): Port details
1918
1919 """
1920 if net.get("mac_address"):
1921 port_dict["mac_address"] = net["mac_address"]
1922
1923 if net.get("ip_address"):
1924 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1925 # TODO add "subnet_id": <subnet_id>
1926
1927 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1928 """Create new port using neutron.
1929
1930 Args:
1931 port_dict (dict): Port details
1932 created_items (dict): All created items
1933 net (dict): Network details
1934
1935 Returns:
1936 new_port (dict): New created port
1937
1938 """
1939 new_port = self.neutron.create_port({"port": port_dict})
1940 created_items["port:" + str(new_port["port"]["id"])] = True
1941 net["mac_adress"] = new_port["port"]["mac_address"]
1942 net["vim_id"] = new_port["port"]["id"]
1943
1944 return new_port
1945
1946 def _create_port(
1947 self, net: dict, name: str, created_items: dict
1948 ) -> Tuple[dict, dict]:
1949 """Create port using net details.
1950
1951 Args:
1952 net (dict): Network details
1953 name (str): Name to be used as network name if net dict does not include name
1954 created_items (dict): All created items
1955
1956 Returns:
1957 new_port, port New created port, port dictionary
1958
1959 """
1960
1961 port_dict = {
1962 "network_id": net["net_id"],
1963 "name": net.get("name"),
1964 "admin_state_up": True,
1965 }
1966
1967 if not port_dict["name"]:
1968 port_dict["name"] = name
1969
1970 self._prepare_port_dict_security_groups(net, port_dict)
1971
1972 self._prepare_port_dict_binding(net, port_dict)
1973
1974 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1975
1976 new_port = self._create_new_port(port_dict, created_items, net)
1977
1978 vimconnector._set_fixed_ip(new_port, net)
1979
1980 port = {"port-id": new_port["port"]["id"]}
1981
1982 if float(self.nova.api_version.get_string()) >= 2.32:
1983 port["tag"] = new_port["port"]["name"]
1984
1985 return new_port, port
1986
1987 def _prepare_network_for_vminstance(
1988 self,
1989 name: str,
1990 net_list: list,
1991 created_items: dict,
1992 net_list_vim: list,
1993 external_network: list,
1994 no_secured_ports: list,
1995 ) -> None:
1996 """Create port and fill up net dictionary for new VM instance creation.
1997
1998 Args:
1999 name (str): Name of network
2000 net_list (list): List of networks
2001 created_items (dict): All created items belongs to a VM
2002 net_list_vim (list): List of ports
2003 external_network (list): List of external-networks
2004 no_secured_ports (list): Port security disabled ports
2005 """
2006
2007 self._reload_connection()
2008
2009 for net in net_list:
2010 # Skip non-connected iface
2011 if not net.get("net_id"):
2012 continue
2013
2014 new_port, port = self._create_port(net, name, created_items)
2015
2016 net_list_vim.append(port)
2017
2018 if net.get("floating_ip", False):
2019 net["exit_on_floating_ip_error"] = True
2020 external_network.append(net)
2021
2022 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2023 net["exit_on_floating_ip_error"] = False
2024 external_network.append(net)
2025 net["floating_ip"] = self.config.get("use_floating_ip")
2026
2027 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2028 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2029 if net.get("port_security") is False and not self.config.get(
2030 "no_port_security_extension"
2031 ):
2032 no_secured_ports.append(
2033 (
2034 new_port["port"]["id"],
2035 net.get("port_security_disable_strategy"),
2036 )
2037 )
2038
2039 def _prepare_persistent_root_volumes(
2040 self,
2041 name: str,
2042 vm_av_zone: list,
2043 disk: dict,
2044 base_disk_index: int,
2045 block_device_mapping: dict,
2046 existing_vim_volumes: list,
2047 created_items: dict,
2048 ) -> Optional[str]:
2049 """Prepare persistent root volumes for new VM instance.
2050
2051 Args:
2052 name (str): Name of VM instance
2053 vm_av_zone (list): List of availability zones
2054 disk (dict): Disk details
2055 base_disk_index (int): Disk index
2056 block_device_mapping (dict): Block device details
2057 existing_vim_volumes (list): Existing disk details
2058 created_items (dict): All created items belongs to VM
2059
2060 Returns:
2061 boot_volume_id (str): ID of boot volume
2062
2063 """
2064 # Disk may include only vim_volume_id or only vim_id."
2065 # Use existing persistent root volume finding with volume_id or vim_id
2066 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2067
2068 if disk.get(key_id):
2069 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2070 existing_vim_volumes.append({"id": disk[key_id]})
2071
2072 else:
2073 # Create persistent root volume
2074 volume = self.cinder.volumes.create(
2075 size=disk["size"],
2076 name=name + "vd" + chr(base_disk_index),
2077 imageRef=disk["image_id"],
2078 # Make sure volume is in the same AZ as the VM to be attached to
2079 availability_zone=vm_av_zone,
2080 )
2081 boot_volume_id = volume.id
2082 self.update_block_device_mapping(
2083 volume=volume,
2084 block_device_mapping=block_device_mapping,
2085 base_disk_index=base_disk_index,
2086 disk=disk,
2087 created_items=created_items,
2088 )
2089
2090 return boot_volume_id
2091
2092 @staticmethod
2093 def update_block_device_mapping(
2094 volume: object,
2095 block_device_mapping: dict,
2096 base_disk_index: int,
2097 disk: dict,
2098 created_items: dict,
2099 ) -> None:
2100 """Add volume information to block device mapping dict.
2101 Args:
2102 volume (object): Created volume object
2103 block_device_mapping (dict): Block device details
2104 base_disk_index (int): Disk index
2105 disk (dict): Disk details
2106 created_items (dict): All created items belongs to VM
2107 """
2108 if not volume:
2109 raise vimconn.VimConnException("Volume is empty.")
2110
2111 if not hasattr(volume, "id"):
2112 raise vimconn.VimConnException(
2113 "Created volume is not valid, does not have id attribute."
2114 )
2115
2116 volume_txt = "volume:" + str(volume.id)
2117 if disk.get("keep"):
2118 volume_txt += ":keep"
2119 created_items[volume_txt] = True
2120 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2121
2122 def _prepare_non_root_persistent_volumes(
2123 self,
2124 name: str,
2125 disk: dict,
2126 vm_av_zone: list,
2127 block_device_mapping: dict,
2128 base_disk_index: int,
2129 existing_vim_volumes: list,
2130 created_items: dict,
2131 ) -> None:
2132 """Prepare persistent volumes for new VM instance.
2133
2134 Args:
2135 name (str): Name of VM instance
2136 disk (dict): Disk details
2137 vm_av_zone (list): List of availability zones
2138 block_device_mapping (dict): Block device details
2139 base_disk_index (int): Disk index
2140 existing_vim_volumes (list): Existing disk details
2141 created_items (dict): All created items belongs to VM
2142 """
2143 # Non-root persistent volumes
2144 # Disk may include only vim_volume_id or only vim_id."
2145 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2146
2147 if disk.get(key_id):
2148 # Use existing persistent volume
2149 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2150 existing_vim_volumes.append({"id": disk[key_id]})
2151
2152 else:
2153 # Create persistent volume
2154 volume = self.cinder.volumes.create(
2155 size=disk["size"],
2156 name=name + "vd" + chr(base_disk_index),
2157 # Make sure volume is in the same AZ as the VM to be attached to
2158 availability_zone=vm_av_zone,
2159 )
2160 self.update_block_device_mapping(
2161 volume=volume,
2162 block_device_mapping=block_device_mapping,
2163 base_disk_index=base_disk_index,
2164 disk=disk,
2165 created_items=created_items,
2166 )
2167
2168 def _wait_for_created_volumes_availability(
2169 self, elapsed_time: int, created_items: dict
2170 ) -> Optional[int]:
2171 """Wait till created volumes become available.
2172
2173 Args:
2174 elapsed_time (int): Passed time while waiting
2175 created_items (dict): All created items belongs to VM
2176
2177 Returns:
2178 elapsed_time (int): Time spent while waiting
2179
2180 """
2181
2182 while elapsed_time < volume_timeout:
2183 for created_item in created_items:
2184 v, volume_id = (
2185 created_item.split(":")[0],
2186 created_item.split(":")[1],
2187 )
2188 if v == "volume":
2189 if self.cinder.volumes.get(volume_id).status != "available":
2190 break
2191 else:
2192 # All ready: break from while
2193 break
2194
2195 time.sleep(5)
2196 elapsed_time += 5
2197
2198 return elapsed_time
2199
2200 def _wait_for_existing_volumes_availability(
2201 self, elapsed_time: int, existing_vim_volumes: list
2202 ) -> Optional[int]:
2203 """Wait till existing volumes become available.
2204
2205 Args:
2206 elapsed_time (int): Passed time while waiting
2207 existing_vim_volumes (list): Existing volume details
2208
2209 Returns:
2210 elapsed_time (int): Time spent while waiting
2211
2212 """
2213
2214 while elapsed_time < volume_timeout:
2215 for volume in existing_vim_volumes:
2216 if self.cinder.volumes.get(volume["id"]).status != "available":
2217 break
2218 else: # all ready: break from while
2219 break
2220
2221 time.sleep(5)
2222 elapsed_time += 5
2223
2224 return elapsed_time
2225
2226 def _prepare_disk_for_vminstance(
2227 self,
2228 name: str,
2229 existing_vim_volumes: list,
2230 created_items: dict,
2231 vm_av_zone: list,
2232 block_device_mapping: dict,
2233 disk_list: list = None,
2234 ) -> None:
2235 """Prepare all volumes for new VM instance.
2236
2237 Args:
2238 name (str): Name of Instance
2239 existing_vim_volumes (list): List of existing volumes
2240 created_items (dict): All created items belongs to VM
2241 vm_av_zone (list): VM availability zone
2242 block_device_mapping (dict): Block devices to be attached to VM
2243 disk_list (list): List of disks
2244
2245 """
2246 # Create additional volumes in case these are present in disk_list
2247 base_disk_index = ord("b")
2248 boot_volume_id = None
2249 elapsed_time = 0
2250
2251 for disk in disk_list:
2252 if "image_id" in disk:
2253 # Root persistent volume
2254 base_disk_index = ord("a")
2255 boot_volume_id = self._prepare_persistent_root_volumes(
2256 name=name,
2257 vm_av_zone=vm_av_zone,
2258 disk=disk,
2259 base_disk_index=base_disk_index,
2260 block_device_mapping=block_device_mapping,
2261 existing_vim_volumes=existing_vim_volumes,
2262 created_items=created_items,
2263 )
2264 else:
2265 # Non-root persistent volume
2266 self._prepare_non_root_persistent_volumes(
2267 name=name,
2268 disk=disk,
2269 vm_av_zone=vm_av_zone,
2270 block_device_mapping=block_device_mapping,
2271 base_disk_index=base_disk_index,
2272 existing_vim_volumes=existing_vim_volumes,
2273 created_items=created_items,
2274 )
2275 base_disk_index += 1
2276
2277 # Wait until created volumes are with status available
2278 elapsed_time = self._wait_for_created_volumes_availability(
2279 elapsed_time, created_items
2280 )
2281 # Wait until existing volumes in vim are with status available
2282 elapsed_time = self._wait_for_existing_volumes_availability(
2283 elapsed_time, existing_vim_volumes
2284 )
2285 # If we exceeded the timeout rollback
2286 if elapsed_time >= volume_timeout:
2287 raise vimconn.VimConnException(
2288 "Timeout creating volumes for instance " + name,
2289 http_code=vimconn.HTTP_Request_Timeout,
2290 )
2291 if boot_volume_id:
2292 self.cinder.volumes.set_bootable(boot_volume_id, True)
2293
2294 def _find_the_external_network_for_floating_ip(self):
2295 """Get the external network ip in order to create floating IP.
2296
2297 Returns:
2298 pool_id (str): External network pool ID
2299
2300 """
2301
2302 # Find the external network
2303 external_nets = list()
2304
2305 for net in self.neutron.list_networks()["networks"]:
2306 if net["router:external"]:
2307 external_nets.append(net)
2308
2309 if len(external_nets) == 0:
2310 raise vimconn.VimConnException(
2311 "Cannot create floating_ip automatically since "
2312 "no external network is present",
2313 http_code=vimconn.HTTP_Conflict,
2314 )
2315
2316 if len(external_nets) > 1:
2317 raise vimconn.VimConnException(
2318 "Cannot create floating_ip automatically since "
2319 "multiple external networks are present",
2320 http_code=vimconn.HTTP_Conflict,
2321 )
2322
2323 # Pool ID
2324 return external_nets[0].get("id")
2325
2326 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2327 """Trigger neutron to create a new floating IP using external network ID.
2328
2329 Args:
2330 param (dict): Input parameters to create a floating IP
2331 created_items (dict): All created items belongs to new VM instance
2332
2333 Raises:
2334
2335 VimConnException
2336 """
2337 try:
2338 self.logger.debug("Creating floating IP")
2339 new_floating_ip = self.neutron.create_floatingip(param)
2340 free_floating_ip = new_floating_ip["floatingip"]["id"]
2341 created_items["floating_ip:" + str(free_floating_ip)] = True
2342
2343 except Exception as e:
2344 raise vimconn.VimConnException(
2345 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2346 http_code=vimconn.HTTP_Conflict,
2347 )
2348
2349 def _create_floating_ip(
2350 self, floating_network: dict, server: object, created_items: dict
2351 ) -> None:
2352 """Get the available Pool ID and create a new floating IP.
2353
2354 Args:
2355 floating_network (dict): Dict including external network ID
2356 server (object): Server object
2357 created_items (dict): All created items belongs to new VM instance
2358
2359 """
2360
2361 # Pool_id is available
2362 if (
2363 isinstance(floating_network["floating_ip"], str)
2364 and floating_network["floating_ip"].lower() != "true"
2365 ):
2366 pool_id = floating_network["floating_ip"]
2367
2368 # Find the Pool_id
2369 else:
2370 pool_id = self._find_the_external_network_for_floating_ip()
2371
2372 param = {
2373 "floatingip": {
2374 "floating_network_id": pool_id,
2375 "tenant_id": server.tenant_id,
2376 }
2377 }
2378
2379 self._neutron_create_float_ip(param, created_items)
2380
2381 def _find_floating_ip(
2382 self,
2383 server: object,
2384 floating_ips: list,
2385 floating_network: dict,
2386 ) -> Optional[str]:
2387 """Find the available free floating IPs if there are.
2388
2389 Args:
2390 server (object): Server object
2391 floating_ips (list): List of floating IPs
2392 floating_network (dict): Details of floating network such as ID
2393
2394 Returns:
2395 free_floating_ip (str): Free floating ip address
2396
2397 """
2398 for fip in floating_ips:
2399 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2400 continue
2401
2402 if isinstance(floating_network["floating_ip"], str):
2403 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2404 continue
2405
2406 return fip["id"]
2407
2408 def _assign_floating_ip(
2409 self, free_floating_ip: str, floating_network: dict
2410 ) -> Dict:
2411 """Assign the free floating ip address to port.
2412
2413 Args:
2414 free_floating_ip (str): Floating IP to be assigned
2415 floating_network (dict): ID of floating network
2416
2417 Returns:
2418 fip (dict) (dict): Floating ip details
2419
2420 """
2421 # The vim_id key contains the neutron.port_id
2422 self.neutron.update_floatingip(
2423 free_floating_ip,
2424 {"floatingip": {"port_id": floating_network["vim_id"]}},
2425 )
2426 # For race condition ensure not re-assigned to other VM after 5 seconds
2427 time.sleep(5)
2428
2429 return self.neutron.show_floatingip(free_floating_ip)
2430
2431 def _get_free_floating_ip(
2432 self, server: object, floating_network: dict
2433 ) -> Optional[str]:
2434 """Get the free floating IP address.
2435
2436 Args:
2437 server (object): Server Object
2438 floating_network (dict): Floating network details
2439
2440 Returns:
2441 free_floating_ip (str): Free floating ip addr
2442
2443 """
2444
2445 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2446
2447 # Randomize
2448 random.shuffle(floating_ips)
2449
2450 return self._find_floating_ip(server, floating_ips, floating_network)
2451
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        Args:
            external_network (list): ID of External network
            server (object): Server Object
            created_items (dict): All created items belongs to new VM instance
            vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException

        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network
                    )

                    if not free_floating_ip:
                        # No reusable IP found: create one. free_floating_ip stays
                        # None here, so show_floatingip below raises and the inner
                        # except drives a retry; the next while pass picks the
                        # newly created IP up.
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        # Re-read after assignment: another RO instance may have
                        # grabbed the same IP inside the race window.
                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # Best-effort networks (exit_on_floating_ip_error False): log and
                # keep going with the next network instead of failing the VM.
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2540
2541 def _update_port_security_for_vminstance(
2542 self,
2543 no_secured_ports: list,
2544 server: object,
2545 ) -> None:
2546 """Updates the port security according to no_secured_ports list.
2547
2548 Args:
2549 no_secured_ports (list): List of ports that security will be disabled
2550 server (object): Server Object
2551
2552 Raises:
2553 VimConnException
2554
2555 """
2556 # Wait until the VM is active and then disable the port-security
2557 if no_secured_ports:
2558 self.__wait_for_vm(server.id, "ACTIVE")
2559
2560 for port in no_secured_ports:
2561 port_update = {
2562 "port": {"port_security_enabled": False, "security_groups": None}
2563 }
2564
2565 if port[1] == "allow-address-pairs":
2566 port_update = {
2567 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2568 }
2569
2570 try:
2571 self.neutron.update_port(port[0], port_update)
2572
2573 except Exception:
2574 raise vimconn.VimConnException(
2575 "It was not possible to disable port security for port {}".format(
2576 port[0]
2577 )
2578 )
2579
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name (str): name of VM
            description (str): description
            start (bool): indicates if VM must start or boot in pause mode. Ignored
            image_id (str) image uuid
            flavor_id (str) flavor uuid
            affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
            net_list (list): list of interfaces, each one is a dictionary with:
                name: name of network
                net_id: network uuid to connect
                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
                model: interface model, ignored #TODO
                mac_address: used for SR-IOV ifaces #TODO for other types
                use: 'data', 'bridge', 'mgmt'
                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id: filled/added by this function
                floating_ip: True/False (or it can be None)
                port_security: True/False
            cloud_config (dict): (optional) dictionary with:
                key-pairs: (optional) list of strings with the public key to be inserted to the default user
                users: (optional) list of users to be inserted, each item is a dict with:
                    name: (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data: (optional) string is a text script to be passed directly to cloud-init
                config-files: (optional). List of files to be transferred. Each item is a dict with:
                    dest: (mandatory) string with the destination absolute path
                    encoding: (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content : (mandatory) string with the content of the file
                    permissions: (optional) string with file permissions, typically octal notation '0644'
                    owner: (optional) file owner, string with the format 'owner:group'
                boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
                image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size: (mandatory) string with the size of the disk in GB
                vim_id: (optional) should use this existing volume id
            availability_zone_index: Index of availability_zone_list to use for this VM. None if not AV required
            availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
                availability_zone_index is None
                #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.

        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )

        try:
            server = None
            created_items = {}
            net_list_vim = []
            # list of external networks to be connected to instance, later on used to create floating_ip
            external_network = []
            # List of ports with port-security disabled
            no_secured_ports = []
            block_device_mapping = {}
            existing_vim_volumes = []
            server_group_id = None
            scheduller_hints = {}

            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list: creates the neutron ports and fills
            # net_list_vim / external_network / no_secured_ports in place
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            if disk_list:
                # Prepare disks (root and non-root persistent volumes)
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    vm_av_zone=vm_av_zone,
                    block_device_mapping=block_device_mapping,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )

            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            vm_start_time = time.time()

            # Disable port security on the requested ports once the VM is active
            self._update_port_security_for_vminstance(no_secured_ports, server)

            # Assign floating IPs to the collected external networks
            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items

        except Exception as e:
            # Rollback: delete the half-created VM and everything recorded in
            # created_items (including ":keep"-tagged volumes, since the VM
            # never came up), then re-raise as a vimconn exception.
            server_id = None
            if server:
                server_id = server.id

            try:
                created_items = self.remove_keep_tag_from_persistent_volumes(
                    created_items
                )

                self.delete_vminstance(server_id, created_items)

            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
2765
2766 @staticmethod
2767 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2768 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2769
2770 Args:
2771 created_items (dict): All created items belongs to VM
2772
2773 Returns:
2774 updated_created_items (dict): Dict which does not include keep flag for volumes.
2775
2776 """
2777 return {
2778 key.replace(":keep", ""): value for (key, value) in created_items.items()
2779 }
2780
2781 def get_vminstance(self, vm_id):
2782 """Returns the VM instance information from VIM"""
2783 # self.logger.debug("Getting VM from VIM")
2784 try:
2785 self._reload_connection()
2786 server = self.nova.servers.find(id=vm_id)
2787 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2788
2789 return server.to_dict()
2790 except (
2791 ksExceptions.ClientException,
2792 nvExceptions.ClientException,
2793 nvExceptions.NotFound,
2794 ConnectionError,
2795 ) as e:
2796 self._format_exception(e)
2797
2798 def get_vminstance_console(self, vm_id, console_type="vnc"):
2799 """
2800 Get a console for the virtual machine
2801 Params:
2802 vm_id: uuid of the VM
2803 console_type, can be:
2804 "novnc" (by default), "xvpvnc" for VNC types,
2805 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2806 Returns dict with the console parameters:
2807 protocol: ssh, ftp, http, https, ...
2808 server: usually ip address
2809 port: the http, ssh, ... port
2810 suffix: extra text, e.g. the http path and query string
2811 """
2812 self.logger.debug("Getting VM CONSOLE from VIM")
2813
2814 try:
2815 self._reload_connection()
2816 server = self.nova.servers.find(id=vm_id)
2817
2818 if console_type is None or console_type == "novnc":
2819 console_dict = server.get_vnc_console("novnc")
2820 elif console_type == "xvpvnc":
2821 console_dict = server.get_vnc_console(console_type)
2822 elif console_type == "rdp-html5":
2823 console_dict = server.get_rdp_console(console_type)
2824 elif console_type == "spice-html5":
2825 console_dict = server.get_spice_console(console_type)
2826 else:
2827 raise vimconn.VimConnException(
2828 "console type '{}' not allowed".format(console_type),
2829 http_code=vimconn.HTTP_Bad_Request,
2830 )
2831
2832 console_dict1 = console_dict.get("console")
2833
2834 if console_dict1:
2835 console_url = console_dict1.get("url")
2836
2837 if console_url:
2838 # parse console_url
2839 protocol_index = console_url.find("//")
2840 suffix_index = (
2841 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2842 )
2843 port_index = (
2844 console_url[protocol_index + 2 : suffix_index].find(":")
2845 + protocol_index
2846 + 2
2847 )
2848
2849 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2850 return (
2851 -vimconn.HTTP_Internal_Server_Error,
2852 "Unexpected response from VIM",
2853 )
2854
2855 console_dict = {
2856 "protocol": console_url[0:protocol_index],
2857 "server": console_url[protocol_index + 2 : port_index],
2858 "port": console_url[port_index:suffix_index],
2859 "suffix": console_url[suffix_index + 1 :],
2860 }
2861 protocol_index += 2
2862
2863 return console_dict
2864 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2865 except (
2866 nvExceptions.NotFound,
2867 ksExceptions.ClientException,
2868 nvExceptions.ClientException,
2869 nvExceptions.BadRequest,
2870 ConnectionError,
2871 ) as e:
2872 self._format_exception(e)
2873
2874 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2875 """Neutron delete ports by id.
2876 Args:
2877 k_id (str): Port id in the VIM
2878 """
2879 try:
2880 port_dict = self.neutron.list_ports()
2881 existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
2882
2883 if k_id in existing_ports:
2884 self.neutron.delete_port(k_id)
2885
2886 except Exception as e:
2887 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2888
2889 def _delete_volumes_by_id_wth_cinder(
2890 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2891 ) -> bool:
2892 """Cinder delete volume by id.
2893 Args:
2894 k (str): Full item name in created_items
2895 k_id (str): ID of floating ip in VIM
2896 volumes_to_hold (list): Volumes not to delete
2897 created_items (dict): All created items belongs to VM
2898 """
2899 try:
2900 if k_id in volumes_to_hold:
2901 return
2902
2903 if self.cinder.volumes.get(k_id).status != "available":
2904 return True
2905
2906 else:
2907 self.cinder.volumes.delete(k_id)
2908 created_items[k] = None
2909
2910 except Exception as e:
2911 self.logger.error(
2912 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2913 )
2914
2915 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2916 """Neutron delete floating ip by id.
2917 Args:
2918 k (str): Full item name in created_items
2919 k_id (str): ID of floating ip in VIM
2920 created_items (dict): All created items belongs to VM
2921 """
2922 try:
2923 self.neutron.delete_floatingip(k_id)
2924 created_items[k] = None
2925
2926 except Exception as e:
2927 self.logger.error(
2928 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2929 )
2930
2931 @staticmethod
2932 def _get_item_name_id(k: str) -> Tuple[str, str]:
2933 k_item, _, k_id = k.partition(":")
2934 return k_item, k_id
2935
2936 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2937 """Delete VM ports attached to the networks before deleting virtual machine.
2938 Args:
2939 created_items (dict): All created items belongs to VM
2940 """
2941
2942 for k, v in created_items.items():
2943 if not v: # skip already deleted
2944 continue
2945
2946 try:
2947 k_item, k_id = self._get_item_name_id(k)
2948 if k_item == "port":
2949 self._delete_ports_by_id_wth_neutron(k_id)
2950
2951 except Exception as e:
2952 self.logger.error(
2953 "Error deleting port: {}: {}".format(type(e).__name__, e)
2954 )
2955
2956 def _delete_created_items(
2957 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2958 ) -> bool:
2959 """Delete Volumes and floating ip if they exist in created_items."""
2960 for k, v in created_items.items():
2961 if not v: # skip already deleted
2962 continue
2963
2964 try:
2965 k_item, k_id = self._get_item_name_id(k)
2966
2967 if k_item == "volume":
2968 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2969 k, k_id, volumes_to_hold, created_items
2970 )
2971
2972 if unavailable_vol:
2973 keep_waiting = True
2974
2975 elif k_item == "floating_ip":
2976 self._delete_floating_ip_by_id(k, k_id, created_items)
2977
2978 except Exception as e:
2979 self.logger.error("Error deleting {}: {}".format(k, e))
2980
2981 return keep_waiting
2982
2983 @staticmethod
2984 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
2985 """Remove the volumes which has key flag from created_items
2986
2987 Args:
2988 created_items (dict): All created items belongs to VM
2989
2990 Returns:
2991 created_items (dict): Persistent volumes eliminated created_items
2992 """
2993 return {
2994 key: value
2995 for (key, value) in created_items.items()
2996 if len(key.split(":")) == 2
2997 }
2998
    def delete_vminstance(
        self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
    ) -> None:
        """Removes a VM instance from VIM and all the items created with it.

        Deletion order matters: ports first, then the server, then a retry
        loop over volumes/floating IPs, because volumes must reach an
        available state after detach before Cinder accepts the delete.

        Args:
            vm_id (str): Identifier of VM instance
            created_items (dict): All created items belongs to VM
            volumes_to_hold (list): Volumes_to_hold (never deleted here)

        Raises:
            Reraised via self._format_exception on VIM/connection errors.
        """
        if created_items is None:
            created_items = {}
        if volumes_to_hold is None:
            volumes_to_hold = []

        try:
            # Keep-flagged entries ("<type>:<id>:keep") are persistent: remove
            # them from the working copy so they survive the VM deletion
            created_items = self._extract_items_wth_keep_flag_from_created_items(
                created_items
            )

            self._reload_connection()

            # Delete VM ports attached to the networks before the virtual machine
            if created_items:
                self._delete_vm_ports_attached_to_network(created_items)

            if vm_id:
                self.nova.servers.delete(vm_id)

            # Although having detached, volumes should have in active status before deleting.
            # We ensure in this loop
            keep_waiting = True
            elapsed_time = 0

            # volume_timeout is a module-level constant (seconds) defined
            # elsewhere in this file
            while keep_waiting and elapsed_time < volume_timeout:
                keep_waiting = False

                # Delete volumes and floating IP.
                keep_waiting = self._delete_created_items(
                    created_items, volumes_to_hold, keep_waiting
                )

                # Some volume was not yet deletable; poll again in 1 second
                if keep_waiting:
                    time.sleep(1)
                    elapsed_time += 1

        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
3051
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id: #VIM id of this Virtual Machine
                status: #Mandatory. Text with one of:
                    # DELETED (not found at vim)
                    # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                    # OTHER (Vim reported other status not understood)
                    # ERROR (VIM indicates an ERROR status)
                    # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                    # CREATING (on building process), ERROR
                    # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                    #
                error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
                   mac_address: #Text format XX:XX:XX:XX:XX:XX
                   vim_net_id: #network id where this interface is connected
                   vim_interface_id: #interface/port VIM id
                   ip_address: #null, or text with IPv4, IPv6 address
                   compute_node: #identification of compute node where PF,VF interface is allocated
                   pci: #PCI address of the NIC that hosts the PF,VF
                   vlan: #physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                # Map the OpenStack status to the MANO vocabulary; anything
                # unknown is reported as OTHER with the raw status in error_msg
                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # user_data may contain secrets (cloud-init) — strip it before
                # serializing into vim_info
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                # the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            # floating IPs are optional; best-effort lookup
                            pass

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    # Interface retrieval failure must not hide the VM status
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
3194
3195 def action_vminstance(self, vm_id, action_dict, created_items={}):
3196 """Send and action over a VM instance from VIM
3197 Returns None or the console dict if the action was successfully sent to the VIM
3198 """
3199 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3200
3201 try:
3202 self._reload_connection()
3203 server = self.nova.servers.find(id=vm_id)
3204
3205 if "start" in action_dict:
3206 if action_dict["start"] == "rebuild":
3207 server.rebuild()
3208 else:
3209 if server.status == "PAUSED":
3210 server.unpause()
3211 elif server.status == "SUSPENDED":
3212 server.resume()
3213 elif server.status == "SHUTOFF":
3214 server.start()
3215 else:
3216 self.logger.debug(
3217 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3218 )
3219 raise vimconn.VimConnException(
3220 "Cannot 'start' instance while it is in active state",
3221 http_code=vimconn.HTTP_Bad_Request,
3222 )
3223
3224 elif "pause" in action_dict:
3225 server.pause()
3226 elif "resume" in action_dict:
3227 server.resume()
3228 elif "shutoff" in action_dict or "shutdown" in action_dict:
3229 self.logger.debug("server status %s", server.status)
3230 if server.status == "ACTIVE":
3231 server.stop()
3232 else:
3233 self.logger.debug("ERROR: VM is not in Active state")
3234 raise vimconn.VimConnException(
3235 "VM is not in active state, stop operation is not allowed",
3236 http_code=vimconn.HTTP_Bad_Request,
3237 )
3238 elif "forceOff" in action_dict:
3239 server.stop() # TODO
3240 elif "terminate" in action_dict:
3241 server.delete()
3242 elif "createImage" in action_dict:
3243 server.create_image()
3244 # "path":path_schema,
3245 # "description":description_schema,
3246 # "name":name_schema,
3247 # "metadata":metadata_schema,
3248 # "imageRef": id_schema,
3249 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3250 elif "rebuild" in action_dict:
3251 server.rebuild(server.image["id"])
3252 elif "reboot" in action_dict:
3253 server.reboot() # reboot_type="SOFT"
3254 elif "console" in action_dict:
3255 console_type = action_dict["console"]
3256
3257 if console_type is None or console_type == "novnc":
3258 console_dict = server.get_vnc_console("novnc")
3259 elif console_type == "xvpvnc":
3260 console_dict = server.get_vnc_console(console_type)
3261 elif console_type == "rdp-html5":
3262 console_dict = server.get_rdp_console(console_type)
3263 elif console_type == "spice-html5":
3264 console_dict = server.get_spice_console(console_type)
3265 else:
3266 raise vimconn.VimConnException(
3267 "console type '{}' not allowed".format(console_type),
3268 http_code=vimconn.HTTP_Bad_Request,
3269 )
3270
3271 try:
3272 console_url = console_dict["console"]["url"]
3273 # parse console_url
3274 protocol_index = console_url.find("//")
3275 suffix_index = (
3276 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3277 )
3278 port_index = (
3279 console_url[protocol_index + 2 : suffix_index].find(":")
3280 + protocol_index
3281 + 2
3282 )
3283
3284 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3285 raise vimconn.VimConnException(
3286 "Unexpected response from VIM " + str(console_dict)
3287 )
3288
3289 console_dict2 = {
3290 "protocol": console_url[0:protocol_index],
3291 "server": console_url[protocol_index + 2 : port_index],
3292 "port": int(console_url[port_index + 1 : suffix_index]),
3293 "suffix": console_url[suffix_index + 1 :],
3294 }
3295
3296 return console_dict2
3297 except Exception:
3298 raise vimconn.VimConnException(
3299 "Unexpected response from VIM " + str(console_dict)
3300 )
3301
3302 return None
3303 except (
3304 ksExceptions.ClientException,
3305 nvExceptions.ClientException,
3306 nvExceptions.NotFound,
3307 ConnectionError,
3308 ) as e:
3309 self._format_exception(e)
3310 # TODO insert exception vimconn.HTTP_Unauthorized
3311
3312 # ###### VIO Specific Changes #########
3313 def _generate_vlanID(self):
3314 """
3315 Method to get unused vlanID
3316 Args:
3317 None
3318 Returns:
3319 vlanID
3320 """
3321 # Get used VLAN IDs
3322 usedVlanIDs = []
3323 networks = self.get_network_list()
3324
3325 for net in networks:
3326 if net.get("provider:segmentation_id"):
3327 usedVlanIDs.append(net.get("provider:segmentation_id"))
3328
3329 used_vlanIDs = set(usedVlanIDs)
3330
3331 # find unused VLAN ID
3332 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3333 try:
3334 start_vlanid, end_vlanid = map(
3335 int, vlanID_range.replace(" ", "").split("-")
3336 )
3337
3338 for vlanID in range(start_vlanid, end_vlanid + 1):
3339 if vlanID not in used_vlanIDs:
3340 return vlanID
3341 except Exception as exp:
3342 raise vimconn.VimConnException(
3343 "Exception {} occurred while generating VLAN ID.".format(exp)
3344 )
3345 else:
3346 raise vimconn.VimConnConflictException(
3347 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3348 self.config.get("dataplane_net_vlan_range")
3349 )
3350 )
3351
3352 def _generate_multisegment_vlanID(self):
3353 """
3354 Method to get unused vlanID
3355 Args:
3356 None
3357 Returns:
3358 vlanID
3359 """
3360 # Get used VLAN IDs
3361 usedVlanIDs = []
3362 networks = self.get_network_list()
3363 for net in networks:
3364 if net.get("provider:network_type") == "vlan" and net.get(
3365 "provider:segmentation_id"
3366 ):
3367 usedVlanIDs.append(net.get("provider:segmentation_id"))
3368 elif net.get("segments"):
3369 for segment in net.get("segments"):
3370 if segment.get("provider:network_type") == "vlan" and segment.get(
3371 "provider:segmentation_id"
3372 ):
3373 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3374
3375 used_vlanIDs = set(usedVlanIDs)
3376
3377 # find unused VLAN ID
3378 for vlanID_range in self.config.get("multisegment_vlan_range"):
3379 try:
3380 start_vlanid, end_vlanid = map(
3381 int, vlanID_range.replace(" ", "").split("-")
3382 )
3383
3384 for vlanID in range(start_vlanid, end_vlanid + 1):
3385 if vlanID not in used_vlanIDs:
3386 return vlanID
3387 except Exception as exp:
3388 raise vimconn.VimConnException(
3389 "Exception {} occurred while generating VLAN ID.".format(exp)
3390 )
3391 else:
3392 raise vimconn.VimConnConflictException(
3393 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3394 self.config.get("multisegment_vlan_range")
3395 )
3396 )
3397
3398 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3399 """
3400 Method to validate user given vlanID ranges
3401 Args: None
3402 Returns: None
3403 """
3404 for vlanID_range in input_vlan_range:
3405 vlan_range = vlanID_range.replace(" ", "")
3406 # validate format
3407 vlanID_pattern = r"(\d)*-(\d)*$"
3408 match_obj = re.match(vlanID_pattern, vlan_range)
3409 if not match_obj:
3410 raise vimconn.VimConnConflictException(
3411 "Invalid VLAN range for {}: {}.You must provide "
3412 "'{}' in format [start_ID - end_ID].".format(
3413 text_vlan_range, vlanID_range, text_vlan_range
3414 )
3415 )
3416
3417 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3418 if start_vlanid <= 0:
3419 raise vimconn.VimConnConflictException(
3420 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3421 "networks valid IDs are 1 to 4094 ".format(
3422 text_vlan_range, vlanID_range
3423 )
3424 )
3425
3426 if end_vlanid > 4094:
3427 raise vimconn.VimConnConflictException(
3428 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3429 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3430 text_vlan_range, vlanID_range
3431 )
3432 )
3433
3434 if start_vlanid > end_vlanid:
3435 raise vimconn.VimConnConflictException(
3436 "Invalid VLAN range for {}: {}. You must provide '{}'"
3437 " in format start_ID - end_ID and start_ID < end_ID ".format(
3438 text_vlan_range, vlanID_range, text_vlan_range
3439 )
3440 )
3441
3442 # NOT USED FUNCTIONS
3443
3444 def new_external_port(self, port_data):
3445 """Adds a external port to VIM
3446 Returns the port identifier"""
3447 # TODO openstack if needed
3448 return (
3449 -vimconn.HTTP_Internal_Server_Error,
3450 "osconnector.new_external_port() not implemented",
3451 )
3452
3453 def connect_port_network(self, port_id, network_id, admin=False):
3454 """Connects a external port to a network
3455 Returns status code of the VIM response"""
3456 # TODO openstack if needed
3457 return (
3458 -vimconn.HTTP_Internal_Server_Error,
3459 "osconnector.connect_port_network() not implemented",
3460 )
3461
3462 def new_user(self, user_name, user_passwd, tenant_id=None):
3463 """Adds a new user to openstack VIM
3464 Returns the user identifier"""
3465 self.logger.debug("osconnector: Adding a new user to VIM")
3466
3467 try:
3468 self._reload_connection()
3469 user = self.keystone.users.create(
3470 user_name, password=user_passwd, default_project=tenant_id
3471 )
3472 # self.keystone.tenants.add_user(self.k_creds["username"], #role)
3473
3474 return user.id
3475 except ksExceptions.ConnectionError as e:
3476 error_value = -vimconn.HTTP_Bad_Request
3477 error_text = (
3478 type(e).__name__
3479 + ": "
3480 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3481 )
3482 except ksExceptions.ClientException as e: # TODO remove
3483 error_value = -vimconn.HTTP_Bad_Request
3484 error_text = (
3485 type(e).__name__
3486 + ": "
3487 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3488 )
3489
3490 # TODO insert exception vimconn.HTTP_Unauthorized
3491 # if reaching here is because an exception
3492 self.logger.debug("new_user " + error_text)
3493
3494 return error_value, error_text
3495
3496 def delete_user(self, user_id):
3497 """Delete a user from openstack VIM
3498 Returns the user identifier"""
3499 if self.debug:
3500 print("osconnector: Deleting a user from VIM")
3501
3502 try:
3503 self._reload_connection()
3504 self.keystone.users.delete(user_id)
3505
3506 return 1, user_id
3507 except ksExceptions.ConnectionError as e:
3508 error_value = -vimconn.HTTP_Bad_Request
3509 error_text = (
3510 type(e).__name__
3511 + ": "
3512 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3513 )
3514 except ksExceptions.NotFound as e:
3515 error_value = -vimconn.HTTP_Not_Found
3516 error_text = (
3517 type(e).__name__
3518 + ": "
3519 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3520 )
3521 except ksExceptions.ClientException as e: # TODO remove
3522 error_value = -vimconn.HTTP_Bad_Request
3523 error_text = (
3524 type(e).__name__
3525 + ": "
3526 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3527 )
3528
3529 # TODO insert exception vimconn.HTTP_Unauthorized
3530 # if reaching here is because an exception
3531 self.logger.debug("delete_tenant " + error_text)
3532
3533 return error_value, error_text
3534
3535 def get_hosts_info(self):
3536 """Get the information of deployed hosts
3537 Returns the hosts content"""
3538 if self.debug:
3539 print("osconnector: Getting Host info from VIM")
3540
3541 try:
3542 h_list = []
3543 self._reload_connection()
3544 hypervisors = self.nova.hypervisors.list()
3545
3546 for hype in hypervisors:
3547 h_list.append(hype.to_dict())
3548
3549 return 1, {"hosts": h_list}
3550 except nvExceptions.NotFound as e:
3551 error_value = -vimconn.HTTP_Not_Found
3552 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3553 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3554 error_value = -vimconn.HTTP_Bad_Request
3555 error_text = (
3556 type(e).__name__
3557 + ": "
3558 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3559 )
3560
3561 # TODO insert exception vimconn.HTTP_Unauthorized
3562 # if reaching here is because an exception
3563 self.logger.debug("get_hosts_info " + error_text)
3564
3565 return error_value, error_text
3566
3567 def get_hosts(self, vim_tenant):
3568 """Get the hosts and deployed instances
3569 Returns the hosts content"""
3570 r, hype_dict = self.get_hosts_info()
3571
3572 if r < 0:
3573 return r, hype_dict
3574
3575 hypervisors = hype_dict["hosts"]
3576
3577 try:
3578 servers = self.nova.servers.list()
3579 for hype in hypervisors:
3580 for server in servers:
3581 if (
3582 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3583 == hype["hypervisor_hostname"]
3584 ):
3585 if "vm" in hype:
3586 hype["vm"].append(server.id)
3587 else:
3588 hype["vm"] = [server.id]
3589
3590 return 1, hype_dict
3591 except nvExceptions.NotFound as e:
3592 error_value = -vimconn.HTTP_Not_Found
3593 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3594 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3595 error_value = -vimconn.HTTP_Bad_Request
3596 error_text = (
3597 type(e).__name__
3598 + ": "
3599 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3600 )
3601
3602 # TODO insert exception vimconn.HTTP_Unauthorized
3603 # if reaching here is because an exception
3604 self.logger.debug("get_hosts " + error_text)
3605
3606 return error_value, error_text
3607
3608 def new_classification(self, name, ctype, definition):
3609 self.logger.debug(
3610 "Adding a new (Traffic) Classification to VIM, named %s", name
3611 )
3612
3613 try:
3614 new_class = None
3615 self._reload_connection()
3616
3617 if ctype not in supportedClassificationTypes:
3618 raise vimconn.VimConnNotSupportedException(
3619 "OpenStack VIM connector does not support provided "
3620 "Classification Type {}, supported ones are: {}".format(
3621 ctype, supportedClassificationTypes
3622 )
3623 )
3624
3625 if not self._validate_classification(ctype, definition):
3626 raise vimconn.VimConnException(
3627 "Incorrect Classification definition for the type specified."
3628 )
3629
3630 classification_dict = definition
3631 classification_dict["name"] = name
3632 new_class = self.neutron.create_sfc_flow_classifier(
3633 {"flow_classifier": classification_dict}
3634 )
3635
3636 return new_class["flow_classifier"]["id"]
3637 except (
3638 neExceptions.ConnectionFailed,
3639 ksExceptions.ClientException,
3640 neExceptions.NeutronException,
3641 ConnectionError,
3642 ) as e:
3643 self.logger.error("Creation of Classification failed.")
3644 self._format_exception(e)
3645
3646 def get_classification(self, class_id):
3647 self.logger.debug(" Getting Classification %s from VIM", class_id)
3648 filter_dict = {"id": class_id}
3649 class_list = self.get_classification_list(filter_dict)
3650
3651 if len(class_list) == 0:
3652 raise vimconn.VimConnNotFoundException(
3653 "Classification '{}' not found".format(class_id)
3654 )
3655 elif len(class_list) > 1:
3656 raise vimconn.VimConnConflictException(
3657 "Found more than one Classification with this criteria"
3658 )
3659
3660 classification = class_list[0]
3661
3662 return classification
3663
3664 def get_classification_list(self, filter_dict={}):
3665 self.logger.debug(
3666 "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3667 )
3668
3669 try:
3670 filter_dict_os = filter_dict.copy()
3671 self._reload_connection()
3672
3673 if self.api_version3 and "tenant_id" in filter_dict_os:
3674 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3675
3676 classification_dict = self.neutron.list_sfc_flow_classifiers(
3677 **filter_dict_os
3678 )
3679 classification_list = classification_dict["flow_classifiers"]
3680 self.__classification_os2mano(classification_list)
3681
3682 return classification_list
3683 except (
3684 neExceptions.ConnectionFailed,
3685 ksExceptions.ClientException,
3686 neExceptions.NeutronException,
3687 ConnectionError,
3688 ) as e:
3689 self._format_exception(e)
3690
3691 def delete_classification(self, class_id):
3692 self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3693
3694 try:
3695 self._reload_connection()
3696 self.neutron.delete_sfc_flow_classifier(class_id)
3697
3698 return class_id
3699 except (
3700 neExceptions.ConnectionFailed,
3701 neExceptions.NeutronException,
3702 ksExceptions.ClientException,
3703 neExceptions.NeutronException,
3704 ConnectionError,
3705 ) as e:
3706 self._format_exception(e)
3707
3708 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3709 self.logger.debug(
3710 "Adding a new Service Function Instance to VIM, named '%s'", name
3711 )
3712
3713 try:
3714 new_sfi = None
3715 self._reload_connection()
3716 correlation = None
3717
3718 if sfc_encap:
3719 correlation = "nsh"
3720
3721 if len(ingress_ports) != 1:
3722 raise vimconn.VimConnNotSupportedException(
3723 "OpenStack VIM connector can only have 1 ingress port per SFI"
3724 )
3725
3726 if len(egress_ports) != 1:
3727 raise vimconn.VimConnNotSupportedException(
3728 "OpenStack VIM connector can only have 1 egress port per SFI"
3729 )
3730
3731 sfi_dict = {
3732 "name": name,
3733 "ingress": ingress_ports[0],
3734 "egress": egress_ports[0],
3735 "service_function_parameters": {"correlation": correlation},
3736 }
3737 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3738
3739 return new_sfi["port_pair"]["id"]
3740 except (
3741 neExceptions.ConnectionFailed,
3742 ksExceptions.ClientException,
3743 neExceptions.NeutronException,
3744 ConnectionError,
3745 ) as e:
3746 if new_sfi:
3747 try:
3748 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3749 except Exception:
3750 self.logger.error(
3751 "Creation of Service Function Instance failed, with "
3752 "subsequent deletion failure as well."
3753 )
3754
3755 self._format_exception(e)
3756
3757 def get_sfi(self, sfi_id):
3758 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3759 filter_dict = {"id": sfi_id}
3760 sfi_list = self.get_sfi_list(filter_dict)
3761
3762 if len(sfi_list) == 0:
3763 raise vimconn.VimConnNotFoundException(
3764 "Service Function Instance '{}' not found".format(sfi_id)
3765 )
3766 elif len(sfi_list) > 1:
3767 raise vimconn.VimConnConflictException(
3768 "Found more than one Service Function Instance with this criteria"
3769 )
3770
3771 sfi = sfi_list[0]
3772
3773 return sfi
3774
3775 def get_sfi_list(self, filter_dict={}):
3776 self.logger.debug(
3777 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3778 )
3779
3780 try:
3781 self._reload_connection()
3782 filter_dict_os = filter_dict.copy()
3783
3784 if self.api_version3 and "tenant_id" in filter_dict_os:
3785 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3786
3787 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3788 sfi_list = sfi_dict["port_pairs"]
3789 self.__sfi_os2mano(sfi_list)
3790
3791 return sfi_list
3792 except (
3793 neExceptions.ConnectionFailed,
3794 ksExceptions.ClientException,
3795 neExceptions.NeutronException,
3796 ConnectionError,
3797 ) as e:
3798 self._format_exception(e)
3799
3800 def delete_sfi(self, sfi_id):
3801 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3802
3803 try:
3804 self._reload_connection()
3805 self.neutron.delete_sfc_port_pair(sfi_id)
3806
3807 return sfi_id
3808 except (
3809 neExceptions.ConnectionFailed,
3810 neExceptions.NeutronException,
3811 ksExceptions.ClientException,
3812 neExceptions.NeutronException,
3813 ConnectionError,
3814 ) as e:
3815 self._format_exception(e)
3816
3817 def new_sf(self, name, sfis, sfc_encap=True):
3818 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3819
3820 try:
3821 new_sf = None
3822 self._reload_connection()
3823 # correlation = None
3824 # if sfc_encap:
3825 # correlation = "nsh"
3826
3827 for instance in sfis:
3828 sfi = self.get_sfi(instance)
3829
3830 if sfi.get("sfc_encap") != sfc_encap:
3831 raise vimconn.VimConnNotSupportedException(
3832 "OpenStack VIM connector requires all SFIs of the "
3833 "same SF to share the same SFC Encapsulation"
3834 )
3835
3836 sf_dict = {"name": name, "port_pairs": sfis}
3837 new_sf = self.neutron.create_sfc_port_pair_group(
3838 {"port_pair_group": sf_dict}
3839 )
3840
3841 return new_sf["port_pair_group"]["id"]
3842 except (
3843 neExceptions.ConnectionFailed,
3844 ksExceptions.ClientException,
3845 neExceptions.NeutronException,
3846 ConnectionError,
3847 ) as e:
3848 if new_sf:
3849 try:
3850 self.neutron.delete_sfc_port_pair_group(
3851 new_sf["port_pair_group"]["id"]
3852 )
3853 except Exception:
3854 self.logger.error(
3855 "Creation of Service Function failed, with "
3856 "subsequent deletion failure as well."
3857 )
3858
3859 self._format_exception(e)
3860
3861 def get_sf(self, sf_id):
3862 self.logger.debug("Getting Service Function %s from VIM", sf_id)
3863 filter_dict = {"id": sf_id}
3864 sf_list = self.get_sf_list(filter_dict)
3865
3866 if len(sf_list) == 0:
3867 raise vimconn.VimConnNotFoundException(
3868 "Service Function '{}' not found".format(sf_id)
3869 )
3870 elif len(sf_list) > 1:
3871 raise vimconn.VimConnConflictException(
3872 "Found more than one Service Function with this criteria"
3873 )
3874
3875 sf = sf_list[0]
3876
3877 return sf
3878
3879 def get_sf_list(self, filter_dict={}):
3880 self.logger.debug(
3881 "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3882 )
3883
3884 try:
3885 self._reload_connection()
3886 filter_dict_os = filter_dict.copy()
3887
3888 if self.api_version3 and "tenant_id" in filter_dict_os:
3889 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3890
3891 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3892 sf_list = sf_dict["port_pair_groups"]
3893 self.__sf_os2mano(sf_list)
3894
3895 return sf_list
3896 except (
3897 neExceptions.ConnectionFailed,
3898 ksExceptions.ClientException,
3899 neExceptions.NeutronException,
3900 ConnectionError,
3901 ) as e:
3902 self._format_exception(e)
3903
3904 def delete_sf(self, sf_id):
3905 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3906
3907 try:
3908 self._reload_connection()
3909 self.neutron.delete_sfc_port_pair_group(sf_id)
3910
3911 return sf_id
3912 except (
3913 neExceptions.ConnectionFailed,
3914 neExceptions.NeutronException,
3915 ksExceptions.ClientException,
3916 neExceptions.NeutronException,
3917 ConnectionError,
3918 ) as e:
3919 self._format_exception(e)
3920
3921 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3922 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3923
3924 try:
3925 new_sfp = None
3926 self._reload_connection()
3927 # In networking-sfc the MPLS encapsulation is legacy
3928 # should be used when no full SFC Encapsulation is intended
3929 correlation = "mpls"
3930
3931 if sfc_encap:
3932 correlation = "nsh"
3933
3934 sfp_dict = {
3935 "name": name,
3936 "flow_classifiers": classifications,
3937 "port_pair_groups": sfs,
3938 "chain_parameters": {"correlation": correlation},
3939 }
3940
3941 if spi:
3942 sfp_dict["chain_id"] = spi
3943
3944 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3945
3946 return new_sfp["port_chain"]["id"]
3947 except (
3948 neExceptions.ConnectionFailed,
3949 ksExceptions.ClientException,
3950 neExceptions.NeutronException,
3951 ConnectionError,
3952 ) as e:
3953 if new_sfp:
3954 try:
3955 self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3956 except Exception:
3957 self.logger.error(
3958 "Creation of Service Function Path failed, with "
3959 "subsequent deletion failure as well."
3960 )
3961
3962 self._format_exception(e)
3963
3964 def get_sfp(self, sfp_id):
3965 self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
3966
3967 filter_dict = {"id": sfp_id}
3968 sfp_list = self.get_sfp_list(filter_dict)
3969
3970 if len(sfp_list) == 0:
3971 raise vimconn.VimConnNotFoundException(
3972 "Service Function Path '{}' not found".format(sfp_id)
3973 )
3974 elif len(sfp_list) > 1:
3975 raise vimconn.VimConnConflictException(
3976 "Found more than one Service Function Path with this criteria"
3977 )
3978
3979 sfp = sfp_list[0]
3980
3981 return sfp
3982
3983 def get_sfp_list(self, filter_dict={}):
3984 self.logger.debug(
3985 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3986 )
3987
3988 try:
3989 self._reload_connection()
3990 filter_dict_os = filter_dict.copy()
3991
3992 if self.api_version3 and "tenant_id" in filter_dict_os:
3993 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3994
3995 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
3996 sfp_list = sfp_dict["port_chains"]
3997 self.__sfp_os2mano(sfp_list)
3998
3999 return sfp_list
4000 except (
4001 neExceptions.ConnectionFailed,
4002 ksExceptions.ClientException,
4003 neExceptions.NeutronException,
4004 ConnectionError,
4005 ) as e:
4006 self._format_exception(e)
4007
4008 def delete_sfp(self, sfp_id):
4009 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
4010
4011 try:
4012 self._reload_connection()
4013 self.neutron.delete_sfc_port_chain(sfp_id)
4014
4015 return sfp_id
4016 except (
4017 neExceptions.ConnectionFailed,
4018 neExceptions.NeutronException,
4019 ksExceptions.ClientException,
4020 neExceptions.NeutronException,
4021 ConnectionError,
4022 ) as e:
4023 self._format_exception(e)
4024
4025 def refresh_sfps_status(self, sfp_list):
4026 """Get the status of the service function path
4027 Params: the list of sfp identifiers
4028 Returns a dictionary with:
4029 vm_id: #VIM id of this service function path
4030 status: #Mandatory. Text with one of:
4031 # DELETED (not found at vim)
4032 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4033 # OTHER (Vim reported other status not understood)
4034 # ERROR (VIM indicates an ERROR status)
4035 # ACTIVE,
4036 # CREATING (on building process)
4037 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4038 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
4039 """
4040 sfp_dict = {}
4041 self.logger.debug(
4042 "refresh_sfps status: Getting tenant SFP information from VIM"
4043 )
4044
4045 for sfp_id in sfp_list:
4046 sfp = {}
4047
4048 try:
4049 sfp_vim = self.get_sfp(sfp_id)
4050
4051 if sfp_vim["spi"]:
4052 sfp["status"] = vmStatus2manoFormat["ACTIVE"]
4053 else:
4054 sfp["status"] = "OTHER"
4055 sfp["error_msg"] = "VIM status reported " + sfp["status"]
4056
4057 sfp["vim_info"] = self.serialize(sfp_vim)
4058
4059 if sfp_vim.get("fault"):
4060 sfp["error_msg"] = str(sfp_vim["fault"])
4061 except vimconn.VimConnNotFoundException as e:
4062 self.logger.error("Exception getting sfp status: %s", str(e))
4063 sfp["status"] = "DELETED"
4064 sfp["error_msg"] = str(e)
4065 except vimconn.VimConnException as e:
4066 self.logger.error("Exception getting sfp status: %s", str(e))
4067 sfp["status"] = "VIM_ERROR"
4068 sfp["error_msg"] = str(e)
4069
4070 sfp_dict[sfp_id] = sfp
4071
4072 return sfp_dict
4073
4074 def refresh_sfis_status(self, sfi_list):
4075 """Get the status of the service function instances
4076 Params: the list of sfi identifiers
4077 Returns a dictionary with:
4078 vm_id: #VIM id of this service function instance
4079 status: #Mandatory. Text with one of:
4080 # DELETED (not found at vim)
4081 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4082 # OTHER (Vim reported other status not understood)
4083 # ERROR (VIM indicates an ERROR status)
4084 # ACTIVE,
4085 # CREATING (on building process)
4086 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4087 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4088 """
4089 sfi_dict = {}
4090 self.logger.debug(
4091 "refresh_sfis status: Getting tenant sfi information from VIM"
4092 )
4093
4094 for sfi_id in sfi_list:
4095 sfi = {}
4096
4097 try:
4098 sfi_vim = self.get_sfi(sfi_id)
4099
4100 if sfi_vim:
4101 sfi["status"] = vmStatus2manoFormat["ACTIVE"]
4102 else:
4103 sfi["status"] = "OTHER"
4104 sfi["error_msg"] = "VIM status reported " + sfi["status"]
4105
4106 sfi["vim_info"] = self.serialize(sfi_vim)
4107
4108 if sfi_vim.get("fault"):
4109 sfi["error_msg"] = str(sfi_vim["fault"])
4110 except vimconn.VimConnNotFoundException as e:
4111 self.logger.error("Exception getting sfi status: %s", str(e))
4112 sfi["status"] = "DELETED"
4113 sfi["error_msg"] = str(e)
4114 except vimconn.VimConnException as e:
4115 self.logger.error("Exception getting sfi status: %s", str(e))
4116 sfi["status"] = "VIM_ERROR"
4117 sfi["error_msg"] = str(e)
4118
4119 sfi_dict[sfi_id] = sfi
4120
4121 return sfi_dict
4122
4123 def refresh_sfs_status(self, sf_list):
4124 """Get the status of the service functions
4125 Params: the list of sf identifiers
4126 Returns a dictionary with:
4127 vm_id: #VIM id of this service function
4128 status: #Mandatory. Text with one of:
4129 # DELETED (not found at vim)
4130 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4131 # OTHER (Vim reported other status not understood)
4132 # ERROR (VIM indicates an ERROR status)
4133 # ACTIVE,
4134 # CREATING (on building process)
4135 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4136 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4137 """
4138 sf_dict = {}
4139 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
4140
4141 for sf_id in sf_list:
4142 sf = {}
4143
4144 try:
4145 sf_vim = self.get_sf(sf_id)
4146
4147 if sf_vim:
4148 sf["status"] = vmStatus2manoFormat["ACTIVE"]
4149 else:
4150 sf["status"] = "OTHER"
4151 sf["error_msg"] = "VIM status reported " + sf_vim["status"]
4152
4153 sf["vim_info"] = self.serialize(sf_vim)
4154
4155 if sf_vim.get("fault"):
4156 sf["error_msg"] = str(sf_vim["fault"])
4157 except vimconn.VimConnNotFoundException as e:
4158 self.logger.error("Exception getting sf status: %s", str(e))
4159 sf["status"] = "DELETED"
4160 sf["error_msg"] = str(e)
4161 except vimconn.VimConnException as e:
4162 self.logger.error("Exception getting sf status: %s", str(e))
4163 sf["status"] = "VIM_ERROR"
4164 sf["error_msg"] = str(e)
4165
4166 sf_dict[sf_id] = sf
4167
4168 return sf_dict
4169
4170 def refresh_classifications_status(self, classification_list):
4171 """Get the status of the classifications
4172 Params: the list of classification identifiers
4173 Returns a dictionary with:
4174 vm_id: #VIM id of this classifier
4175 status: #Mandatory. Text with one of:
4176 # DELETED (not found at vim)
4177 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4178 # OTHER (Vim reported other status not understood)
4179 # ERROR (VIM indicates an ERROR status)
4180 # ACTIVE,
4181 # CREATING (on building process)
4182 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4183 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4184 """
4185 classification_dict = {}
4186 self.logger.debug(
4187 "refresh_classifications status: Getting tenant classification information from VIM"
4188 )
4189
4190 for classification_id in classification_list:
4191 classification = {}
4192
4193 try:
4194 classification_vim = self.get_classification(classification_id)
4195
4196 if classification_vim:
4197 classification["status"] = vmStatus2manoFormat["ACTIVE"]
4198 else:
4199 classification["status"] = "OTHER"
4200 classification["error_msg"] = (
4201 "VIM status reported " + classification["status"]
4202 )
4203
4204 classification["vim_info"] = self.serialize(classification_vim)
4205
4206 if classification_vim.get("fault"):
4207 classification["error_msg"] = str(classification_vim["fault"])
4208 except vimconn.VimConnNotFoundException as e:
4209 self.logger.error("Exception getting classification status: %s", str(e))
4210 classification["status"] = "DELETED"
4211 classification["error_msg"] = str(e)
4212 except vimconn.VimConnException as e:
4213 self.logger.error("Exception getting classification status: %s", str(e))
4214 classification["status"] = "VIM_ERROR"
4215 classification["error_msg"] = str(e)
4216
4217 classification_dict[classification_id] = classification
4218
4219 return classification_dict
4220
4221 def new_affinity_group(self, affinity_group_data):
4222 """Adds a server group to VIM
4223 affinity_group_data contains a dictionary with information, keys:
4224 name: name in VIM for the server group
4225 type: affinity or anti-affinity
4226 scope: Only nfvi-node allowed
4227 Returns the server group identifier"""
4228 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
4229
4230 try:
4231 name = affinity_group_data["name"]
4232 policy = affinity_group_data["type"]
4233
4234 self._reload_connection()
4235 new_server_group = self.nova.server_groups.create(name, policy)
4236
4237 return new_server_group.id
4238 except (
4239 ksExceptions.ClientException,
4240 nvExceptions.ClientException,
4241 ConnectionError,
4242 KeyError,
4243 ) as e:
4244 self._format_exception(e)
4245
4246 def get_affinity_group(self, affinity_group_id):
4247 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4248 self.logger.debug("Getting flavor '%s'", affinity_group_id)
4249 try:
4250 self._reload_connection()
4251 server_group = self.nova.server_groups.find(id=affinity_group_id)
4252
4253 return server_group.to_dict()
4254 except (
4255 nvExceptions.NotFound,
4256 nvExceptions.ClientException,
4257 ksExceptions.ClientException,
4258 ConnectionError,
4259 ) as e:
4260 self._format_exception(e)
4261
4262 def delete_affinity_group(self, affinity_group_id):
4263 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4264 self.logger.debug("Getting server group '%s'", affinity_group_id)
4265 try:
4266 self._reload_connection()
4267 self.nova.server_groups.delete(affinity_group_id)
4268
4269 return affinity_group_id
4270 except (
4271 nvExceptions.NotFound,
4272 ksExceptions.ClientException,
4273 nvExceptions.ClientException,
4274 ConnectionError,
4275 ) as e:
4276 self._format_exception(e)
4277
4278 def get_vdu_state(self, vm_id):
4279 """
4280 Getting the state of a vdu
4281 param:
4282 vm_id: ID of an instance
4283 """
4284 self.logger.debug("Getting the status of VM")
4285 self.logger.debug("VIM VM ID %s", vm_id)
4286 self._reload_connection()
4287 server = self.nova.servers.find(id=vm_id)
4288 server_dict = server.to_dict()
4289 vdu_data = [
4290 server_dict["status"],
4291 server_dict["flavor"]["id"],
4292 server_dict["OS-EXT-SRV-ATTR:host"],
4293 server_dict["OS-EXT-AZ:availability_zone"],
4294 ]
4295 self.logger.debug("vdu_data %s", vdu_data)
4296 return vdu_data
4297
4298 def check_compute_availability(self, host, server_flavor_details):
4299 self._reload_connection()
4300 hypervisor_search = self.nova.hypervisors.search(
4301 hypervisor_match=host, servers=True
4302 )
4303 for hypervisor in hypervisor_search:
4304 hypervisor_id = hypervisor.to_dict()["id"]
4305 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4306 hypervisor_dict = hypervisor_details.to_dict()
4307 hypervisor_temp = json.dumps(hypervisor_dict)
4308 hypervisor_json = json.loads(hypervisor_temp)
4309 resources_available = [
4310 hypervisor_json["free_ram_mb"],
4311 hypervisor_json["disk_available_least"],
4312 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
4313 ]
4314 compute_available = all(
4315 x > y for x, y in zip(resources_available, server_flavor_details)
4316 )
4317 if compute_available:
4318 return host
4319
4320 def check_availability_zone(
4321 self, old_az, server_flavor_details, old_host, host=None
4322 ):
4323 self._reload_connection()
4324 az_check = {"zone_check": False, "compute_availability": None}
4325 aggregates_list = self.nova.aggregates.list()
4326 for aggregate in aggregates_list:
4327 aggregate_details = aggregate.to_dict()
4328 aggregate_temp = json.dumps(aggregate_details)
4329 aggregate_json = json.loads(aggregate_temp)
4330 if aggregate_json["availability_zone"] == old_az:
4331 hosts_list = aggregate_json["hosts"]
4332 if host is not None:
4333 if host in hosts_list:
4334 az_check["zone_check"] = True
4335 available_compute_id = self.check_compute_availability(
4336 host, server_flavor_details
4337 )
4338 if available_compute_id is not None:
4339 az_check["compute_availability"] = available_compute_id
4340 else:
4341 for check_host in hosts_list:
4342 if check_host != old_host:
4343 available_compute_id = self.check_compute_availability(
4344 check_host, server_flavor_details
4345 )
4346 if available_compute_id is not None:
4347 az_check["zone_check"] = True
4348 az_check["compute_availability"] = available_compute_id
4349 break
4350 else:
4351 az_check["zone_check"] = True
4352 return az_check
4353
4354 def migrate_instance(self, vm_id, compute_host=None):
4355 """
4356 Migrate a vdu
4357 param:
4358 vm_id: ID of an instance
4359 compute_host: Host to migrate the vdu to
4360 """
4361 self._reload_connection()
4362 vm_state = False
4363 instance_state = self.get_vdu_state(vm_id)
4364 server_flavor_id = instance_state[1]
4365 server_hypervisor_name = instance_state[2]
4366 server_availability_zone = instance_state[3]
4367 try:
4368 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
4369 server_flavor_details = [
4370 server_flavor["ram"],
4371 server_flavor["disk"],
4372 server_flavor["vcpus"],
4373 ]
4374 if compute_host == server_hypervisor_name:
4375 raise vimconn.VimConnException(
4376 "Unable to migrate instance '{}' to the same host '{}'".format(
4377 vm_id, compute_host
4378 ),
4379 http_code=vimconn.HTTP_Bad_Request,
4380 )
4381 az_status = self.check_availability_zone(
4382 server_availability_zone,
4383 server_flavor_details,
4384 server_hypervisor_name,
4385 compute_host,
4386 )
4387 availability_zone_check = az_status["zone_check"]
4388 available_compute_id = az_status.get("compute_availability")
4389
4390 if availability_zone_check is False:
4391 raise vimconn.VimConnException(
4392 "Unable to migrate instance '{}' to a different availability zone".format(
4393 vm_id
4394 ),
4395 http_code=vimconn.HTTP_Bad_Request,
4396 )
4397 if available_compute_id is not None:
4398 self.nova.servers.live_migrate(
4399 server=vm_id,
4400 host=available_compute_id,
4401 block_migration=True,
4402 disk_over_commit=False,
4403 )
4404 state = "MIGRATING"
4405 changed_compute_host = ""
4406 if state == "MIGRATING":
4407 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
4408 changed_compute_host = self.get_vdu_state(vm_id)[2]
4409 if vm_state and changed_compute_host == available_compute_id:
4410 self.logger.debug(
4411 "Instance '{}' migrated to the new compute host '{}'".format(
4412 vm_id, changed_compute_host
4413 )
4414 )
4415 return state, available_compute_id
4416 else:
4417 raise vimconn.VimConnException(
4418 "Migration Failed. Instance '{}' not moved to the new host {}".format(
4419 vm_id, available_compute_id
4420 ),
4421 http_code=vimconn.HTTP_Bad_Request,
4422 )
4423 else:
4424 raise vimconn.VimConnException(
4425 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
4426 available_compute_id
4427 ),
4428 http_code=vimconn.HTTP_Bad_Request,
4429 )
4430 except (
4431 nvExceptions.BadRequest,
4432 nvExceptions.ClientException,
4433 nvExceptions.NotFound,
4434 ) as e:
4435 self._format_exception(e)
4436
4437 def resize_instance(self, vm_id, new_flavor_id):
4438 """
4439 For resizing the vm based on the given
4440 flavor details
4441 param:
4442 vm_id : ID of an instance
4443 new_flavor_id : Flavor id to be resized
4444 Return the status of a resized instance
4445 """
4446 self._reload_connection()
4447 self.logger.debug("resize the flavor of an instance")
4448 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
4449 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
4450 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
4451 try:
4452 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
4453 if old_flavor_disk > new_flavor_disk:
4454 raise nvExceptions.BadRequest(
4455 400,
4456 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
4457 )
4458 else:
4459 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
4460 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
4461 if vm_state:
4462 instance_resized_status = self.confirm_resize(vm_id)
4463 return instance_resized_status
4464 else:
4465 raise nvExceptions.BadRequest(
4466 409,
4467 message="Cannot 'resize' vm_state is in ERROR",
4468 )
4469
4470 else:
4471 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
4472 raise nvExceptions.BadRequest(
4473 409,
4474 message="Cannot 'resize' instance while it is in vm_state resized",
4475 )
4476 except (
4477 nvExceptions.BadRequest,
4478 nvExceptions.ClientException,
4479 nvExceptions.NotFound,
4480 ) as e:
4481 self._format_exception(e)
4482
4483 def confirm_resize(self, vm_id):
4484 """
4485 Confirm the resize of an instance
4486 param:
4487 vm_id: ID of an instance
4488 """
4489 self._reload_connection()
4490 self.nova.servers.confirm_resize(server=vm_id)
4491 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4492 self.__wait_for_vm(vm_id, "ACTIVE")
4493 instance_status = self.get_vdu_state(vm_id)[0]
4494 return instance_status