# -*- coding: utf-8 -*-

##
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##

"""
osconnector implements all the methods to interact with OpenStack using the
Python clients (keystone, nova, neutron, glance and cinder).

For the VNF forwarding graph, the OpenStack VIM connector calls the
networking-sfc Neutron extension methods, whose resources are mapped
to the VIM connector's SFC resources as follows:
- Classification (OSM) -> Flow Classifier (Neutron)
- Service Function Instance (OSM) -> Port Pair (Neutron)
- Service Function (OSM) -> Port Pair Group (Neutron)
- Service Function Path (OSM) -> Port Chain (Neutron)
"""

import copy
from http.client import HTTPException
import json
import logging
from pprint import pformat
import random
import re
import time
from typing import Dict, List, Optional, Tuple

from cinderclient import client as cClient
from glanceclient import client as glClient
import glanceclient.exc as gl1Exceptions
from keystoneauth1 import session
from keystoneauth1.identity import v2, v3
import keystoneclient.exceptions as ksExceptions
import keystoneclient.v2_0.client as ksClient_v2
import keystoneclient.v3.client as ksClient_v3
import netaddr
from neutronclient.common import exceptions as neExceptions
from neutronclient.neutron import client as neClient
from novaclient import client as nClient, exceptions as nvExceptions
from osm_ro_plugin import vimconn
from requests.exceptions import ConnectionError
import yaml

__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""Mapping of the openstack virtual machine status to the openmano status"""
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

supportedClassificationTypes = ["legacy_flow_classifier"]

# global vars for the timeouts used when creating and deleting volumes and servers
volume_timeout = 1800
server_timeout = 1800


class SafeDumper(yaml.SafeDumper):
    def represent_data(self, data):
        # Openstack APIs use custom subclasses of dict and the YAML safe dumper
        # is designed not to handle that (reference issue 142 of pyyaml)
        if isinstance(data, dict) and data.__class__ != dict:
            # A simple solution is to convert those items back to dicts
            data = dict(data.items())

        return super(SafeDumper, self).represent_data(data)
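
# Usage sketch for SafeDumper (added note): serialize() below dumps values
# with it, after the custom dict subclasses returned by the OpenStack clients
# are converted back to plain dicts, e.g.
#   yaml.dump({"k": "v"}, Dumper=SafeDumper, default_flow_style=True, width=256)
# returns "{k: v}\n".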

class vimconnector(vimconn.VimConnector):
    def __init__(
        self,
        uuid,
        name,
        tenant_id,
        tenant_name,
        url,
        url_admin=None,
        user=None,
        passwd=None,
        log_level=None,
        config={},
        persistent_info={},
    ):
        """using common constructor parameters. In this case
        'url' is the keystone authorization url,
        'url_admin' is not used
        """
        api_version = config.get("APIversion")

        if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:APIversion. "
                "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
            )

        vim_type = config.get("vim_type")

        if vim_type and vim_type not in ("vio", "VIO"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:vim_type. "
                "Allowed values are 'vio' or 'VIO'".format(vim_type)
            )

        if config.get("dataplane_net_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
            )

        if config.get("multisegment_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("multisegment_vlan_range"), "multisegment_vlan_range"
            )

        vimconn.VimConnector.__init__(
            self,
            uuid,
            name,
            tenant_id,
            tenant_name,
            url,
            url_admin,
            user,
            passwd,
            log_level,
            config,
        )

        if self.config.get("insecure") and self.config.get("ca_cert"):
            raise vimconn.VimConnException(
                "options insecure and ca_cert are mutually exclusive"
            )

        self.verify = True

        if self.config.get("insecure"):
            self.verify = False

        if self.config.get("ca_cert"):
            self.verify = self.config.get("ca_cert")

        if not url:
            raise TypeError("url param can not be NoneType")

        self.persistent_info = persistent_info
        self.availability_zone = persistent_info.get("availability_zone", None)
        self.session = persistent_info.get("session", {"reload_client": True})
        self.my_tenant_id = self.session.get("my_tenant_id")
        self.nova = self.session.get("nova")
        self.neutron = self.session.get("neutron")
        self.cinder = self.session.get("cinder")
        self.glance = self.session.get("glance")
        # self.glancev1 = self.session.get("glancev1")
        self.keystone = self.session.get("keystone")
        self.api_version3 = self.session.get("api_version3")
        self.vim_type = self.config.get("vim_type")

        if self.vim_type:
            self.vim_type = self.vim_type.upper()

        if self.config.get("use_internal_endpoint"):
            self.endpoint_type = "internalURL"
        else:
            self.endpoint_type = None

        logging.getLogger("urllib3").setLevel(logging.WARNING)
        logging.getLogger("keystoneauth").setLevel(logging.WARNING)
        logging.getLogger("novaclient").setLevel(logging.WARNING)
        self.logger = logging.getLogger("ro.vim.openstack")

        # allow security_groups to be a list or a single string
        if isinstance(self.config.get("security_groups"), str):
            self.config["security_groups"] = [self.config["security_groups"]]

        self.security_groups_id = None

        # ###### VIO Specific Changes #########
        if self.vim_type == "VIO":
            self.logger = logging.getLogger("ro.vim.vio")

        if log_level:
            self.logger.setLevel(getattr(logging, log_level))

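    # Construction sketch (added note; values are hypothetical): the RO plugin
    # framework instantiates the connector roughly as
    #   vim = vimconnector(uuid="...", name="my-vim", tenant_id=None,
    #                      tenant_name="admin", url="http://keystone:5000/v3",
    #                      user="admin", passwd="secret",
    #                      config={"region_name": "RegionOne"})
    # The actual clients are created lazily by _reload_connection() on the
    # first operation.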
    def __getitem__(self, index):
        """Get individual parameters.
        Throw KeyError"""
        if index == "project_domain_id":
            return self.config.get("project_domain_id")
        elif index == "user_domain_id":
            return self.config.get("user_domain_id")
        else:
            return vimconn.VimConnector.__getitem__(self, index)

    def __setitem__(self, index, value):
        """Set individual parameters and mark the session as dirty to force a connection reload.
        Throw KeyError"""
        if index == "project_domain_id":
            self.config["project_domain_id"] = value
        elif index == "user_domain_id":
            self.config["user_domain_id"] = value
        else:
            vimconn.VimConnector.__setitem__(self, index, value)

        self.session["reload_client"] = True

    def serialize(self, value):
        """Serialization of python basic types.

        In case the value is not serializable, a message will be logged and a
        simple representation of the data that cannot be converted back to
        python is returned.
        """
        if isinstance(value, str):
            return value

        try:
            return yaml.dump(
                value, Dumper=SafeDumper, default_flow_style=True, width=256
            )
        except yaml.representer.RepresenterError:
            self.logger.debug(
                "The following entity cannot be serialized in YAML:\n\n%s\n\n",
                pformat(value),
                exc_info=True,
            )

            return str(value)

    def _reload_connection(self):
        """Called before any operation, it checks if credentials have changed.
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seems that python client does this task for us :-)
        if self.session["reload_client"]:
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # infer from the auth_url ending with v3 or v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require a specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availability zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None

    def __net_os2mano(self, net_list_dict):
        """Transform the net openstack format to mano format
        net_list_dict can be a list of dict or a single dict"""
        if type(net_list_dict) is dict:
            net_list_ = (net_list_dict,)
        elif type(net_list_dict) is list:
            net_list_ = net_list_dict
        else:
            raise TypeError("param net_list_dict must be a list or a dictionary")
        for net in net_list_:
            if net.get("provider:network_type") == "vlan":
                net["type"] = "data"
            else:
                net["type"] = "bridge"

    def __classification_os2mano(self, class_list_dict):
        """Transform the openstack format (Flow Classifier) to mano format
        (Classification) class_list_dict can be a list of dict or a single dict
        """
        if isinstance(class_list_dict, dict):
            class_list_ = [class_list_dict]
        elif isinstance(class_list_dict, list):
            class_list_ = class_list_dict
        else:
            raise TypeError("param class_list_dict must be a list or a dictionary")
        for classification in class_list_:
            id = classification.pop("id")
            name = classification.pop("name")
            description = classification.pop("description")
            project_id = classification.pop("project_id")
            tenant_id = classification.pop("tenant_id")
            original_classification = copy.deepcopy(classification)
            classification.clear()
            classification["ctype"] = "legacy_flow_classifier"
            classification["definition"] = original_classification
            classification["id"] = id
            classification["name"] = name
            classification["description"] = description
            classification["project_id"] = project_id
            classification["tenant_id"] = tenant_id

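    # Transformation sketch for __classification_os2mano (hypothetical values):
    #   in:  {"id": "c-uuid", "name": "c1", "description": "", "project_id": "p",
    #         "tenant_id": "p", "source_port_range_min": 80, ...}
    #   out: {"ctype": "legacy_flow_classifier",
    #         "definition": {"source_port_range_min": 80, ...},
    #         "id": "c-uuid", "name": "c1", "description": "",
    #         "project_id": "p", "tenant_id": "p"}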
    def __sfi_os2mano(self, sfi_list_dict):
        """Transform the openstack format (Port Pair) to mano format (SFI)
        sfi_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfi_list_dict, dict):
            sfi_list_ = [sfi_list_dict]
        elif isinstance(sfi_list_dict, list):
            sfi_list_ = sfi_list_dict
        else:
            raise TypeError("param sfi_list_dict must be a list or a dictionary")

        for sfi in sfi_list_:
            sfi["ingress_ports"] = []
            sfi["egress_ports"] = []

            if sfi.get("ingress"):
                sfi["ingress_ports"].append(sfi["ingress"])

            if sfi.get("egress"):
                sfi["egress_ports"].append(sfi["egress"])

            del sfi["ingress"]
            del sfi["egress"]
            params = sfi.get("service_function_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfi["sfc_encap"] = sfc_encap
            del sfi["service_function_parameters"]

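    # Transformation sketch for __sfi_os2mano (hypothetical values): a Neutron
    # port pair {"ingress": "in-port", "egress": "out-port",
    # "service_function_parameters": {"correlation": "mpls"}, ...} becomes
    # {"ingress_ports": ["in-port"], "egress_ports": ["out-port"],
    #  "sfc_encap": True, ...} with the remaining keys left untouched.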
    def __sf_os2mano(self, sf_list_dict):
        """Transform the openstack format (Port Pair Group) to mano format (SF)
        sf_list_dict can be a list of dict or a single dict
        """
        if isinstance(sf_list_dict, dict):
            sf_list_ = [sf_list_dict]
        elif isinstance(sf_list_dict, list):
            sf_list_ = sf_list_dict
        else:
            raise TypeError("param sf_list_dict must be a list or a dictionary")

        for sf in sf_list_:
            del sf["port_pair_group_parameters"]
            sf["sfis"] = sf["port_pairs"]
            del sf["port_pairs"]

    def __sfp_os2mano(self, sfp_list_dict):
        """Transform the openstack format (Port Chain) to mano format (SFP)
        sfp_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfp_list_dict, dict):
            sfp_list_ = [sfp_list_dict]
        elif isinstance(sfp_list_dict, list):
            sfp_list_ = sfp_list_dict
        else:
            raise TypeError("param sfp_list_dict must be a list or a dictionary")

        for sfp in sfp_list_:
            params = sfp.pop("chain_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfp["sfc_encap"] = sfc_encap
            sfp["spi"] = sfp.pop("chain_id")
            sfp["classifications"] = sfp.pop("flow_classifiers")
            sfp["service_functions"] = sfp.pop("port_pair_groups")

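    # Transformation sketch for __sfp_os2mano (hypothetical values): in a
    # Neutron port chain, "chain_id", "flow_classifiers" and "port_pair_groups"
    # are renamed to "spi", "classifications" and "service_functions", and
    # "chain_parameters": {"correlation": ...} collapses into "sfc_encap": True.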
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.

    def _format_exception(self, exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
        message_error = str(exception)
        tip = ""

        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
            ),
        ):
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
            ),
        ):
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
            ),
        ):
            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, vimconn.VimConnException):
            raise exception
        else:
            self.logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnConnectionException(
                type(exception).__name__ + ": " + message_error
            )

    def _get_ids_from_name(self):
        """
        Obtain ids from name of tenant and security_groups. Store at self.security_groups_id
        :return: None
        """
        # get tenant_id if only tenant_name is supplied
        self._reload_connection()

        if not self.my_tenant_id:
            raise vimconn.VimConnConnectionException(
                "Error getting tenant information from name={} id={}".format(
                    self.tenant_name, self.tenant_id
                )
            )

        if self.config.get("security_groups") and not self.security_groups_id:
            # convert from name to id
            neutron_sg_list = self.neutron.list_security_groups(
                tenant_id=self.my_tenant_id
            )["security_groups"]

            self.security_groups_id = []
            for sg in self.config.get("security_groups"):
                for neutron_sg in neutron_sg_list:
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )

    def check_vim_connectivity(self):
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})

    def get_tenant_list(self, filter_dict={}):
        """Obtain tenants of VIM
        filter_dict can contain the following keys:
            name: filter by tenant name
            id: filter by tenant uuid/id
            <other VIM specific>
        Returns the tenant list of dictionaries: [{'name': '<name>', 'id': '<id>', ...}, ...]
        """
        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()

            if self.api_version3:
                project_class_list = self.keystone.projects.list(
                    name=filter_dict.get("name")
                )
            else:
                project_class_list = self.keystone.tenants.findall(**filter_dict)

            project_list = []

            for project in project_class_list:
                if filter_dict.get("id") and filter_dict["id"] != project.id:
                    continue

                project_list.append(project.to_dict())

            return project_list
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_tenant(self, tenant_name, tenant_description):
        """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
        self.logger.debug("Adding a new tenant name: %s", tenant_name)

        try:
            self._reload_connection()

            if self.api_version3:
                project = self.keystone.projects.create(
                    tenant_name,
                    self.config.get("project_domain_id", "default"),
                    description=tenant_description,
                    is_domain=False,
                )
            else:
                project = self.keystone.tenants.create(tenant_name, tenant_description)

            return project.id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_tenant(self, tenant_id):
        """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
        self.logger.debug("Deleting tenant %s from VIM", tenant_id)

        try:
            self._reload_connection()

            if self.api_version3:
                self.keystone.projects.delete(tenant_id)
            else:
                self.keystone.tenants.delete(tenant_id)

            return tenant_id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_network(
        self,
        net_name,
        net_type,
        ip_profile=None,
        shared=False,
        provider_network_profile=None,
    ):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data': underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) semicolon separated list of ip_schema, e.g. X.X.X.X[;X.X.X.X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/used by other tenants/organizations
            'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
                                                             physical-network: physnet-label}
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
        )
        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))

        try:
            vlan = None

            if provider_network_profile:
                vlan = provider_network_profile.get("segmentation-id")

            new_net = None
            created_items = {}
            self._reload_connection()
            network_dict = {"name": net_name, "admin_state_up": True}

            if net_type in ("data", "ptp") or provider_network_profile:
                provider_physical_network = None

                if provider_network_profile and provider_network_profile.get(
                    "physical-network"
                ):
                    provider_physical_network = provider_network_profile.get(
                        "physical-network"
                    )

                    # provider-network must be one of the dataplane_physical_net values if this is a list.
                    # If it is a string or not declared, just skip the check
                    if (
                        isinstance(
                            self.config.get("dataplane_physical_net"), (tuple, list)
                        )
                        and provider_physical_network
                        not in self.config["dataplane_physical_net"]
                    ):
                        raise vimconn.VimConnConflictException(
                            "Invalid parameter 'provider-network:physical-network' "
                            "for network creation. '{}' is not one of the declared "
                            "list at VIM_config:dataplane_physical_net".format(
                                provider_physical_network
                            )
                        )

                # use the default dataplane_physical_net
                if not provider_physical_network:
                    provider_physical_network = self.config.get(
                        "dataplane_physical_net"
                    )

                    # if it is a non-empty list, use the first value. If it is a string, use the value directly
                    if (
                        isinstance(provider_physical_network, (tuple, list))
                        and provider_physical_network
                    ):
                        provider_physical_network = provider_physical_network[0]

                if not provider_physical_network:
                    raise vimconn.VimConnConflictException(
                        "missing information needed for underlay networks. Provide "
                        "'dataplane_physical_net' configuration at VIM or use the NS "
                        "instantiation parameter 'provider-network.physical-network'"
                        " for the VLD"
                    )

                if not self.config.get("multisegment_support"):
                    network_dict[
                        "provider:physical_network"
                    ] = provider_physical_network

                    if (
                        provider_network_profile
                        and "network-type" in provider_network_profile
                    ):
                        network_dict[
                            "provider:network_type"
                        ] = provider_network_profile["network-type"]
                    else:
                        network_dict["provider:network_type"] = self.config.get(
                            "dataplane_network_type", "vlan"
                        )

                    if vlan:
                        network_dict["provider:segmentation_id"] = vlan
                else:
                    # Multi-segment case
                    # keep the VLAN id also in vlanID: it is reused below when
                    # creating l2 gateway connections (otherwise it would be
                    # unbound when a segmentation-id was provided)
                    vlanID = vlan
                    segment_list = []
                    segment1_dict = {
                        "provider:physical_network": "",
                        "provider:network_type": "vxlan",
                    }
                    segment_list.append(segment1_dict)
                    segment2_dict = {
                        "provider:physical_network": provider_physical_network,
                        "provider:network_type": "vlan",
                    }

                    if vlan:
                        segment2_dict["provider:segmentation_id"] = vlan
                    elif self.config.get("multisegment_vlan_range"):
                        vlanID = self._generate_multisegment_vlanID()
                        segment2_dict["provider:segmentation_id"] = vlanID

                    # else
                    #     raise vimconn.VimConnConflictException(
                    #         "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
                    #         network")
                    segment_list.append(segment2_dict)
                    network_dict["segments"] = segment_list

                # VIO Specific Changes. It needs a concrete VLAN
                if self.vim_type == "VIO" and vlan is None:
                    if self.config.get("dataplane_net_vlan_range") is None:
                        raise vimconn.VimConnConflictException(
                            "You must provide 'dataplane_net_vlan_range' in format "
                            "[start_ID - end_ID] at VIM_config for creating underlay "
                            "networks"
                        )

                    network_dict["provider:segmentation_id"] = self._generate_vlanID()

            network_dict["shared"] = shared

            if self.config.get("disable_network_port_security"):
                network_dict["port_security_enabled"] = False

            if self.config.get("neutron_availability_zone_hints"):
                hints = self.config.get("neutron_availability_zone_hints")

                if isinstance(hints, str):
                    hints = [hints]

                network_dict["availability_zone_hints"] = hints

            new_net = self.neutron.create_network({"network": network_dict})
            # print new_net
            # create subnetwork, even if there is no profile

            if not ip_profile:
                ip_profile = {}

            if not ip_profile.get("subnet_address"):
                # Fake subnet is required
                subnet_rand = random.randint(0, 255)
                ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)

            if "ip_version" not in ip_profile:
                ip_profile["ip_version"] = "IPv4"

            subnet = {
                "name": net_name + "-subnet",
                "network_id": new_net["network"]["id"],
                "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
                "cidr": ip_profile["subnet_address"],
            }

            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
            if ip_profile.get("gateway_address"):
                subnet["gateway_ip"] = ip_profile["gateway_address"]
            else:
                subnet["gateway_ip"] = None

            if ip_profile.get("dns_address"):
                subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")

            if "dhcp_enabled" in ip_profile:
                subnet["enable_dhcp"] = (
                    False
                    if ip_profile["dhcp_enabled"] == "false"
                    or ip_profile["dhcp_enabled"] is False
                    else True
                )

            if ip_profile.get("dhcp_start_address"):
                subnet["allocation_pools"] = []
                subnet["allocation_pools"].append(dict())
                subnet["allocation_pools"][0]["start"] = ip_profile[
                    "dhcp_start_address"
                ]

            if ip_profile.get("dhcp_count"):
                # parts = ip_profile["dhcp_start_address"].split(".")
                # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
                ip_int += ip_profile["dhcp_count"] - 1
                ip_str = str(netaddr.IPAddress(ip_int))
                subnet["allocation_pools"][0]["end"] = ip_str

            # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
            self.neutron.create_subnet({"subnet": subnet})

            if net_type == "data" and self.config.get("multisegment_support"):
                if self.config.get("l2gw_support"):
                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
                    for l2gw in l2gw_list:
                        l2gw_conn = {
                            "l2_gateway_id": l2gw["id"],
                            "network_id": new_net["network"]["id"],
                            "segmentation_id": str(vlanID),
                        }
                        new_l2gw_conn = self.neutron.create_l2_gateway_connection(
                            {"l2_gateway_connection": l2gw_conn}
                        )
                        created_items[
                            "l2gwconn:"
                            + str(new_l2gw_conn["l2_gateway_connection"]["id"])
                        ] = True

            return new_net["network"]["id"], created_items
        except Exception as e:
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )

            if new_net:
                self.neutron.delete_network(new_net["network"]["id"])

            self._format_exception(e)

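    # Usage sketch for new_network (added note; values are hypothetical, the
    # parameters follow the docstring above):
    #   net_id, created_items = vim.new_network(
    #       "vld1", "data",
    #       ip_profile={"subnet_address": "10.0.0.0/24",
    #                   "gateway_address": "10.0.0.1",
    #                   "dhcp_enabled": True,
    #                   "dhcp_start_address": "10.0.0.10",
    #                   "dhcp_count": 100},
    #       provider_network_profile={"physical-network": "physnet1",
    #                                 "segmentation-id": 400})
    # created_items may carry "l2gwconn:<id>" keys that delete_network() uses
    # to clean up l2 gateway connections.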
    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM
        Filter_dict can be:
            name: network name
            id: network uuid
            shared: boolean
            tenant_id: tenant
            admin_state_up: boolean
            status: 'ACTIVE'
        Returns the network list of dictionaries
        """
        self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()
            filter_dict_os = filter_dict.copy()

            if self.api_version3 and "tenant_id" in filter_dict_os:
                # TODO check
                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

            net_dict = self.neutron.list_networks(**filter_dict_os)
            net_list = net_dict["networks"]
            self.__net_os2mano(net_list)

            return net_list
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_network(self, net_id):
        """Obtain details of network from VIM
        Returns the network information from a network id"""
        self.logger.debug(" Getting tenant network %s from VIM", net_id)
        filter_dict = {"id": net_id}
        net_list = self.get_network_list(filter_dict)

        if len(net_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Network '{}' not found".format(net_id)
            )
        elif len(net_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one network with this criteria"
            )

        net = net_list[0]
        subnets = []
        for subnet_id in net.get("subnets", ()):
            try:
                subnet = self.neutron.show_subnet(subnet_id)
            except Exception as e:
                self.logger.error(
                    "osconnector.get_network(): Error getting subnet %s %s"
                    % (subnet_id, str(e))
                )
                subnet = {"id": subnet_id, "fault": str(e)}

            subnets.append(subnet)

        net["subnets"] = subnets
        net["encapsulation"] = net.get("provider:network_type")
        net["encapsulation_type"] = net.get("provider:network_type")
        net["segmentation_id"] = net.get("provider:segmentation_id")
        net["encapsulation_id"] = net.get("provider:segmentation_id")

        return net

    def delete_network(self, net_id, created_items=None):
        """
        Removes a tenant network from VIM and its associated elements
        :param net_id: VIM identifier of the network, provided by method new_network
        :param created_items: dictionary with extra items to be deleted. provided by method new_network
        Returns the network identifier or raises an exception upon error or when network is not found
        """
        self.logger.debug("Deleting network '%s' from VIM", net_id)

        if created_items is None:
            created_items = {}

        try:
            self._reload_connection()
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")
                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e).__name__, e
                        )
                    )

            # delete VM ports attached to this network before the network
            ports = self.neutron.list_ports(network_id=net_id)
            for p in ports["ports"]:
                try:
                    self.neutron.delete_port(p["id"])
                except Exception as e:
                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))

            self.neutron.delete_network(net_id)

            return net_id
        except (
            neExceptions.ConnectionFailed,
            neExceptions.NetworkNotFoundClient,
            neExceptions.NeutronException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def refresh_nets_status(self, net_list):
        """Get the status of the networks
        Params: the list of network identifiers
        Returns a dictionary with:
            net_id:         #VIM id of this network
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, INACTIVE, DOWN (admin down),
                            #  BUILD (on building process)
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
        """
        net_dict = {}

        for net_id in net_list:
            net = {}

            try:
                net_vim = self.get_network(net_id)

                if net_vim["status"] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[net_vim["status"]]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim["status"]

                if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                    net["status"] = "DOWN"

                net["vim_info"] = self.serialize(net_vim)

                if net_vim.get("fault"):  # TODO
                    net["error_msg"] = str(net_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "DELETED"
                net["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "VIM_ERROR"
                net["error_msg"] = str(e)
            net_dict[net_id] = net
        return net_dict

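    # Return-value sketch for refresh_nets_status (hypothetical ids):
    #   {"net-uuid-1": {"status": "ACTIVE", "vim_info": "..."},
    #    "net-uuid-2": {"status": "DELETED", "error_msg": "..."}}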
    def get_flavor(self, flavor_id):
        """Obtain flavor details from the VIM. Returns the flavor dict details"""
        self.logger.debug("Getting flavor '%s'", flavor_id)

        try:
            self._reload_connection()
            flavor = self.nova.flavors.find(id=flavor_id)
            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)

            return flavor.to_dict()
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that matches the flavor description
        Returns the flavor_id or raises a VimConnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closest flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a
        VimConnNotFoundException is raised
        """
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
                # if len(numas) > 1:
                #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
                # numa=numas[0]
                # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                if epa:
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    @staticmethod
    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
        """Process resource quota and fill up extra_specs.
        Args:
            quota       (dict):     Keeping the quota of resources
            prefix      (str):      Prefix
            extra_specs (dict):     Dict to be filled to be used during flavor creation

        """
        if "limit" in quota:
            extra_specs["quota:" + prefix + "_limit"] = quota["limit"]

        if "reserve" in quota:
            extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]

        if "shares" in quota:
            extra_specs["quota:" + prefix + "_shares_level"] = "custom"
            extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]

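    # Example for process_resource_quota (hypothetical input): a quota
    # {"limit": 10000, "reserve": 1000, "shares": 20} with prefix "cpu" fills
    # extra_specs with {"quota:cpu_limit": 10000, "quota:cpu_reservation": 1000,
    # "quota:cpu_shares_level": "custom", "quota:cpu_shares_share": 20}.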
    @staticmethod
    def process_numa_memory(
        numa: dict, node_id: Optional[int], extra_specs: dict
    ) -> None:
        """Set the memory in extra_specs.
        Args:
            numa        (dict):     A dictionary which includes numa information
            node_id     (int):      ID of numa node
            extra_specs (dict):     To be filled.

        """
        if not numa.get("memory"):
            return
        memory_mb = numa["memory"] * 1024
        memory = "hw:numa_mem.{}".format(node_id)
        extra_specs[memory] = int(memory_mb)

    @staticmethod
    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
        """Set the cpu in extra_specs.
        Args:
            numa        (dict):     A dictionary which includes numa information
            node_id     (int):      ID of numa node
            extra_specs (dict):     To be filled.

        """
        if not numa.get("vcpu"):
            return
        vcpu = numa["vcpu"]
        cpu = "hw:numa_cpus.{}".format(node_id)
        vcpu = ",".join(map(str, vcpu))
        extra_specs[cpu] = vcpu

    @staticmethod
    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has paired-threads.
        Args:
            numa        (dict):     A dictionary which includes numa information
            extra_specs (dict):     To be filled.

        Returns:
            vcpus       (int):      Number of virtual cpus

        """
        if not numa.get("paired-threads"):
            return
        # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
        vcpus = numa["paired-threads"] * 2
        extra_specs["hw:cpu_thread_policy"] = "require"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return vcpus

    @staticmethod
    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has cores.
        Args:
            numa        (dict):     A dictionary which includes numa information
            extra_specs (dict):     To be filled.

        Returns:
            vcpus       (int):      Number of virtual cpus

        """
        # cpu_thread_policy "isolate" implies that the host must not have an SMT
        # architecture, or a non-SMT architecture will be emulated
        if not numa.get("cores"):
            return
        vcpus = numa["cores"]
        extra_specs["hw:cpu_thread_policy"] = "isolate"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return vcpus

    @staticmethod
    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has threads.
        Args:
            numa        (dict):     A dictionary which includes numa information
            extra_specs (dict):     To be filled.

        Returns:
            vcpus       (int):      Number of virtual cpus

        """
        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
        if not numa.get("threads"):
            return
        vcpus = numa["threads"]
        extra_specs["hw:cpu_thread_policy"] = "prefer"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return vcpus

    def _process_numa_parameters_of_flavor(
        self, numas: List, extra_specs: Dict, vcpus: Optional[int]
    ) -> int:
        """Process numa parameters and fill up extra_specs.

        Args:
            numas       (list):     List of dictionaries which include numa information
            extra_specs (dict):     To be filled.
            vcpus       (int):      Number of virtual cpus

        Returns:
            vcpus       (int):      Number of virtual cpus

        """
        numa_nodes = len(numas)
        extra_specs["hw:numa_nodes"] = str(numa_nodes)

        if self.vim_type == "VIO":
            extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
            extra_specs["vmware:latency_sensitivity_level"] = "high"

        for numa in numas:
            if "id" in numa:
                node_id = numa["id"]
                # overwrite ram and vcpus
                # check if key "memory" is present in numa else use ram value at flavor
                self.process_numa_memory(numa, node_id, extra_specs)
                self.process_numa_vcpu(numa, node_id, extra_specs)

            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
            extra_specs["hw:cpu_sockets"] = str(numa_nodes)

            if "paired-threads" in numa:
                vcpus = self.process_numa_paired_threads(numa, extra_specs)

            elif "cores" in numa:
                vcpus = self.process_numa_cores(numa, extra_specs)

            elif "threads" in numa:
                vcpus = self.process_numa_threads(numa, extra_specs)

        return vcpus

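    # Example for _process_numa_parameters_of_flavor (hypothetical input):
    # numas=[{"id": 0, "memory": 2, "vcpu": [0, 1], "paired-threads": 2}]
    # yields extra_specs with {"hw:numa_nodes": "1", "hw:numa_mem.0": 2048,
    # "hw:numa_cpus.0": "0,1", "hw:cpu_sockets": "1",
    # "hw:cpu_thread_policy": "require", "hw:cpu_policy": "dedicated"}
    # and returns vcpus = 4 (2 paired threads x 2).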
    def _change_flavor_name(
        self, name: str, name_suffix: int, flavor_data: dict
    ) -> str:
        """Change the flavor name if the name already exists.

        Args:
            name        (str):      Flavor name to be checked
            name_suffix (int):      Suffix to be appended to name
            flavor_data (dict):     Flavor dict

        Returns:
            name        (str):      New flavor name to be used

        """
        # Get used names
        fl = self.nova.flavors.list()
        fl_names = [f.name for f in fl]

        while name in fl_names:
            name_suffix += 1
            name = flavor_data["name"] + "-" + str(name_suffix)

        return name

    def _process_extended_config_of_flavor(
        self, extended: dict, extra_specs: dict, vcpus: Optional[int]
    ) -> int:
        """Process the extended dict to fill up extra_specs.
        Args:
            extended    (dict):     Keeping the extra specification of flavor
            extra_specs (dict):     Dict to be filled to be used during flavor creation
            vcpus       (int):      Number of virtual cpus

        Returns:
            vcpus       (int):      Number of virtual cpus

        """
        quotas = {
            "cpu-quota": "cpu",
            "mem-quota": "memory",
            "vif-quota": "vif",
            "disk-io-quota": "disk_io",
        }

        page_sizes = {
            "LARGE": "large",
            "SMALL": "small",
            "SIZE_2MB": "2MB",
            "SIZE_1GB": "1GB",
            "PREFER_LARGE": "any",
        }

        policies = {
            "cpu-pinning-policy": "hw:cpu_policy",
            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
            "mem-policy": "hw:numa_mempolicy",
        }

        numas = extended.get("numas")
        if numas:
            vcpus = self._process_numa_parameters_of_flavor(numas, extra_specs, vcpus)

        for quota, item in quotas.items():
            if quota in extended.keys():
                self.process_resource_quota(extended.get(quota), item, extra_specs)

        # Set the mempage size as specified in the descriptor
        if extended.get("mempage-size"):
            if extended["mempage-size"] in page_sizes.keys():
                extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
            else:
                # Normally, validations in NBI should not allow this condition.
                self.logger.debug(
                    "Invalid mempage-size %s. Will be ignored",
                    extended.get("mempage-size"),
                )

        for policy, hw_policy in policies.items():
            if extended.get(policy):
                extra_specs[hw_policy] = extended[policy].lower()

        return vcpus

    @staticmethod
    def _get_flavor_details(flavor_data: dict) -> Tuple:
        """Returns the details of flavor
        Args:
            flavor_data (dict):     Dictionary that includes required flavor details

        Returns:
            ram, vcpus, extra_specs, extended (tuple):  Main items of required flavor

        """
        return (
            flavor_data.get("ram", 64),
            flavor_data.get("vcpus", 1),
            {},
            flavor_data.get("extended"),
        )

    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        If change_name_if_used is True, it will change the name in case of conflict,
        because name repetition is not supported.

        Args:
            flavor_data         (dict):     Flavor details to be processed
            change_name_if_used (bool):     Change name in case of conflict

        Returns:
            flavor_id           (str):      flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )
                    if extended:
                        vcpus = self._process_extended_config_of_flavor(
                            extended, extra_specs, vcpus
                        )

                    # Create flavor
                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)

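    # Usage sketch for new_flavor (added note; values are hypothetical, fields
    # follow the docstring above):
    #   flavor_id = vim.new_flavor({"name": "vnf-flv", "ram": 2048, "vcpus": 2,
    #                               "disk": 10,
    #                               "extended": {"mempage-size": "LARGE"}})
    # With change_name_if_used=True a "-1", "-2", ... suffix is appended on
    # name conflicts, retrying up to 3 times.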
    def delete_flavor(self, flavor_id):
        """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
        try:
            self._reload_connection()
            self.nova.flavors.delete(flavor_id)

            return flavor_id
        # except nvExceptions.BadRequest as e:
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_image(self, image_dict):
        """
        Adds a tenant image to VIM. image_dict is a dictionary with:
            name: name
            disk_format: qcow2, vhd, vmdk, raw (by default), ...
            location: path or URI
            public: "yes" or "no"
            metadata: metadata of the image
        Returns the image_id
        """
        retry = 0
        max_retries = 3

        while retry < max_retries:
            retry += 1
            try:
                self._reload_connection()

                # determine format http://docs.openstack.org/developer/glance/formats.html
                if "disk_format" in image_dict:
                    disk_format = image_dict["disk_format"]
                else:  # autodiscover based on extension
                    if image_dict["location"].endswith(".qcow2"):
                        disk_format = "qcow2"
                    elif image_dict["location"].endswith(".vhd"):
                        disk_format = "vhd"
                    elif image_dict["location"].endswith(".vmdk"):
                        disk_format = "vmdk"
                    elif image_dict["location"].endswith(".vdi"):
                        disk_format = "vdi"
                    elif image_dict["location"].endswith(".iso"):
                        disk_format = "iso"
                    elif image_dict["location"].endswith(".aki"):
                        disk_format = "aki"
                    elif image_dict["location"].endswith(".ari"):
                        disk_format = "ari"
                    elif image_dict["location"].endswith(".ami"):
                        disk_format = "ami"
                    else:
                        disk_format = "raw"

                self.logger.debug(
                    "new_image: '%s' loading from '%s'",
                    image_dict["name"],
                    image_dict["location"],
                )
                if self.vim_type == "VIO":
                    container_format = "bare"
                    if "container_format" in image_dict:
                        container_format = image_dict["container_format"]

                    new_image = self.glance.images.create(
                        name=image_dict["name"],
                        container_format=container_format,
                        disk_format=disk_format,
                    )
                else:
                    new_image = self.glance.images.create(name=image_dict["name"])

                if image_dict["location"].startswith("http"):
                    # TODO there is not a method to direct download. It must be downloaded locally with requests
                    raise vimconn.VimConnNotImplemented("Cannot create image from URL")
                else:  # local path
                    with open(image_dict["location"]) as fimage:
                        self.glance.images.upload(new_image.id, fimage)
                        # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                        #    image_dict.get("public","yes")=="yes",
                        #    container_format="bare", data=fimage, disk_format=disk_format)

                # default to an empty dict so the location key below can always be added
                metadata_to_load = image_dict.get("metadata", {})

                # TODO location is a reserved word for current openstack versions. fixed for VIO please check
                # for openstack
                if self.vim_type == "VIO":
                    metadata_to_load["upload_location"] = image_dict["location"]
                else:
                    metadata_to_load["location"] = image_dict["location"]

                self.glance.images.update(new_image.id, **metadata_to_load)

                return new_image.id
            except (
                nvExceptions.Conflict,
                ksExceptions.ClientException,
                nvExceptions.ClientException,
            ) as e:
                self._format_exception(e)
            except (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
            ) as e:
                # retry on transient errors; raise on the last attempt
                if retry < max_retries:
                    continue

                self._format_exception(e)
            except IOError as e:  # can not open the file
                raise vimconn.VimConnConnectionException(
                    "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                    http_code=vimconn.HTTP_Bad_Request,
                )

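    # Usage sketch for new_image (added note; values are hypothetical, fields
    # follow the docstring above):
    #   image_id = vim.new_image({"name": "cirros", "disk_format": "qcow2",
    #                             "location": "/tmp/cirros.qcow2",
    #                             "metadata": {"os": "linux"}})
    # HTTP(S) locations raise VimConnNotImplemented; only local paths are
    # uploaded through glance.images.upload().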
1672 def delete_image(self, image_id):
1673 """Deletes a tenant image from openstack VIM. Returns the old id"""
1674 try:
1675 self._reload_connection()
1676 self.glance.images.delete(image_id)
1677
1678 return image_id
1679 except (
1680 nvExceptions.NotFound,
1681 ksExceptions.ClientException,
1682 nvExceptions.ClientException,
1683 gl1Exceptions.CommunicationError,
1684 gl1Exceptions.HTTPNotFound,
1685 ConnectionError,
1686 ) as e: # TODO remove
1687 self._format_exception(e)
1688
1689 def get_image_id_from_path(self, path):
1690 """Get the image id from image path in the VIM database. Returns the image_id"""
1691 try:
1692 self._reload_connection()
1693 images = self.glance.images.list()
1694
1695 for image in images:
1696 if image.metadata.get("location") == path:
1697 return image.id
1698
1699 raise vimconn.VimConnNotFoundException(
1700 "image with location '{}' not found".format(path)
1701 )
1702 except (
1703 ksExceptions.ClientException,
1704 nvExceptions.ClientException,
1705 gl1Exceptions.CommunicationError,
1706 ConnectionError,
1707 ) as e:
1708 self._format_exception(e)
1709
1710 def get_image_list(self, filter_dict={}):
1711 """Obtain tenant images from VIM
1712 Filter_dict can be:
1713 id: image id
1714 name: image name
1715 checksum: image checksum
1716 Returns the image list of dictionaries:
1717 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1718 List can be empty
1719 """
1720 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1721
1722 try:
1723 self._reload_connection()
1724 # filter_dict_os = filter_dict.copy()
1725 # First we filter by the available filter fields: name, id. The others are removed.
1726 image_list = self.glance.images.list()
1727 filtered_list = []
1728
1729 for image in image_list:
1730 try:
1731 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1732 continue
1733
1734 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1735 continue
1736
1737 if (
1738 filter_dict.get("checksum")
1739 and image["checksum"] != filter_dict["checksum"]
1740 ):
1741 continue
1742
1743 filtered_list.append(image.copy())
1744 except gl1Exceptions.HTTPNotFound:
1745 pass
1746
1747 return filtered_list
1748 except (
1749 ksExceptions.ClientException,
1750 nvExceptions.ClientException,
1751 gl1Exceptions.CommunicationError,
1752 ConnectionError,
1753 ) as e:
1754 self._format_exception(e)
1755
1756 def __wait_for_vm(self, vm_id, status):
1757 """wait until vm is in the desired status and return True.
1758 If the VM gets in ERROR status, return false.
1759 If the timeout is reached generate an exception"""
1760 elapsed_time = 0
1761 while elapsed_time < server_timeout:
1762 vm_status = self.nova.servers.get(vm_id).status
1763
1764 if vm_status == status:
1765 return True
1766
1767 if vm_status == "ERROR":
1768 return False
1769
1770 time.sleep(5)
1771 elapsed_time += 5
1772
1773 # if we exceeded the timeout rollback
1774 if elapsed_time >= server_timeout:
1775 raise vimconn.VimConnException(
1776 "Timeout waiting for instance " + vm_id + " to get " + status,
1777 http_code=vimconn.HTTP_Request_Timeout,
1778 )
1779
1780 def _get_openstack_availablity_zones(self):
1781 """
1782 Get from openstack availability zones available
1783 :return:
1784 """
1785 try:
1786 openstack_availability_zone = self.nova.availability_zones.list()
1787 openstack_availability_zone = [
1788 str(zone.zoneName)
1789 for zone in openstack_availability_zone
1790 if zone.zoneName != "internal"
1791 ]
1792
1793 return openstack_availability_zone
1794 except Exception:
1795 return None
1796
1797 def _set_availablity_zones(self):
1798 """
1799 Set vim availablity zone
1800 :return:
1801 """
1802 if "availability_zone" in self.config:
1803 vim_availability_zones = self.config.get("availability_zone")
1804
1805 if isinstance(vim_availability_zones, str):
1806 self.availability_zone = [vim_availability_zones]
1807 elif isinstance(vim_availability_zones, list):
1808 self.availability_zone = vim_availability_zones
1809 else:
1810 self.availability_zone = self._get_openstack_availablity_zones()
1811
1812 def _get_vm_availability_zone(
1813 self, availability_zone_index, availability_zone_list
1814 ):
1815 """
1816 Return the availability zone to be used by the created VM.
1817 :return: The VIM availability zone to be used or None
1818 """
1819 if availability_zone_index is None:
1820 if not self.config.get("availability_zone"):
1821 return None
1822 elif isinstance(self.config.get("availability_zone"), str):
1823 return self.config["availability_zone"]
1824 else:
1825 # TODO consider using a different parameter at config for default AV and AV list match
1826 return self.config["availability_zone"][0]
1827
1828 vim_availability_zones = self.availability_zone
1829 # check if the VIM offers enough availability zones as described in the VNFD
1830 if vim_availability_zones and len(availability_zone_list) <= len(
1831 vim_availability_zones
1832 ):
1833 # check if all the NFV availability zone names match VIM zone names
1834 match_by_index = False
1835 for av in availability_zone_list:
1836 if av not in vim_availability_zones:
1837 match_by_index = True
1838 break
1839
1840 if match_by_index:
1841 return vim_availability_zones[availability_zone_index]
1842 else:
1843 return availability_zone_list[availability_zone_index]
1844 else:
1845 raise vimconn.VimConnConflictException(
1846 "No enough availability zones at VIM for this deployment"
1847 )
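
# Illustrative sketch (not part of the original code) of the selection above,
# using hypothetical zone names:
#   self.availability_zone = ["zone-A", "zone-B"]
#   availability_zone_list = ["az-x", "az-y"]  -> names do not match the VIM,
#       so index 1 returns the VIM zone "zone-B" (match by index)
#   availability_zone_list = ["zone-A", "zone-B"]  -> all names match, so
#       index 1 returns "zone-B" from the requested list itself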
1848
1849 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1850 """Fill up the security_groups in the port_dict.
1851
1852 Args:
1853 net (dict): Network details
1854 port_dict (dict): Port details
1855
1856 """
1857 if (
1858 self.config.get("security_groups")
1859 and net.get("port_security") is not False
1860 and not self.config.get("no_port_security_extension")
1861 ):
1862 if not self.security_groups_id:
1863 self._get_ids_from_name()
1864
1865 port_dict["security_groups"] = self.security_groups_id
1866
1867 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1868 """Fill up the network binding depending on network type in the port_dict.
1869
1870 Args:
1871 net (dict): Network details
1872 port_dict (dict): Port details
1873
1874 """
1875 if not net.get("type"):
1876 raise vimconn.VimConnException("Type is missing in the network details.")
1877
1878 if net["type"] == "virtual":
1879 pass
1880
1881 # For VF
1882 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1883
1884 port_dict["binding:vnic_type"] = "direct"
1885
1886 # VIO specific Changes
1887 if self.vim_type == "VIO":
1888 # Need to create port with port_security_enabled = False and no-security-groups
1889 port_dict["port_security_enabled"] = False
1890 port_dict["provider_security_groups"] = []
1891 port_dict["security_groups"] = []
1892
1893 else:
1894 # For PT PCI-PASSTHROUGH
1895 port_dict["binding:vnic_type"] = "direct-physical"
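
# Illustrative mapping (not part of the original code), with hypothetical nets:
#   net = {"type": "virtual"}          -> port_dict is left unchanged
#   net = {"type": "VF"}               -> port_dict["binding:vnic_type"] = "direct"
#   net = {"type": "PCI-PASSTHROUGH"}  -> port_dict["binding:vnic_type"] = "direct-physical"
# On a VIO VIM the "VF"/"SR-IOV" case additionally disables port security.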
1896
1897 @staticmethod
1898 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1899 """Set the "ip" parameter in net dictionary.
1900
1901 Args:
1902 new_port (dict): New created port
1903 net (dict): Network details
1904
1905 """
1906 fixed_ips = new_port["port"].get("fixed_ips")
1907
1908 if fixed_ips:
1909 net["ip"] = fixed_ips[0].get("ip_address")
1910 else:
1911 net["ip"] = None
1912
1913 @staticmethod
1914 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1915 """Fill up the mac_address and fixed_ips in port_dict.
1916
1917 Args:
1918 net (dict): Network details
1919 port_dict (dict): Port details
1920
1921 """
1922 if net.get("mac_address"):
1923 port_dict["mac_address"] = net["mac_address"]
1924
1925 if net.get("ip_address"):
1926 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1927 # TODO add "subnet_id": <subnet_id>
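
# Illustrative sketch (not part of the original code), with hypothetical values:
#   net = {"mac_address": "fa:16:3e:00:11:22", "ip_address": "10.0.0.10"}
#   -> port_dict["mac_address"] = "fa:16:3e:00:11:22"
#   -> port_dict["fixed_ips"] = [{"ip_address": "10.0.0.10"}]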
1928
1929 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1930 """Create new port using neutron.
1931
1932 Args:
1933 port_dict (dict): Port details
1934 created_items (dict): All created items
1935 net (dict): Network details
1936
1937 Returns:
1938 new_port (dict): New created port
1939
1940 """
1941 new_port = self.neutron.create_port({"port": port_dict})
1942 created_items["port:" + str(new_port["port"]["id"])] = True
1943 net["mac_adress"] = new_port["port"]["mac_address"]
1944 net["vim_id"] = new_port["port"]["id"]
1945
1946 return new_port
1947
1948 def _create_port(
1949 self, net: dict, name: str, created_items: dict
1950 ) -> Tuple[dict, dict]:
1951 """Create port using net details.
1952
1953 Args:
1954 net (dict): Network details
1955 name (str): Name to be used as network name if net dict does not include name
1956 created_items (dict): All created items
1957
1958 Returns:
1959 new_port, port New created port, port dictionary
1960
1961 """
1962
1963 port_dict = {
1964 "network_id": net["net_id"],
1965 "name": net.get("name"),
1966 "admin_state_up": True,
1967 }
1968
1969 if not port_dict["name"]:
1970 port_dict["name"] = name
1971
1972 self._prepare_port_dict_security_groups(net, port_dict)
1973
1974 self._prepare_port_dict_binding(net, port_dict)
1975
1976 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1977
1978 new_port = self._create_new_port(port_dict, created_items, net)
1979
1980 vimconnector._set_fixed_ip(new_port, net)
1981
1982 port = {"port-id": new_port["port"]["id"]}
1983
1984 if float(self.nova.api_version.get_string()) >= 2.32:
1985 port["tag"] = new_port["port"]["name"]
1986
1987 return new_port, port
1988
1989 def _prepare_network_for_vminstance(
1990 self,
1991 name: str,
1992 net_list: list,
1993 created_items: dict,
1994 net_list_vim: list,
1995 external_network: list,
1996 no_secured_ports: list,
1997 ) -> None:
1998 """Create port and fill up net dictionary for new VM instance creation.
1999
2000 Args:
2001 name (str): VM name, used as the default port name
2002 net_list (list): List of networks
2003 created_items (dict): All created items belonging to a VM
2004 net_list_vim (list): List of ports
2005 external_network (list): List of external-networks
2006 no_secured_ports (list): Port security disabled ports
2007 """
2008
2009 self._reload_connection()
2010
2011 for net in net_list:
2012 # Skip non-connected iface
2013 if not net.get("net_id"):
2014 continue
2015
2016 new_port, port = self._create_port(net, name, created_items)
2017
2018 net_list_vim.append(port)
2019
2020 if net.get("floating_ip", False):
2021 net["exit_on_floating_ip_error"] = True
2022 external_network.append(net)
2023
2024 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2025 net["exit_on_floating_ip_error"] = False
2026 external_network.append(net)
2027 net["floating_ip"] = self.config.get("use_floating_ip")
2028
2029 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2030 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2031 if net.get("port_security") is False and not self.config.get(
2032 "no_port_security_extension"
2033 ):
2034 no_secured_ports.append(
2035 (
2036 new_port["port"]["id"],
2037 net.get("port_security_disable_strategy"),
2038 )
2039 )
2040
2041 def _prepare_persistent_root_volumes(
2042 self,
2043 name: str,
2044 vm_av_zone: str,
2045 disk: dict,
2046 base_disk_index: int,
2047 block_device_mapping: dict,
2048 existing_vim_volumes: list,
2049 created_items: dict,
2050 ) -> Optional[str]:
2051 """Prepare persistent root volumes for new VM instance.
2052
2053 Args:
2054 name (str): Name of VM instance
2055 vm_av_zone (str): Availability zone for the volume (same as the VM)
2056 disk (dict): Disk details
2057 base_disk_index (int): Disk index
2058 block_device_mapping (dict): Block device details
2059 existing_vim_volumes (list): Existing disk details
2060 created_items (dict): All created items belonging to the VM
2061
2062 Returns:
2063 boot_volume_id (str): ID of boot volume
2064
2065 """
2066 # Disk may include only vim_volume_id or only vim_id.
2067 # Use an existing persistent root volume, found via vim_volume_id or vim_id
2068 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2069
2070 if disk.get(key_id):
2071
2072 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2073 existing_vim_volumes.append({"id": disk[key_id]})
2074
2075 else:
2076 # Create persistent root volume
2077 volume = self.cinder.volumes.create(
2078 size=disk["size"],
2079 name=name + "vd" + chr(base_disk_index),
2080 imageRef=disk["image_id"],
2081 # Make sure volume is in the same AZ as the VM to be attached to
2082 availability_zone=vm_av_zone,
2083 )
2084 boot_volume_id = volume.id
2085 self.update_block_device_mapping(
2086 volume=volume,
2087 block_device_mapping=block_device_mapping,
2088 base_disk_index=base_disk_index,
2089 disk=disk,
2090 created_items=created_items,
2091 )
2092
2093 return boot_volume_id
2094
2095 @staticmethod
2096 def update_block_device_mapping(
2097 volume: object,
2098 block_device_mapping: dict,
2099 base_disk_index: int,
2100 disk: dict,
2101 created_items: dict,
2102 ) -> None:
2103 """Add volume information to block device mapping dict.
2104 Args:
2105 volume (object): Created volume object
2106 block_device_mapping (dict): Block device details
2107 base_disk_index (int): Disk index
2108 disk (dict): Disk details
2109 created_items (dict): All created items belonging to the VM
2110 """
2111 if not volume:
2112 raise vimconn.VimConnException("Volume is empty.")
2113
2114 if not hasattr(volume, "id"):
2115 raise vimconn.VimConnException(
2116 "Created volume is not valid, does not have id attribute."
2117 )
2118
2119 volume_txt = "volume:" + str(volume.id)
2120 if disk.get("keep"):
2121 volume_txt += ":keep"
2122 created_items[volume_txt] = True
2123 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
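
# Illustrative sketch (not part of the original code), with a hypothetical id:
#   volume.id = "6f8b", base_disk_index = ord("b"), disk = {"keep": True}
#   -> created_items["volume:6f8b:keep"] = True
#   -> block_device_mapping["vdb"] = "6f8b"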
2124
2125 def _prepare_non_root_persistent_volumes(
2126 self,
2127 name: str,
2128 disk: dict,
2129 vm_av_zone: str,
2130 block_device_mapping: dict,
2131 base_disk_index: int,
2132 existing_vim_volumes: list,
2133 created_items: dict,
2134 ) -> None:
2135 """Prepare persistent volumes for new VM instance.
2136
2137 Args:
2138 name (str): Name of VM instance
2139 disk (dict): Disk details
2140 vm_av_zone (str): Availability zone for the volume (same as the VM)
2141 block_device_mapping (dict): Block device details
2142 base_disk_index (int): Disk index
2143 existing_vim_volumes (list): Existing disk details
2144 created_items (dict): All created items belonging to the VM
2145 """
2146 # Non-root persistent volumes
2147 # Disk may include only vim_volume_id or only vim_id.
2148 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2149
2150 if disk.get(key_id):
2151
2152 # Use existing persistent volume
2153 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2154 existing_vim_volumes.append({"id": disk[key_id]})
2155
2156 else:
2157 # Create persistent volume
2158 volume = self.cinder.volumes.create(
2159 size=disk["size"],
2160 name=name + "vd" + chr(base_disk_index),
2161 # Make sure volume is in the same AZ as the VM to be attached to
2162 availability_zone=vm_av_zone,
2163 )
2164 self.update_block_device_mapping(
2165 volume=volume,
2166 block_device_mapping=block_device_mapping,
2167 base_disk_index=base_disk_index,
2168 disk=disk,
2169 created_items=created_items,
2170 )
2171
2172 def _wait_for_created_volumes_availability(
2173 self, elapsed_time: int, created_items: dict
2174 ) -> Optional[int]:
2175 """Wait till created volumes become available.
2176
2177 Args:
2178 elapsed_time (int): Passed time while waiting
2179 created_items (dict): All created items belonging to the VM
2180
2181 Returns:
2182 elapsed_time (int): Time spent while waiting
2183
2184 """
2185
2186 while elapsed_time < volume_timeout:
2187 for created_item in created_items:
2188 v, volume_id = created_item.split(":")[:2]
2192 if v == "volume":
2193 if self.cinder.volumes.get(volume_id).status != "available":
2194 break
2195 else:
2196 # All ready: break from while
2197 break
2198
2199 time.sleep(5)
2200 elapsed_time += 5
2201
2202 return elapsed_time
2203
2204 def _wait_for_existing_volumes_availability(
2205 self, elapsed_time: int, existing_vim_volumes: list
2206 ) -> Optional[int]:
2207 """Wait till existing volumes become available.
2208
2209 Args:
2210 elapsed_time (int): Passed time while waiting
2211 existing_vim_volumes (list): Existing volume details
2212
2213 Returns:
2214 elapsed_time (int): Time spent while waiting
2215
2216 """
2217
2218 while elapsed_time < volume_timeout:
2219 for volume in existing_vim_volumes:
2220 if self.cinder.volumes.get(volume["id"]).status != "available":
2221 break
2222 else: # all ready: break from while
2223 break
2224
2225 time.sleep(5)
2226 elapsed_time += 5
2227
2228 return elapsed_time
2229
2230 def _prepare_disk_for_vminstance(
2231 self,
2232 name: str,
2233 existing_vim_volumes: list,
2234 created_items: dict,
2235 vm_av_zone: str,
2236 block_device_mapping: dict,
2237 disk_list: list = None,
2238 ) -> None:
2239 """Prepare all volumes for new VM instance.
2240
2241 Args:
2242 name (str): Name of Instance
2243 existing_vim_volumes (list): List of existing volumes
2244 created_items (dict): All created items belonging to the VM
2245 vm_av_zone (str): VM availability zone
2246 block_device_mapping (dict): Block devices to be attached to VM
2247 disk_list (list): List of disks
2248
2249 """
2250 # Create additional volumes in case these are present in disk_list
2251 base_disk_index = ord("b")
2252 boot_volume_id = None
2253 elapsed_time = 0
2254
2255 for disk in disk_list:
2256 if "image_id" in disk:
2257 # Root persistent volume
2258 base_disk_index = ord("a")
2259 boot_volume_id = self._prepare_persistent_root_volumes(
2260 name=name,
2261 vm_av_zone=vm_av_zone,
2262 disk=disk,
2263 base_disk_index=base_disk_index,
2264 block_device_mapping=block_device_mapping,
2265 existing_vim_volumes=existing_vim_volumes,
2266 created_items=created_items,
2267 )
2268 else:
2269 # Non-root persistent volume
2270 self._prepare_non_root_persistent_volumes(
2271 name=name,
2272 disk=disk,
2273 vm_av_zone=vm_av_zone,
2274 block_device_mapping=block_device_mapping,
2275 base_disk_index=base_disk_index,
2276 existing_vim_volumes=existing_vim_volumes,
2277 created_items=created_items,
2278 )
2279 base_disk_index += 1
2280
2281 # Wait until created volumes are in "available" status
2282 elapsed_time = self._wait_for_created_volumes_availability(
2283 elapsed_time, created_items
2284 )
2285 # Wait until existing VIM volumes are in "available" status
2286 elapsed_time = self._wait_for_existing_volumes_availability(
2287 elapsed_time, existing_vim_volumes
2288 )
2289 # If we exceeded the timeout rollback
2290 if elapsed_time >= volume_timeout:
2291 raise vimconn.VimConnException(
2292 "Timeout creating volumes for instance " + name,
2293 http_code=vimconn.HTTP_Request_Timeout,
2294 )
2295 if boot_volume_id:
2296 self.cinder.volumes.set_bootable(boot_volume_id, True)
2297
2298 def _find_the_external_network_for_floating_ip(self):
2299 """Get the external network ip in order to create floating IP.
2300
2301 Returns:
2302 pool_id (str): External network pool ID
2303
2304 """
2305
2306 # Find the external network
2307 external_nets = list()
2308
2309 for net in self.neutron.list_networks()["networks"]:
2310 if net["router:external"]:
2311 external_nets.append(net)
2312
2313 if len(external_nets) == 0:
2314 raise vimconn.VimConnException(
2315 "Cannot create floating_ip automatically since "
2316 "no external network is present",
2317 http_code=vimconn.HTTP_Conflict,
2318 )
2319
2320 if len(external_nets) > 1:
2321 raise vimconn.VimConnException(
2322 "Cannot create floating_ip automatically since "
2323 "multiple external networks are present",
2324 http_code=vimconn.HTTP_Conflict,
2325 )
2326
2327 # Pool ID
2328 return external_nets[0].get("id")
2329
2330 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2331 """Trigger neutron to create a new floating IP using external network ID.
2332
2333 Args:
2334 param (dict): Input parameters to create a floating IP
2335 created_items (dict): All created items belonging to the new VM instance
2336
2337 Raises:
2338
2339 VimConnException
2340 """
2341 try:
2342 self.logger.debug("Creating floating IP")
2343 new_floating_ip = self.neutron.create_floatingip(param)
2344 free_floating_ip = new_floating_ip["floatingip"]["id"]
2345 created_items["floating_ip:" + str(free_floating_ip)] = True
2346
2347 except Exception as e:
2348 raise vimconn.VimConnException(
2349 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2350 http_code=vimconn.HTTP_Conflict,
2351 )
2352
2353 def _create_floating_ip(
2354 self, floating_network: dict, server: object, created_items: dict
2355 ) -> None:
2356 """Get the available Pool ID and create a new floating IP.
2357
2358 Args:
2359 floating_network (dict): Dict including external network ID
2360 server (object): Server object
2361 created_items (dict): All created items belonging to the new VM instance
2362
2363 """
2364
2365 # Pool_id is available
2366 if (
2367 isinstance(floating_network["floating_ip"], str)
2368 and floating_network["floating_ip"].lower() != "true"
2369 ):
2370 pool_id = floating_network["floating_ip"]
2371
2372 # Find the Pool_id
2373 else:
2374 pool_id = self._find_the_external_network_for_floating_ip()
2375
2376 param = {
2377 "floatingip": {
2378 "floating_network_id": pool_id,
2379 "tenant_id": server.tenant_id,
2380 }
2381 }
2382
2383 self._neutron_create_float_ip(param, created_items)
2384
2385 def _find_floating_ip(
2386 self,
2387 server: object,
2388 floating_ips: list,
2389 floating_network: dict,
2390 ) -> Optional[str]:
2391 """Find the available free floating IPs if there are.
2392
2393 Args:
2394 server (object): Server object
2395 floating_ips (list): List of floating IPs
2396 floating_network (dict): Details of floating network such as ID
2397
2398 Returns:
2399 free_floating_ip (str): Free floating ip address
2400
2401 """
2402 for fip in floating_ips:
2403 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2404 continue
2405
2406 if isinstance(floating_network["floating_ip"], str):
2407 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2408 continue
2409
2410 return fip["id"]
2411
2412 def _assign_floating_ip(
2413 self, free_floating_ip: str, floating_network: dict
2414 ) -> Dict:
2415 """Assign the free floating ip address to port.
2416
2417 Args:
2418 free_floating_ip (str): Floating IP to be assigned
2419 floating_network (dict): ID of floating network
2420
2421 Returns:
2422 fip (dict): Floating IP details
2423
2424 """
2425 # The vim_id key contains the neutron.port_id
2426 self.neutron.update_floatingip(
2427 free_floating_ip,
2428 {"floatingip": {"port_id": floating_network["vim_id"]}},
2429 )
2430 # Wait 5 seconds to guard against races; the caller re-checks that the IP kept this port
2431 time.sleep(5)
2432
2433 return self.neutron.show_floatingip(free_floating_ip)
2434
2435 def _get_free_floating_ip(
2436 self, server: object, floating_network: dict
2437 ) -> Optional[str]:
2438 """Get the free floating IP address.
2439
2440 Args:
2441 server (object): Server Object
2442 floating_network (dict): Floating network details
2443
2444 Returns:
2445 free_floating_ip (str): Free floating ip addr
2446
2447 """
2448
2449 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2450
2451 # Randomize
2452 random.shuffle(floating_ips)
2453
2454 return self._find_floating_ip(server, floating_ips, floating_network)
2455
2456 def _prepare_external_network_for_vminstance(
2457 self,
2458 external_network: list,
2459 server: object,
2460 created_items: dict,
2461 vm_start_time: float,
2462 ) -> None:
2463 """Assign floating IP address for VM instance.
2464
2465 Args:
2466 external_network (list): List of external networks to get floating IPs from
2467 server (object): Server Object
2468 created_items (dict): All created items belonging to the new VM instance
2469 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2470
2471 Raises:
2472 VimConnException
2473
2474 """
2475 for floating_network in external_network:
2476 try:
2477 assigned = False
2478 floating_ip_retries = 3
2479 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2480 # several times
2481 while not assigned:
2482
2483 free_floating_ip = self._get_free_floating_ip(
2484 server, floating_network
2485 )
2486
2487 if not free_floating_ip:
2488 # Create one and loop again to pick it up
2489 self._create_floating_ip(floating_network, server, created_items)
2490 continue
2491
2492 try:
2493 # For race condition ensure not already assigned
2494 fip = self.neutron.show_floatingip(free_floating_ip)
2495
2496 if fip["floatingip"].get("port_id"):
2497 continue
2498
2499 # Assign floating ip
2500 fip = self._assign_floating_ip(
2501 free_floating_ip, floating_network
2502 )
2503
2504 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2505 self.logger.warning(
2506 "floating_ip {} re-assigned to other port".format(
2507 free_floating_ip
2508 )
2509 )
2510 continue
2511
2512 self.logger.debug(
2513 "Assigned floating_ip {} to VM {}".format(
2514 free_floating_ip, server.id
2515 )
2516 )
2517
2518 assigned = True
2519
2520 except Exception as e:
2521 # OpenStack needs some time after VM creation to assign an IP, so retry if it fails
2522 vm_status = self.nova.servers.get(server.id).status
2523
2524 if vm_status not in ("ACTIVE", "ERROR"):
2525 if time.time() - vm_start_time < server_timeout:
2526 time.sleep(5)
2527 continue
2528 elif floating_ip_retries > 0:
2529 floating_ip_retries -= 1
2530 continue
2531
2532 raise vimconn.VimConnException(
2533 "Cannot create floating_ip: {} {}".format(
2534 type(e).__name__, e
2535 ),
2536 http_code=vimconn.HTTP_Conflict,
2537 )
2538
2539 except Exception as e:
2540 if not floating_network["exit_on_floating_ip_error"]:
2541 self.logger.error("Cannot create floating_ip. %s", str(e))
2542 continue
2543
2544 raise
2545
2546 def _update_port_security_for_vminstance(
2547 self,
2548 no_secured_ports: list,
2549 server: object,
2550 ) -> None:
2551 """Updates the port security according to no_secured_ports list.
2552
2553 Args:
2554 no_secured_ports (list): List of ports that security will be disabled
2555 server (object): Server Object
2556
2557 Raises:
2558 VimConnException
2559
2560 """
2561 # Wait until the VM is active and then disable the port-security
2562 if no_secured_ports:
2563 self.__wait_for_vm(server.id, "ACTIVE")
2564
2565 for port in no_secured_ports:
2566 port_update = {
2567 "port": {"port_security_enabled": False, "security_groups": None}
2568 }
2569
2570 if port[1] == "allow-address-pairs":
2571 port_update = {
2572 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2573 }
2574
2575 try:
2576 self.neutron.update_port(port[0], port_update)
2577
2578 except Exception as e:
2579 raise vimconn.VimConnException(
2580 "It was not possible to disable port security for port {}: {}".format(
2581 port[0], e
2582 )
2583 )
2585
2586 def new_vminstance(
2587 self,
2588 name: str,
2589 description: str,
2590 start: bool,
2591 image_id: str,
2592 flavor_id: str,
2593 affinity_group_list: list,
2594 net_list: list,
2595 cloud_config=None,
2596 disk_list=None,
2597 availability_zone_index=None,
2598 availability_zone_list=None,
2599 ) -> tuple:
2600 """Adds a VM instance to VIM.
2601
2602 Args:
2603 name (str): name of VM
2604 description (str): description
2605 start (bool): indicates if VM must start or boot in pause mode. Ignored
2606 image_id (str): image uuid
2607 flavor_id (str): flavor uuid
2608 affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignored if empty.
2609 net_list (list): list of interfaces, each one is a dictionary with:
2610 name: name of network
2611 net_id: network uuid to connect
2612 vpci: virtual PCI address to assign, ignored because OpenStack lacks support #TODO
2613 model: interface model, ignored #TODO
2614 mac_address: used for SR-IOV ifaces #TODO for other types
2615 use: 'data', 'bridge', 'mgmt'
2616 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2617 vim_id: filled/added by this function
2618 floating_ip: True/False (or it can be None)
2619 port_security: True/False
2620 cloud_config (dict): (optional) dictionary with:
2621 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2622 users: (optional) list of users to be inserted, each item is a dict with:
2623 name: (mandatory) user name,
2624 key-pairs: (optional) list of strings with the public key to be inserted to the user
2625 user-data: (optional) string is a text script to be passed directly to cloud-init
2626 config-files: (optional). List of files to be transferred. Each item is a dict with:
2627 dest: (mandatory) string with the destination absolute path
2628 encoding: (optional, by default text). Can be one of:
2629 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2630 content : (mandatory) string with the content of the file
2631 permissions: (optional) string with file permissions, typically octal notation '0644'
2632 owner: (optional) file owner, string with the format 'owner:group'
2633 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2634 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2635 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2636 size: (mandatory) string with the size of the disk in GB
2637 vim_id: (optional) should use this existing volume id
2638 availability_zone_index: Index of availability_zone_list to use for this VM. None if no availability zone is required
2639 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2640 availability_zone_index is None
2641 #TODO ip, security groups
2642
2643 Returns:
2644 A tuple with the instance identifier and created_items or raises an exception on error
2645 created_items can be None or a dictionary where this method can include key-values that will be passed to
2646 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2647 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2648 as not present.
2649
2650 """
2651 self.logger.debug(
2652 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2653 image_id,
2654 flavor_id,
2655 str(net_list),
2656 )
2657
2658 try:
2659 server = None
2660 created_items = {}
2661 net_list_vim = []
2662 # list of external networks to be connected to instance, later on used to create floating_ip
2663 external_network = []
2664 # List of ports with port-security disabled
2665 no_secured_ports = []
2666 block_device_mapping = {}
2667 existing_vim_volumes = []
2668 server_group_id = None
2669 scheduler_hints = {}
2670
2671 # Check the Openstack Connection
2672 self._reload_connection()
2673
2674 # Prepare network list
2675 self._prepare_network_for_vminstance(
2676 name=name,
2677 net_list=net_list,
2678 created_items=created_items,
2679 net_list_vim=net_list_vim,
2680 external_network=external_network,
2681 no_secured_ports=no_secured_ports,
2682 )
2683
2684 # Cloud config
2685 config_drive, userdata = self._create_user_data(cloud_config)
2686
2687 # Get availability Zone
2688 vm_av_zone = self._get_vm_availability_zone(
2689 availability_zone_index, availability_zone_list
2690 )
2691
2692 if disk_list:
2693 # Prepare disks
2694 self._prepare_disk_for_vminstance(
2695 name=name,
2696 existing_vim_volumes=existing_vim_volumes,
2697 created_items=created_items,
2698 vm_av_zone=vm_av_zone,
2699 block_device_mapping=block_device_mapping,
2700 disk_list=disk_list,
2701 )
2702
2703 if affinity_group_list:
2704 # Only first id on the list will be used. Openstack restriction
2705 server_group_id = affinity_group_list[0]["affinity_group_id"]
2706 scheduler_hints["group"] = server_group_id
2707
2708 self.logger.debug(
2709 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2710 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2711 "block_device_mapping={}, server_group={})".format(
2712 name,
2713 image_id,
2714 flavor_id,
2715 net_list_vim,
2716 self.config.get("security_groups"),
2717 vm_av_zone,
2718 self.config.get("keypair"),
2719 userdata,
2720 config_drive,
2721 block_device_mapping,
2722 server_group_id,
2723 )
2724 )
2725
2726 # Create VM
2727 server = self.nova.servers.create(
2728 name=name,
2729 image=image_id,
2730 flavor=flavor_id,
2731 nics=net_list_vim,
2732 security_groups=self.config.get("security_groups"),
2733 # TODO remove security_groups in future versions. Already at neutron port
2734 availability_zone=vm_av_zone,
2735 key_name=self.config.get("keypair"),
2736 userdata=userdata,
2737 config_drive=config_drive,
2738 block_device_mapping=block_device_mapping,
2739 scheduler_hints=scheduler_hints,
2740 )
2741
2742 vm_start_time = time.time()
2743
2744 self._update_port_security_for_vminstance(no_secured_ports, server)
2745
2746 self._prepare_external_network_for_vminstance(
2747 external_network=external_network,
2748 server=server,
2749 created_items=created_items,
2750 vm_start_time=vm_start_time,
2751 )
2752
2753 return server.id, created_items
2754
2755 except Exception as e:
2756 server_id = None
2757 if server:
2758 server_id = server.id
2759
2760 try:
2761 created_items = self.remove_keep_tag_from_persistent_volumes(
2762 created_items
2763 )
2764
2765 self.delete_vminstance(server_id, created_items)
2766
2767 except Exception as e2:
2768 self.logger.error("new_vminstance rollback fail {}".format(e2))
2769
2770 self._format_exception(e)
2771
2772 @staticmethod
2773 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2774 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2775
2776 Args:
2777 created_items (dict): All created items belonging to the VM
2778
2779 Returns:
2780 updated_created_items (dict): Dict which does not include keep flag for volumes.
2781
2782 """
2783 return {
2784 key.replace(":keep", ""): value for (key, value) in created_items.items()
2785 }
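
# Illustrative sketch (not part of the original code), with hypothetical keys:
#   {"volume:111:keep": True, "port:222": True}
#   -> {"volume:111": True, "port:222": True}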
2786
2787 def get_vminstance(self, vm_id):
2788 """Returns the VM instance information from VIM"""
2789 # self.logger.debug("Getting VM from VIM")
2790 try:
2791 self._reload_connection()
2792 server = self.nova.servers.find(id=vm_id)
2793 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2794
2795 return server.to_dict()
2796 except (
2797 ksExceptions.ClientException,
2798 nvExceptions.ClientException,
2799 nvExceptions.NotFound,
2800 ConnectionError,
2801 ) as e:
2802 self._format_exception(e)
2803
2804 def get_vminstance_console(self, vm_id, console_type="vnc"):
2805 """
2806 Get a console for the virtual machine
2807 Params:
2808 vm_id: uuid of the VM
2809 console_type, can be:
2810 "novnc" (by default), "xvpvnc" for VNC types,
2811 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2812 Returns dict with the console parameters:
2813 protocol: ssh, ftp, http, https, ...
2814 server: usually ip address
2815 port: the http, ssh, ... port
2816 suffix: extra text, e.g. the http path and query string
2817 """
2818 self.logger.debug("Getting VM CONSOLE from VIM")
2819
2820 try:
2821 self._reload_connection()
2822 server = self.nova.servers.find(id=vm_id)
2823
2824 if console_type is None or console_type == "novnc":
2825 console_dict = server.get_vnc_console("novnc")
2826 elif console_type == "xvpvnc":
2827 console_dict = server.get_vnc_console(console_type)
2828 elif console_type == "rdp-html5":
2829 console_dict = server.get_rdp_console(console_type)
2830 elif console_type == "spice-html5":
2831 console_dict = server.get_spice_console(console_type)
2832 else:
2833 raise vimconn.VimConnException(
2834 "console type '{}' not allowed".format(console_type),
2835 http_code=vimconn.HTTP_Bad_Request,
2836 )
2837
2838 console_dict1 = console_dict.get("console")
2839
2840 if console_dict1:
2841 console_url = console_dict1.get("url")
2842
2843 if console_url:
2844 # parse console_url
2845 protocol_index = console_url.find("//")
2846 suffix_index = (
2847 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2848 )
2849 port_index = (
2850 console_url[protocol_index + 2 : suffix_index].find(":")
2851 + protocol_index
2852 + 2
2853 )
2854
2855 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2856 return (
2857 -vimconn.HTTP_Internal_Server_Error,
2858 "Unexpected response from VIM",
2859 )
2860
2861 console_dict = {
2862 "protocol": console_url[0:protocol_index],
2863 "server": console_url[protocol_index + 2 : port_index],
2864 "port": console_url[port_index:suffix_index],
2865 "suffix": console_url[suffix_index + 1 :],
2866 }
2868
2869 return console_dict
2870 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2871 except (
2872 nvExceptions.NotFound,
2873 ksExceptions.ClientException,
2874 nvExceptions.ClientException,
2875 nvExceptions.BadRequest,
2876 ConnectionError,
2877 ) as e:
2878 self._format_exception(e)
2879
2880 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2881 """Neutron delete ports by id.
2882 Args:
2883 k_id (str): Port id in the VIM
2884 """
2885 try:
2886
2887 port_dict = self.neutron.list_ports()
2888 existing_ports = [port["id"] for port in port_dict.get("ports", [])]
2889
2890 if k_id in existing_ports:
2891 self.neutron.delete_port(k_id)
2892
2893 except Exception as e:
2894
2895 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2896
2897 def _delete_volumes_by_id_wth_cinder(
2898 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2899 ) -> bool:
2900 """Cinder delete volume by id.
2901 Args:
2902 k (str): Full item name in created_items
2903 k_id (str): ID of volume in VIM
2904 volumes_to_hold (list): Volumes not to delete
2905 created_items (dict): All created items belonging to the VM
2906 Returns: True if the volume is not yet available, so deletion must be retried"""
2907 try:
2908 if k_id in volumes_to_hold:
2909 return
2910
2911 if self.cinder.volumes.get(k_id).status != "available":
2912 return True
2913
2914 else:
2915 self.cinder.volumes.delete(k_id)
2916 created_items[k] = None
2917
2918 except Exception as e:
2919 self.logger.error(
2920 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2921 )
2922
2923 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2924 """Neutron delete floating ip by id.
2925 Args:
2926 k (str): Full item name in created_items
2927 k_id (str): ID of floating ip in VIM
2928 created_items (dict): All created items belonging to the VM
2929 """
2930 try:
2931 self.neutron.delete_floatingip(k_id)
2932 created_items[k] = None
2933
2934 except Exception as e:
2935 self.logger.error(
2936 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2937 )
2938
2939 @staticmethod
2940 def _get_item_name_id(k: str) -> Tuple[str, str]:
2941 k_item, _, k_id = k.partition(":")
2942 return k_item, k_id
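
# Illustrative sketch (not part of the original code), with hypothetical keys:
#   "port:9a2d"        -> ("port", "9a2d")
#   "volume:6f8b:keep" -> ("volume", "6f8b:keep")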
2943
2944 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2945 """Delete VM ports attached to the networks before deleting virtual machine.
2946 Args:
2947 created_items (dict): All created items belonging to the VM
2948 """
2949
2950 for k, v in created_items.items():
2951 if not v: # skip already deleted
2952 continue
2953
2954 try:
2955 k_item, k_id = self._get_item_name_id(k)
2956 if k_item == "port":
2957 self._delete_ports_by_id_wth_neutron(k_id)
2958
2959 except Exception as e:
2960 self.logger.error(
2961 "Error deleting port: {}: {}".format(type(e).__name__, e)
2962 )
2963
2964 def _delete_created_items(
2965 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2966 ) -> bool:
2967 """Delete Volumes and floating ip if they exist in created_items."""
2968 for k, v in created_items.items():
2969 if not v: # skip already deleted
2970 continue
2971
2972 try:
2973 k_item, k_id = self._get_item_name_id(k)
2974
2975 if k_item == "volume":
2976
2977 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2978 k, k_id, volumes_to_hold, created_items
2979 )
2980
2981 if unavailable_vol:
2982 keep_waiting = True
2983
2984 elif k_item == "floating_ip":
2985
2986 self._delete_floating_ip_by_id(k, k_id, created_items)
2987
2988 except Exception as e:
2989 self.logger.error("Error deleting {}: {}".format(k, e))
2990
2991 return keep_waiting
2992
2993 @staticmethod
2994 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
2995 """Remove the volumes which has key flag from created_items
2996
2997 Args:
2998 created_items (dict): All created items belongs to VM
2999
3000 Returns:
3001 created_items (dict): Persistent volumes eliminated created_items
3002 """
3003 return {
3004 key: value
3005 for (key, value) in created_items.items()
3006 if len(key.split(":")) == 2
3007 }
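
# Illustrative sketch (not part of the original code), with hypothetical keys:
#   {"volume:111:keep": True, "port:222": True, "floating_ip:333": True}
#   -> {"port:222": True, "floating_ip:333": True}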
3008
3009 def delete_vminstance(
3010 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3011 ) -> None:
3012 """Removes a VM instance from VIM. Returns the old identifier.
3013 Args:
3014 vm_id (str): Identifier of VM instance
3015 created_items (dict): All created items belonging to the VM
3016 volumes_to_hold (list): Volumes_to_hold
3017 """
3018 if created_items is None:
3019 created_items = {}
3020 if volumes_to_hold is None:
3021 volumes_to_hold = []
3022
3023 try:
3024 created_items = self._extract_items_wth_keep_flag_from_created_items(
3025 created_items
3026 )
3027
3028 self._reload_connection()
3029
3030 # Delete VM ports attached to the networks before the virtual machine
3031 if created_items:
3032 self._delete_vm_ports_attached_to_network(created_items)
3033
3034 if vm_id:
3035 self.nova.servers.delete(vm_id)
3036
3037 # Although detached, volumes should be in "available" status before deleting.
3038 # We ensure this in the loop below
3039 keep_waiting = True
3040 elapsed_time = 0
3041
3042 while keep_waiting and elapsed_time < volume_timeout:
3043 keep_waiting = False
3044
3045 # Delete volumes and floating IP.
3046 keep_waiting = self._delete_created_items(
3047 created_items, volumes_to_hold, keep_waiting
3048 )
3049
3050 if keep_waiting:
3051 time.sleep(1)
3052 elapsed_time += 1
3053
3054 except (
3055 nvExceptions.NotFound,
3056 ksExceptions.ClientException,
3057 nvExceptions.ClientException,
3058 ConnectionError,
3059 ) as e:
3060 self._format_exception(e)
3061
3062 def refresh_vms_status(self, vm_list):
3063 """Get the status of the virtual machines and their interfaces/ports
3064 Params: the list of VM identifiers
3065 Returns a dictionary with:
3066 vm_id: #VIM id of this Virtual Machine
3067 status: #Mandatory. Text with one of:
3068 # DELETED (not found at vim)
3069 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3070 # OTHER (Vim reported other status not understood)
3071 # ERROR (VIM indicates an ERROR status)
3072 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3073 # CREATING (on building process), ERROR
3074 # ACTIVE:NoMgmtIP (active but none of its interfaces has an IP address)
3075 #
3076 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3077 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3078 interfaces:
3079 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3080 mac_address: #Text format XX:XX:XX:XX:XX:XX
3081 vim_net_id: #network id where this interface is connected
3082 vim_interface_id: #interface/port VIM id
3083 ip_address: #null, or text with IPv4, IPv6 address
3084 compute_node: #identification of compute node where PF,VF interface is allocated
3085 pci: #PCI address of the NIC that hosts the PF,VF
3086 vlan: #physical VLAN used for VF
3087 """
3088 vm_dict = {}
3089 self.logger.debug(
3090 "refresh_vms status: Getting tenant VM instance information from VIM"
3091 )
3092
3093 for vm_id in vm_list:
3094 vm = {}
3095
3096 try:
3097 vm_vim = self.get_vminstance(vm_id)
3098
3099 if vm_vim["status"] in vmStatus2manoFormat:
3100 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3101 else:
3102 vm["status"] = "OTHER"
3103 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3104
3105 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3106 vm_vim.pop("user_data", None)
3107 vm["vim_info"] = self.serialize(vm_vim)
3108
3109 vm["interfaces"] = []
3110 if vm_vim.get("fault"):
3111 vm["error_msg"] = str(vm_vim["fault"])
3112
3113 # get interfaces
3114 try:
3115 self._reload_connection()
3116 port_dict = self.neutron.list_ports(device_id=vm_id)
3117
3118 for port in port_dict["ports"]:
3119 interface = {}
3120 interface["vim_info"] = self.serialize(port)
3121 interface["mac_address"] = port.get("mac_address")
3122 interface["vim_net_id"] = port["network_id"]
3123 interface["vim_interface_id"] = port["id"]
3124 # check if OS-EXT-SRV-ATTR:host is there,
3125 # in case of non-admin credentials, it will be missing
3126
3127 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3128 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3129
3130 interface["pci"] = None
3131
3132 # check if binding:profile is there,
3133 # in case of non-admin credentials, it will be missing
3134 if port.get("binding:profile"):
3135 if port["binding:profile"].get("pci_slot"):
3136 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3137 # the slot to 0x00
3138 # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
3139 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3140 pci = port["binding:profile"]["pci_slot"]
3141 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3142 interface["pci"] = pci
3143
3144 interface["vlan"] = None
3145
3146 if port.get("binding:vif_details"):
3147 interface["vlan"] = port["binding:vif_details"].get("vlan")
3148
3149 # Get vlan from network in case not present in port for those old openstacks and cases where
3150 # it is needed vlan at PT
3151 if not interface["vlan"]:
3152 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3153 network = self.neutron.show_network(port["network_id"])
3154
3155 if (
3156 network["network"].get("provider:network_type")
3157 == "vlan"
3158 ):
3159 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3160 interface["vlan"] = network["network"].get(
3161 "provider:segmentation_id"
3162 )
3163
3164 ips = []
3165 # look for floating ip address
3166 try:
3167 floating_ip_dict = self.neutron.list_floatingips(
3168 port_id=port["id"]
3169 )
3170
3171 if floating_ip_dict.get("floatingips"):
3172 ips.append(
3173 floating_ip_dict["floatingips"][0].get(
3174 "floating_ip_address"
3175 )
3176 )
3177 except Exception:
3178 pass
3179
3180 for subnet in port["fixed_ips"]:
3181 ips.append(subnet["ip_address"])
3182
3183 interface["ip_address"] = ";".join(ips)
3184 vm["interfaces"].append(interface)
3185 except Exception as e:
3186 self.logger.error(
3187 "Error getting vm interface information {}: {}".format(
3188 type(e).__name__, e
3189 ),
3190 exc_info=True,
3191 )
3192 except vimconn.VimConnNotFoundException as e:
3193 self.logger.error("Exception getting vm status: %s", str(e))
3194 vm["status"] = "DELETED"
3195 vm["error_msg"] = str(e)
3196 except vimconn.VimConnException as e:
3197 self.logger.error("Exception getting vm status: %s", str(e))
3198 vm["status"] = "VIM_ERROR"
3199 vm["error_msg"] = str(e)
3200
3201 vm_dict[vm_id] = vm
3202
3203 return vm_dict
3204
3205 def action_vminstance(self, vm_id, action_dict, created_items={}):
3206 """Send and action over a VM instance from VIM
3207 Returns None or the console dict if the action was successfully sent to the VIM"""
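# Illustrative action_dict values handled below (not part of the original
# code): {"start": None}, {"start": "rebuild"}, {"pause": None},
# {"resume": None}, {"shutoff": None}, {"reboot": None}, {"console": "novnc"}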
3208 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3209
3210 try:
3211 self._reload_connection()
3212 server = self.nova.servers.find(id=vm_id)
3213
3214 if "start" in action_dict:
3215 if action_dict["start"] == "rebuild":
3216 server.rebuild()
3217 else:
3218 if server.status == "PAUSED":
3219 server.unpause()
3220 elif server.status == "SUSPENDED":
3221 server.resume()
3222 elif server.status == "SHUTOFF":
3223 server.start()
3224 else:
3225 self.logger.debug(
3226 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3227 )
3228 raise vimconn.VimConnException(
3229 "Cannot 'start' instance while it is in active state",
3230 http_code=vimconn.HTTP_Bad_Request,
3231 )
3232
3233 elif "pause" in action_dict:
3234 server.pause()
3235 elif "resume" in action_dict:
3236 server.resume()
3237 elif "shutoff" in action_dict or "shutdown" in action_dict:
3238 self.logger.debug("server status %s", server.status)
3239 if server.status == "ACTIVE":
3240 server.stop()
3241 else:
3242 self.logger.debug("ERROR: VM is not in Active state")
3243 raise vimconn.VimConnException(
3244 "VM is not in active state, stop operation is not allowed",
3245 http_code=vimconn.HTTP_Bad_Request,
3246 )
3247 elif "forceOff" in action_dict:
3248 server.stop() # TODO
3249 elif "terminate" in action_dict:
3250 server.delete()
3251 elif "createImage" in action_dict:
3252 server.create_image()
3253 # "path":path_schema,
3254 # "description":description_schema,
3255 # "name":name_schema,
3256 # "metadata":metadata_schema,
3257 # "imageRef": id_schema,
3258 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3259 elif "rebuild" in action_dict:
3260 server.rebuild(server.image["id"])
3261 elif "reboot" in action_dict:
3262 server.reboot() # reboot_type="SOFT"
3263 elif "console" in action_dict:
3264 console_type = action_dict["console"]
3265
3266 if console_type is None or console_type == "novnc":
3267 console_dict = server.get_vnc_console("novnc")
3268 elif console_type == "xvpvnc":
3269 console_dict = server.get_vnc_console(console_type)
3270 elif console_type == "rdp-html5":
3271 console_dict = server.get_rdp_console(console_type)
3272 elif console_type == "spice-html5":
3273 console_dict = server.get_spice_console(console_type)
3274 else:
3275 raise vimconn.VimConnException(
3276 "console type '{}' not allowed".format(console_type),
3277 http_code=vimconn.HTTP_Bad_Request,
3278 )
3279
3280 try:
3281 console_url = console_dict["console"]["url"]
3282 # parse console_url
3283 protocol_index = console_url.find("//")
3284 suffix_index = (
3285 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3286 )
3287 port_index = (
3288 console_url[protocol_index + 2 : suffix_index].find(":")
3289 + protocol_index
3290 + 2
3291 )
3292
3293 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3294 raise vimconn.VimConnException(
3295 "Unexpected response from VIM " + str(console_dict)
3296 )
3297
3298 console_dict2 = {
3299 "protocol": console_url[0:protocol_index],
3300 "server": console_url[protocol_index + 2 : port_index],
3301 "port": int(console_url[port_index + 1 : suffix_index]),
3302 "suffix": console_url[suffix_index + 1 :],
3303 }
3304
3305 return console_dict2
3306 except Exception:
3307 raise vimconn.VimConnException(
3308 "Unexpected response from VIM " + str(console_dict)
3309 )
3310
3311 return None
3312 except (
3313 ksExceptions.ClientException,
3314 nvExceptions.ClientException,
3315 nvExceptions.NotFound,
3316 ConnectionError,
3317 ) as e:
3318 self._format_exception(e)
3319 # TODO insert exception vimconn.HTTP_Unauthorized
3320
3321 # ###### VIO Specific Changes #########
3322 def _generate_vlanID(self):
3323 """
3324 Method to get unused vlanID
3325 Args:
3326 None
3327 Returns:
3328 vlanID
3329 """
3330 # Get used VLAN IDs
3331 usedVlanIDs = []
3332 networks = self.get_network_list()
3333
3334 for net in networks:
3335 if net.get("provider:segmentation_id"):
3336 usedVlanIDs.append(net.get("provider:segmentation_id"))
3337
3338 used_vlanIDs = set(usedVlanIDs)
3339
3340 # find unused VLAN ID
3341 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3342 try:
3343 start_vlanid, end_vlanid = map(
3344 int, vlanID_range.replace(" ", "").split("-")
3345 )
3346
3347 for vlanID in range(start_vlanid, end_vlanid + 1):
3348 if vlanID not in used_vlanIDs:
3349 return vlanID
3350 except Exception as exp:
3351 raise vimconn.VimConnException(
3352 "Exception {} occurred while generating VLAN ID.".format(exp)
3353 )
3354 else:
3355 raise vimconn.VimConnConflictException(
3356 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3357 self.config.get("dataplane_net_vlan_range")
3358 )
3359 )
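
# Illustrative sketch (not part of the original code), with hypothetical config:
#   config["dataplane_net_vlan_range"] = ["3000-3100"]
#   VLAN ids already used on the VIM   = {3000, 3001}
#   -> _generate_vlanID() returns 3002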
3360
3361 def _generate_multisegment_vlanID(self):
3362 """
3363 Method to get unused vlanID
3364 Args:
3365 None
3366 Returns:
3367 vlanID
3368 """
3369 # Get used VLAN IDs
3370 usedVlanIDs = []
3371 networks = self.get_network_list()
3372 for net in networks:
3373 if net.get("provider:network_type") == "vlan" and net.get(
3374 "provider:segmentation_id"
3375 ):
3376 usedVlanIDs.append(net.get("provider:segmentation_id"))
3377 elif net.get("segments"):
3378 for segment in net.get("segments"):
3379 if segment.get("provider:network_type") == "vlan" and segment.get(
3380 "provider:segmentation_id"
3381 ):
3382 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3383
3384 used_vlanIDs = set(usedVlanIDs)
3385
3386 # find unused VLAN ID
3387 for vlanID_range in self.config.get("multisegment_vlan_range"):
3388 try:
3389 start_vlanid, end_vlanid = map(
3390 int, vlanID_range.replace(" ", "").split("-")
3391 )
3392
3393 for vlanID in range(start_vlanid, end_vlanid + 1):
3394 if vlanID not in used_vlanIDs:
3395 return vlanID
3396 except Exception as exp:
3397 raise vimconn.VimConnException(
3398 "Exception {} occurred while generating VLAN ID.".format(exp)
3399 )
3400 else:
3401 raise vimconn.VimConnConflictException(
3402 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3403 self.config.get("multisegment_vlan_range")
3404 )
3405 )
3406
3407 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3408 """
3409 Method to validate user given vlanID ranges
3410 Args: None
3411 Returns: None
3412 """
3413 for vlanID_range in input_vlan_range:
3414 vlan_range = vlanID_range.replace(" ", "")
3415 # validate format
3416 vlanID_pattern = r"\d+-\d+$"  # both start and end IDs are required
3417 match_obj = re.match(vlanID_pattern, vlan_range)
3418 if not match_obj:
3419 raise vimconn.VimConnConflictException(
3420 "Invalid VLAN range for {}: {}.You must provide "
3421 "'{}' in format [start_ID - end_ID].".format(
3422 text_vlan_range, vlanID_range, text_vlan_range
3423 )
3424 )
3425
3426 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3427 if start_vlanid <= 0:
3428 raise vimconn.VimConnConflictException(
3429 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3430 "networks valid IDs are 1 to 4094 ".format(
3431 text_vlan_range, vlanID_range
3432 )
3433 )
3434
3435 if end_vlanid > 4094:
3436 raise vimconn.VimConnConflictException(
3437 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3438 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3439 text_vlan_range, vlanID_range
3440 )
3441 )
3442
3443 if start_vlanid > end_vlanid:
3444 raise vimconn.VimConnConflictException(
3445 "Invalid VLAN range for {}: {}. You must provide '{}'"
3446 " in format start_ID - end_ID and start_ID < end_ID ".format(
3447 text_vlan_range, vlanID_range, text_vlan_range
3448 )
3449 )
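
# Illustrative outcomes (not part of the original code) of the checks above:
#   "1000-2000" -> accepted
#   "0-100"     -> rejected (start ID must be greater than zero)
#   "4000-4095" -> rejected (end ID greater than 4094)
#   "200-100"   -> rejected (start ID greater than end ID)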
3450
3451 def delete_user(self, user_id):
3452 """Delete a user from openstack VIM
3453 Returns the user identifier"""
3454 if self.debug:
3455 print("osconnector: Deleting a user from VIM")
3456
3457 try:
3458 self._reload_connection()
3459 self.keystone.users.delete(user_id)
3460
3461 return 1, user_id
3462 except ksExceptions.ConnectionError as e:
3463 error_value = -vimconn.HTTP_Bad_Request
3464 error_text = (
3465 type(e).__name__
3466 + ": "
3467 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3468 )
3469 except ksExceptions.NotFound as e:
3470 error_value = -vimconn.HTTP_Not_Found
3471 error_text = (
3472 type(e).__name__
3473 + ": "
3474 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3475 )
3476 except ksExceptions.ClientException as e: # TODO remove
3477 error_value = -vimconn.HTTP_Bad_Request
3478 error_text = (
3479 type(e).__name__
3480 + ": "
3481 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3482 )
3483
3484 # TODO insert exception vimconn.HTTP_Unauthorized
3485 # if reaching here is because an exception
3486 self.logger.debug("delete_tenant " + error_text)
3487
3488 return error_value, error_text
3489
3490 def get_hosts_info(self):
3491 """Get the information of deployed hosts
3492 Returns the hosts content"""
3493 if self.debug:
3494 print("osconnector: Getting Host info from VIM")
3495
3496 try:
3497 h_list = []
3498 self._reload_connection()
3499 hypervisors = self.nova.hypervisors.list()
3500
3501 for hype in hypervisors:
3502 h_list.append(hype.to_dict())
3503
3504 return 1, {"hosts": h_list}
3505 except nvExceptions.NotFound as e:
3506 error_value = -vimconn.HTTP_Not_Found
3507 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3508 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3509 error_value = -vimconn.HTTP_Bad_Request
3510 error_text = (
3511 type(e).__name__
3512 + ": "
3513 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3514 )
3515
3516 # TODO insert exception vimconn.HTTP_Unauthorized
3517 # if reaching here is because an exception
3518 self.logger.debug("get_hosts_info " + error_text)
3519
3520 return error_value, error_text
3521
3522 def get_hosts(self, vim_tenant):
3523 """Get the hosts and deployed instances
3524 Returns the hosts content"""
3525 r, hype_dict = self.get_hosts_info()
3526
3527 if r < 0:
3528 return r, hype_dict
3529
3530 hypervisors = hype_dict["hosts"]
3531
3532 try:
3533 servers = self.nova.servers.list()
3534 for hype in hypervisors:
3535 for server in servers:
3536 if (
3537 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3538 == hype["hypervisor_hostname"]
3539 ):
3540 if "vm" in hype:
3541 hype["vm"].append(server.id)
3542 else:
3543 hype["vm"] = [server.id]
3544
3545 return 1, hype_dict
3546 except nvExceptions.NotFound as e:
3547 error_value = -vimconn.HTTP_Not_Found
3548 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3549 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3550 error_value = -vimconn.HTTP_Bad_Request
3551 error_text = (
3552 type(e).__name__
3553 + ": "
3554 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3555 )
3556
3557 # TODO insert exception vimconn.HTTP_Unauthorized
3558 # if reaching here is because an exception
3559 self.logger.debug("get_hosts " + error_text)
3560
3561 return error_value, error_text
3562
3563 def new_classification(self, name, ctype, definition):
3564 self.logger.debug(
3565 "Adding a new (Traffic) Classification to VIM, named %s", name
3566 )
3567
3568 try:
3569 new_class = None
3570 self._reload_connection()
3571
3572 if ctype not in supportedClassificationTypes:
3573 raise vimconn.VimConnNotSupportedException(
3574 "OpenStack VIM connector does not support provided "
3575 "Classification Type {}, supported ones are: {}".format(
3576 ctype, supportedClassificationTypes
3577 )
3578 )
3579
3580 if not self._validate_classification(ctype, definition):
3581 raise vimconn.VimConnException(
3582 "Incorrect Classification definition for the type specified."
3583 )
3584
3585 classification_dict = definition
3586 classification_dict["name"] = name
3587 new_class = self.neutron.create_sfc_flow_classifier(
3588 {"flow_classifier": classification_dict}
3589 )
3590
3591 return new_class["flow_classifier"]["id"]
3592 except (
3593 neExceptions.ConnectionFailed,
3594 ksExceptions.ClientException,
3595 neExceptions.NeutronException,
3596 ConnectionError,
3597 ) as e:
3598 self.logger.error("Creation of Classification failed.")
3599 self._format_exception(e)
3600
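    # Illustrative call (sketch): field names follow the networking-sfc
    # flow classifier API; the values and the port UUID are hypothetical.
    #
    #     class_id = vim.new_classification(
    #         name="http-to-web",
    #         ctype="legacy_flow_classifier",
    #         definition={
    #             "protocol": "tcp",
    #             "source_ip_prefix": "10.0.0.0/24",
    #             "destination_port_range_min": 80,
    #             "destination_port_range_max": 80,
    #             "logical_source_port": "<neutron-port-uuid>",
    #         },
    #     )
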
3601 def get_classification(self, class_id):
3602 self.logger.debug("Getting Classification %s from VIM", class_id)
3603 filter_dict = {"id": class_id}
3604 class_list = self.get_classification_list(filter_dict)
3605
3606 if len(class_list) == 0:
3607 raise vimconn.VimConnNotFoundException(
3608 "Classification '{}' not found".format(class_id)
3609 )
3610 elif len(class_list) > 1:
3611 raise vimconn.VimConnConflictException(
3612 "Found more than one Classification with this criteria"
3613 )
3614
3615 classification = class_list[0]
3616
3617 return classification
3618
3619 def get_classification_list(self, filter_dict={}):
3620 self.logger.debug(
3621 "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3622 )
3623
3624 try:
3625 filter_dict_os = filter_dict.copy()
3626 self._reload_connection()
3627
3628 if self.api_version3 and "tenant_id" in filter_dict_os:
3629 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3630
3631 classification_dict = self.neutron.list_sfc_flow_classifiers(
3632 **filter_dict_os
3633 )
3634 classification_list = classification_dict["flow_classifiers"]
3635 self.__classification_os2mano(classification_list)
3636
3637 return classification_list
3638 except (
3639 neExceptions.ConnectionFailed,
3640 ksExceptions.ClientException,
3641 neExceptions.NeutronException,
3642 ConnectionError,
3643 ) as e:
3644 self._format_exception(e)
3645
3646 def delete_classification(self, class_id):
3647 self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3648
3649 try:
3650 self._reload_connection()
3651 self.neutron.delete_sfc_flow_classifier(class_id)
3652
3653 return class_id
3654 except (
3655 neExceptions.ConnectionFailed,
3656 neExceptions.NeutronException,
3657 ksExceptions.ClientException,
3659 ConnectionError,
3660 ) as e:
3661 self._format_exception(e)
3662
3663 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3664 self.logger.debug(
3665 "Adding a new Service Function Instance to VIM, named '%s'", name
3666 )
3667
3668 try:
3669 new_sfi = None
3670 self._reload_connection()
3671 correlation = None
3672
3673 if sfc_encap:
3674 correlation = "nsh"
3675
3676 if len(ingress_ports) != 1:
3677 raise vimconn.VimConnNotSupportedException(
3678 "OpenStack VIM connector can only have 1 ingress port per SFI"
3679 )
3680
3681 if len(egress_ports) != 1:
3682 raise vimconn.VimConnNotSupportedException(
3683 "OpenStack VIM connector can only have 1 egress port per SFI"
3684 )
3685
3686 sfi_dict = {
3687 "name": name,
3688 "ingress": ingress_ports[0],
3689 "egress": egress_ports[0],
3690 "service_function_parameters": {"correlation": correlation},
3691 }
3692 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3693
3694 return new_sfi["port_pair"]["id"]
3695 except (
3696 neExceptions.ConnectionFailed,
3697 ksExceptions.ClientException,
3698 neExceptions.NeutronException,
3699 ConnectionError,
3700 ) as e:
3701 if new_sfi:
3702 try:
3703 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3704 except Exception:
3705 self.logger.error(
3706 "Creation of Service Function Instance failed, with "
3707 "subsequent deletion failure as well."
3708 )
3709
3710 self._format_exception(e)
3711
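    # Illustrative call (sketch; the port UUIDs are hypothetical):
    #
    #     sfi_id = vim.new_sfi(
    #         name="firewall-instance-1",
    #         ingress_ports=["<ingress-port-uuid>"],  # exactly one is required
    #         egress_ports=["<egress-port-uuid>"],    # exactly one is required
    #         sfc_encap=True,  # True selects NSH correlation
    #     )
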
3712 def get_sfi(self, sfi_id):
3713 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3714 filter_dict = {"id": sfi_id}
3715 sfi_list = self.get_sfi_list(filter_dict)
3716
3717 if len(sfi_list) == 0:
3718 raise vimconn.VimConnNotFoundException(
3719 "Service Function Instance '{}' not found".format(sfi_id)
3720 )
3721 elif len(sfi_list) > 1:
3722 raise vimconn.VimConnConflictException(
3723 "Found more than one Service Function Instance with this criteria"
3724 )
3725
3726 sfi = sfi_list[0]
3727
3728 return sfi
3729
3730 def get_sfi_list(self, filter_dict={}):
3731 self.logger.debug(
3732 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3733 )
3734
3735 try:
3736 self._reload_connection()
3737 filter_dict_os = filter_dict.copy()
3738
3739 if self.api_version3 and "tenant_id" in filter_dict_os:
3740 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3741
3742 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3743 sfi_list = sfi_dict["port_pairs"]
3744 self.__sfi_os2mano(sfi_list)
3745
3746 return sfi_list
3747 except (
3748 neExceptions.ConnectionFailed,
3749 ksExceptions.ClientException,
3750 neExceptions.NeutronException,
3751 ConnectionError,
3752 ) as e:
3753 self._format_exception(e)
3754
3755 def delete_sfi(self, sfi_id):
3756 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3757
3758 try:
3759 self._reload_connection()
3760 self.neutron.delete_sfc_port_pair(sfi_id)
3761
3762 return sfi_id
3763 except (
3764 neExceptions.ConnectionFailed,
3765 neExceptions.NeutronException,
3766 ksExceptions.ClientException,
3768 ConnectionError,
3769 ) as e:
3770 self._format_exception(e)
3771
3772 def new_sf(self, name, sfis, sfc_encap=True):
3773 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3774
3775 try:
3776 new_sf = None
3777 self._reload_connection()
3778 # correlation = None
3779 # if sfc_encap:
3780 # correlation = "nsh"
3781
3782 for instance in sfis:
3783 sfi = self.get_sfi(instance)
3784
3785 if sfi.get("sfc_encap") != sfc_encap:
3786 raise vimconn.VimConnNotSupportedException(
3787 "OpenStack VIM connector requires all SFIs of the "
3788 "same SF to share the same SFC Encapsulation"
3789 )
3790
3791 sf_dict = {"name": name, "port_pairs": sfis}
3792 new_sf = self.neutron.create_sfc_port_pair_group(
3793 {"port_pair_group": sf_dict}
3794 )
3795
3796 return new_sf["port_pair_group"]["id"]
3797 except (
3798 neExceptions.ConnectionFailed,
3799 ksExceptions.ClientException,
3800 neExceptions.NeutronException,
3801 ConnectionError,
3802 ) as e:
3803 if new_sf:
3804 try:
3805 self.neutron.delete_sfc_port_pair_group(
3806 new_sf["port_pair_group"]["id"]
3807 )
3808 except Exception:
3809 self.logger.error(
3810 "Creation of Service Function failed, with "
3811 "subsequent deletion failure as well."
3812 )
3813
3814 self._format_exception(e)
3815
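    # Illustrative call (sketch; the SFI ids are hypothetical and must all
    # share the same SFC encapsulation):
    #
    #     sf_id = vim.new_sf("firewall", sfis=[sfi_id_1, sfi_id_2])
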
3816 def get_sf(self, sf_id):
3817 self.logger.debug("Getting Service Function %s from VIM", sf_id)
3818 filter_dict = {"id": sf_id}
3819 sf_list = self.get_sf_list(filter_dict)
3820
3821 if len(sf_list) == 0:
3822 raise vimconn.VimConnNotFoundException(
3823 "Service Function '{}' not found".format(sf_id)
3824 )
3825 elif len(sf_list) > 1:
3826 raise vimconn.VimConnConflictException(
3827 "Found more than one Service Function with this criteria"
3828 )
3829
3830 sf = sf_list[0]
3831
3832 return sf
3833
3834 def get_sf_list(self, filter_dict={}):
3835 self.logger.debug(
3836 "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3837 )
3838
3839 try:
3840 self._reload_connection()
3841 filter_dict_os = filter_dict.copy()
3842
3843 if self.api_version3 and "tenant_id" in filter_dict_os:
3844 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3845
3846 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3847 sf_list = sf_dict["port_pair_groups"]
3848 self.__sf_os2mano(sf_list)
3849
3850 return sf_list
3851 except (
3852 neExceptions.ConnectionFailed,
3853 ksExceptions.ClientException,
3854 neExceptions.NeutronException,
3855 ConnectionError,
3856 ) as e:
3857 self._format_exception(e)
3858
3859 def delete_sf(self, sf_id):
3860 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3861
3862 try:
3863 self._reload_connection()
3864 self.neutron.delete_sfc_port_pair_group(sf_id)
3865
3866 return sf_id
3867 except (
3868 neExceptions.ConnectionFailed,
3869 neExceptions.NeutronException,
3870 ksExceptions.ClientException,
3872 ConnectionError,
3873 ) as e:
3874 self._format_exception(e)
3875
3876 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3877 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3878
3879 try:
3880 new_sfp = None
3881 self._reload_connection()
3882 # In networking-sfc, MPLS correlation is the legacy mode and
3883 # should be used when no full SFC encapsulation (NSH) is intended
3884 correlation = "mpls"
3885
3886 if sfc_encap:
3887 correlation = "nsh"
3888
3889 sfp_dict = {
3890 "name": name,
3891 "flow_classifiers": classifications,
3892 "port_pair_groups": sfs,
3893 "chain_parameters": {"correlation": correlation},
3894 }
3895
3896 if spi:
3897 sfp_dict["chain_id"] = spi
3898
3899 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3900
3901 return new_sfp["port_chain"]["id"]
3902 except (
3903 neExceptions.ConnectionFailed,
3904 ksExceptions.ClientException,
3905 neExceptions.NeutronException,
3906 ConnectionError,
3907 ) as e:
3908 if new_sfp:
3909 try:
3910 self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3911 except Exception:
3912 self.logger.error(
3913 "Creation of Service Function Path failed, with "
3914 "subsequent deletion failure as well."
3915 )
3916
3917 self._format_exception(e)
3918
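    # Illustrative call (sketch; the ids are hypothetical):
    #
    #     sfp_id = vim.new_sfp(
    #         name="web-chain",
    #         classifications=[class_id],
    #         sfs=[sf_id],
    #         sfc_encap=False,  # False selects the legacy MPLS correlation
    #         spi=100,          # optional; becomes the port chain's chain_id
    #     )
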
3919 def get_sfp(self, sfp_id):
3920 self.logger.debug("Getting Service Function Path %s from VIM", sfp_id)
3921
3922 filter_dict = {"id": sfp_id}
3923 sfp_list = self.get_sfp_list(filter_dict)
3924
3925 if len(sfp_list) == 0:
3926 raise vimconn.VimConnNotFoundException(
3927 "Service Function Path '{}' not found".format(sfp_id)
3928 )
3929 elif len(sfp_list) > 1:
3930 raise vimconn.VimConnConflictException(
3931 "Found more than one Service Function Path with this criteria"
3932 )
3933
3934 sfp = sfp_list[0]
3935
3936 return sfp
3937
3938 def get_sfp_list(self, filter_dict={}):
3939 self.logger.debug(
3940 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3941 )
3942
3943 try:
3944 self._reload_connection()
3945 filter_dict_os = filter_dict.copy()
3946
3947 if self.api_version3 and "tenant_id" in filter_dict_os:
3948 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3949
3950 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
3951 sfp_list = sfp_dict["port_chains"]
3952 self.__sfp_os2mano(sfp_list)
3953
3954 return sfp_list
3955 except (
3956 neExceptions.ConnectionFailed,
3957 ksExceptions.ClientException,
3958 neExceptions.NeutronException,
3959 ConnectionError,
3960 ) as e:
3961 self._format_exception(e)
3962
3963 def delete_sfp(self, sfp_id):
3964 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
3965
3966 try:
3967 self._reload_connection()
3968 self.neutron.delete_sfc_port_chain(sfp_id)
3969
3970 return sfp_id
3971 except (
3972 neExceptions.ConnectionFailed,
3973 neExceptions.NeutronException,
3974 ksExceptions.ClientException,
3976 ConnectionError,
3977 ) as e:
3978 self._format_exception(e)
3979
3980 def refresh_sfps_status(self, sfp_list):
3981 """Get the status of the service function path
3982 Params: the list of sfp identifiers
3983 Returns a dictionary with:
3984 sfp_id: #VIM id of this service function path
3985 status: #Mandatory. Text with one of:
3986 # DELETED (not found at vim)
3987 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3988 # OTHER (Vim reported other status not understood)
3989 # ERROR (VIM indicates an ERROR status)
3990 # ACTIVE,
3991 # CREATING (build in progress)
3992 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3993 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3994 """
3995 sfp_dict = {}
3996 self.logger.debug(
3997 "refresh_sfps status: Getting tenant SFP information from VIM"
3998 )
3999
4000 for sfp_id in sfp_list:
4001 sfp = {}
4002
4003 try:
4004 sfp_vim = self.get_sfp(sfp_id)
4005
4006 if sfp_vim["spi"]:
4007 sfp["status"] = vmStatus2manoFormat["ACTIVE"]
4008 else:
4009 sfp["status"] = "OTHER"
4010 sfp["error_msg"] = "VIM status reported " + sfp["status"]
4011
4012 sfp["vim_info"] = self.serialize(sfp_vim)
4013
4014 if sfp_vim.get("fault"):
4015 sfp["error_msg"] = str(sfp_vim["fault"])
4016 except vimconn.VimConnNotFoundException as e:
4017 self.logger.error("Exception getting sfp status: %s", str(e))
4018 sfp["status"] = "DELETED"
4019 sfp["error_msg"] = str(e)
4020 except vimconn.VimConnException as e:
4021 self.logger.error("Exception getting sfp status: %s", str(e))
4022 sfp["status"] = "VIM_ERROR"
4023 sfp["error_msg"] = str(e)
4024
4025 sfp_dict[sfp_id] = sfp
4026
4027 return sfp_dict
4028
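    # Illustrative shape of the returned dictionary (sketch; ids and texts
    # are hypothetical):
    #
    #     {
    #         "<sfp-uuid>": {"status": "ACTIVE", "vim_info": "..."},
    #         "<missing-uuid>": {"status": "DELETED", "error_msg": "..."},
    #     }
    #
    # refresh_sfis_status, refresh_sfs_status and
    # refresh_classifications_status below return the same shape for their
    # respective resources.
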
4029 def refresh_sfis_status(self, sfi_list):
4030 """Get the status of the service function instances
4031 Params: the list of sfi identifiers
4032 Returns a dictionary with:
4033 sfi_id: #VIM id of this service function instance
4034 status: #Mandatory. Text with one of:
4035 # DELETED (not found at vim)
4036 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4037 # OTHER (Vim reported other status not understood)
4038 # ERROR (VIM indicates an ERROR status)
4039 # ACTIVE,
4040 # CREATING (build in progress)
4041 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4042 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4043 """
4044 sfi_dict = {}
4045 self.logger.debug(
4046 "refresh_sfis status: Getting tenant sfi information from VIM"
4047 )
4048
4049 for sfi_id in sfi_list:
4050 sfi = {}
4051
4052 try:
4053 sfi_vim = self.get_sfi(sfi_id)
4054
4055 if sfi_vim:
4056 sfi["status"] = vmStatus2manoFormat["ACTIVE"]
4057 else:
4058 sfi["status"] = "OTHER"
4059 sfi["error_msg"] = "VIM status reported " + sfi["status"]
4060
4061 sfi["vim_info"] = self.serialize(sfi_vim)
4062
4063 if sfi_vim.get("fault"):
4064 sfi["error_msg"] = str(sfi_vim["fault"])
4065 except vimconn.VimConnNotFoundException as e:
4066 self.logger.error("Exception getting sfi status: %s", str(e))
4067 sfi["status"] = "DELETED"
4068 sfi["error_msg"] = str(e)
4069 except vimconn.VimConnException as e:
4070 self.logger.error("Exception getting sfi status: %s", str(e))
4071 sfi["status"] = "VIM_ERROR"
4072 sfi["error_msg"] = str(e)
4073
4074 sfi_dict[sfi_id] = sfi
4075
4076 return sfi_dict
4077
4078 def refresh_sfs_status(self, sf_list):
4079 """Get the status of the service functions
4080 Params: the list of sf identifiers
4081 Returns a dictionary with:
4082 sf_id: #VIM id of this service function
4083 status: #Mandatory. Text with one of:
4084 # DELETED (not found at vim)
4085 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4086 # OTHER (Vim reported other status not understood)
4087 # ERROR (VIM indicates an ERROR status)
4088 # ACTIVE,
4089 # CREATING (build in progress)
4090 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4091 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4092 """
4093 sf_dict = {}
4094 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
4095
4096 for sf_id in sf_list:
4097 sf = {}
4098
4099 try:
4100 sf_vim = self.get_sf(sf_id)
4101
4102 if sf_vim:
4103 sf["status"] = vmStatus2manoFormat["ACTIVE"]
4104 else:
4105 sf["status"] = "OTHER"
4106 sf["error_msg"] = "VIM status reported " + sf_vim["status"]
4107
4108 sf["vim_info"] = self.serialize(sf_vim)
4109
4110 if sf_vim.get("fault"):
4111 sf["error_msg"] = str(sf_vim["fault"])
4112 except vimconn.VimConnNotFoundException as e:
4113 self.logger.error("Exception getting sf status: %s", str(e))
4114 sf["status"] = "DELETED"
4115 sf["error_msg"] = str(e)
4116 except vimconn.VimConnException as e:
4117 self.logger.error("Exception getting sf status: %s", str(e))
4118 sf["status"] = "VIM_ERROR"
4119 sf["error_msg"] = str(e)
4120
4121 sf_dict[sf_id] = sf
4122
4123 return sf_dict
4124
4125 def refresh_classifications_status(self, classification_list):
4126 """Get the status of the classifications
4127 Params: the list of classification identifiers
4128 Returns a dictionary with:
4129 classification_id: #VIM id of this classifier
4130 status: #Mandatory. Text with one of:
4131 # DELETED (not found at vim)
4132 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4133 # OTHER (Vim reported other status not understood)
4134 # ERROR (VIM indicates an ERROR status)
4135 # ACTIVE,
4136 # CREATING (build in progress)
4137 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4138 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4139 """
4140 classification_dict = {}
4141 self.logger.debug(
4142 "refresh_classifications status: Getting tenant classification information from VIM"
4143 )
4144
4145 for classification_id in classification_list:
4146 classification = {}
4147
4148 try:
4149 classification_vim = self.get_classification(classification_id)
4150
4151 if classification_vim:
4152 classification["status"] = vmStatus2manoFormat["ACTIVE"]
4153 else:
4154 classification["status"] = "OTHER"
4155 classification["error_msg"] = (
4156 "VIM status reported " + classification["status"]
4157 )
4158
4159 classification["vim_info"] = self.serialize(classification_vim)
4160
4161 if classification_vim.get("fault"):
4162 classification["error_msg"] = str(classification_vim["fault"])
4163 except vimconn.VimConnNotFoundException as e:
4164 self.logger.error("Exception getting classification status: %s", str(e))
4165 classification["status"] = "DELETED"
4166 classification["error_msg"] = str(e)
4167 except vimconn.VimConnException as e:
4168 self.logger.error("Exception getting classification status: %s", str(e))
4169 classification["status"] = "VIM_ERROR"
4170 classification["error_msg"] = str(e)
4171
4172 classification_dict[classification_id] = classification
4173
4174 return classification_dict
4175
4176 def new_affinity_group(self, affinity_group_data):
4177 """Adds a server group to VIM
4178 affinity_group_data contains a dictionary with information, keys:
4179 name: name in VIM for the server group
4180 type: affinity or anti-affinity
4181 scope: Only nfvi-node allowed
4182 Returns the server group identifier"""
4183 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
4184
4185 try:
4186 name = affinity_group_data["name"]
4187 policy = affinity_group_data["type"]
4188
4189 self._reload_connection()
4190 new_server_group = self.nova.server_groups.create(name, policy)
4191
4192 return new_server_group.id
4193 except (
4194 ksExceptions.ClientException,
4195 nvExceptions.ClientException,
4196 ConnectionError,
4197 KeyError,
4198 ) as e:
4199 self._format_exception(e)
4200
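    # Illustrative call (sketch; the group name is hypothetical):
    #
    #     group_id = vim.new_affinity_group(
    #         {"name": "ha-group", "type": "anti-affinity", "scope": "nfvi-node"}
    #     )
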
4201 def get_affinity_group(self, affinity_group_id):
4202 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4203 self.logger.debug("Getting flavor '%s'", affinity_group_id)
4204 try:
4205 self._reload_connection()
4206 server_group = self.nova.server_groups.find(id=affinity_group_id)
4207
4208 return server_group.to_dict()
4209 except (
4210 nvExceptions.NotFound,
4211 nvExceptions.ClientException,
4212 ksExceptions.ClientException,
4213 ConnectionError,
4214 ) as e:
4215 self._format_exception(e)
4216
4217 def delete_affinity_group(self, affinity_group_id):
4218 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4219 self.logger.debug("Getting server group '%s'", affinity_group_id)
4220 try:
4221 self._reload_connection()
4222 self.nova.server_groups.delete(affinity_group_id)
4223
4224 return affinity_group_id
4225 except (
4226 nvExceptions.NotFound,
4227 ksExceptions.ClientException,
4228 nvExceptions.ClientException,
4229 ConnectionError,
4230 ) as e:
4231 self._format_exception(e)
4232
4233 def get_vdu_state(self, vm_id):
4234 """
4235 Getting the state of a vdu
4236 param:
4237 vm_id: ID of an instance
4238 """
4239 self.logger.debug("Getting the status of VM")
4240 self.logger.debug("VIM VM ID %s", vm_id)
4241 self._reload_connection()
4242 server = self.nova.servers.find(id=vm_id)
4243 server_dict = server.to_dict()
4244 vdu_data = [
4245 server_dict["status"],
4246 server_dict["flavor"]["id"],
4247 server_dict["OS-EXT-SRV-ATTR:host"],
4248 server_dict["OS-EXT-AZ:availability_zone"],
4249 ]
4250 self.logger.debug("vdu_data %s", vdu_data)
4251 return vdu_data
4252
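    # Illustrative unpacking of the returned list (sketch; the id is
    # hypothetical):
    #
    #     status, flavor_id, host, az = vim.get_vdu_state("<server-uuid>")
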
4253 def check_compute_availability(self, host, server_flavor_details):
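        """Return `host` if the matching hypervisor reports more free RAM,
        available disk and free vCPUs than requested in server_flavor_details
        ([ram, disk, vcpus]); otherwise fall through and return None."""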
4254 self._reload_connection()
4255 hypervisor_search = self.nova.hypervisors.search(
4256 hypervisor_match=host, servers=True
4257 )
4258 for hypervisor in hypervisor_search:
4259 hypervisor_id = hypervisor.to_dict()["id"]
4260 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4261 hypervisor_dict = hypervisor_details.to_dict()
4262 # to_dict() already returns plain built-in types; no JSON round-trip needed
4263 resources_available = [
4264 hypervisor_dict["free_ram_mb"],
4265 hypervisor_dict["disk_available_least"],
4266 hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
4267 ]
4269 compute_available = all(
4270 x > y for x, y in zip(resources_available, server_flavor_details)
4271 )
4272 if compute_available:
4273 return host
4274
4275 def check_availability_zone(
4276 self, old_az, server_flavor_details, old_host, host=None
4277 ):
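        """Check whether availability zone `old_az` can host the server,
        either on the explicit `host` (if given) or on any host of the zone
        other than `old_host`. Returns a dict with keys "zone_check" (bool)
        and "compute_availability" (a host with enough resources, or None)."""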
4278 self._reload_connection()
4279 az_check = {"zone_check": False, "compute_availability": None}
4280 aggregates_list = self.nova.aggregates.list()
4281 for aggregate in aggregates_list:
4282 aggregate_details = aggregate.to_dict()
4283 # aggregate.to_dict() already returns plain data; use it directly
4284 if aggregate_details["availability_zone"] == old_az:
4285 hosts_list = aggregate_details["hosts"]
4287 if host is not None:
4288 if host in hosts_list:
4289 az_check["zone_check"] = True
4290 available_compute_id = self.check_compute_availability(
4291 host, server_flavor_details
4292 )
4293 if available_compute_id is not None:
4294 az_check["compute_availability"] = available_compute_id
4295 else:
4296 for check_host in hosts_list:
4297 if check_host != old_host:
4298 available_compute_id = self.check_compute_availability(
4299 check_host, server_flavor_details
4300 )
4301 if available_compute_id is not None:
4302 az_check["zone_check"] = True
4303 az_check["compute_availability"] = available_compute_id
4304 break
4305 else:
4306 az_check["zone_check"] = True
4307 return az_check
4308
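    # Illustrative result (sketch; the host name is hypothetical):
    #
    #     {"zone_check": True, "compute_availability": "compute-3"}
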
4309 def migrate_instance(self, vm_id, compute_host=None):
4310 """
4311 Migrate a vdu
4312 param:
4313 vm_id: ID of an instance
4314 compute_host: Host to migrate the vdu to
4315 """
4316 self._reload_connection()
4317 vm_state = False
4318 instance_state = self.get_vdu_state(vm_id)
4319 server_flavor_id = instance_state[1]
4320 server_hypervisor_name = instance_state[2]
4321 server_availability_zone = instance_state[3]
4322 try:
4323 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
4324 server_flavor_details = [
4325 server_flavor["ram"],
4326 server_flavor["disk"],
4327 server_flavor["vcpus"],
4328 ]
4329 if compute_host == server_hypervisor_name:
4330 raise vimconn.VimConnException(
4331 "Unable to migrate instance '{}' to the same host '{}'".format(
4332 vm_id, compute_host
4333 ),
4334 http_code=vimconn.HTTP_Bad_Request,
4335 )
4336 az_status = self.check_availability_zone(
4337 server_availability_zone,
4338 server_flavor_details,
4339 server_hypervisor_name,
4340 compute_host,
4341 )
4342 availability_zone_check = az_status["zone_check"]
4343 available_compute_id = az_status.get("compute_availability")
4344
4345 if availability_zone_check is False:
4346 raise vimconn.VimConnException(
4347 "Unable to migrate instance '{}' to a different availability zone".format(
4348 vm_id
4349 ),
4350 http_code=vimconn.HTTP_Bad_Request,
4351 )
4352 if available_compute_id is not None:
4353 self.nova.servers.live_migrate(
4354 server=vm_id,
4355 host=available_compute_id,
4356 block_migration=True,
4357 disk_over_commit=False,
4358 )
4359 state = "MIGRATING"
4360 changed_compute_host = ""
4361 if state == "MIGRATING":
4362 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
4363 changed_compute_host = self.get_vdu_state(vm_id)[2]
4364 if vm_state and changed_compute_host == available_compute_id:
4365 self.logger.debug(
4366 "Instance '{}' migrated to the new compute host '{}'".format(
4367 vm_id, changed_compute_host
4368 )
4369 )
4370 return state, available_compute_id
4371 else:
4372 raise vimconn.VimConnException(
4373 "Migration Failed. Instance '{}' not moved to the new host {}".format(
4374 vm_id, available_compute_id
4375 ),
4376 http_code=vimconn.HTTP_Bad_Request,
4377 )
4378 else:
4379 raise vimconn.VimConnException(
4380 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
4381 available_compute_id
4382 ),
4383 http_code=vimconn.HTTP_Bad_Request,
4384 )
4385 except (
4386 nvExceptions.BadRequest,
4387 nvExceptions.ClientException,
4388 nvExceptions.NotFound,
4389 ) as e:
4390 self._format_exception(e)
4391
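    # Illustrative call (sketch; the id and host name are hypothetical):
    #
    #     state, new_host = vim.migrate_instance("<server-uuid>", "compute-3")
    #     # on success, state is "MIGRATING" and new_host is the target host
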
4392 def resize_instance(self, vm_id, new_flavor_id):
4393 """
4394 Resize the VM based on the given
4395 flavor details.
4396 param:
4397 vm_id : ID of an instance
4398 new_flavor_id : ID of the flavor to resize the instance to
4399 Returns the status of the resized instance
4400 """
4401 self._reload_connection()
4402 self.logger.debug("resize the flavor of an instance")
4403 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
4404 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
4405 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
4406 try:
4407 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
4408 if old_flavor_disk > new_flavor_disk:
4409 raise nvExceptions.BadRequest(
4410 400,
4411 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
4412 )
4413 else:
4414 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
4415 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
4416 if vm_state:
4417 instance_resized_status = self.confirm_resize(vm_id)
4418 return instance_resized_status
4419 else:
4420 raise nvExceptions.BadRequest(
4421 409,
4422 message="Cannot 'resize' vm_state is in ERROR",
4423 )
4424
4425 else:
4426 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
4427 raise nvExceptions.BadRequest(
4428 409,
4429 message="Cannot 'resize' instance while it is in vm_state resized",
4430 )
4431 except (
4432 nvExceptions.BadRequest,
4433 nvExceptions.ClientException,
4434 nvExceptions.NotFound,
4435 ) as e:
4436 self._format_exception(e)
4437
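    # Illustrative call (sketch; the ids are hypothetical). The new flavor
    # must not have a smaller disk than the current one, and the instance
    # must be ACTIVE or SHUTOFF:
    #
    #     status = vim.resize_instance("<server-uuid>", "<new-flavor-id>")
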
4438 def confirm_resize(self, vm_id):
4439 """
4440 Confirm the resize of an instance
4441 param:
4442 vm_id: ID of an instance
4443 """
4444 self._reload_connection()
4445 self.nova.servers.confirm_resize(server=vm_id)
4446 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4447 self.__wait_for_vm(vm_id, "ACTIVE")
4448 instance_status = self.get_vdu_state(vm_id)[0]
4449 return instance_status