# -*- coding: utf-8 -*-

##
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##

"""
osconnector implements all the methods to interact with OpenStack using the OpenStack
Python clients (novaclient, neutronclient, glanceclient, cinderclient and keystoneclient).

For the VNF forwarding graph, the OpenStack VIM connector calls the
networking-sfc Neutron extension methods, whose resources are mapped
to the VIM connector's SFC resources as follows:
- Classification (OSM) -> Flow Classifier (Neutron)
- Service Function Instance (OSM) -> Port Pair (Neutron)
- Service Function (OSM) -> Port Pair Group (Neutron)
- Service Function Path (OSM) -> Port Chain (Neutron)
"""

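# A minimal illustration (not executed) of the mapping above, assuming the
# usual networking-sfc attribute names: an OSM Classification such as
#     {"ctype": "legacy_flow_classifier",
#      "definition": {"protocol": "tcp", "source_ip_prefix": "10.0.0.1/32"}}
# corresponds to a Neutron flow classifier whose body is that same definition
# dict (see __classification_os2mano below for the reverse direction).
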
import copy
from http.client import HTTPException
import json
import logging
from pprint import pformat
import random
import re
import time
from typing import Dict, List, Optional, Tuple

from cinderclient import client as cClient
from glanceclient import client as glClient
import glanceclient.exc as gl1Exceptions
from keystoneauth1 import session
from keystoneauth1.identity import v2, v3
import keystoneclient.exceptions as ksExceptions
import keystoneclient.v2_0.client as ksClient_v2
import keystoneclient.v3.client as ksClient_v3
import netaddr
from neutronclient.common import exceptions as neExceptions
from neutronclient.neutron import client as neClient
from novaclient import client as nClient, exceptions as nvExceptions
from osm_ro_plugin import vimconn
from requests.exceptions import ConnectionError
import yaml

__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

62 """contain the openstack virtual machine status to openmano status"""
63 vmStatus2manoFormat = {
64 "ACTIVE": "ACTIVE",
65 "PAUSED": "PAUSED",
66 "SUSPENDED": "SUSPENDED",
67 "SHUTOFF": "INACTIVE",
68 "BUILD": "BUILD",
69 "ERROR": "ERROR",
70 "DELETED": "DELETED",
71 }
72 netStatus2manoFormat = {
73 "ACTIVE": "ACTIVE",
74 "PAUSED": "PAUSED",
75 "INACTIVE": "INACTIVE",
76 "BUILD": "BUILD",
77 "ERROR": "ERROR",
78 "DELETED": "DELETED",
79 }
80
81 supportedClassificationTypes = ["legacy_flow_classifier"]
82
83 # global var to have a timeout creating and deleting volumes
84 volume_timeout = 1800
85 server_timeout = 1800
86
87
class SafeDumper(yaml.SafeDumper):
    def represent_data(self, data):
        # Openstack APIs use custom subclasses of dict and YAML safe dumper
        # is designed to not handle that (reference issue 142 of pyyaml)
        if isinstance(data, dict) and data.__class__ != dict:
            # A simple solution is to convert those items back to dicts
            data = dict(data.items())

        return super(SafeDumper, self).represent_data(data)

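# Illustrative (hypothetical) example of why SafeDumper is needed: plain
# yaml.safe_dump() raises yaml.representer.RepresenterError on the dict
# subclasses returned by the OpenStack clients, while this dumper accepts them:
#     class FakeOsDict(dict):  # stand-in for an OpenStack dict subclass
#         pass
#     yaml.dump(FakeOsDict(id="42"), Dumper=SafeDumper, default_flow_style=True)
#     # -> "{id: '42'}\n"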

class vimconnector(vimconn.VimConnector):
    def __init__(
        self,
        uuid,
        name,
        tenant_id,
        tenant_name,
        url,
        url_admin=None,
        user=None,
        passwd=None,
        log_level=None,
        config={},
        persistent_info={},
    ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
117 """
        api_version = config.get("APIversion")

        if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:APIversion. "
                "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
            )

        vim_type = config.get("vim_type")

        if vim_type and vim_type not in ("vio", "VIO"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:vim_type."
                "Allowed values are 'vio' or 'VIO'".format(vim_type)
            )

        if config.get("dataplane_net_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
            )

        if config.get("multisegment_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("multisegment_vlan_range"), "multisegment_vlan_range"
            )

        vimconn.VimConnector.__init__(
            self,
            uuid,
            name,
            tenant_id,
            tenant_name,
            url,
            url_admin,
            user,
            passwd,
            log_level,
            config,
        )

        if self.config.get("insecure") and self.config.get("ca_cert"):
            raise vimconn.VimConnException(
                "options insecure and ca_cert are mutually exclusive"
            )

        self.verify = True

        if self.config.get("insecure"):
            self.verify = False

        if self.config.get("ca_cert"):
            self.verify = self.config.get("ca_cert")

        if not url:
            raise TypeError("url param can not be NoneType")

        self.persistent_info = persistent_info
        self.availability_zone = persistent_info.get("availability_zone", None)
        self.session = persistent_info.get("session", {"reload_client": True})
        self.my_tenant_id = self.session.get("my_tenant_id")
        self.nova = self.session.get("nova")
        self.neutron = self.session.get("neutron")
        self.cinder = self.session.get("cinder")
        self.glance = self.session.get("glance")
        # self.glancev1 = self.session.get("glancev1")
        self.keystone = self.session.get("keystone")
        self.api_version3 = self.session.get("api_version3")
        self.vim_type = self.config.get("vim_type")

        if self.vim_type:
            self.vim_type = self.vim_type.upper()

        if self.config.get("use_internal_endpoint"):
            self.endpoint_type = "internalURL"
        else:
            self.endpoint_type = None

        logging.getLogger("urllib3").setLevel(logging.WARNING)
        logging.getLogger("keystoneauth").setLevel(logging.WARNING)
        logging.getLogger("novaclient").setLevel(logging.WARNING)
        self.logger = logging.getLogger("ro.vim.openstack")

        # allow security_groups to be a list or a single string
        if isinstance(self.config.get("security_groups"), str):
            self.config["security_groups"] = [self.config["security_groups"]]

        self.security_groups_id = None

        # ###### VIO Specific Changes #########
        if self.vim_type == "VIO":
            self.logger = logging.getLogger("ro.vim.vio")

        if log_level:
            self.logger.setLevel(getattr(logging, log_level))

    def __getitem__(self, index):
        """Get individual parameters.
        Throw KeyError"""
        if index == "project_domain_id":
            return self.config.get("project_domain_id")
        elif index == "user_domain_id":
            return self.config.get("user_domain_id")
        else:
            return vimconn.VimConnector.__getitem__(self, index)

    def __setitem__(self, index, value):
        """Set individual parameters and mark the session as dirty to force a connection reload.
        Throw KeyError"""
        if index == "project_domain_id":
            self.config["project_domain_id"] = value
        elif index == "user_domain_id":
            self.config["user_domain_id"] = value
        else:
            vimconn.VimConnector.__setitem__(self, index, value)

        self.session["reload_client"] = True

    def serialize(self, value):
        """Serialization of python basic types.

        In the case value is not serializable a message will be logged and a
        simple representation of the data that cannot be converted back to
        python is returned.
        """
        if isinstance(value, str):
            return value

        try:
            return yaml.dump(
                value, Dumper=SafeDumper, default_flow_style=True, width=256
            )
        except yaml.representer.RepresenterError:
            self.logger.debug(
                "The following entity cannot be serialized in YAML:\n\n%s\n\n",
                pformat(value),
                exc_info=True,
            )

            return str(value)

    def _reload_connection(self):
        """Called before any operation, it checks if credentials have changed.
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seems that python client does this task for us :-)
        if self.session["reload_client"]:
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require a specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availability zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None

    def __net_os2mano(self, net_list_dict):
        """Transform the net openstack format to mano format
        net_list_dict can be a list of dict or a single dict"""
        if type(net_list_dict) is dict:
            net_list_ = (net_list_dict,)
        elif type(net_list_dict) is list:
            net_list_ = net_list_dict
        else:
            raise TypeError("param net_list_dict must be a list or a dictionary")
        for net in net_list_:
            if net.get("provider:network_type") == "vlan":
                net["type"] = "data"
            else:
                net["type"] = "bridge"

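    # Illustrative (not executed): a Neutron net carrying
    # {"provider:network_type": "vlan"} gains {"type": "data"}; any other
    # network type (vxlan, flat, ...) gains {"type": "bridge"}.
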
    def __classification_os2mano(self, class_list_dict):
        """Transform the openstack format (Flow Classifier) to mano format
        (Classification) class_list_dict can be a list of dict or a single dict
        """
        if isinstance(class_list_dict, dict):
            class_list_ = [class_list_dict]
        elif isinstance(class_list_dict, list):
            class_list_ = class_list_dict
        else:
            raise TypeError("param class_list_dict must be a list or a dictionary")
        for classification in class_list_:
            id = classification.pop("id")
            name = classification.pop("name")
            description = classification.pop("description")
            project_id = classification.pop("project_id")
            tenant_id = classification.pop("tenant_id")
            original_classification = copy.deepcopy(classification)
            classification.clear()
            classification["ctype"] = "legacy_flow_classifier"
            classification["definition"] = original_classification
            classification["id"] = id
            classification["name"] = name
            classification["description"] = description
            classification["project_id"] = project_id
            classification["tenant_id"] = tenant_id

    def __sfi_os2mano(self, sfi_list_dict):
        """Transform the openstack format (Port Pair) to mano format (SFI)
        sfi_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfi_list_dict, dict):
            sfi_list_ = [sfi_list_dict]
        elif isinstance(sfi_list_dict, list):
            sfi_list_ = sfi_list_dict
        else:
            raise TypeError("param sfi_list_dict must be a list or a dictionary")

        for sfi in sfi_list_:
            sfi["ingress_ports"] = []
            sfi["egress_ports"] = []

            if sfi.get("ingress"):
                sfi["ingress_ports"].append(sfi["ingress"])

            if sfi.get("egress"):
                sfi["egress_ports"].append(sfi["egress"])

            del sfi["ingress"]
            del sfi["egress"]
            params = sfi.get("service_function_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfi["sfc_encap"] = sfc_encap
            del sfi["service_function_parameters"]

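    # Illustrative (not executed) transformation: a Neutron port pair
    #     {"ingress": "p1", "egress": "p2",
    #      "service_function_parameters": {"correlation": "mpls"}, ...}
    # becomes the OSM SFI
    #     {"ingress_ports": ["p1"], "egress_ports": ["p2"], "sfc_encap": True, ...}
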
    def __sf_os2mano(self, sf_list_dict):
        """Transform the openstack format (Port Pair Group) to mano format (SF)
        sf_list_dict can be a list of dict or a single dict
        """
        if isinstance(sf_list_dict, dict):
            sf_list_ = [sf_list_dict]
        elif isinstance(sf_list_dict, list):
            sf_list_ = sf_list_dict
        else:
            raise TypeError("param sf_list_dict must be a list or a dictionary")

        for sf in sf_list_:
            del sf["port_pair_group_parameters"]
            sf["sfis"] = sf["port_pairs"]
            del sf["port_pairs"]

    def __sfp_os2mano(self, sfp_list_dict):
        """Transform the openstack format (Port Chain) to mano format (SFP)
        sfp_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfp_list_dict, dict):
            sfp_list_ = [sfp_list_dict]
        elif isinstance(sfp_list_dict, list):
            sfp_list_ = sfp_list_dict
        else:
            raise TypeError("param sfp_list_dict must be a list or a dictionary")

        for sfp in sfp_list_:
            params = sfp.pop("chain_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfp["sfc_encap"] = sfc_encap
            sfp["spi"] = sfp.pop("chain_id")
            sfp["classifications"] = sfp.pop("flow_classifiers")
            sfp["service_functions"] = sfp.pop("port_pair_groups")

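    # Illustrative (not executed) transformation: a Neutron port chain
    #     {"chain_id": 7, "flow_classifiers": ["fc1"],
    #      "port_pair_groups": ["ppg1"], "chain_parameters": {"correlation": "nsh"}}
    # becomes the OSM SFP
    #     {"spi": 7, "classifications": ["fc1"], "service_functions": ["ppg1"],
    #      "sfc_encap": True}
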
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.

    def _format_exception(self, exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
        message_error = str(exception)
        tip = ""

        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
            ),
        ):
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
            ),
        ):
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
            ),
        ):
            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, vimconn.VimConnException):
            raise exception
        else:
            self.logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnConnectionException(
                type(exception).__name__ + ": " + message_error
            )

    def _get_ids_from_name(self):
        """
        Obtain ids from name of tenant and security_groups. Store at self.security_groups_id
        :return: None
        """
        # get tenant_id if only tenant_name is supplied
        self._reload_connection()

        if not self.my_tenant_id:
            raise vimconn.VimConnConnectionException(
                "Error getting tenant information from name={} id={}".format(
                    self.tenant_name, self.tenant_id
                )
            )

        if self.config.get("security_groups") and not self.security_groups_id:
            # convert from name to id
            neutron_sg_list = self.neutron.list_security_groups(
                tenant_id=self.my_tenant_id
            )["security_groups"]

            self.security_groups_id = []
            for sg in self.config.get("security_groups"):
                for neutron_sg in neutron_sg_list:
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )

    def check_vim_connectivity(self):
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})

    def get_tenant_list(self, filter_dict={}):
        """Obtain tenants of VIM
        filter_dict can contain the following keys:
            name: filter by tenant name
            id: filter by tenant uuid/id
            <other VIM specific>
        Returns the tenant list of dictionaries: [{'name':'<name>', 'id':'<id>', ...}, ...]
        """
        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()

            if self.api_version3:
                project_class_list = self.keystone.projects.list(
                    name=filter_dict.get("name")
                )
            else:
                project_class_list = self.keystone.tenants.findall(**filter_dict)

            project_list = []

            for project in project_class_list:
                if filter_dict.get("id") and filter_dict["id"] != project.id:
                    continue

                project_list.append(project.to_dict())

            return project_list
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_tenant(self, tenant_name, tenant_description):
        """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
        self.logger.debug("Adding a new tenant name: %s", tenant_name)

        try:
            self._reload_connection()

            if self.api_version3:
                project = self.keystone.projects.create(
                    tenant_name,
                    self.config.get("project_domain_id", "default"),
                    description=tenant_description,
                    is_domain=False,
                )
            else:
                project = self.keystone.tenants.create(tenant_name, tenant_description)

            return project.id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_tenant(self, tenant_id):
        """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
        self.logger.debug("Deleting tenant %s from VIM", tenant_id)

        try:
            self._reload_connection()

            if self.api_version3:
                self.keystone.projects.delete(tenant_id)
            else:
                self.keystone.tenants.delete(tenant_id)

            return tenant_id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_network(
        self,
        net_name,
        net_type,
        ip_profile=None,
        shared=False,
        provider_network_profile=None,
    ):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data': underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/used by other tenants/organizations
            'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
                physical-network: physnet-label}
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
        )
        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))

        try:
            vlan = None

            if provider_network_profile:
                vlan = provider_network_profile.get("segmentation-id")

            new_net = None
            created_items = {}
            self._reload_connection()
            network_dict = {"name": net_name, "admin_state_up": True}

            if net_type in ("data", "ptp") or provider_network_profile:
                provider_physical_network = None

                if provider_network_profile and provider_network_profile.get(
                    "physical-network"
                ):
                    provider_physical_network = provider_network_profile.get(
                        "physical-network"
                    )

                    # provider-network must be one of the dataplane_physical_net if this is a list. If it is a string
                    # or not declared, just ignore the checking
                    if (
                        isinstance(
                            self.config.get("dataplane_physical_net"), (tuple, list)
                        )
                        and provider_physical_network
                        not in self.config["dataplane_physical_net"]
                    ):
                        raise vimconn.VimConnConflictException(
                            "Invalid parameter 'provider-network:physical-network' "
                            "for network creation. '{}' is not one of the declared "
                            "list at VIM_config:dataplane_physical_net".format(
                                provider_physical_network
                            )
                        )

                # use the default dataplane_physical_net
                if not provider_physical_network:
                    provider_physical_network = self.config.get(
                        "dataplane_physical_net"
                    )

                    # if it is a non-empty list, use the first value. If it is a string use the value directly
                    if (
                        isinstance(provider_physical_network, (tuple, list))
                        and provider_physical_network
                    ):
                        provider_physical_network = provider_physical_network[0]

                if not provider_physical_network:
                    raise vimconn.VimConnConflictException(
                        "missing information needed for underlay networks. Provide "
                        "'dataplane_physical_net' configuration at VIM or use the NS "
                        "instantiation parameter 'provider-network.physical-network'"
                        " for the VLD"
                    )

                if not self.config.get("multisegment_support"):
                    network_dict[
                        "provider:physical_network"
                    ] = provider_physical_network

                    if (
                        provider_network_profile
                        and "network-type" in provider_network_profile
                    ):
                        network_dict[
                            "provider:network_type"
                        ] = provider_network_profile["network-type"]
                    else:
                        network_dict["provider:network_type"] = self.config.get(
                            "dataplane_network_type", "vlan"
                        )

                    if vlan:
                        network_dict["provider:segmentation_id"] = vlan
                else:
                    # Multi-segment case
                    segment_list = []
                    segment1_dict = {
                        "provider:physical_network": "",
                        "provider:network_type": "vxlan",
                    }
                    segment_list.append(segment1_dict)
                    segment2_dict = {
                        "provider:physical_network": provider_physical_network,
                        "provider:network_type": "vlan",
                    }

                    if vlan:
                        vlanID = vlan  # keep vlanID defined for the l2gw connection created below
                        segment2_dict["provider:segmentation_id"] = vlanID
                    elif self.config.get("multisegment_vlan_range"):
                        vlanID = self._generate_multisegment_vlanID()
                        segment2_dict["provider:segmentation_id"] = vlanID

                    # else
                    #     raise vimconn.VimConnConflictException(
                    #         "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
                    #         network")
                    segment_list.append(segment2_dict)
                    network_dict["segments"] = segment_list

                # VIO Specific Changes. It needs a concrete VLAN
                if self.vim_type == "VIO" and vlan is None:
                    if self.config.get("dataplane_net_vlan_range") is None:
                        raise vimconn.VimConnConflictException(
                            "You must provide 'dataplane_net_vlan_range' in format "
                            "[start_ID - end_ID] at VIM_config for creating underlay "
                            "networks"
                        )

                    network_dict["provider:segmentation_id"] = self._generate_vlanID()

            network_dict["shared"] = shared

            if self.config.get("disable_network_port_security"):
                network_dict["port_security_enabled"] = False

            if self.config.get("neutron_availability_zone_hints"):
                hints = self.config.get("neutron_availability_zone_hints")

                if isinstance(hints, str):
                    hints = [hints]

                network_dict["availability_zone_hints"] = hints

            new_net = self.neutron.create_network({"network": network_dict})
            # print new_net
            # create subnetwork, even if there is no profile

            if not ip_profile:
                ip_profile = {}

            if not ip_profile.get("subnet_address"):
                # Fake subnet is required
                subnet_rand = random.randint(0, 255)
                ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)

            if "ip_version" not in ip_profile:
                ip_profile["ip_version"] = "IPv4"

            subnet = {
                "name": net_name + "-subnet",
                "network_id": new_net["network"]["id"],
                "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
                "cidr": ip_profile["subnet_address"],
            }

            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
            if ip_profile.get("gateway_address"):
                subnet["gateway_ip"] = ip_profile["gateway_address"]
            else:
                subnet["gateway_ip"] = None

            if ip_profile.get("dns_address"):
                subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")

            if "dhcp_enabled" in ip_profile:
                subnet["enable_dhcp"] = (
                    False
                    if ip_profile["dhcp_enabled"] == "false"
                    or ip_profile["dhcp_enabled"] is False
                    else True
                )

            if ip_profile.get("dhcp_start_address"):
                subnet["allocation_pools"] = []
                subnet["allocation_pools"].append(dict())
                subnet["allocation_pools"][0]["start"] = ip_profile[
                    "dhcp_start_address"
                ]

            if ip_profile.get("dhcp_count"):
                # parts = ip_profile["dhcp_start_address"].split(".")
                # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
                ip_int += ip_profile["dhcp_count"] - 1
                ip_str = str(netaddr.IPAddress(ip_int))
                subnet["allocation_pools"][0]["end"] = ip_str

            # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
            self.neutron.create_subnet({"subnet": subnet})

            if net_type == "data" and self.config.get("multisegment_support"):
                if self.config.get("l2gw_support"):
                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
                    for l2gw in l2gw_list:
                        l2gw_conn = {
                            "l2_gateway_id": l2gw["id"],
                            "network_id": new_net["network"]["id"],
                            "segmentation_id": str(vlanID),
                        }
                        new_l2gw_conn = self.neutron.create_l2_gateway_connection(
                            {"l2_gateway_connection": l2gw_conn}
                        )
                        created_items[
                            "l2gwconn:"
                            + str(new_l2gw_conn["l2_gateway_connection"]["id"])
                        ] = True

            return new_net["network"]["id"], created_items
        except Exception as e:
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )

            if new_net:
                self.neutron.delete_network(new_net["network"]["id"])

            self._format_exception(e)

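    # A minimal usage sketch (hypothetical values) for the method above,
    # assuming a VIM configured with dataplane_physical_net="physnet1":
    #     net_id, created_items = self.new_network(
    #         "vld-data", "data",
    #         ip_profile={"subnet_address": "10.0.0.0/24", "dhcp_enabled": True},
    #         provider_network_profile={"segmentation-id": 400, "network-type": "vlan"},
    #     )
    # would create a VLAN 400 provider network on physnet1 plus its subnet.
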
    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM
        Filter_dict can be:
            name: network name
            id: network uuid
            shared: boolean
            tenant_id: tenant
            admin_state_up: boolean
            status: 'ACTIVE'
        Returns the network list of dictionaries
        """
        self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()
            filter_dict_os = filter_dict.copy()

            if self.api_version3 and "tenant_id" in filter_dict_os:
                # TODO check
                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

            net_dict = self.neutron.list_networks(**filter_dict_os)
            net_list = net_dict["networks"]
            self.__net_os2mano(net_list)

            return net_list
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_network(self, net_id):
        """Obtain details of network from VIM
        Returns the network information from a network id"""
        self.logger.debug("Getting tenant network %s from VIM", net_id)
        filter_dict = {"id": net_id}
        net_list = self.get_network_list(filter_dict)

        if len(net_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Network '{}' not found".format(net_id)
            )
        elif len(net_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one network with this criteria"
            )

        net = net_list[0]
        subnets = []
        for subnet_id in net.get("subnets", ()):
            try:
                subnet = self.neutron.show_subnet(subnet_id)
            except Exception as e:
                self.logger.error(
                    "osconnector.get_network(): Error getting subnet %s %s"
                    % (net_id, str(e))
                )
                subnet = {"id": subnet_id, "fault": str(e)}

            subnets.append(subnet)

        net["subnets"] = subnets
        net["encapsulation"] = net.get("provider:network_type")
        net["encapsulation_type"] = net.get("provider:network_type")
        net["segmentation_id"] = net.get("provider:segmentation_id")
        net["encapsulation_id"] = net.get("provider:segmentation_id")

        return net

    def delete_network(self, net_id, created_items=None):
        """
        Removes a tenant network from VIM and its associated elements
        :param net_id: VIM identifier of the network, provided by method new_network
        :param created_items: dictionary with extra items to be deleted. provided by method new_network
        Returns the network identifier or raises an exception upon error or when network is not found
        """
        self.logger.debug("Deleting network '%s' from VIM", net_id)

        if created_items is None:
            created_items = {}

        try:
            self._reload_connection()
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")
                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e).__name__, e
                        )
                    )

            # delete VM ports attached to this network before the network
            ports = self.neutron.list_ports(network_id=net_id)
            for p in ports["ports"]:
                try:
                    self.neutron.delete_port(p["id"])
                except Exception as e:
                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))

            self.neutron.delete_network(net_id)

            return net_id
        except (
            neExceptions.ConnectionFailed,
            neExceptions.NetworkNotFoundClient,
            neExceptions.NeutronException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def refresh_nets_status(self, net_list):
        """Get the status of the networks
        Params: the list of network identifiers
        Returns a dictionary with:
            net_id:    #VIM id of this network
            status:    #Mandatory. Text with one of:
                       #  DELETED (not found at vim)
                       #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                       #  OTHER (Vim reported other status not understood)
                       #  ERROR (VIM indicates an ERROR status)
                       #  ACTIVE, INACTIVE, DOWN (admin down),
                       #  BUILD (on building process)
                       #
            error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:  #Text with plain information obtained from vim (yaml.safe_dump)
        """
        net_dict = {}

        for net_id in net_list:
            net = {}

            try:
                net_vim = self.get_network(net_id)

                if net_vim["status"] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[net_vim["status"]]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim["status"]

                if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                    net["status"] = "DOWN"

                net["vim_info"] = self.serialize(net_vim)

                if net_vim.get("fault"):  # TODO
                    net["error_msg"] = str(net_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "DELETED"
                net["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "VIM_ERROR"
                net["error_msg"] = str(e)
            net_dict[net_id] = net

        return net_dict

    def get_flavor(self, flavor_id):
        """Obtain flavor details from the VIM. Returns the flavor dict details"""
        self.logger.debug("Getting flavor '%s'", flavor_id)

        try:
            self._reload_connection()
            flavor = self.nova.flavors.find(id=flavor_id)
            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)

            return flavor.to_dict()
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain the flavor id that matches the flavor description
        Returns the flavor_id or raises a VimConnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closest flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a
        VimConnNotFoundException is raised
        """
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
                # if len(numas) > 1:
                #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
                # numa=numas[0]
                # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                if epa:
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    @staticmethod
    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
        """Process resource quota and fill up extra_specs.
        Args:
            quota (dict): Keeping the quota of resources
            prefix (str): Prefix
            extra_specs (dict): Dict to be filled to be used during flavor creation

        """
        if "limit" in quota:
            extra_specs["quota:" + prefix + "_limit"] = quota["limit"]

        if "reserve" in quota:
            extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]

        if "shares" in quota:
            extra_specs["quota:" + prefix + "_shares_level"] = "custom"
            extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]

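    # Example (not executed): process_resource_quota(
    #     {"limit": 1000, "reserve": 500, "shares": 2000}, "cpu", extra_specs)
    # fills extra_specs with {"quota:cpu_limit": 1000,
    #     "quota:cpu_reservation": 500, "quota:cpu_shares_level": "custom",
    #     "quota:cpu_shares_share": 2000}.
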
    @staticmethod
    def process_numa_memory(
        numa: dict, node_id: Optional[int], extra_specs: dict
    ) -> None:
        """Set the memory in extra_specs.
        Args:
            numa (dict): A dictionary which includes numa information
            node_id (int): ID of numa node
            extra_specs (dict): To be filled.

        """
        if not numa.get("memory"):
            return
        memory_mb = numa["memory"] * 1024
        memory = "hw:numa_mem.{}".format(node_id)
        extra_specs[memory] = int(memory_mb)

    @staticmethod
    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
        """Set the cpu in extra_specs.
        Args:
            numa (dict): A dictionary which includes numa information
            node_id (int): ID of numa node
            extra_specs (dict): To be filled.

        """
        if not numa.get("vcpu"):
            return
        vcpu = numa["vcpu"]
        cpu = "hw:numa_cpus.{}".format(node_id)
        vcpu = ",".join(map(str, vcpu))
        extra_specs[cpu] = vcpu

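    # Example (not executed): for numa = {"id": 0, "memory": 2, "vcpu": [0, 1]},
    # process_numa_memory(numa, 0, extra_specs) sets {"hw:numa_mem.0": 2048}
    # (GiB -> MiB) and process_numa_vcpu(numa, 0, extra_specs) sets
    # {"hw:numa_cpus.0": "0,1"}.
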
    @staticmethod
    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has paired-threads.
        Args:
            numa (dict): A dictionary which includes numa information
            extra_specs (dict): To be filled.

        Returns:
            threads (int): Number of virtual cpus

        """
        if not numa.get("paired-threads"):
            return

        # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
        threads = numa["paired-threads"] * 2
        extra_specs["hw:cpu_thread_policy"] = "require"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return threads

    @staticmethod
    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has cores.
        Args:
            numa (dict): A dictionary which includes numa information
            extra_specs (dict): To be filled.

        Returns:
            cores (int): Number of virtual cpus

        """
        # cpu_thread_policy "isolate" implies that the host must not have an SMT
        # architecture, or a non-SMT architecture will be emulated
        if not numa.get("cores"):
            return
        cores = numa["cores"]
        extra_specs["hw:cpu_thread_policy"] = "isolate"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return cores

    @staticmethod
    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has threads.
        Args:
            numa (dict): A dictionary which includes numa information
            extra_specs (dict): To be filled.

        Returns:
            threads (int): Number of virtual cpus

        """
        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
        if not numa.get("threads"):
            return
        threads = numa["threads"]
        extra_specs["hw:cpu_thread_policy"] = "prefer"
        extra_specs["hw:cpu_policy"] = "dedicated"
        return threads

    def _process_numa_parameters_of_flavor(
        self, numas: List, extra_specs: Dict
    ) -> None:
        """Process numa parameters and fill up extra_specs.

        Args:
            numas (list): List of dictionaries which include numa information
            extra_specs (dict): To be filled.

        """
        numa_nodes = len(numas)
        extra_specs["hw:numa_nodes"] = str(numa_nodes)
        cpu_cores, cpu_threads = 0, 0

        if self.vim_type == "VIO":
            extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
            extra_specs["vmware:latency_sensitivity_level"] = "high"

        for numa in numas:
            if "id" in numa:
                node_id = numa["id"]
                # overwrite ram and vcpus
                # check if key "memory" is present in numa else use ram value at flavor
                self.process_numa_memory(numa, node_id, extra_specs)
                self.process_numa_vcpu(numa, node_id, extra_specs)

            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
            extra_specs["hw:cpu_sockets"] = str(numa_nodes)

            if "paired-threads" in numa:
                threads = self.process_numa_paired_threads(numa, extra_specs)
                cpu_threads += threads

            elif "cores" in numa:
                cores = self.process_numa_cores(numa, extra_specs)
                cpu_cores += cores

            elif "threads" in numa:
                threads = self.process_numa_threads(numa, extra_specs)
                cpu_threads += threads

        if cpu_cores:
            extra_specs["hw:cpu_cores"] = str(cpu_cores)
        if cpu_threads:
            extra_specs["hw:cpu_threads"] = str(cpu_threads)

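    # Example (not executed, non-VIO case): numas = [{"id": 0, "paired-threads": 2}]
    # produces extra_specs = {"hw:numa_nodes": "1", "hw:cpu_sockets": "1",
    #     "hw:cpu_thread_policy": "require", "hw:cpu_policy": "dedicated",
    #     "hw:cpu_threads": "4"}.
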
    def _change_flavor_name(
        self, name: str, name_suffix: int, flavor_data: dict
    ) -> str:
        """Change the flavor name if the name already exists.

        Args:
            name (str): Flavor name to be checked
            name_suffix (int): Suffix to be appended to name
            flavor_data (dict): Flavor dict

        Returns:
            name (str): New flavor name to be used

        """
        # Get used names
        fl = self.nova.flavors.list()
        fl_names = [f.name for f in fl]

        while name in fl_names:
            name_suffix += 1
            name = flavor_data["name"] + "-" + str(name_suffix)

        return name

    def _process_extended_config_of_flavor(
        self, extended: dict, extra_specs: dict
    ) -> None:
        """Process the extended dict to fill up extra_specs.
        Args:
            extended (dict): Keeping the extra specification of flavor
            extra_specs (dict): Dict to be filled to be used during flavor creation

        """
        quotas = {
            "cpu-quota": "cpu",
            "mem-quota": "memory",
            "vif-quota": "vif",
            "disk-io-quota": "disk_io",
        }

        page_sizes = {
            "LARGE": "large",
            "SMALL": "small",
            "SIZE_2MB": "2MB",
            "SIZE_1GB": "1GB",
            "PREFER_LARGE": "any",
        }

        policies = {
            "cpu-pinning-policy": "hw:cpu_policy",
            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
            "mem-policy": "hw:numa_mempolicy",
        }

        numas = extended.get("numas")
        if numas:
            self._process_numa_parameters_of_flavor(numas, extra_specs)

        for quota, item in quotas.items():
            if quota in extended.keys():
                self.process_resource_quota(extended.get(quota), item, extra_specs)

        # Set the mempage size as specified in the descriptor
        if extended.get("mempage-size"):
            if extended["mempage-size"] in page_sizes.keys():
                extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
            else:
                # Normally, validations in NBI should not allow this condition.
                self.logger.debug(
                    "Invalid mempage-size %s. Will be ignored",
                    extended.get("mempage-size"),
                )

        for policy, hw_policy in policies.items():
            if extended.get(policy):
                extra_specs[hw_policy] = extended[policy].lower()

    @staticmethod
    def _get_flavor_details(flavor_data: dict) -> Tuple:
        """Returns the details of flavor
        Args:
            flavor_data (dict): Dictionary that includes required flavor details

        Returns:
            ram, vcpus, extra_specs, extended (tuple): Main items of required flavor

        """
        return (
            flavor_data.get("ram", 64),
            flavor_data.get("vcpus", 1),
            {},
            flavor_data.get("extended"),
        )

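    # Example (not executed): _get_flavor_details(
    #     {"name": "f1", "ram": 2048, "vcpus": 2, "disk": 10})
    # returns (2048, 2, {}, None).
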
    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        if change_name_if_used is True, it will change the name in case of conflict,
        because name repetition is not supported.

        Args:
            flavor_data (dict): Flavor details to be processed
            change_name_if_used (bool): Change name in case of conflict

        Returns:
            flavor_id (str): flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )
                    if extended:
                        self._process_extended_config_of_flavor(extended, extra_specs)

                    # Create flavor
                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)

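    # A minimal usage sketch (hypothetical values): calling
    #     flavor_id = self.new_flavor(
    #         {"name": "small", "ram": 2048, "vcpus": 2, "disk": 10})
    # creates a 2048 MiB / 2 vCPU / 10 GB flavor and returns its nova id;
    # on a name conflict it retries as "small-1", "small-2", ...
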
    def delete_flavor(self, flavor_id):
        """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
        try:
            self._reload_connection()
            self.nova.flavors.delete(flavor_id)

            return flavor_id
        # except nvExceptions.BadRequest as e:
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_image(self, image_dict):
        """
        Adds a tenant image to VIM. image_dict is a dictionary with:
            name: name
            disk_format: qcow2, vhd, vmdk, raw (by default), ...
            location: path or URI
            public: "yes" or "no"
            metadata: metadata of the image
        Returns the image_id
        """
        retry = 0
        max_retries = 3

        while retry < max_retries:
            retry += 1
            try:
                self._reload_connection()

                # determine format http://docs.openstack.org/developer/glance/formats.html
                if "disk_format" in image_dict:
                    disk_format = image_dict["disk_format"]
                else:  # autodiscover based on extension
                    if image_dict["location"].endswith(".qcow2"):
                        disk_format = "qcow2"
                    elif image_dict["location"].endswith(".vhd"):
                        disk_format = "vhd"
                    elif image_dict["location"].endswith(".vmdk"):
                        disk_format = "vmdk"
                    elif image_dict["location"].endswith(".vdi"):
                        disk_format = "vdi"
                    elif image_dict["location"].endswith(".iso"):
                        disk_format = "iso"
                    elif image_dict["location"].endswith(".aki"):
                        disk_format = "aki"
                    elif image_dict["location"].endswith(".ari"):
                        disk_format = "ari"
                    elif image_dict["location"].endswith(".ami"):
                        disk_format = "ami"
                    else:
                        disk_format = "raw"

                self.logger.debug(
                    "new_image: '%s' loading from '%s'",
                    image_dict["name"],
                    image_dict["location"],
                )
                if self.vim_type == "VIO":
                    container_format = "bare"
                    if "container_format" in image_dict:
                        container_format = image_dict["container_format"]

                    new_image = self.glance.images.create(
                        name=image_dict["name"],
                        container_format=container_format,
                        disk_format=disk_format,
                    )
                else:
                    new_image = self.glance.images.create(name=image_dict["name"])

                if image_dict["location"].startswith("http"):
                    # TODO there is not a method to direct download. It must be downloaded locally with requests
                    raise vimconn.VimConnNotImplemented("Cannot create image from URL")
                else:  # local path
                    with open(image_dict["location"]) as fimage:
                        self.glance.images.upload(new_image.id, fimage)
                        # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                        #    image_dict.get("public","yes")=="yes",
                        #    container_format="bare", data=fimage, disk_format=disk_format)

                # default to an empty dict so the location key below can always be set
                metadata_to_load = image_dict.get("metadata") or {}

                # TODO location is a reserved word for current openstack versions. fixed for VIO please check
                # for openstack
                if self.vim_type == "VIO":
                    metadata_to_load["upload_location"] = image_dict["location"]
                else:
                    metadata_to_load["location"] = image_dict["location"]

                self.glance.images.update(new_image.id, **metadata_to_load)

                return new_image.id
            except (
                nvExceptions.Conflict,
                ksExceptions.ClientException,
                nvExceptions.ClientException,
            ) as e:
                self._format_exception(e)
            except (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
            ) as e:
                if retry < max_retries:  # retry on transient communication errors
                    continue

                self._format_exception(e)
            except IOError as e:  # can not open the file
                raise vimconn.VimConnConnectionException(
                    "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                    http_code=vimconn.HTTP_Bad_Request,
                )

    def delete_image(self, image_id):
        """Deletes a tenant image from openstack VIM. Returns the old id"""
        try:
            self._reload_connection()
            self.glance.images.delete(image_id)

            return image_id
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            gl1Exceptions.HTTPNotFound,
            ConnectionError,
        ) as e:  # TODO remove
            self._format_exception(e)

    def get_image_id_from_path(self, path):
        """Get the image id from image path in the VIM database. Returns the image_id"""
        try:
            self._reload_connection()
            images = self.glance.images.list()

            for image in images:
                if image.metadata.get("location") == path:
                    return image.id

            raise vimconn.VimConnNotFoundException(
                "image with location '{}' not found".format(path)
            )
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_image_list(self, filter_dict={}):
        """Obtain tenant images from VIM
        Filter_dict can be:
            id: image id
            name: image name
            checksum: image checksum
        Returns the image list of dictionaries:
            [{<the fields at Filter_dict plus some VIM specific>}, ...]
            List can be empty
        """
        self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()
            # filter_dict_os = filter_dict.copy()
            # First we filter by the available filter fields: name, id. The others are removed.
            image_list = self.glance.images.list()
            filtered_list = []

            for image in image_list:
                try:
                    if filter_dict.get("name") and image["name"] != filter_dict["name"]:
                        continue

                    if filter_dict.get("id") and image["id"] != filter_dict["id"]:
                        continue

                    if (
                        filter_dict.get("checksum")
                        and image["checksum"] != filter_dict["checksum"]
                    ):
                        continue

                    filtered_list.append(image.copy())
                except gl1Exceptions.HTTPNotFound:
                    pass

            return filtered_list
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def __wait_for_vm(self, vm_id, status):
        """Wait until the VM is in the desired status and return True.
        If the VM gets into ERROR status, return False.
        If the timeout is reached, raise an exception."""
        elapsed_time = 0
        while elapsed_time < server_timeout:
            vm_status = self.nova.servers.get(vm_id).status

            if vm_status == status:
                return True

            if vm_status == "ERROR":
                return False

            time.sleep(5)
            elapsed_time += 5

        # if we exceeded the timeout rollback
        if elapsed_time >= server_timeout:
            raise vimconn.VimConnException(
                "Timeout waiting for instance " + vm_id + " to get " + status,
                http_code=vimconn.HTTP_Request_Timeout,
            )

1776 def _get_openstack_availablity_zones(self):
1777 """
1778 Get the availability zones available from OpenStack
1779 :return:
1780 """
1781 try:
1782 openstack_availability_zone = self.nova.availability_zones.list()
1783 openstack_availability_zone = [
1784 str(zone.zoneName)
1785 for zone in openstack_availability_zone
1786 if zone.zoneName != "internal"
1787 ]
1788
1789 return openstack_availability_zone
1790 except Exception:
1791 return None
1792
1793 def _set_availablity_zones(self):
1794 """
1795 Set the VIM availability zones
1796 :return:
1797 """
1798 if "availability_zone" in self.config:
1799 vim_availability_zones = self.config.get("availability_zone")
1800
1801 if isinstance(vim_availability_zones, str):
1802 self.availability_zone = [vim_availability_zones]
1803 elif isinstance(vim_availability_zones, list):
1804 self.availability_zone = vim_availability_zones
1805 else:
1806 self.availability_zone = self._get_openstack_availablity_zones()
1807
1808 def _get_vm_availability_zone(
1809 self, availability_zone_index, availability_zone_list
1810 ):
1811 """
1812 Return the availability zone to be used by the created VM.
1813 :return: The VIM availability zone to be used or None
1814 """
1815 if availability_zone_index is None:
1816 if not self.config.get("availability_zone"):
1817 return None
1818 elif isinstance(self.config.get("availability_zone"), str):
1819 return self.config["availability_zone"]
1820 else:
1821 # TODO consider using a different parameter at config for default AV and AV list match
1822 return self.config["availability_zone"][0]
1823
1824 vim_availability_zones = self.availability_zone
1825 # check if the VIM offers enough availability zones as described in the VNFD
1826 if vim_availability_zones and len(availability_zone_list) <= len(
1827 vim_availability_zones
1828 ):
1829 # if any NFV AZ name does not match a VIM AZ name, fall back to matching by index
1830 match_by_index = False
1831 for av in availability_zone_list:
1832 if av not in vim_availability_zones:
1833 match_by_index = True
1834 break
1835
1836 if match_by_index:
1837 return vim_availability_zones[availability_zone_index]
1838 else:
1839 return availability_zone_list[availability_zone_index]
1840 else:
1841 raise vimconn.VimConnConflictException(
1842 "No enough availability zones at VIM for this deployment"
1843 )
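
# Illustrative sketch (hypothetical zone names) of the selection above.
# Assuming the VIM exposes ["az-a", "az-b"] and the VNFD requests
# ["zone1", "zone2"] (names unknown to the VIM), matching falls back to
# the index:
#   self.availability_zone = ["az-a", "az-b"]
#   availability_zone_list = ["zone1", "zone2"]  # no name match -> by index
#   availability_zone_index = 1                  # -> returns "az-b"
# If instead all requested names exist at the VIM, the requested name at
# the given index is returned directly.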
1844
1845 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1846 """Fill up the security_groups in the port_dict.
1847
1848 Args:
1849 net (dict): Network details
1850 port_dict (dict): Port details
1851
1852 """
1853 if (
1854 self.config.get("security_groups")
1855 and net.get("port_security") is not False
1856 and not self.config.get("no_port_security_extension")
1857 ):
1858 if not self.security_groups_id:
1859 self._get_ids_from_name()
1860
1861 port_dict["security_groups"] = self.security_groups_id
1862
1863 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1864 """Fill up the network binding depending on network type in the port_dict.
1865
1866 Args:
1867 net (dict): Network details
1868 port_dict (dict): Port details
1869
1870 """
1871 if not net.get("type"):
1872 raise vimconn.VimConnException("Type is missing in the network details.")
1873
1874 if net["type"] == "virtual":
1875 pass
1876
1877 # For VF
1878 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1879
1880 port_dict["binding:vnic_type"] = "direct"
1881
1882 # VIO specific Changes
1883 if self.vim_type == "VIO":
1884 # Need to create port with port_security_enabled = False and no-security-groups
1885 port_dict["port_security_enabled"] = False
1886 port_dict["provider_security_groups"] = []
1887 port_dict["security_groups"] = []
1888
1889 else:
1890 # For PT PCI-PASSTHROUGH
1891 port_dict["binding:vnic_type"] = "direct-physical"
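
# Summary of the binding decision above:
#   net["type"] == "virtual"             -> default (normal) vnic_type
#   net["type"] in ("VF", "SR-IOV")      -> binding:vnic_type = "direct"
#   anything else (PT / PCI-PASSTHROUGH) -> binding:vnic_type = "direct-physical"
# On VIO, SR-IOV ports are additionally created with port security disabled
# and empty security-group lists, as set above.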
1892
1893 @staticmethod
1894 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1895 """Set the "ip" parameter in net dictionary.
1896
1897 Args:
1898 new_port (dict): New created port
1899 net (dict): Network details
1900
1901 """
1902 fixed_ips = new_port["port"].get("fixed_ips")
1903
1904 if fixed_ips:
1905 net["ip"] = fixed_ips[0].get("ip_address")
1906 else:
1907 net["ip"] = None
1908
1909 @staticmethod
1910 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1911 """Fill up the mac_address and fixed_ips in port_dict.
1912
1913 Args:
1914 net (dict): Network details
1915 port_dict (dict): Port details
1916
1917 """
1918 if net.get("mac_address"):
1919 port_dict["mac_address"] = net["mac_address"]
1920
1921 if net.get("ip_address"):
1922 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1923 # TODO add "subnet_id": <subnet_id>
1924
1925 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1926 """Create new port using neutron.
1927
1928 Args:
1929 port_dict (dict): Port details
1930 created_items (dict): All created items
1931 net (dict): Network details
1932
1933 Returns:
1934 new_port (dict): New created port
1935
1936 """
1937 new_port = self.neutron.create_port({"port": port_dict})
1938 created_items["port:" + str(new_port["port"]["id"])] = True
1939 net["mac_adress"] = new_port["port"]["mac_address"]
1940 net["vim_id"] = new_port["port"]["id"]
1941
1942 return new_port
1943
1944 def _create_port(
1945 self, net: dict, name: str, created_items: dict
1946 ) -> Tuple[dict, dict]:
1947 """Create port using net details.
1948
1949 Args:
1950 net (dict): Network details
1951 name (str): Fallback to be used as the port name if the net dict does not include one
1952 created_items (dict): All created items
1953
1954 Returns:
1955 new_port, port New created port, port dictionary
1956
1957 """
1958
1959 port_dict = {
1960 "network_id": net["net_id"],
1961 "name": net.get("name"),
1962 "admin_state_up": True,
1963 }
1964
1965 if not port_dict["name"]:
1966 port_dict["name"] = name
1967
1968 self._prepare_port_dict_security_groups(net, port_dict)
1969
1970 self._prepare_port_dict_binding(net, port_dict)
1971
1972 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1973
1974 new_port = self._create_new_port(port_dict, created_items, net)
1975
1976 vimconnector._set_fixed_ip(new_port, net)
1977
1978 port = {"port-id": new_port["port"]["id"]}
1979
1980 if float(self.nova.api_version.get_string()) >= 2.32:  # NIC tags require compute API microversion >= 2.32
1981 port["tag"] = new_port["port"]["name"]
1982
1983 return new_port, port
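
# Illustrative shapes of the returned values (hypothetical IDs):
#   new_port = {"port": {"id": "3f6f...", "name": "vm01-eth0",
#                        "mac_address": "fa:16:3e:...", "fixed_ips": [...]}}
#   port     = {"port-id": "3f6f...", "tag": "vm01-eth0"}
# The "tag" key is only added when the Nova API microversion is >= 2.32.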
1984
1985 def _prepare_network_for_vminstance(
1986 self,
1987 name: str,
1988 net_list: list,
1989 created_items: dict,
1990 net_list_vim: list,
1991 external_network: list,
1992 no_secured_ports: list,
1993 ) -> None:
1994 """Create port and fill up net dictionary for new VM instance creation.
1995
1996 Args:
1997 name (str): Name of network
1998 net_list (list): List of networks
1999 created_items (dict): All created items belongs to a VM
2000 net_list_vim (list): List of ports
2001 external_network (list): List of external-networks
2002 no_secured_ports (list): Port security disabled ports
2003 """
2004
2005 self._reload_connection()
2006
2007 for net in net_list:
2008 # Skip non-connected iface
2009 if not net.get("net_id"):
2010 continue
2011
2012 new_port, port = self._create_port(net, name, created_items)
2013
2014 net_list_vim.append(port)
2015
2016 if net.get("floating_ip", False):
2017 net["exit_on_floating_ip_error"] = True
2018 external_network.append(net)
2019
2020 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2021 net["exit_on_floating_ip_error"] = False
2022 external_network.append(net)
2023 net["floating_ip"] = self.config.get("use_floating_ip")
2024
2025 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2026 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2027 if net.get("port_security") is False and not self.config.get(
2028 "no_port_security_extension"
2029 ):
2030 no_secured_ports.append(
2031 (
2032 new_port["port"]["id"],
2033 net.get("port_security_disable_strategy"),
2034 )
2035 )
2036
2037 def _prepare_persistent_root_volumes(
2038 self,
2039 name: str,
2040 vm_av_zone: str,
2041 disk: dict,
2042 base_disk_index: int,
2043 block_device_mapping: dict,
2044 existing_vim_volumes: list,
2045 created_items: dict,
2046 ) -> Optional[str]:
2047 """Prepare persistent root volumes for new VM instance.
2048
2049 Args:
2050 name (str): Name of VM instance
2051 vm_av_zone (str): Availability zone for the VM
2052 disk (dict): Disk details
2053 base_disk_index (int): Disk index
2054 block_device_mapping (dict): Block device details
2055 existing_vim_volumes (list): Existing disk details
2056 created_items (dict): All created items belongs to VM
2057
2058 Returns:
2059 boot_volume_id (str): ID of boot volume
2060
2061 """
2062 # Disk may include only vim_volume_id or only vim_id.
2063 # Use existing persistent root volume finding with volume_id or vim_id
2064 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2065
2066 if disk.get(key_id):
2067
2068 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2069 existing_vim_volumes.append({"id": disk[key_id]})
2070
2071 else:
2072 # Create persistent root volume
2073 volume = self.cinder.volumes.create(
2074 size=disk["size"],
2075 name=name + "vd" + chr(base_disk_index),
2076 imageRef=disk["image_id"],
2077 # Make sure volume is in the same AZ as the VM to be attached to
2078 availability_zone=vm_av_zone,
2079 )
2080 boot_volume_id = volume.id
2081 self.update_block_device_mapping(
2082 volume=volume,
2083 block_device_mapping=block_device_mapping,
2084 base_disk_index=base_disk_index,
2085 disk=disk,
2086 created_items=created_items,
2087 )
2088
2089 return boot_volume_id
2090
2091 @staticmethod
2092 def update_block_device_mapping(
2093 volume: object,
2094 block_device_mapping: dict,
2095 base_disk_index: int,
2096 disk: dict,
2097 created_items: dict,
2098 ) -> None:
2099 """Add volume information to block device mapping dict.
2100 Args:
2101 volume (object): Created volume object
2102 block_device_mapping (dict): Block device details
2103 base_disk_index (int): Disk index
2104 disk (dict): Disk details
2105 created_items (dict): All created items belongs to VM
2106 """
2107 if not volume:
2108 raise vimconn.VimConnException("Volume is empty.")
2109
2110 if not hasattr(volume, "id"):
2111 raise vimconn.VimConnException(
2112 "Created volume is not valid, does not have id attribute."
2113 )
2114
2115 volume_txt = "volume:" + str(volume.id)
2116 if disk.get("keep"):
2117 volume_txt += ":keep"
2118 created_items[volume_txt] = True
2119 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
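
# Illustrative result (hypothetical volume IDs): a root disk (index ord("a"))
# created with the keep flag plus one extra disk (index ord("b")) yield:
#   created_items = {"volume:6c0f...:keep": True, "volume:9a1e...": True}
#   block_device_mapping = {"vda": "6c0f...", "vdb": "9a1e..."}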
2120
2121 def _prepare_non_root_persistent_volumes(
2122 self,
2123 name: str,
2124 disk: dict,
2125 vm_av_zone: str,
2126 block_device_mapping: dict,
2127 base_disk_index: int,
2128 existing_vim_volumes: list,
2129 created_items: dict,
2130 ) -> None:
2131 """Prepare persistent volumes for new VM instance.
2132
2133 Args:
2134 name (str): Name of VM instance
2135 disk (dict): Disk details
2136 vm_av_zone (str): Availability zone for the VM
2137 block_device_mapping (dict): Block device details
2138 base_disk_index (int): Disk index
2139 existing_vim_volumes (list): Existing disk details
2140 created_items (dict): All created items belongs to VM
2141 """
2142 # Non-root persistent volumes
2143 # Disk may include only vim_volume_id or only vim_id.
2144 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2145
2146 if disk.get(key_id):
2147
2148 # Use existing persistent volume
2149 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2150 existing_vim_volumes.append({"id": disk[key_id]})
2151
2152 else:
2153 # Create persistent volume
2154 volume = self.cinder.volumes.create(
2155 size=disk["size"],
2156 name=name + "vd" + chr(base_disk_index),
2157 # Make sure volume is in the same AZ as the VM to be attached to
2158 availability_zone=vm_av_zone,
2159 )
2160 self.update_block_device_mapping(
2161 volume=volume,
2162 block_device_mapping=block_device_mapping,
2163 base_disk_index=base_disk_index,
2164 disk=disk,
2165 created_items=created_items,
2166 )
2167
2168 def _wait_for_created_volumes_availability(
2169 self, elapsed_time: int, created_items: dict
2170 ) -> Optional[int]:
2171 """Wait till created volumes become available.
2172
2173 Args:
2174 elapsed_time (int): Passed time while waiting
2175 created_items (dict): All created items belongs to VM
2176
2177 Returns:
2178 elapsed_time (int): Time spent while waiting
2179
2180 """
2181
2182 while elapsed_time < volume_timeout:
2183 for created_item in created_items:
2184 v, volume_id = (
2185 created_item.split(":")[0],
2186 created_item.split(":")[1],
2187 )
2188 if v == "volume":
2189 if self.cinder.volumes.get(volume_id).status != "available":
2190 break
2191 else:
2192 # All ready: break from while
2193 break
2194
2195 time.sleep(5)
2196 elapsed_time += 5
2197
2198 return elapsed_time
2199
2200 def _wait_for_existing_volumes_availability(
2201 self, elapsed_time: int, existing_vim_volumes: list
2202 ) -> Optional[int]:
2203 """Wait till existing volumes become available.
2204
2205 Args:
2206 elapsed_time (int): Passed time while waiting
2207 existing_vim_volumes (list): Existing volume details
2208
2209 Returns:
2210 elapsed_time (int): Time spent while waiting
2211
2212 """
2213
2214 while elapsed_time < volume_timeout:
2215 for volume in existing_vim_volumes:
2216 if self.cinder.volumes.get(volume["id"]).status != "available":
2217 break
2218 else: # all ready: break from while
2219 break
2220
2221 time.sleep(5)
2222 elapsed_time += 5
2223
2224 return elapsed_time
2225
2226 def _prepare_disk_for_vminstance(
2227 self,
2228 name: str,
2229 existing_vim_volumes: list,
2230 created_items: dict,
2231 vm_av_zone: str,
2232 block_device_mapping: dict,
2233 disk_list: list = None,
2234 ) -> None:
2235 """Prepare all volumes for new VM instance.
2236
2237 Args:
2238 name (str): Name of Instance
2239 existing_vim_volumes (list): List of existing volumes
2240 created_items (dict): All created items belongs to VM
2241 vm_av_zone (str): VM availability zone
2242 block_device_mapping (dict): Block devices to be attached to VM
2243 disk_list (list): List of disks
2244
2245 """
2246 # Create additional volumes in case these are present in disk_list
2247 base_disk_index = ord("b")
2248 boot_volume_id = None
2249 elapsed_time = 0
2250
2251 for disk in disk_list:
2252 if "image_id" in disk:
2253 # Root persistent volume
2254 base_disk_index = ord("a")
2255 boot_volume_id = self._prepare_persistent_root_volumes(
2256 name=name,
2257 vm_av_zone=vm_av_zone,
2258 disk=disk,
2259 base_disk_index=base_disk_index,
2260 block_device_mapping=block_device_mapping,
2261 existing_vim_volumes=existing_vim_volumes,
2262 created_items=created_items,
2263 )
2264 else:
2265 # Non-root persistent volume
2266 self._prepare_non_root_persistent_volumes(
2267 name=name,
2268 disk=disk,
2269 vm_av_zone=vm_av_zone,
2270 block_device_mapping=block_device_mapping,
2271 base_disk_index=base_disk_index,
2272 existing_vim_volumes=existing_vim_volumes,
2273 created_items=created_items,
2274 )
2275 base_disk_index += 1
2276
2277 # Wait until created volumes are with status available
2278 elapsed_time = self._wait_for_created_volumes_availability(
2279 elapsed_time, created_items
2280 )
2281 # Wait until existing volumes in vim are with status available
2282 elapsed_time = self._wait_for_existing_volumes_availability(
2283 elapsed_time, existing_vim_volumes
2284 )
2285 # If we exceeded the timeout rollback
2286 if elapsed_time >= volume_timeout:
2287 raise vimconn.VimConnException(
2288 "Timeout creating volumes for instance " + name,
2289 http_code=vimconn.HTTP_Request_Timeout,
2290 )
2291 if boot_volume_id:
2292 self.cinder.volumes.set_bootable(boot_volume_id, True)
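
# Illustrative disk_list (hypothetical values): the first item carries an
# image_id, so it becomes the persistent root volume "vda"; the second
# becomes the non-root volume "vdb" and is flagged to survive VM deletion:
#   disk_list = [
#       {"image_id": "img-uuid", "size": 10},
#       {"size": 20, "keep": True},
#   ]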
2293
2294 def _find_the_external_network_for_floating_ip(self):
2295 """Get the external network ip in order to create floating IP.
2296
2297 Returns:
2298 pool_id (str): External network pool ID
2299
2300 """
2301
2302 # Find the external network
2303 external_nets = list()
2304
2305 for net in self.neutron.list_networks()["networks"]:
2306 if net["router:external"]:
2307 external_nets.append(net)
2308
2309 if len(external_nets) == 0:
2310 raise vimconn.VimConnException(
2311 "Cannot create floating_ip automatically since "
2312 "no external network is present",
2313 http_code=vimconn.HTTP_Conflict,
2314 )
2315
2316 if len(external_nets) > 1:
2317 raise vimconn.VimConnException(
2318 "Cannot create floating_ip automatically since "
2319 "multiple external networks are present",
2320 http_code=vimconn.HTTP_Conflict,
2321 )
2322
2323 # Pool ID
2324 return external_nets[0].get("id")
2325
2326 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2327 """Trigger neutron to create a new floating IP using external network ID.
2328
2329 Args:
2330 param (dict): Input parameters to create a floating IP
2331 created_items (dict): All created items belongs to new VM instance
2332
2333 Raises:
2334
2335 VimConnException
2336 """
2337 try:
2338 self.logger.debug("Creating floating IP")
2339 new_floating_ip = self.neutron.create_floatingip(param)
2340 free_floating_ip = new_floating_ip["floatingip"]["id"]
2341 created_items["floating_ip:" + str(free_floating_ip)] = True
2342
2343 except Exception as e:
2344 raise vimconn.VimConnException(
2345 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2346 http_code=vimconn.HTTP_Conflict,
2347 )
2348
2349 def _create_floating_ip(
2350 self, floating_network: dict, server: object, created_items: dict
2351 ) -> None:
2352 """Get the available Pool ID and create a new floating IP.
2353
2354 Args:
2355 floating_network (dict): Dict including external network ID
2356 server (object): Server object
2357 created_items (dict): All created items belongs to new VM instance
2358
2359 """
2360
2361 # Pool_id is available
2362 if (
2363 isinstance(floating_network["floating_ip"], str)
2364 and floating_network["floating_ip"].lower() != "true"
2365 ):
2366 pool_id = floating_network["floating_ip"]
2367
2368 # Find the Pool_id
2369 else:
2370 pool_id = self._find_the_external_network_for_floating_ip()
2371
2372 param = {
2373 "floatingip": {
2374 "floating_network_id": pool_id,
2375 "tenant_id": server.tenant_id,
2376 }
2377 }
2378
2379 self._neutron_create_float_ip(param, created_items)
2380
2381 def _find_floating_ip(
2382 self,
2383 server: object,
2384 floating_ips: list,
2385 floating_network: dict,
2386 ) -> Optional[str]:
2387 """Find the available free floating IPs if there are.
2388
2389 Args:
2390 server (object): Server object
2391 floating_ips (list): List of floating IPs
2392 floating_network (dict): Details of floating network such as ID
2393
2394 Returns:
2395 free_floating_ip (str): Free floating ip address
2396
2397 """
2398 for fip in floating_ips:
2399 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2400 continue
2401
2402 if isinstance(floating_network["floating_ip"], str):
2403 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2404 continue
2405
2406 return fip["id"]
2407
2408 def _assign_floating_ip(
2409 self, free_floating_ip: str, floating_network: dict
2410 ) -> Dict:
2411 """Assign the free floating ip address to port.
2412
2413 Args:
2414 free_floating_ip (str): Floating IP to be assigned
2415 floating_network (dict): ID of floating network
2416
2417 Returns:
2418 fip (dict): Floating IP details
2419
2420 """
2421 # The vim_id key contains the neutron.port_id
2422 self.neutron.update_floatingip(
2423 free_floating_ip,
2424 {"floatingip": {"port_id": floating_network["vim_id"]}},
2425 )
2426 # Race-condition guard: wait 5 seconds before re-reading, so the caller can detect re-assignment to another VM
2427 time.sleep(5)
2428
2429 return self.neutron.show_floatingip(free_floating_ip)
2430
2431 def _get_free_floating_ip(
2432 self, server: object, floating_network: dict
2433 ) -> Optional[str]:
2434 """Get the free floating IP address.
2435
2436 Args:
2437 server (object): Server Object
2438 floating_network (dict): Floating network details
2439
2440 Returns:
2441 free_floating_ip (str): Free floating ip addr
2442
2443 """
2444
2445 floating_ips = self.neutron.list_floatingips().get("floatingips", [])  # list default, so it can be shuffled
2446
2447 # Randomize
2448 random.shuffle(floating_ips)
2449
2450 return self._find_floating_ip(server, floating_ips, floating_network)
2451
2452 def _prepare_external_network_for_vminstance(
2453 self,
2454 external_network: list,
2455 server: object,
2456 created_items: dict,
2457 vm_start_time: float,
2458 ) -> None:
2459 """Assign floating IP address for VM instance.
2460
2461 Args:
2462 external_network (list): ID of External network
2463 server (object): Server Object
2464 created_items (dict): All created items belongs to new VM instance
2465 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2466
2467 Raises:
2468 VimConnException
2469
2470 """
2471 for floating_network in external_network:
2472 try:
2473 assigned = False
2474 floating_ip_retries = 3
2475 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2476 # several times
2477 while not assigned:
2478
2479 free_floating_ip = self._get_free_floating_ip(
2480 server, floating_network
2481 )
2482
2483 if not free_floating_ip:
2484 self._create_floating_ip(
2485 floating_network, server, created_items
2486 )
2487 # The newly created IP is picked up on the next loop iteration
2488 continue
2487
2488 try:
2489 # For race condition ensure not already assigned
2490 fip = self.neutron.show_floatingip(free_floating_ip)
2491
2492 if fip["floatingip"].get("port_id"):
2493 continue
2494
2495 # Assign floating ip
2496 fip = self._assign_floating_ip(
2497 free_floating_ip, floating_network
2498 )
2499
2500 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2501 self.logger.warning(
2502 "floating_ip {} re-assigned to other port".format(
2503 free_floating_ip
2504 )
2505 )
2506 continue
2507
2508 self.logger.debug(
2509 "Assigned floating_ip {} to VM {}".format(
2510 free_floating_ip, server.id
2511 )
2512 )
2513
2514 assigned = True
2515
2516 except Exception as e:
2517 # OpenStack needs some time after VM creation to assign an IP, so retry on failure
2518 vm_status = self.nova.servers.get(server.id).status
2519
2520 if vm_status not in ("ACTIVE", "ERROR"):
2521 if time.time() - vm_start_time < server_timeout:
2522 time.sleep(5)
2523 continue
2524 elif floating_ip_retries > 0:
2525 floating_ip_retries -= 1
2526 continue
2527
2528 raise vimconn.VimConnException(
2529 "Cannot create floating_ip: {} {}".format(
2530 type(e).__name__, e
2531 ),
2532 http_code=vimconn.HTTP_Conflict,
2533 )
2534
2535 except Exception as e:
2536 if not floating_network["exit_on_floating_ip_error"]:
2537 self.logger.error("Cannot create floating_ip. %s", str(e))
2538 continue
2539
2540 raise
2541
2542 def _update_port_security_for_vminstance(
2543 self,
2544 no_secured_ports: list,
2545 server: object,
2546 ) -> None:
2547 """Updates the port security according to no_secured_ports list.
2548
2549 Args:
2550 no_secured_ports (list): List of ports that security will be disabled
2551 server (object): Server Object
2552
2553 Raises:
2554 VimConnException
2555
2556 """
2557 # Wait until the VM is active and then disable the port-security
2558 if no_secured_ports:
2559 self.__wait_for_vm(server.id, "ACTIVE")
2560
2561 for port in no_secured_ports:
2562 port_update = {
2563 "port": {"port_security_enabled": False, "security_groups": None}
2564 }
2565
2566 if port[1] == "allow-address-pairs":
2567 port_update = {
2568 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2569 }
2570
2571 try:
2572 self.neutron.update_port(port[0], port_update)
2573
2574 except Exception:
2575
2576 raise vimconn.VimConnException(
2577 "It was not possible to disable port security for port {}".format(
2578 port[0]
2579 )
2580 )
2581
2582 def new_vminstance(
2583 self,
2584 name: str,
2585 description: str,
2586 start: bool,
2587 image_id: str,
2588 flavor_id: str,
2589 affinity_group_list: list,
2590 net_list: list,
2591 cloud_config=None,
2592 disk_list=None,
2593 availability_zone_index=None,
2594 availability_zone_list=None,
2595 ) -> tuple:
2596 """Adds a VM instance to VIM.
2597
2598 Args:
2599 name (str): name of VM
2600 description (str): description
2601 start (bool): indicates if VM must start or boot in pause mode. Ignored
2602 image_id (str) image uuid
2603 flavor_id (str) flavor uuid
2604 affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
2605 net_list (list): list of interfaces, each one is a dictionary with:
2606 name: name of network
2607 net_id: network uuid to connect
2608 vpci: virtual PCI address to assign; ignored because OpenStack lacks support #TODO
2609 model: interface model, ignored #TODO
2610 mac_address: used for SR-IOV ifaces #TODO for other types
2611 use: 'data', 'bridge', 'mgmt'
2612 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2613 vim_id: filled/added by this function
2614 floating_ip: True/False (or it can be None)
2615 port_security: True/False
2616 cloud_config (dict): (optional) dictionary with:
2617 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2618 users: (optional) list of users to be inserted, each item is a dict with:
2619 name: (mandatory) user name,
2620 key-pairs: (optional) list of strings with the public key to be inserted to the user
2621 user-data: (optional) string is a text script to be passed directly to cloud-init
2622 config-files: (optional). List of files to be transferred. Each item is a dict with:
2623 dest: (mandatory) string with the destination absolute path
2624 encoding: (optional, by default text). Can be one of:
2625 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2626 content : (mandatory) string with the content of the file
2627 permissions: (optional) string with file permissions, typically octal notation '0644'
2628 owner: (optional) file owner, string with the format 'owner:group'
2629 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2630 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2631 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2632 size: (mandatory) string with the size of the disk in GB
2633 vim_id: (optional) should use this existing volume id
2634 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ required
2635 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2636 availability_zone_index is None
2637 #TODO ip, security groups
2638
2639 Returns:
2640 A tuple with the instance identifier and created_items or raises an exception on error
2641 created_items can be None or a dictionary where this method can include key-values that will be passed to
2642 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2643 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2644 as not present.
2645
2646 """
2647 self.logger.debug(
2648 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2649 image_id,
2650 flavor_id,
2651 str(net_list),
2652 )
2653
2654 try:
2655 server = None
2656 created_items = {}
2657 net_list_vim = []
2658 # list of external networks to be connected to instance, later on used to create floating_ip
2659 external_network = []
2660 # List of ports with port-security disabled
2661 no_secured_ports = []
2662 block_device_mapping = {}
2663 existing_vim_volumes = []
2664 server_group_id = None
2665 scheduler_hints = {}
2666
2667 # Check the Openstack Connection
2668 self._reload_connection()
2669
2670 # Prepare network list
2671 self._prepare_network_for_vminstance(
2672 name=name,
2673 net_list=net_list,
2674 created_items=created_items,
2675 net_list_vim=net_list_vim,
2676 external_network=external_network,
2677 no_secured_ports=no_secured_ports,
2678 )
2679
2680 # Cloud config
2681 config_drive, userdata = self._create_user_data(cloud_config)
2682
2683 # Get availability Zone
2684 vm_av_zone = self._get_vm_availability_zone(
2685 availability_zone_index, availability_zone_list
2686 )
2687
2688 if disk_list:
2689 # Prepare disks
2690 self._prepare_disk_for_vminstance(
2691 name=name,
2692 existing_vim_volumes=existing_vim_volumes,
2693 created_items=created_items,
2694 vm_av_zone=vm_av_zone,
2695 block_device_mapping=block_device_mapping,
2696 disk_list=disk_list,
2697 )
2698
2699 if affinity_group_list:
2700 # Only first id on the list will be used. Openstack restriction
2701 server_group_id = affinity_group_list[0]["affinity_group_id"]
2702 scheduler_hints["group"] = server_group_id
2703
2704 self.logger.debug(
2705 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2706 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2707 "block_device_mapping={}, server_group={})".format(
2708 name,
2709 image_id,
2710 flavor_id,
2711 net_list_vim,
2712 self.config.get("security_groups"),
2713 vm_av_zone,
2714 self.config.get("keypair"),
2715 userdata,
2716 config_drive,
2717 block_device_mapping,
2718 server_group_id,
2719 )
2720 )
2721
2722 # Create VM
2723 server = self.nova.servers.create(
2724 name=name,
2725 image=image_id,
2726 flavor=flavor_id,
2727 nics=net_list_vim,
2728 security_groups=self.config.get("security_groups"),
2729 # TODO remove security_groups in future versions. Already at neutron port
2730 availability_zone=vm_av_zone,
2731 key_name=self.config.get("keypair"),
2732 userdata=userdata,
2733 config_drive=config_drive,
2734 block_device_mapping=block_device_mapping,
2735 scheduler_hints=scheduler_hints,
2736 )
2737
2738 vm_start_time = time.time()
2739
2740 self._update_port_security_for_vminstance(no_secured_ports, server)
2741
2742 self._prepare_external_network_for_vminstance(
2743 external_network=external_network,
2744 server=server,
2745 created_items=created_items,
2746 vm_start_time=vm_start_time,
2747 )
2748
2749 return server.id, created_items
2750
2751 except Exception as e:
2752 server_id = None
2753 if server:
2754 server_id = server.id
2755
2756 try:
2757 created_items = self.remove_keep_tag_from_persistent_volumes(
2758 created_items
2759 )
2760
2761 self.delete_vminstance(server_id, created_items)
2762
2763 except Exception as e2:
2764 self.logger.error("new_vminstance rollback fail {}".format(e2))
2765
2766 self._format_exception(e)
2767
2768 @staticmethod
2769 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2770 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2771
2772 Args:
2773 created_items (dict): All created items belongs to VM
2774
2775 Returns:
2776 updated_created_items (dict): Dict which does not include keep flag for volumes.
2777
2778 """
2779 return {
2780 key.replace(":keep", ""): value for (key, value) in created_items.items()
2781 }
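
# Illustrative transformation (hypothetical volume ID):
#   {"volume:6c0f...:keep": True} -> {"volume:6c0f...": True}
# so the rollback path in new_vminstance can delete even keep-flagged volumes.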
2782
2783 def get_vminstance(self, vm_id):
2784 """Returns the VM instance information from VIM"""
2785 # self.logger.debug("Getting VM from VIM")
2786 try:
2787 self._reload_connection()
2788 server = self.nova.servers.find(id=vm_id)
2789 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2790
2791 return server.to_dict()
2792 except (
2793 ksExceptions.ClientException,
2794 nvExceptions.ClientException,
2795 nvExceptions.NotFound,
2796 ConnectionError,
2797 ) as e:
2798 self._format_exception(e)
2799
2800 def get_vminstance_console(self, vm_id, console_type="novnc"):  # default aligned with the docstring below
2801 """
2802 Get a console for the virtual machine
2803 Params:
2804 vm_id: uuid of the VM
2805 console_type, can be:
2806 "novnc" (by default), "xvpvnc" for VNC types,
2807 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2808 Returns dict with the console parameters:
2809 protocol: ssh, ftp, http, https, ...
2810 server: usually ip address
2811 port: the http, ssh, ... port
2812 suffix: extra text, e.g. the http path and query string
2813 """
2814 self.logger.debug("Getting VM CONSOLE from VIM")
2815
2816 try:
2817 self._reload_connection()
2818 server = self.nova.servers.find(id=vm_id)
2819
2820 if console_type is None or console_type == "novnc":
2821 console_dict = server.get_vnc_console("novnc")
2822 elif console_type == "xvpvnc":
2823 console_dict = server.get_vnc_console(console_type)
2824 elif console_type == "rdp-html5":
2825 console_dict = server.get_rdp_console(console_type)
2826 elif console_type == "spice-html5":
2827 console_dict = server.get_spice_console(console_type)
2828 else:
2829 raise vimconn.VimConnException(
2830 "console type '{}' not allowed".format(console_type),
2831 http_code=vimconn.HTTP_Bad_Request,
2832 )
2833
2834 console_dict1 = console_dict.get("console")
2835
2836 if console_dict1:
2837 console_url = console_dict1.get("url")
2838
2839 if console_url:
2840 # parse console_url
2841 protocol_index = console_url.find("//")
2842 suffix_index = (
2843 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2844 )
2845 port_index = (
2846 console_url[protocol_index + 2 : suffix_index].find(":")
2847 + protocol_index
2848 + 2
2849 )
2850
2851 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2852 return (
2853 -vimconn.HTTP_Internal_Server_Error,
2854 "Unexpected response from VIM",
2855 )
2856
2857 console_dict = {
2858 "protocol": console_url[0:protocol_index],
2859 "server": console_url[protocol_index + 2 : port_index],
2860 "port": int(console_url[port_index + 1 : suffix_index]),  # skip the ":" and return an int, as action_vminstance does
2861 "suffix": console_url[suffix_index + 1 :],
2862 }
2864
2865 return console_dict
2866 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2867 except (
2868 nvExceptions.NotFound,
2869 ksExceptions.ClientException,
2870 nvExceptions.ClientException,
2871 nvExceptions.BadRequest,
2872 ConnectionError,
2873 ) as e:
2874 self._format_exception(e)
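
# Illustrative parse of a console URL by the method above (hypothetical
# host/token):
#   console_url = "http://10.0.0.5:6080/vnc_auto.html?token=abc"
#   -> {"protocol": "http", "server": "10.0.0.5", "port": 6080,
#       "suffix": "vnc_auto.html?token=abc"}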
2875
2876 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2877 """Neutron delete ports by id.
2878 Args:
2879 k_id (str): Port id in the VIM
2880 """
2881 try:
2882
2883 port_dict = self.neutron.list_ports()
2884 existing_ports = [port["id"] for port in port_dict.get("ports", [])]
2885
2886 if k_id in existing_ports:
2887 self.neutron.delete_port(k_id)
2888
2889 except Exception as e:
2890
2891 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2892
2893 def _delete_volumes_by_id_wth_cinder(
2894 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2895 ) -> bool:
2896 """Cinder delete volume by id.
2897 Args:
2898 k (str): Full item name in created_items
2899 k_id (str): ID of the volume in the VIM
2900 volumes_to_hold (list): Volumes not to delete
2901 created_items (dict): All created items belongs to VM
2902 """
2903 try:
2904 if k_id in volumes_to_hold:
2905 return False  # held volumes are not deleted nor waited on
2906
2907 if self.cinder.volumes.get(k_id).status != "available":
2908 return True
2909
2910 else:
2911 self.cinder.volumes.delete(k_id)
2912 created_items[k] = None
2913
2914 except Exception as e:
2915 self.logger.error(
2916 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2917 )
2918
2919 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2920 """Neutron delete floating ip by id.
2921 Args:
2922 k (str): Full item name in created_items
2923 k_id (str): ID of floating ip in VIM
2924 created_items (dict): All created items belongs to VM
2925 """
2926 try:
2927 self.neutron.delete_floatingip(k_id)
2928 created_items[k] = None
2929
2930 except Exception as e:
2931 self.logger.error(
2932 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2933 )
2934
2935 @staticmethod
2936 def _get_item_name_id(k: str) -> Tuple[str, str]:
2937 k_item, _, k_id = k.partition(":")
2938 return k_item, k_id
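
# Illustrative key parsing by _get_item_name_id:
#   "port:3f6f..."        -> ("port", "3f6f...")
#   "volume:6c0f..."      -> ("volume", "6c0f...")
#   "floating_ip:9a1e..." -> ("floating_ip", "9a1e...")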
2939
2940 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2941 """Delete VM ports attached to the networks before deleting virtual machine.
2942 Args:
2943 created_items (dict): All created items belongs to VM
2944 """
2945
2946 for k, v in created_items.items():
2947 if not v: # skip already deleted
2948 continue
2949
2950 try:
2951 k_item, k_id = self._get_item_name_id(k)
2952 if k_item == "port":
2953 self._delete_ports_by_id_wth_neutron(k_id)
2954
2955 except Exception as e:
2956 self.logger.error(
2957 "Error deleting port: {}: {}".format(type(e).__name__, e)
2958 )
2959
2960 def _delete_created_items(
2961 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2962 ) -> bool:
2963 """Delete Volumes and floating ip if they exist in created_items."""
2964 for k, v in created_items.items():
2965 if not v: # skip already deleted
2966 continue
2967
2968 try:
2969 k_item, k_id = self._get_item_name_id(k)
2970
2971 if k_item == "volume":
2972
2973 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2974 k, k_id, volumes_to_hold, created_items
2975 )
2976
2977 if unavailable_vol:
2978 keep_waiting = True
2979
2980 elif k_item == "floating_ip":
2981
2982 self._delete_floating_ip_by_id(k, k_id, created_items)
2983
2984 except Exception as e:
2985 self.logger.error("Error deleting {}: {}".format(k, e))
2986
2987 return keep_waiting
2988
2989 @staticmethod
2990 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
2991 """Remove the volumes which has key flag from created_items
2992
2993 Args:
2994 created_items (dict): All created items belongs to VM
2995
2996 Returns:
2997 created_items (dict): created_items without the keep-flagged persistent volumes
2998 """
2999 return {
3000 key: value
3001 for (key, value) in created_items.items()
3002 if len(key.split(":")) == 2
3003 }
3004
3005 def delete_vminstance(
3006 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3007 ) -> None:
3008 """Removes a VM instance from VIM. Returns the old identifier.
3009 Args:
3010 vm_id (str): Identifier of VM instance
3011 created_items (dict): All created items belongs to VM
3012 volumes_to_hold (list): Volumes_to_hold
3013 """
3014 if created_items is None:
3015 created_items = {}
3016 if volumes_to_hold is None:
3017 volumes_to_hold = []
3018
3019 try:
3020 created_items = self._extract_items_wth_keep_flag_from_created_items(
3021 created_items
3022 )
3023
3024 self._reload_connection()
3025
3026 # Delete VM ports attached to the networks before the virtual machine
3027 if created_items:
3028 self._delete_vm_ports_attached_to_network(created_items)
3029
3030 if vm_id:
3031 self.nova.servers.delete(vm_id)
3032
3033 # Although detached, volumes should be in "available" status before deleting.
3034 # We ensure that in this loop
3035 keep_waiting = True
3036 elapsed_time = 0
3037
3038 while keep_waiting and elapsed_time < volume_timeout:
3039 keep_waiting = False
3040
3041 # Delete volumes and floating IP.
3042 keep_waiting = self._delete_created_items(
3043 created_items, volumes_to_hold, keep_waiting
3044 )
3045
3046 if keep_waiting:
3047 time.sleep(1)
3048 elapsed_time += 1
3049
3050 except (
3051 nvExceptions.NotFound,
3052 ksExceptions.ClientException,
3053 nvExceptions.ClientException,
3054 ConnectionError,
3055 ) as e:
3056 self._format_exception(e)
3057
3058 def refresh_vms_status(self, vm_list):
3059 """Get the status of the virtual machines and their interfaces/ports
3060 Params: the list of VM identifiers
3061 Returns a dictionary with:
3062 vm_id: #VIM id of this Virtual Machine
3063 status: #Mandatory. Text with one of:
3064 # DELETED (not found at vim)
3065 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3066 # OTHER (Vim reported other status not understood)
3067 # ERROR (VIM indicates an ERROR status)
3068 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3069 # CREATING (on building process), ERROR
3070 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
3071 #
3072 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3073 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3074 interfaces:
3075 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3076 mac_address: #Text format XX:XX:XX:XX:XX:XX
3077 vim_net_id: #network id where this interface is connected
3078 vim_interface_id: #interface/port VIM id
3079 ip_address: #null, or text with IPv4, IPv6 address
3080 compute_node: #identification of compute node where PF,VF interface is allocated
3081 pci: #PCI address of the NIC that hosts the PF,VF
3082 vlan: #physical VLAN used for VF
3083 """
3084 vm_dict = {}
3085 self.logger.debug(
3086 "refresh_vms status: Getting tenant VM instance information from VIM"
3087 )
3088
3089 for vm_id in vm_list:
3090 vm = {}
3091
3092 try:
3093 vm_vim = self.get_vminstance(vm_id)
3094
3095 if vm_vim["status"] in vmStatus2manoFormat:
3096 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3097 else:
3098 vm["status"] = "OTHER"
3099 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3100
3101 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3102 vm_vim.pop("user_data", None)
3103 vm["vim_info"] = self.serialize(vm_vim)
3104
3105 vm["interfaces"] = []
3106 if vm_vim.get("fault"):
3107 vm["error_msg"] = str(vm_vim["fault"])
3108
3109 # get interfaces
3110 try:
3111 self._reload_connection()
3112 port_dict = self.neutron.list_ports(device_id=vm_id)
3113
3114 for port in port_dict["ports"]:
3115 interface = {}
3116 interface["vim_info"] = self.serialize(port)
3117 interface["mac_address"] = port.get("mac_address")
3118 interface["vim_net_id"] = port["network_id"]
3119 interface["vim_interface_id"] = port["id"]
3120 # check if OS-EXT-SRV-ATTR:host is there,
3121 # in case of non-admin credentials, it will be missing
3122
3123 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3124 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3125
3126 interface["pci"] = None
3127
3128 # check if binding:profile is there,
3129 # in case of non-admin credentials, it will be missing
3130 if port.get("binding:profile"):
3131 if port["binding:profile"].get("pci_slot"):
3132 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3133 # the slot to 0x00
3134 # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
3135 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3136 pci = port["binding:profile"]["pci_slot"]
3137 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3138 interface["pci"] = pci
3139
3140 interface["vlan"] = None
3141
3142 if port.get("binding:vif_details"):
3143 interface["vlan"] = port["binding:vif_details"].get("vlan")
3144
3145 # Get the VLAN from the network when it is not present in the port, for old
3146 # OpenStack versions and for PT cases where the VLAN is needed
3147 if not interface["vlan"]:
3148 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3149 network = self.neutron.show_network(port["network_id"])
3150
3151 if (
3152 network["network"].get("provider:network_type")
3153 == "vlan"
3154 ):
3155 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3156 interface["vlan"] = network["network"].get(
3157 "provider:segmentation_id"
3158 )
3159
3160 ips = []
3161 # look for floating ip address
3162 try:
3163 floating_ip_dict = self.neutron.list_floatingips(
3164 port_id=port["id"]
3165 )
3166
3167 if floating_ip_dict.get("floatingips"):
3168 ips.append(
3169 floating_ip_dict["floatingips"][0].get(
3170 "floating_ip_address"
3171 )
3172 )
3173 except Exception:
3174 pass
3175
3176 for subnet in port["fixed_ips"]:
3177 ips.append(subnet["ip_address"])
3178
3179 interface["ip_address"] = ";".join(ips)
3180 vm["interfaces"].append(interface)
3181 except Exception as e:
3182 self.logger.error(
3183 "Error getting vm interface information {}: {}".format(
3184 type(e).__name__, e
3185 ),
3186 exc_info=True,
3187 )
3188 except vimconn.VimConnNotFoundException as e:
3189 self.logger.error("Exception getting vm status: %s", str(e))
3190 vm["status"] = "DELETED"
3191 vm["error_msg"] = str(e)
3192 except vimconn.VimConnException as e:
3193 self.logger.error("Exception getting vm status: %s", str(e))
3194 vm["status"] = "VIM_ERROR"
3195 vm["error_msg"] = str(e)
3196
3197 vm_dict[vm_id] = vm
3198
3199 return vm_dict
3200
3201 def action_vminstance(self, vm_id, action_dict, created_items={}):
3202 """Send and action over a VM instance from VIM
3203 Returns None or the console dict if the action was successfully sent to the VIM"""
3204 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3205
3206 try:
3207 self._reload_connection()
3208 server = self.nova.servers.find(id=vm_id)
3209
3210 if "start" in action_dict:
3211 if action_dict["start"] == "rebuild":
3212 server.rebuild()
3213 else:
3214 if server.status == "PAUSED":
3215 server.unpause()
3216 elif server.status == "SUSPENDED":
3217 server.resume()
3218 elif server.status == "SHUTOFF":
3219 server.start()
3220 else:
3221 self.logger.debug(
3222 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3223 )
3224 raise vimconn.VimConnException(
3225 "Cannot 'start' instance while it is in active state",
3226 http_code=vimconn.HTTP_Bad_Request,
3227 )
3228
3229 elif "pause" in action_dict:
3230 server.pause()
3231 elif "resume" in action_dict:
3232 server.resume()
3233 elif "shutoff" in action_dict or "shutdown" in action_dict:
3234 self.logger.debug("server status %s", server.status)
3235 if server.status == "ACTIVE":
3236 server.stop()
3237 else:
3238 self.logger.debug("ERROR: VM is not in Active state")
3239 raise vimconn.VimConnException(
3240 "VM is not in active state, stop operation is not allowed",
3241 http_code=vimconn.HTTP_Bad_Request,
3242 )
3243 elif "forceOff" in action_dict:
3244 server.stop() # TODO
3245 elif "terminate" in action_dict:
3246 server.delete()
3247 elif "createImage" in action_dict:
3248 server.create_image()
3249 # "path":path_schema,
3250 # "description":description_schema,
3251 # "name":name_schema,
3252 # "metadata":metadata_schema,
3253 # "imageRef": id_schema,
3254 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3255 elif "rebuild" in action_dict:
3256 server.rebuild(server.image["id"])
3257 elif "reboot" in action_dict:
3258 server.reboot() # reboot_type="SOFT"
3259 elif "console" in action_dict:
3260 console_type = action_dict["console"]
3261
3262 if console_type is None or console_type == "novnc":
3263 console_dict = server.get_vnc_console("novnc")
3264 elif console_type == "xvpvnc":
3265 console_dict = server.get_vnc_console(console_type)
3266 elif console_type == "rdp-html5":
3267 console_dict = server.get_rdp_console(console_type)
3268 elif console_type == "spice-html5":
3269 console_dict = server.get_spice_console(console_type)
3270 else:
3271 raise vimconn.VimConnException(
3272 "console type '{}' not allowed".format(console_type),
3273 http_code=vimconn.HTTP_Bad_Request,
3274 )
3275
3276 try:
3277 console_url = console_dict["console"]["url"]
3278 # parse console_url
3279 protocol_index = console_url.find("//")
3280 suffix_index = (
3281 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3282 )
3283 port_index = (
3284 console_url[protocol_index + 2 : suffix_index].find(":")
3285 + protocol_index
3286 + 2
3287 )
3288
3289 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3290 raise vimconn.VimConnException(
3291 "Unexpected response from VIM " + str(console_dict)
3292 )
3293
3294 console_dict2 = {
3295 "protocol": console_url[0:protocol_index],
3296 "server": console_url[protocol_index + 2 : port_index],
3297 "port": int(console_url[port_index + 1 : suffix_index]),
3298 "suffix": console_url[suffix_index + 1 :],
3299 }
3300
3301 return console_dict2
3302 except Exception:
3303 raise vimconn.VimConnException(
3304 "Unexpected response from VIM " + str(console_dict)
3305 )
3306
3307 return None
3308 except (
3309 ksExceptions.ClientException,
3310 nvExceptions.ClientException,
3311 nvExceptions.NotFound,
3312 ConnectionError,
3313 ) as e:
3314 self._format_exception(e)
3315 # TODO insert exception vimconn.HTTP_Unauthorized
3316
3317 # ###### VIO Specific Changes #########
3318 def _generate_vlanID(self):
3319 """
3320 Method to get unused vlanID
3321 Args:
3322 None
3323 Returns:
3324 vlanID
3325 """
3326 # Get used VLAN IDs
3327 usedVlanIDs = []
3328 networks = self.get_network_list()
3329
3330 for net in networks:
3331 if net.get("provider:segmentation_id"):
3332 usedVlanIDs.append(net.get("provider:segmentation_id"))
3333
3334 used_vlanIDs = set(usedVlanIDs)
3335
3336 # find unused VLAN ID
3337 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3338 try:
3339 start_vlanid, end_vlanid = map(
3340 int, vlanID_range.replace(" ", "").split("-")
3341 )
3342
3343 for vlanID in range(start_vlanid, end_vlanid + 1):
3344 if vlanID not in used_vlanIDs:
3345 return vlanID
3346 except Exception as exp:
3347 raise vimconn.VimConnException(
3348 "Exception {} occurred while generating VLAN ID.".format(exp)
3349 )
3350 else:
3351 raise vimconn.VimConnConflictException(
3352 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3353 self.config.get("dataplane_net_vlan_range")
3354 )
3355 )
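
# Illustrative config (hypothetical ranges): with
#   config["dataplane_net_vlan_range"] = ["3000-3100", "3300 - 3400"]
# each entry is normalized ("3300-3400") and scanned in order; the first
# VLAN ID not already used as a provider:segmentation_id is returned.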
3356
3357 def _generate_multisegment_vlanID(self):
3358 """
3359 Method to get unused vlanID
3360 Args:
3361 None
3362 Returns:
3363 vlanID
3364 """
3365 # Get used VLAN IDs
3366 usedVlanIDs = []
3367 networks = self.get_network_list()
3368 for net in networks:
3369 if net.get("provider:network_type") == "vlan" and net.get(
3370 "provider:segmentation_id"
3371 ):
3372 usedVlanIDs.append(net.get("provider:segmentation_id"))
3373 elif net.get("segments"):
3374 for segment in net.get("segments"):
3375 if segment.get("provider:network_type") == "vlan" and segment.get(
3376 "provider:segmentation_id"
3377 ):
3378 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3379
3380 used_vlanIDs = set(usedVlanIDs)
3381
3382 # find unused VLAN ID
3383 for vlanID_range in self.config.get("multisegment_vlan_range"):
3384 try:
3385 start_vlanid, end_vlanid = map(
3386 int, vlanID_range.replace(" ", "").split("-")
3387 )
3388
3389 for vlanID in range(start_vlanid, end_vlanid + 1):
3390 if vlanID not in used_vlanIDs:
3391 return vlanID
3392 except Exception as exp:
3393 raise vimconn.VimConnException(
3394 "Exception {} occurred while generating VLAN ID.".format(exp)
3395 )
3396 else:
3397 raise vimconn.VimConnConflictException(
3398 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3399 self.config.get("multisegment_vlan_range")
3400 )
3401 )
3402
3403 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3404 """
3405 Method to validate user given vlanID ranges
3406 Args: input_vlan_range (list), text_vlan_range (str)
3407 Returns: None
3408 """
3409 for vlanID_range in input_vlan_range:
3410 vlan_range = vlanID_range.replace(" ", "")
3411 # validate format
3412 vlanID_pattern = r"(\d)*-(\d)*$"
3413 match_obj = re.match(vlanID_pattern, vlan_range)
3414 if not match_obj:
3415 raise vimconn.VimConnConflictException(
3416 "Invalid VLAN range for {}: {}.You must provide "
3417 "'{}' in format [start_ID - end_ID].".format(
3418 text_vlan_range, vlanID_range, text_vlan_range
3419 )
3420 )
3421
3422 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3423 if start_vlanid <= 0:
3424 raise vimconn.VimConnConflictException(
3425 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3426 "networks valid IDs are 1 to 4094 ".format(
3427 text_vlan_range, vlanID_range
3428 )
3429 )
3430
3431 if end_vlanid > 4094:
3432 raise vimconn.VimConnConflictException(
3433 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3434 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3435 text_vlan_range, vlanID_range
3436 )
3437 )
3438
3439 if start_vlanid > end_vlanid:
3440 raise vimconn.VimConnConflictException(
3441 "Invalid VLAN range for {}: {}. You must provide '{}'"
3442 " in format start_ID - end_ID and start_ID < end_ID ".format(
3443 text_vlan_range, vlanID_range, text_vlan_range
3444 )
3445 )
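
# Examples of ranges accepted / rejected by the validation above:
#   "100-200"   -> valid
#   "1 - 4094"  -> valid (spaces are stripped)
#   "0-100"     -> rejected (start ID cannot be zero)
#   "4000-4095" -> rejected (end ID greater than 4094)
#   "300-200"   -> rejected (start ID greater than end ID)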
3446
3447 def delete_user(self, user_id):
3448 """Delete a user from openstack VIM
3449 Returns the user identifier"""
3450 if self.debug:
3451 print("osconnector: Deleting a user from VIM")
3452
3453 try:
3454 self._reload_connection()
3455 self.keystone.users.delete(user_id)
3456
3457 return 1, user_id
3458 except ksExceptions.ConnectionError as e:
3459 error_value = -vimconn.HTTP_Bad_Request
3460 error_text = (
3461 type(e).__name__
3462 + ": "
3463 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3464 )
3465 except ksExceptions.NotFound as e:
3466 error_value = -vimconn.HTTP_Not_Found
3467 error_text = (
3468 type(e).__name__
3469 + ": "
3470 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3471 )
3472 except ksExceptions.ClientException as e: # TODO remove
3473 error_value = -vimconn.HTTP_Bad_Request
3474 error_text = (
3475 type(e).__name__
3476 + ": "
3477 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3478 )
3479
3480 # TODO insert exception vimconn.HTTP_Unauthorized
3481 # if reaching here is because an exception
3482 self.logger.debug("delete_tenant " + error_text)
3483
3484 return error_value, error_text
3485
3486 def get_hosts_info(self):
3487 """Get the information of deployed hosts
3488 Returns the hosts content"""
3489 if self.debug:
3490 print("osconnector: Getting Host info from VIM")
3491
3492 try:
3493 h_list = []
3494 self._reload_connection()
3495 hypervisors = self.nova.hypervisors.list()
3496
3497 for hype in hypervisors:
3498 h_list.append(hype.to_dict())
3499
3500 return 1, {"hosts": h_list}
3501 except nvExceptions.NotFound as e:
3502 error_value = -vimconn.HTTP_Not_Found
3503 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3504 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3505 error_value = -vimconn.HTTP_Bad_Request
3506 error_text = (
3507 type(e).__name__
3508 + ": "
3509 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3510 )
3511
3512 # TODO insert exception vimconn.HTTP_Unauthorized
3513 # if reaching here is because an exception
3514 self.logger.debug("get_hosts_info " + error_text)
3515
3516 return error_value, error_text
3517
3518 def get_hosts(self, vim_tenant):
3519 """Get the hosts and deployed instances
3520 Returns the hosts content"""
3521 r, hype_dict = self.get_hosts_info()
3522
3523 if r < 0:
3524 return r, hype_dict
3525
3526 hypervisors = hype_dict["hosts"]
3527
3528 try:
3529 servers = self.nova.servers.list()
3530 for hype in hypervisors:
3531 for server in servers:
3532 if (
3533 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3534 == hype["hypervisor_hostname"]
3535 ):
3536 if "vm" in hype:
3537 hype["vm"].append(server.id)
3538 else:
3539 hype["vm"] = [server.id]
3540
3541 return 1, hype_dict
3542 except nvExceptions.NotFound as e:
3543 error_value = -vimconn.HTTP_Not_Found
3544 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3545 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3546 error_value = -vimconn.HTTP_Bad_Request
3547 error_text = (
3548 type(e).__name__
3549 + ": "
3550 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3551 )
3552
3553 # TODO insert exception vimconn.HTTP_Unauthorized
3554 # if reaching here is because an exception
3555 self.logger.debug("get_hosts " + error_text)
3556
3557 return error_value, error_text
3558
3559 def new_classification(self, name, ctype, definition):
3560 self.logger.debug(
3561 "Adding a new (Traffic) Classification to VIM, named %s", name
3562 )
3563
3564 try:
3565 new_class = None
3566 self._reload_connection()
3567
3568 if ctype not in supportedClassificationTypes:
3569 raise vimconn.VimConnNotSupportedException(
3570 "OpenStack VIM connector does not support provided "
3571 "Classification Type {}, supported ones are: {}".format(
3572 ctype, supportedClassificationTypes
3573 )
3574 )
3575
3576 if not self._validate_classification(ctype, definition):
3577 raise vimconn.VimConnException(
3578 "Incorrect Classification definition for the type specified."
3579 )
3580
3581 classification_dict = definition
3582 classification_dict["name"] = name
3583 new_class = self.neutron.create_sfc_flow_classifier(
3584 {"flow_classifier": classification_dict}
3585 )
3586
3587 return new_class["flow_classifier"]["id"]
3588 except (
3589 neExceptions.ConnectionFailed,
3590 ksExceptions.ClientException,
3591 neExceptions.NeutronException,
3592 ConnectionError,
3593 ) as e:
3594 self.logger.error("Creation of Classification failed.")
3595 self._format_exception(e)
3596
    def get_classification(self, class_id):
        self.logger.debug("Getting Classification %s from VIM", class_id)
        filter_dict = {"id": class_id}
        class_list = self.get_classification_list(filter_dict)

        if len(class_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Classification '{}' not found".format(class_id)
            )
        elif len(class_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one Classification with these criteria"
            )

        classification = class_list[0]

        return classification

    def get_classification_list(self, filter_dict={}):
        self.logger.debug(
            "Getting Classifications from VIM filter: '%s'", str(filter_dict)
        )

        try:
            filter_dict_os = filter_dict.copy()
            self._reload_connection()

            if self.api_version3 and "tenant_id" in filter_dict_os:
                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

            classification_dict = self.neutron.list_sfc_flow_classifiers(
                **filter_dict_os
            )
            classification_list = classification_dict["flow_classifiers"]
            self.__classification_os2mano(classification_list)

            return classification_list
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

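    # Illustrative sketch: with a Keystone v3 session, an OSM-style filter is
    # rewritten before being handed to Neutron (placeholder id; "vim_conn" is an
    # instantiated vimconnector):
    #
    #     vim_conn.get_classification_list({"tenant_id": "<project-uuid>"})
    #     # internally calls neutron.list_sfc_flow_classifiers(project_id="<project-uuid>")
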
    def delete_classification(self, class_id):
        self.logger.debug("Deleting Classification '%s' from VIM", class_id)

        try:
            self._reload_connection()
            self.neutron.delete_sfc_flow_classifier(class_id)

            return class_id
        except (
            neExceptions.ConnectionFailed,
            neExceptions.NeutronException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
        self.logger.debug(
            "Adding a new Service Function Instance to VIM, named '%s'", name
        )

        try:
            new_sfi = None
            self._reload_connection()
            correlation = None

            if sfc_encap:
                correlation = "nsh"

            if len(ingress_ports) != 1:
                raise vimconn.VimConnNotSupportedException(
                    "OpenStack VIM connector can only have 1 ingress port per SFI"
                )

            if len(egress_ports) != 1:
                raise vimconn.VimConnNotSupportedException(
                    "OpenStack VIM connector can only have 1 egress port per SFI"
                )

            sfi_dict = {
                "name": name,
                "ingress": ingress_ports[0],
                "egress": egress_ports[0],
                "service_function_parameters": {"correlation": correlation},
            }
            new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})

            return new_sfi["port_pair"]["id"]
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            if new_sfi:
                try:
                    self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
                except Exception:
                    self.logger.error(
                        "Creation of Service Function Instance failed, with "
                        "subsequent deletion failure as well."
                    )

            self._format_exception(e)

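    # Illustrative usage sketch: creating a port pair (SFI) from one ingress and
    # one egress Neutron port. "vim_conn" stands for an instantiated vimconnector
    # and the UUIDs are placeholders:
    #
    #     sfi_id = vim_conn.new_sfi(
    #         name="sfi-vnf1",
    #         ingress_ports=["<ingress-port-uuid>"],
    #         egress_ports=["<egress-port-uuid>"],
    #         sfc_encap=True,  # maps to service_function_parameters {"correlation": "nsh"}
    #     )
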
    def get_sfi(self, sfi_id):
        self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
        filter_dict = {"id": sfi_id}
        sfi_list = self.get_sfi_list(filter_dict)

        if len(sfi_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Service Function Instance '{}' not found".format(sfi_id)
            )
        elif len(sfi_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one Service Function Instance with these criteria"
            )

        sfi = sfi_list[0]

        return sfi

    def get_sfi_list(self, filter_dict={}):
        self.logger.debug(
            "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
        )

        try:
            self._reload_connection()
            filter_dict_os = filter_dict.copy()

            if self.api_version3 and "tenant_id" in filter_dict_os:
                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

            sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
            sfi_list = sfi_dict["port_pairs"]
            self.__sfi_os2mano(sfi_list)

            return sfi_list
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_sfi(self, sfi_id):
        self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)

        try:
            self._reload_connection()
            self.neutron.delete_sfc_port_pair(sfi_id)

            return sfi_id
        except (
            neExceptions.ConnectionFailed,
            neExceptions.NeutronException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_sf(self, name, sfis, sfc_encap=True):
        self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)

        try:
            new_sf = None
            self._reload_connection()
            # correlation = None
            # if sfc_encap:
            #     correlation = "nsh"

            for instance in sfis:
                sfi = self.get_sfi(instance)

                if sfi.get("sfc_encap") != sfc_encap:
                    raise vimconn.VimConnNotSupportedException(
                        "OpenStack VIM connector requires all SFIs of the "
                        "same SF to share the same SFC Encapsulation"
                    )

            sf_dict = {"name": name, "port_pairs": sfis}
            new_sf = self.neutron.create_sfc_port_pair_group(
                {"port_pair_group": sf_dict}
            )

            return new_sf["port_pair_group"]["id"]
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            if new_sf:
                try:
                    self.neutron.delete_sfc_port_pair_group(
                        new_sf["port_pair_group"]["id"]
                    )
                except Exception:
                    self.logger.error(
                        "Creation of Service Function failed, with "
                        "subsequent deletion failure as well."
                    )

            self._format_exception(e)

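    # Illustrative usage sketch: grouping two SFIs (port pairs) into one SF
    # (port pair group), e.g. two instances of the same VNF for load sharing.
    # "vim_conn" and the UUIDs are placeholders:
    #
    #     sf_id = vim_conn.new_sf(
    #         name="sf-firewall",
    #         sfis=["<port-pair-uuid-1>", "<port-pair-uuid-2>"],
    #         sfc_encap=True,
    #     )
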
    def get_sf(self, sf_id):
        self.logger.debug("Getting Service Function %s from VIM", sf_id)
        filter_dict = {"id": sf_id}
        sf_list = self.get_sf_list(filter_dict)

        if len(sf_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Service Function '{}' not found".format(sf_id)
            )
        elif len(sf_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one Service Function with these criteria"
            )

        sf = sf_list[0]

        return sf

    def get_sf_list(self, filter_dict={}):
        self.logger.debug(
            "Getting Service Functions from VIM filter: '%s'", str(filter_dict)
        )

        try:
            self._reload_connection()
            filter_dict_os = filter_dict.copy()

            if self.api_version3 and "tenant_id" in filter_dict_os:
                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

            sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
            sf_list = sf_dict["port_pair_groups"]
            self.__sf_os2mano(sf_list)

            return sf_list
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_sf(self, sf_id):
        self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)

        try:
            self._reload_connection()
            self.neutron.delete_sfc_port_pair_group(sf_id)

            return sf_id
        except (
            neExceptions.ConnectionFailed,
            neExceptions.NeutronException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
        self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)

        try:
            new_sfp = None
            self._reload_connection()
            # In networking-sfc, MPLS is the legacy correlation type and is
            # used when no full SFC encapsulation is intended
            correlation = "mpls"

            if sfc_encap:
                correlation = "nsh"

            sfp_dict = {
                "name": name,
                "flow_classifiers": classifications,
                "port_pair_groups": sfs,
                "chain_parameters": {"correlation": correlation},
            }

            if spi:
                sfp_dict["chain_id"] = spi

            new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})

            return new_sfp["port_chain"]["id"]
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            if new_sfp:
                try:
                    self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
                except Exception:
                    self.logger.error(
                        "Creation of Service Function Path failed, with "
                        "subsequent deletion failure as well."
                    )

            self._format_exception(e)

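    # Illustrative usage sketch: chaining classifications and SFs into an SFP
    # (port chain). "vim_conn", the UUIDs and the SPI value are placeholders:
    #
    #     sfp_id = vim_conn.new_sfp(
    #         name="sfp-web",
    #         classifications=["<flow-classifier-uuid>"],
    #         sfs=["<port-pair-group-uuid-1>", "<port-pair-group-uuid-2>"],
    #         sfc_encap=True,  # "nsh" correlation; False selects legacy "mpls"
    #         spi=100,         # optional, becomes the Neutron chain_id
    #     )
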
    def get_sfp(self, sfp_id):
        self.logger.debug("Getting Service Function Path %s from VIM", sfp_id)

        filter_dict = {"id": sfp_id}
        sfp_list = self.get_sfp_list(filter_dict)

        if len(sfp_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Service Function Path '{}' not found".format(sfp_id)
            )
        elif len(sfp_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one Service Function Path with these criteria"
            )

        sfp = sfp_list[0]

        return sfp

    def get_sfp_list(self, filter_dict={}):
        self.logger.debug(
            "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
        )

        try:
            self._reload_connection()
            filter_dict_os = filter_dict.copy()

            if self.api_version3 and "tenant_id" in filter_dict_os:
                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

            sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
            sfp_list = sfp_dict["port_chains"]
            self.__sfp_os2mano(sfp_list)

            return sfp_list
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_sfp(self, sfp_id):
        self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)

        try:
            self._reload_connection()
            self.neutron.delete_sfc_port_chain(sfp_id)

            return sfp_id
        except (
            neExceptions.ConnectionFailed,
            neExceptions.NeutronException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def refresh_sfps_status(self, sfp_list):
        """Get the status of the service function paths
        Params: the list of sfp identifiers
        Returns a dictionary with:
            vm_id:        # VIM id of this service function path
            status:       # Mandatory. Text with one of:
                          #   DELETED (not found at vim)
                          #   VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                          #   OTHER (Vim reported other status not understood)
                          #   ERROR (VIM indicates an ERROR status)
                          #   ACTIVE,
                          #   CREATING (on building process)
            error_msg:    # Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:     # Text with plain information obtained from vim (yaml.safe_dump)
        """
        sfp_dict = {}
        self.logger.debug(
            "refresh_sfps status: Getting tenant SFP information from VIM"
        )

        for sfp_id in sfp_list:
            sfp = {}

            try:
                sfp_vim = self.get_sfp(sfp_id)

                if sfp_vim["spi"]:
                    sfp["status"] = vmStatus2manoFormat["ACTIVE"]
                else:
                    sfp["status"] = "OTHER"
                    sfp["error_msg"] = "VIM status reported " + sfp["status"]

                sfp["vim_info"] = self.serialize(sfp_vim)

                if sfp_vim.get("fault"):
                    sfp["error_msg"] = str(sfp_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting sfp status: %s", str(e))
                sfp["status"] = "DELETED"
                sfp["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting sfp status: %s", str(e))
                sfp["status"] = "VIM_ERROR"
                sfp["error_msg"] = str(e)

            sfp_dict[sfp_id] = sfp

        return sfp_dict

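    # Illustrative sketch of the mapping returned by refresh_sfps_status() for
    # one reachable and one vanished SFP (ids and text are placeholders):
    #
    #     {
    #         "<sfp-uuid-1>": {"status": "ACTIVE", "vim_info": "..."},
    #         "<sfp-uuid-2>": {"status": "DELETED",
    #                          "error_msg": "Service Function Path '...' not found"},
    #     }
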
    def refresh_sfis_status(self, sfi_list):
        """Get the status of the service function instances
        Params: the list of sfi identifiers
        Returns a dictionary with:
            vm_id:        # VIM id of this service function instance
            status:       # Mandatory. Text with one of:
                          #   DELETED (not found at vim)
                          #   VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                          #   OTHER (Vim reported other status not understood)
                          #   ERROR (VIM indicates an ERROR status)
                          #   ACTIVE,
                          #   CREATING (on building process)
            error_msg:    # Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:     # Text with plain information obtained from vim (yaml.safe_dump)
        """
        sfi_dict = {}
        self.logger.debug(
            "refresh_sfis status: Getting tenant sfi information from VIM"
        )

        for sfi_id in sfi_list:
            sfi = {}

            try:
                sfi_vim = self.get_sfi(sfi_id)

                if sfi_vim:
                    sfi["status"] = vmStatus2manoFormat["ACTIVE"]
                else:
                    sfi["status"] = "OTHER"
                    sfi["error_msg"] = "VIM status reported " + sfi["status"]

                sfi["vim_info"] = self.serialize(sfi_vim)

                if sfi_vim.get("fault"):
                    sfi["error_msg"] = str(sfi_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting sfi status: %s", str(e))
                sfi["status"] = "DELETED"
                sfi["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting sfi status: %s", str(e))
                sfi["status"] = "VIM_ERROR"
                sfi["error_msg"] = str(e)

            sfi_dict[sfi_id] = sfi

        return sfi_dict

    def refresh_sfs_status(self, sf_list):
        """Get the status of the service functions
        Params: the list of sf identifiers
        Returns a dictionary with:
            vm_id:        # VIM id of this service function
            status:       # Mandatory. Text with one of:
                          #   DELETED (not found at vim)
                          #   VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                          #   OTHER (Vim reported other status not understood)
                          #   ERROR (VIM indicates an ERROR status)
                          #   ACTIVE,
                          #   CREATING (on building process)
            error_msg:    # Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:     # Text with plain information obtained from vim (yaml.safe_dump)
        """
        sf_dict = {}
        self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")

        for sf_id in sf_list:
            sf = {}

            try:
                sf_vim = self.get_sf(sf_id)

                if sf_vim:
                    sf["status"] = vmStatus2manoFormat["ACTIVE"]
                else:
                    sf["status"] = "OTHER"
                    # report the status just set above; sf_vim is empty here,
                    # so it cannot be indexed for a status
                    sf["error_msg"] = "VIM status reported " + sf["status"]

                sf["vim_info"] = self.serialize(sf_vim)

                if sf_vim.get("fault"):
                    sf["error_msg"] = str(sf_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting sf status: %s", str(e))
                sf["status"] = "DELETED"
                sf["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting sf status: %s", str(e))
                sf["status"] = "VIM_ERROR"
                sf["error_msg"] = str(e)

            sf_dict[sf_id] = sf

        return sf_dict

    def refresh_classifications_status(self, classification_list):
        """Get the status of the classifications
        Params: the list of classification identifiers
        Returns a dictionary with:
            vm_id:        # VIM id of this classifier
            status:       # Mandatory. Text with one of:
                          #   DELETED (not found at vim)
                          #   VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                          #   OTHER (Vim reported other status not understood)
                          #   ERROR (VIM indicates an ERROR status)
                          #   ACTIVE,
                          #   CREATING (on building process)
            error_msg:    # Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:     # Text with plain information obtained from vim (yaml.safe_dump)
        """
        classification_dict = {}
        self.logger.debug(
            "refresh_classifications status: Getting tenant classification information from VIM"
        )

        for classification_id in classification_list:
            classification = {}

            try:
                classification_vim = self.get_classification(classification_id)

                if classification_vim:
                    classification["status"] = vmStatus2manoFormat["ACTIVE"]
                else:
                    classification["status"] = "OTHER"
                    classification["error_msg"] = (
                        "VIM status reported " + classification["status"]
                    )

                classification["vim_info"] = self.serialize(classification_vim)

                if classification_vim.get("fault"):
                    classification["error_msg"] = str(classification_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting classification status: %s", str(e))
                classification["status"] = "DELETED"
                classification["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting classification status: %s", str(e))
                classification["status"] = "VIM_ERROR"
                classification["error_msg"] = str(e)

            classification_dict[classification_id] = classification

        return classification_dict

    def new_affinity_group(self, affinity_group_data):
        """Adds a server group to VIM
        affinity_group_data contains a dictionary with information, keys:
            name: name in VIM for the server group
            type: affinity or anti-affinity
            scope: Only nfvi-node allowed
        Returns the server group identifier"""
        self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))

        try:
            name = affinity_group_data["name"]
            policy = affinity_group_data["type"]

            self._reload_connection()
            new_server_group = self.nova.server_groups.create(name, policy)

            return new_server_group.id
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)

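    # Illustrative usage sketch, with placeholder values ("vim_conn" is an
    # instantiated vimconnector):
    #
    #     group_id = vim_conn.new_affinity_group(
    #         {"name": "vnf1-anti-affinity", "type": "anti-affinity", "scope": "nfvi-node"}
    #     )
    #
    # Only "name" and "type" are read above; "type" is passed to Nova as the
    # server-group policy.
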
    def get_affinity_group(self, affinity_group_id):
        """Obtain server group details from the VIM. Returns the server group details as a dict"""
        self.logger.debug("Getting server group '%s'", affinity_group_id)
        try:
            self._reload_connection()
            server_group = self.nova.server_groups.find(id=affinity_group_id)

            return server_group.to_dict()
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_affinity_group(self, affinity_group_id):
        """Deletes a server group from the VIM. Returns the old affinity_group_id"""
        self.logger.debug("Deleting server group '%s'", affinity_group_id)
        try:
            self._reload_connection()
            self.nova.server_groups.delete(affinity_group_id)

            return affinity_group_id
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_vdu_state(self, vm_id):
        """
        Get the state of a vdu
        param:
            vm_id: ID of an instance
        """
        self.logger.debug("Getting the status of VM")
        self.logger.debug("VIM VM ID %s", vm_id)
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)
        server_dict = server.to_dict()
        vdu_data = [
            server_dict["status"],
            server_dict["flavor"]["id"],
            server_dict["OS-EXT-SRV-ATTR:host"],
            server_dict["OS-EXT-AZ:availability_zone"],
        ]
        self.logger.debug("vdu_data %s", vdu_data)
        return vdu_data

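    # Illustrative sketch of what get_vdu_state() returns (placeholder values),
    # i.e. [status, flavor id, compute host, availability zone]:
    #
    #     ["ACTIVE", "<flavor-uuid>", "compute-0", "nova"]
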
    def check_compute_availability(self, host, server_flavor_details):
        self._reload_connection()
        hypervisor_search = self.nova.hypervisors.search(
            hypervisor_match=host, servers=True
        )
        for hypervisor in hypervisor_search:
            hypervisor_id = hypervisor.to_dict()["id"]
            hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
            hypervisor_dict = hypervisor_details.to_dict()
            resources_available = [
                hypervisor_dict["free_ram_mb"],
                hypervisor_dict["disk_available_least"],
                hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
            ]
            # the host qualifies only if every available resource (RAM, disk,
            # vCPUs) strictly exceeds what the flavor requires
            compute_available = all(
                x > y for x, y in zip(resources_available, server_flavor_details)
            )
            if compute_available:
                return host
        # implicitly returns None when no hypervisor has enough free capacity

    def check_availability_zone(
        self, old_az, server_flavor_details, old_host, host=None
    ):
        self._reload_connection()
        az_check = {"zone_check": False, "compute_availability": None}
        aggregates_list = self.nova.aggregates.list()
        for aggregate in aggregates_list:
            aggregate_dict = aggregate.to_dict()
            if aggregate_dict["availability_zone"] == old_az:
                hosts_list = aggregate_dict["hosts"]
                if host is not None:
                    # a target host was requested: it must belong to the
                    # original availability zone and have free capacity
                    if host in hosts_list:
                        az_check["zone_check"] = True
                        available_compute_id = self.check_compute_availability(
                            host, server_flavor_details
                        )
                        if available_compute_id is not None:
                            az_check["compute_availability"] = available_compute_id
                else:
                    # no target host given: pick the first host of the zone,
                    # other than the current one, with enough free capacity
                    for check_host in hosts_list:
                        if check_host != old_host:
                            available_compute_id = self.check_compute_availability(
                                check_host, server_flavor_details
                            )
                            if available_compute_id is not None:
                                az_check["zone_check"] = True
                                az_check["compute_availability"] = available_compute_id
                                break
                    else:
                        # for-else: no break happened, so the zone matched even
                        # though no host had enough capacity
                        az_check["zone_check"] = True
        return az_check

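    # Illustrative sketch of a check_availability_zone() result when the zone
    # matches and "compute-1" has enough capacity (placeholder host name):
    #
    #     {"zone_check": True, "compute_availability": "compute-1"}
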
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu
        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to
        """
        self._reload_connection()
        vm_state = False
        instance_state = self.get_vdu_state(vm_id)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]
        try:
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]
            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            if available_compute_id is not None:
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )
                state = "MIGRATING"
                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                changed_compute_host = self.get_vdu_state(vm_id)[2]
                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )
                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                        available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)

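    # Illustrative usage sketch (placeholder values): live-migrate a VDU to a
    # specific compute host; omit compute_host to let the connector pick any
    # other host in the same availability zone with enough capacity:
    #
    #     state, target = vim_conn.migrate_instance("<server-uuid>", compute_host="compute-1")
    #     # -> ("MIGRATING", "compute-1") once the server is ACTIVE on the new host
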
    def resize_instance(self, vm_id, new_flavor_id):
        """
        Resize an instance to the given flavor
        param:
            vm_id: ID of an instance
            new_flavor_id: flavor id to resize to
        Returns the status of the resized instance
        """
        self._reload_connection()
        self.logger.debug("resize the flavor of an instance")
        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
        try:
            if instance_status in ("ACTIVE", "SHUTOFF"):
                if old_flavor_disk > new_flavor_disk:
                    raise nvExceptions.BadRequest(
                        400,
                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                    )
                else:
                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
                    if vm_state:
                        instance_resized_status = self.confirm_resize(vm_id)
                        return instance_resized_status
                    else:
                        raise nvExceptions.Conflict(
                            409,
                            message="Cannot 'resize': vm_state is in ERROR",
                        )
            else:
                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.Conflict(
                    409,
                    message="Cannot 'resize' an instance that is not in ACTIVE or SHUTOFF state",
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)

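    # Illustrative usage sketch (placeholder ids): grow a VDU to a larger
    # flavor; the call blocks until Nova reports VERIFY_RESIZE, confirms the
    # resize, and then returns the final instance status:
    #
    #     status = vim_conn.resize_instance("<server-uuid>", "<bigger-flavor-uuid>")
    #     # -> e.g. "ACTIVE"
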
    def confirm_resize(self, vm_id):
        """
        Confirm the resize of an instance
        param:
            vm_id: ID of an instance
        """
        self._reload_connection()
        self.nova.servers.confirm_resize(server=vm_id)
        if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
            self.__wait_for_vm(vm_id, "ACTIVE")
        instance_status = self.get_vdu_state(vm_id)[0]
        return instance_status