Fix Bug 2180: add VIO NUMA support with unit tests
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Map nova server (VM) statuses to the equivalent openmano status names.
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Map neutron network statuses to the equivalent openmano status names.
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# Flow classifier types this connector's SFC code accepts.
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
# seconds to wait for a server (VM) to reach its target state
server_timeout = 1800
86
87
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that tolerates dict subclasses.

    OpenStack client libraries frequently return custom subclasses of
    dict, which the stock YAML safe dumper refuses to serialize
    (reference issue 142 of pyyaml).  This dumper downcasts such
    objects to plain dicts before representing them.
    """

    def represent_data(self, data):
        # Plain dicts pass through untouched; subclasses are rebuilt as
        # plain dicts so the safe dumper accepts them.
        if isinstance(data, dict) and data.__class__ != dict:
            data = dict(data.items())

        return super().represent_data(data)
97
98
99 class vimconnector(vimconn.VimConnector):
100 def __init__(
101 self,
102 uuid,
103 name,
104 tenant_id,
105 tenant_name,
106 url,
107 url_admin=None,
108 user=None,
109 passwd=None,
110 log_level=None,
111 config={},
112 persistent_info={},
113 ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
117 """
118 api_version = config.get("APIversion")
119
120 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124 )
125
126 vim_type = config.get("vim_type")
127
128 if vim_type and vim_type not in ("vio", "VIO"):
129 raise vimconn.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132 )
133
134 if config.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self._validate_vlan_ranges(
137 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138 )
139
140 if config.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self._validate_vlan_ranges(
143 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144 )
145
146 vimconn.VimConnector.__init__(
147 self,
148 uuid,
149 name,
150 tenant_id,
151 tenant_name,
152 url,
153 url_admin,
154 user,
155 passwd,
156 log_level,
157 config,
158 )
159
160 if self.config.get("insecure") and self.config.get("ca_cert"):
161 raise vimconn.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
163 )
164
165 self.verify = True
166
167 if self.config.get("insecure"):
168 self.verify = False
169
170 if self.config.get("ca_cert"):
171 self.verify = self.config.get("ca_cert")
172
173 if not url:
174 raise TypeError("url param can not be NoneType")
175
176 self.persistent_info = persistent_info
177 self.availability_zone = persistent_info.get("availability_zone", None)
178 self.session = persistent_info.get("session", {"reload_client": True})
179 self.my_tenant_id = self.session.get("my_tenant_id")
180 self.nova = self.session.get("nova")
181 self.neutron = self.session.get("neutron")
182 self.cinder = self.session.get("cinder")
183 self.glance = self.session.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self.keystone = self.session.get("keystone")
186 self.api_version3 = self.session.get("api_version3")
187 self.vim_type = self.config.get("vim_type")
188
189 if self.vim_type:
190 self.vim_type = self.vim_type.upper()
191
192 if self.config.get("use_internal_endpoint"):
193 self.endpoint_type = "internalURL"
194 else:
195 self.endpoint_type = None
196
197 logging.getLogger("urllib3").setLevel(logging.WARNING)
198 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 logging.getLogger("novaclient").setLevel(logging.WARNING)
200 self.logger = logging.getLogger("ro.vim.openstack")
201
202 # allow security_groups to be a list or a single string
203 if isinstance(self.config.get("security_groups"), str):
204 self.config["security_groups"] = [self.config["security_groups"]]
205
206 self.security_groups_id = None
207
208 # ###### VIO Specific Changes #########
209 if self.vim_type == "VIO":
210 self.logger = logging.getLogger("ro.vim.vio")
211
212 if log_level:
213 self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individuals parameters.
217 Throw KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
227 Throw KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
    def _reload_connection(self):
        """Called before any operation, it check if credentials has changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure

        Rebuilds every cached openstack client (keystone, nova, neutron,
        cinder, glance) when self.session["reload_client"] is set, then
        stores them in the persistent session dict so they survive across
        vimconnector instantiations.
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.session["reload_client"]:
            # Determine identity API version: explicit config wins, else it
            # is inferred from the auth_url suffix.
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # Only fall back to the "default" domain when the user did
                # not configure any domain id/name explicitly.
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            # self.verify is True/False/CA-bundle-path (set in __init__).
            sess = session.Session(auth=auth, verify=self.verify)
            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            # Best effort: a missing project id is logged but does not abort
            # the reload; later operations will fail with a clearer error.
            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            # glanceclient takes an explicit endpoint, so the internal one
            # must be resolved by hand when internal endpoints are requested.
            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #    endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availablity zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
391
392 def __net_os2mano(self, net_list_dict):
393 """Transform the net openstack format to mano format
394 net_list_dict can be a list of dict or a single dict"""
395 if type(net_list_dict) is dict:
396 net_list_ = (net_list_dict,)
397 elif type(net_list_dict) is list:
398 net_list_ = net_list_dict
399 else:
400 raise TypeError("param net_list_dict must be a list or a dictionary")
401 for net in net_list_:
402 if net.get("provider:network_type") == "vlan":
403 net["type"] = "data"
404 else:
405 net["type"] = "bridge"
406
407 def __classification_os2mano(self, class_list_dict):
408 """Transform the openstack format (Flow Classifier) to mano format
409 (Classification) class_list_dict can be a list of dict or a single dict
410 """
411 if isinstance(class_list_dict, dict):
412 class_list_ = [class_list_dict]
413 elif isinstance(class_list_dict, list):
414 class_list_ = class_list_dict
415 else:
416 raise TypeError("param class_list_dict must be a list or a dictionary")
417 for classification in class_list_:
418 id = classification.pop("id")
419 name = classification.pop("name")
420 description = classification.pop("description")
421 project_id = classification.pop("project_id")
422 tenant_id = classification.pop("tenant_id")
423 original_classification = copy.deepcopy(classification)
424 classification.clear()
425 classification["ctype"] = "legacy_flow_classifier"
426 classification["definition"] = original_classification
427 classification["id"] = id
428 classification["name"] = name
429 classification["description"] = description
430 classification["project_id"] = project_id
431 classification["tenant_id"] = tenant_id
432
433 def __sfi_os2mano(self, sfi_list_dict):
434 """Transform the openstack format (Port Pair) to mano format (SFI)
435 sfi_list_dict can be a list of dict or a single dict
436 """
437 if isinstance(sfi_list_dict, dict):
438 sfi_list_ = [sfi_list_dict]
439 elif isinstance(sfi_list_dict, list):
440 sfi_list_ = sfi_list_dict
441 else:
442 raise TypeError("param sfi_list_dict must be a list or a dictionary")
443
444 for sfi in sfi_list_:
445 sfi["ingress_ports"] = []
446 sfi["egress_ports"] = []
447
448 if sfi.get("ingress"):
449 sfi["ingress_ports"].append(sfi["ingress"])
450
451 if sfi.get("egress"):
452 sfi["egress_ports"].append(sfi["egress"])
453
454 del sfi["ingress"]
455 del sfi["egress"]
456 params = sfi.get("service_function_parameters")
457 sfc_encap = False
458
459 if params:
460 correlation = params.get("correlation")
461
462 if correlation:
463 sfc_encap = True
464
465 sfi["sfc_encap"] = sfc_encap
466 del sfi["service_function_parameters"]
467
468 def __sf_os2mano(self, sf_list_dict):
469 """Transform the openstack format (Port Pair Group) to mano format (SF)
470 sf_list_dict can be a list of dict or a single dict
471 """
472 if isinstance(sf_list_dict, dict):
473 sf_list_ = [sf_list_dict]
474 elif isinstance(sf_list_dict, list):
475 sf_list_ = sf_list_dict
476 else:
477 raise TypeError("param sf_list_dict must be a list or a dictionary")
478
479 for sf in sf_list_:
480 del sf["port_pair_group_parameters"]
481 sf["sfis"] = sf["port_pairs"]
482 del sf["port_pairs"]
483
484 def __sfp_os2mano(self, sfp_list_dict):
485 """Transform the openstack format (Port Chain) to mano format (SFP)
486 sfp_list_dict can be a list of dict or a single dict
487 """
488 if isinstance(sfp_list_dict, dict):
489 sfp_list_ = [sfp_list_dict]
490 elif isinstance(sfp_list_dict, list):
491 sfp_list_ = sfp_list_dict
492 else:
493 raise TypeError("param sfp_list_dict must be a list or a dictionary")
494
495 for sfp in sfp_list_:
496 params = sfp.pop("chain_parameters")
497 sfc_encap = False
498
499 if params:
500 correlation = params.get("correlation")
501
502 if correlation:
503 sfc_encap = True
504
505 sfp["sfc_encap"] = sfc_encap
506 sfp["spi"] = sfp.pop("chain_id")
507 sfp["classifications"] = sfp.pop("flow_classifiers")
508 sfp["service_functions"] = sfp.pop("port_pair_groups")
509
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        """Return True when the classification type/definition is supported.

        Currently a placeholder: only 'legacy_flow_classifier' is supported
        and the definition itself is not inspected.
        """
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.
518
519 def _format_exception(self, exception):
520 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
521 message_error = str(exception)
522 tip = ""
523
524 if isinstance(
525 exception,
526 (
527 neExceptions.NetworkNotFoundClient,
528 nvExceptions.NotFound,
529 ksExceptions.NotFound,
530 gl1Exceptions.HTTPNotFound,
531 ),
532 ):
533 raise vimconn.VimConnNotFoundException(
534 type(exception).__name__ + ": " + message_error
535 )
536 elif isinstance(
537 exception,
538 (
539 HTTPException,
540 gl1Exceptions.HTTPException,
541 gl1Exceptions.CommunicationError,
542 ConnectionError,
543 ksExceptions.ConnectionError,
544 neExceptions.ConnectionFailed,
545 ),
546 ):
547 if type(exception).__name__ == "SSLError":
548 tip = " (maybe option 'insecure' must be added to the VIM)"
549
550 raise vimconn.VimConnConnectionException(
551 "Invalid URL or credentials{}: {}".format(tip, message_error)
552 )
553 elif isinstance(
554 exception,
555 (
556 KeyError,
557 nvExceptions.BadRequest,
558 ksExceptions.BadRequest,
559 ),
560 ):
561 raise vimconn.VimConnException(
562 type(exception).__name__ + ": " + message_error
563 )
564 elif isinstance(
565 exception,
566 (
567 nvExceptions.ClientException,
568 ksExceptions.ClientException,
569 neExceptions.NeutronException,
570 ),
571 ):
572 raise vimconn.VimConnUnexpectedResponse(
573 type(exception).__name__ + ": " + message_error
574 )
575 elif isinstance(exception, nvExceptions.Conflict):
576 raise vimconn.VimConnConflictException(
577 type(exception).__name__ + ": " + message_error
578 )
579 elif isinstance(exception, vimconn.VimConnException):
580 raise exception
581 else: # ()
582 self.logger.error("General Exception " + message_error, exc_info=True)
583
584 raise vimconn.VimConnConnectionException(
585 type(exception).__name__ + ": " + message_error
586 )
587
588 def _get_ids_from_name(self):
589 """
590 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
591 :return: None
592 """
593 # get tenant_id if only tenant_name is supplied
594 self._reload_connection()
595
596 if not self.my_tenant_id:
597 raise vimconn.VimConnConnectionException(
598 "Error getting tenant information from name={} id={}".format(
599 self.tenant_name, self.tenant_id
600 )
601 )
602
603 if self.config.get("security_groups") and not self.security_groups_id:
604 # convert from name to id
605 neutron_sg_list = self.neutron.list_security_groups(
606 tenant_id=self.my_tenant_id
607 )["security_groups"]
608
609 self.security_groups_id = []
610 for sg in self.config.get("security_groups"):
611 for neutron_sg in neutron_sg_list:
612 if sg in (neutron_sg["id"], neutron_sg["name"]):
613 self.security_groups_id.append(neutron_sg["id"])
614 break
615 else:
616 self.security_groups_id = None
617
618 raise vimconn.VimConnConnectionException(
619 "Not found security group {} for this tenant".format(sg)
620 )
621
    def check_vim_connectivity(self):
        """Probe the VIM by listing networks; raises a vimconn exception on failure."""
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})
625
626 def get_tenant_list(self, filter_dict={}):
627 """Obtain tenants of VIM
628 filter_dict can contain the following keys:
629 name: filter by tenant name
630 id: filter by tenant uuid/id
631 <other VIM specific>
632 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
633 """
634 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
635
636 try:
637 self._reload_connection()
638
639 if self.api_version3:
640 project_class_list = self.keystone.projects.list(
641 name=filter_dict.get("name")
642 )
643 else:
644 project_class_list = self.keystone.tenants.findall(**filter_dict)
645
646 project_list = []
647
648 for project in project_class_list:
649 if filter_dict.get("id") and filter_dict["id"] != project.id:
650 continue
651
652 project_list.append(project.to_dict())
653
654 return project_list
655 except (
656 ksExceptions.ConnectionError,
657 ksExceptions.ClientException,
658 ConnectionError,
659 ) as e:
660 self._format_exception(e)
661
662 def new_tenant(self, tenant_name, tenant_description):
663 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
664 self.logger.debug("Adding a new tenant name: %s", tenant_name)
665
666 try:
667 self._reload_connection()
668
669 if self.api_version3:
670 project = self.keystone.projects.create(
671 tenant_name,
672 self.config.get("project_domain_id", "default"),
673 description=tenant_description,
674 is_domain=False,
675 )
676 else:
677 project = self.keystone.tenants.create(tenant_name, tenant_description)
678
679 return project.id
680 except (
681 ksExceptions.ConnectionError,
682 ksExceptions.ClientException,
683 ksExceptions.BadRequest,
684 ConnectionError,
685 ) as e:
686 self._format_exception(e)
687
688 def delete_tenant(self, tenant_id):
689 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
690 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
691
692 try:
693 self._reload_connection()
694
695 if self.api_version3:
696 self.keystone.projects.delete(tenant_id)
697 else:
698 self.keystone.tenants.delete(tenant_id)
699
700 return tenant_id
701 except (
702 ksExceptions.ConnectionError,
703 ksExceptions.ClientException,
704 ksExceptions.NotFound,
705 ConnectionError,
706 ) as e:
707 self._format_exception(e)
708
709 def new_network(
710 self,
711 net_name,
712 net_type,
713 ip_profile=None,
714 shared=False,
715 provider_network_profile=None,
716 ):
717 """Adds a tenant network to VIM
718 Params:
719 'net_name': name of the network
720 'net_type': one of:
721 'bridge': overlay isolated network
722 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
723 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
724 'ip_profile': is a dict containing the IP parameters of the network
725 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
726 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
727 'gateway_address': (Optional) ip_schema, that is X.X.X.X
728 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
729 'dhcp_enabled': True or False
730 'dhcp_start_address': ip_schema, first IP to grant
731 'dhcp_count': number of IPs to grant.
732 'shared': if this network can be seen/use by other tenants/organization
733 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
734 physical-network: physnet-label}
735 Returns a tuple with the network identifier and created_items, or raises an exception on error
736 created_items can be None or a dictionary where this method can include key-values that will be passed to
737 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
738 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
739 as not present.
740 """
741 self.logger.debug(
742 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
743 )
744 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
745
746 try:
747 vlan = None
748
749 if provider_network_profile:
750 vlan = provider_network_profile.get("segmentation-id")
751
752 new_net = None
753 created_items = {}
754 self._reload_connection()
755 network_dict = {"name": net_name, "admin_state_up": True}
756
757 if net_type in ("data", "ptp") or provider_network_profile:
758 provider_physical_network = None
759
760 if provider_network_profile and provider_network_profile.get(
761 "physical-network"
762 ):
763 provider_physical_network = provider_network_profile.get(
764 "physical-network"
765 )
766
767 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
768 # or not declared, just ignore the checking
769 if (
770 isinstance(
771 self.config.get("dataplane_physical_net"), (tuple, list)
772 )
773 and provider_physical_network
774 not in self.config["dataplane_physical_net"]
775 ):
776 raise vimconn.VimConnConflictException(
777 "Invalid parameter 'provider-network:physical-network' "
778 "for network creation. '{}' is not one of the declared "
779 "list at VIM_config:dataplane_physical_net".format(
780 provider_physical_network
781 )
782 )
783
784 # use the default dataplane_physical_net
785 if not provider_physical_network:
786 provider_physical_network = self.config.get(
787 "dataplane_physical_net"
788 )
789
790 # if it is non empty list, use the first value. If it is a string use the value directly
791 if (
792 isinstance(provider_physical_network, (tuple, list))
793 and provider_physical_network
794 ):
795 provider_physical_network = provider_physical_network[0]
796
797 if not provider_physical_network:
798 raise vimconn.VimConnConflictException(
799 "missing information needed for underlay networks. Provide "
800 "'dataplane_physical_net' configuration at VIM or use the NS "
801 "instantiation parameter 'provider-network.physical-network'"
802 " for the VLD"
803 )
804
805 if not self.config.get("multisegment_support"):
806 network_dict[
807 "provider:physical_network"
808 ] = provider_physical_network
809
810 if (
811 provider_network_profile
812 and "network-type" in provider_network_profile
813 ):
814 network_dict[
815 "provider:network_type"
816 ] = provider_network_profile["network-type"]
817 else:
818 network_dict["provider:network_type"] = self.config.get(
819 "dataplane_network_type", "vlan"
820 )
821
822 if vlan:
823 network_dict["provider:segmentation_id"] = vlan
824 else:
825 # Multi-segment case
826 segment_list = []
827 segment1_dict = {
828 "provider:physical_network": "",
829 "provider:network_type": "vxlan",
830 }
831 segment_list.append(segment1_dict)
832 segment2_dict = {
833 "provider:physical_network": provider_physical_network,
834 "provider:network_type": "vlan",
835 }
836
837 if vlan:
838 segment2_dict["provider:segmentation_id"] = vlan
839 elif self.config.get("multisegment_vlan_range"):
840 vlanID = self._generate_multisegment_vlanID()
841 segment2_dict["provider:segmentation_id"] = vlanID
842
843 # else
844 # raise vimconn.VimConnConflictException(
845 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
846 # network")
847 segment_list.append(segment2_dict)
848 network_dict["segments"] = segment_list
849
850 # VIO Specific Changes. It needs a concrete VLAN
851 if self.vim_type == "VIO" and vlan is None:
852 if self.config.get("dataplane_net_vlan_range") is None:
853 raise vimconn.VimConnConflictException(
854 "You must provide 'dataplane_net_vlan_range' in format "
855 "[start_ID - end_ID] at VIM_config for creating underlay "
856 "networks"
857 )
858
859 network_dict["provider:segmentation_id"] = self._generate_vlanID()
860
861 network_dict["shared"] = shared
862
863 if self.config.get("disable_network_port_security"):
864 network_dict["port_security_enabled"] = False
865
866 if self.config.get("neutron_availability_zone_hints"):
867 hints = self.config.get("neutron_availability_zone_hints")
868
869 if isinstance(hints, str):
870 hints = [hints]
871
872 network_dict["availability_zone_hints"] = hints
873
874 new_net = self.neutron.create_network({"network": network_dict})
875 # print new_net
876 # create subnetwork, even if there is no profile
877
878 if not ip_profile:
879 ip_profile = {}
880
881 if not ip_profile.get("subnet_address"):
882 # Fake subnet is required
883 subnet_rand = random.randint(0, 255)
884 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
885
886 if "ip_version" not in ip_profile:
887 ip_profile["ip_version"] = "IPv4"
888
889 subnet = {
890 "name": net_name + "-subnet",
891 "network_id": new_net["network"]["id"],
892 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
893 "cidr": ip_profile["subnet_address"],
894 }
895
896 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
897 if ip_profile.get("gateway_address"):
898 subnet["gateway_ip"] = ip_profile["gateway_address"]
899 else:
900 subnet["gateway_ip"] = None
901
902 if ip_profile.get("dns_address"):
903 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
904
905 if "dhcp_enabled" in ip_profile:
906 subnet["enable_dhcp"] = (
907 False
908 if ip_profile["dhcp_enabled"] == "false"
909 or ip_profile["dhcp_enabled"] is False
910 else True
911 )
912
913 if ip_profile.get("dhcp_start_address"):
914 subnet["allocation_pools"] = []
915 subnet["allocation_pools"].append(dict())
916 subnet["allocation_pools"][0]["start"] = ip_profile[
917 "dhcp_start_address"
918 ]
919
920 if ip_profile.get("dhcp_count"):
921 # parts = ip_profile["dhcp_start_address"].split(".")
922 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
923 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
924 ip_int += ip_profile["dhcp_count"] - 1
925 ip_str = str(netaddr.IPAddress(ip_int))
926 subnet["allocation_pools"][0]["end"] = ip_str
927
928 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
929 self.neutron.create_subnet({"subnet": subnet})
930
931 if net_type == "data" and self.config.get("multisegment_support"):
932 if self.config.get("l2gw_support"):
933 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
934 for l2gw in l2gw_list:
935 l2gw_conn = {
936 "l2_gateway_id": l2gw["id"],
937 "network_id": new_net["network"]["id"],
938 "segmentation_id": str(vlanID),
939 }
940 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
941 {"l2_gateway_connection": l2gw_conn}
942 )
943 created_items[
944 "l2gwconn:"
945 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
946 ] = True
947
948 return new_net["network"]["id"], created_items
949 except Exception as e:
950 # delete l2gw connections (if any) before deleting the network
951 for k, v in created_items.items():
952 if not v: # skip already deleted
953 continue
954
955 try:
956 k_item, _, k_id = k.partition(":")
957
958 if k_item == "l2gwconn":
959 self.neutron.delete_l2_gateway_connection(k_id)
960 except Exception as e2:
961 self.logger.error(
962 "Error deleting l2 gateway connection: {}: {}".format(
963 type(e2).__name__, e2
964 )
965 )
966
967 if new_net:
968 self.neutron.delete_network(new_net["network"]["id"])
969
970 self._format_exception(e)
971
972 def get_network_list(self, filter_dict={}):
973 """Obtain tenant networks of VIM
974 Filter_dict can be:
975 name: network name
976 id: network uuid
977 shared: boolean
978 tenant_id: tenant
979 admin_state_up: boolean
980 status: 'ACTIVE'
981 Returns the network list of dictionaries
982 """
983 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
984
985 try:
986 self._reload_connection()
987 filter_dict_os = filter_dict.copy()
988
989 if self.api_version3 and "tenant_id" in filter_dict_os:
990 # TODO check
991 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
992
993 net_dict = self.neutron.list_networks(**filter_dict_os)
994 net_list = net_dict["networks"]
995 self.__net_os2mano(net_list)
996
997 return net_list
998 except (
999 neExceptions.ConnectionFailed,
1000 ksExceptions.ClientException,
1001 neExceptions.NeutronException,
1002 ConnectionError,
1003 ) as e:
1004 self._format_exception(e)
1005
1006 def get_network(self, net_id):
1007 """Obtain details of network from VIM
1008 Returns the network information from a network id"""
1009 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1010 filter_dict = {"id": net_id}
1011 net_list = self.get_network_list(filter_dict)
1012
1013 if len(net_list) == 0:
1014 raise vimconn.VimConnNotFoundException(
1015 "Network '{}' not found".format(net_id)
1016 )
1017 elif len(net_list) > 1:
1018 raise vimconn.VimConnConflictException(
1019 "Found more than one network with this criteria"
1020 )
1021
1022 net = net_list[0]
1023 subnets = []
1024 for subnet_id in net.get("subnets", ()):
1025 try:
1026 subnet = self.neutron.show_subnet(subnet_id)
1027 except Exception as e:
1028 self.logger.error(
1029 "osconnector.get_network(): Error getting subnet %s %s"
1030 % (net_id, str(e))
1031 )
1032 subnet = {"id": subnet_id, "fault": str(e)}
1033
1034 subnets.append(subnet)
1035
1036 net["subnets"] = subnets
1037 net["encapsulation"] = net.get("provider:network_type")
1038 net["encapsulation_type"] = net.get("provider:network_type")
1039 net["segmentation_id"] = net.get("provider:segmentation_id")
1040 net["encapsulation_id"] = net.get("provider:segmentation_id")
1041
1042 return net
1043
1044 def delete_network(self, net_id, created_items=None):
1045 """
1046 Removes a tenant network from VIM and its associated elements
1047 :param net_id: VIM identifier of the network, provided by method new_network
1048 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1049 Returns the network identifier or raises an exception upon error or when network is not found
1050 """
1051 self.logger.debug("Deleting network '%s' from VIM", net_id)
1052
1053 if created_items is None:
1054 created_items = {}
1055
1056 try:
1057 self._reload_connection()
1058 # delete l2gw connections (if any) before deleting the network
1059 for k, v in created_items.items():
1060 if not v: # skip already deleted
1061 continue
1062
1063 try:
1064 k_item, _, k_id = k.partition(":")
1065 if k_item == "l2gwconn":
1066 self.neutron.delete_l2_gateway_connection(k_id)
1067 except Exception as e:
1068 self.logger.error(
1069 "Error deleting l2 gateway connection: {}: {}".format(
1070 type(e).__name__, e
1071 )
1072 )
1073
1074 # delete VM ports attached to this networks before the network
1075 ports = self.neutron.list_ports(network_id=net_id)
1076 for p in ports["ports"]:
1077 try:
1078 self.neutron.delete_port(p["id"])
1079 except Exception as e:
1080 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1081
1082 self.neutron.delete_network(net_id)
1083
1084 return net_id
1085 except (
1086 neExceptions.ConnectionFailed,
1087 neExceptions.NetworkNotFoundClient,
1088 neExceptions.NeutronException,
1089 ksExceptions.ClientException,
1090 neExceptions.NeutronException,
1091 ConnectionError,
1092 ) as e:
1093 self._format_exception(e)
1094
1095 def refresh_nets_status(self, net_list):
1096 """Get the status of the networks
1097 Params: the list of network identifiers
1098 Returns a dictionary with:
1099 net_id: #VIM id of this network
1100 status: #Mandatory. Text with one of:
1101 # DELETED (not found at vim)
1102 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1103 # OTHER (Vim reported other status not understood)
1104 # ERROR (VIM indicates an ERROR status)
1105 # ACTIVE, INACTIVE, DOWN (admin down),
1106 # BUILD (on building process)
1107 #
1108 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1109 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1110 """
1111 net_dict = {}
1112
1113 for net_id in net_list:
1114 net = {}
1115
1116 try:
1117 net_vim = self.get_network(net_id)
1118
1119 if net_vim["status"] in netStatus2manoFormat:
1120 net["status"] = netStatus2manoFormat[net_vim["status"]]
1121 else:
1122 net["status"] = "OTHER"
1123 net["error_msg"] = "VIM status reported " + net_vim["status"]
1124
1125 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1126 net["status"] = "DOWN"
1127
1128 net["vim_info"] = self.serialize(net_vim)
1129
1130 if net_vim.get("fault"): # TODO
1131 net["error_msg"] = str(net_vim["fault"])
1132 except vimconn.VimConnNotFoundException as e:
1133 self.logger.error("Exception getting net status: %s", str(e))
1134 net["status"] = "DELETED"
1135 net["error_msg"] = str(e)
1136 except vimconn.VimConnException as e:
1137 self.logger.error("Exception getting net status: %s", str(e))
1138 net["status"] = "VIM_ERROR"
1139 net["error_msg"] = str(e)
1140 net_dict[net_id] = net
1141 return net_dict
1142
1143 def get_flavor(self, flavor_id):
1144 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1145 self.logger.debug("Getting flavor '%s'", flavor_id)
1146
1147 try:
1148 self._reload_connection()
1149 flavor = self.nova.flavors.find(id=flavor_id)
1150 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1151
1152 return flavor.to_dict()
1153 except (
1154 nvExceptions.NotFound,
1155 nvExceptions.ClientException,
1156 ksExceptions.ClientException,
1157 ConnectionError,
1158 ) as e:
1159 self._format_exception(e)
1160
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that match the flavor description
        Returns the flavor_id or raises a vimconnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
        vimconnNotFoundException is raised
        """
        # exact match unless the operator opted in to reusing "close enough" flavors
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            # sentinel "worst candidate"; any real flavor compares smaller
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
            # if len(numas) > 1:
            #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
            # numa=numas[0]
            # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                # flavors carrying EPA extra specs are skipped (EPA match not implemented)
                if epa:
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    # some deployments report swap as "" instead of 0
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    # NOTE(review): tuples compare lexicographically (ram first, then
                    # vcpus, ...), so a "closer" candidate is not guaranteed to provide
                    # same-or-more of EVERY resource — confirm this is intended
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
1229
1230 @staticmethod
1231 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1232 """Process resource quota and fill up extra_specs.
1233 Args:
1234 quota (dict): Keeping the quota of resurces
1235 prefix (str) Prefix
1236 extra_specs (dict) Dict to be filled to be used during flavor creation
1237
1238 """
1239 if "limit" in quota:
1240 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1241
1242 if "reserve" in quota:
1243 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1244
1245 if "shares" in quota:
1246 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1247 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1248
1249 @staticmethod
1250 def process_numa_memory(
1251 numa: dict, node_id: Optional[int], extra_specs: dict
1252 ) -> None:
1253 """Set the memory in extra_specs.
1254 Args:
1255 numa (dict): A dictionary which includes numa information
1256 node_id (int): ID of numa node
1257 extra_specs (dict): To be filled.
1258
1259 """
1260 if not numa.get("memory"):
1261 return
1262 memory_mb = numa["memory"] * 1024
1263 memory = "hw:numa_mem.{}".format(node_id)
1264 extra_specs[memory] = int(memory_mb)
1265
1266 @staticmethod
1267 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1268 """Set the cpu in extra_specs.
1269 Args:
1270 numa (dict): A dictionary which includes numa information
1271 node_id (int): ID of numa node
1272 extra_specs (dict): To be filled.
1273
1274 """
1275 if not numa.get("vcpu"):
1276 return
1277 vcpu = numa["vcpu"]
1278 cpu = "hw:numa_cpus.{}".format(node_id)
1279 vcpu = ",".join(map(str, vcpu))
1280 extra_specs[cpu] = vcpu
1281
1282 @staticmethod
1283 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1284 """Fill up extra_specs if numa has paired-threads.
1285 Args:
1286 numa (dict): A dictionary which includes numa information
1287 extra_specs (dict): To be filled.
1288
1289 Returns:
1290 threads (int) Number of virtual cpus
1291
1292 """
1293 if not numa.get("paired-threads"):
1294 return
1295
1296 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1297 threads = numa["paired-threads"] * 2
1298 extra_specs["hw:cpu_thread_policy"] = "require"
1299 extra_specs["hw:cpu_policy"] = "dedicated"
1300 return threads
1301
1302 @staticmethod
1303 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1304 """Fill up extra_specs if numa has cores.
1305 Args:
1306 numa (dict): A dictionary which includes numa information
1307 extra_specs (dict): To be filled.
1308
1309 Returns:
1310 cores (int) Number of virtual cpus
1311
1312 """
1313 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1314 # architecture, or a non-SMT architecture will be emulated
1315 if not numa.get("cores"):
1316 return
1317 cores = numa["cores"]
1318 extra_specs["hw:cpu_thread_policy"] = "isolate"
1319 extra_specs["hw:cpu_policy"] = "dedicated"
1320 return cores
1321
1322 @staticmethod
1323 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1324 """Fill up extra_specs if numa has threads.
1325 Args:
1326 numa (dict): A dictionary which includes numa information
1327 extra_specs (dict): To be filled.
1328
1329 Returns:
1330 threads (int) Number of virtual cpus
1331
1332 """
1333 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1334 if not numa.get("threads"):
1335 return
1336 threads = numa["threads"]
1337 extra_specs["hw:cpu_thread_policy"] = "prefer"
1338 extra_specs["hw:cpu_policy"] = "dedicated"
1339 return threads
1340
1341 def _process_numa_parameters_of_flavor(
1342 self, numas: List, extra_specs: Dict
1343 ) -> None:
1344 """Process numa parameters and fill up extra_specs.
1345
1346 Args:
1347 numas (list): List of dictionary which includes numa information
1348 extra_specs (dict): To be filled.
1349
1350 """
1351 numa_nodes = len(numas)
1352 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1353 cpu_cores, cpu_threads = 0, 0
1354
1355 if self.vim_type == "VIO":
1356 self.process_vio_numa_nodes(numa_nodes, extra_specs)
1357
1358 for numa in numas:
1359 if "id" in numa:
1360 node_id = numa["id"]
1361 # overwrite ram and vcpus
1362 # check if key "memory" is present in numa else use ram value at flavor
1363 self.process_numa_memory(numa, node_id, extra_specs)
1364 self.process_numa_vcpu(numa, node_id, extra_specs)
1365
1366 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1367 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1368
1369 if "paired-threads" in numa:
1370 threads = self.process_numa_paired_threads(numa, extra_specs)
1371 cpu_threads += threads
1372
1373 elif "cores" in numa:
1374 cores = self.process_numa_cores(numa, extra_specs)
1375 cpu_cores += cores
1376
1377 elif "threads" in numa:
1378 threads = self.process_numa_threads(numa, extra_specs)
1379 cpu_threads += threads
1380
1381 if cpu_cores:
1382 extra_specs["hw:cpu_cores"] = str(cpu_cores)
1383 if cpu_threads:
1384 extra_specs["hw:cpu_threads"] = str(cpu_threads)
1385
1386 @staticmethod
1387 def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1388 """According to number of numa nodes, updates the extra_specs for VIO.
1389
1390 Args:
1391
1392 numa_nodes (int): List keeps the numa node numbers
1393 extra_specs (dict): Extra specs dict to be updated
1394
1395 """
1396 # If there is not any numa, numas_nodes equals to 0.
1397 if not numa_nodes:
1398 extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
1399
1400 # If there are several numas, we do not define specific affinity.
1401 extra_specs["vmware:latency_sensitivity_level"] = "high"
1402
1403 def _change_flavor_name(
1404 self, name: str, name_suffix: int, flavor_data: dict
1405 ) -> str:
1406 """Change the flavor name if the name already exists.
1407
1408 Args:
1409 name (str): Flavor name to be checked
1410 name_suffix (int): Suffix to be appended to name
1411 flavor_data (dict): Flavor dict
1412
1413 Returns:
1414 name (str): New flavor name to be used
1415
1416 """
1417 # Get used names
1418 fl = self.nova.flavors.list()
1419 fl_names = [f.name for f in fl]
1420
1421 while name in fl_names:
1422 name_suffix += 1
1423 name = flavor_data["name"] + "-" + str(name_suffix)
1424
1425 return name
1426
1427 def _process_extended_config_of_flavor(
1428 self, extended: dict, extra_specs: dict
1429 ) -> None:
1430 """Process the extended dict to fill up extra_specs.
1431 Args:
1432
1433 extended (dict): Keeping the extra specification of flavor
1434 extra_specs (dict) Dict to be filled to be used during flavor creation
1435
1436 """
1437 quotas = {
1438 "cpu-quota": "cpu",
1439 "mem-quota": "memory",
1440 "vif-quota": "vif",
1441 "disk-io-quota": "disk_io",
1442 }
1443
1444 page_sizes = {
1445 "LARGE": "large",
1446 "SMALL": "small",
1447 "SIZE_2MB": "2MB",
1448 "SIZE_1GB": "1GB",
1449 "PREFER_LARGE": "any",
1450 }
1451
1452 policies = {
1453 "cpu-pinning-policy": "hw:cpu_policy",
1454 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1455 "mem-policy": "hw:numa_mempolicy",
1456 }
1457
1458 numas = extended.get("numas")
1459 if numas:
1460 self._process_numa_parameters_of_flavor(numas, extra_specs)
1461
1462 for quota, item in quotas.items():
1463 if quota in extended.keys():
1464 self.process_resource_quota(extended.get(quota), item, extra_specs)
1465
1466 # Set the mempage size as specified in the descriptor
1467 if extended.get("mempage-size"):
1468 if extended["mempage-size"] in page_sizes.keys():
1469 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1470 else:
1471 # Normally, validations in NBI should not allow to this condition.
1472 self.logger.debug(
1473 "Invalid mempage-size %s. Will be ignored",
1474 extended.get("mempage-size"),
1475 )
1476
1477 for policy, hw_policy in policies.items():
1478 if extended.get(policy):
1479 extra_specs[hw_policy] = extended[policy].lower()
1480
1481 @staticmethod
1482 def _get_flavor_details(flavor_data: dict) -> Tuple:
1483 """Returns the details of flavor
1484 Args:
1485 flavor_data (dict): Dictionary that includes required flavor details
1486
1487 Returns:
1488 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1489
1490 """
1491 return (
1492 flavor_data.get("ram", 64),
1493 flavor_data.get("vcpus", 1),
1494 {},
1495 flavor_data.get("extended"),
1496 )
1497
    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        if change_name_if_used is True, it will change name in case of conflict,
        because it is not supported name repetition.

        Args:
            flavor_data (dict): Flavor details to be processed
            change_name_if_used (bool): Change name in case of conflict

        Returns:
            flavor_id (str): flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    # pick a non-colliding name (appends "-<n>" suffixes) if requested
                    if change_name_if_used:
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )
                    # EPA parameters (numa, quotas, mempage-size, policies)
                    # are translated into flavor extra_specs
                    if extended:
                        self._process_extended_config_of_flavor(extended, extra_specs)

                    # Create flavor

                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    # name collision: retry with a new name unless retries are exhausted
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)

        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            # KeyError covers a flavor_data dict without the mandatory "name"
            self._format_exception(e)
1563
    def delete_flavor(self, flavor_id):
        """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id

        :param flavor_id: VIM identifier of the flavor to remove
        """
        try:
            self._reload_connection()
            self.nova.flavors.delete(flavor_id)

            return flavor_id
        # except nvExceptions.BadRequest as e:
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            # translate the OpenStack error into the corresponding vimconn exception
            self._format_exception(e)
1579
1580 def new_image(self, image_dict):
1581 """
1582 Adds a tenant image to VIM. imge_dict is a dictionary with:
1583 name: name
1584 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1585 location: path or URI
1586 public: "yes" or "no"
1587 metadata: metadata of the image
1588 Returns the image_id
1589 """
1590 retry = 0
1591 max_retries = 3
1592
1593 while retry < max_retries:
1594 retry += 1
1595 try:
1596 self._reload_connection()
1597
1598 # determine format http://docs.openstack.org/developer/glance/formats.html
1599 if "disk_format" in image_dict:
1600 disk_format = image_dict["disk_format"]
1601 else: # autodiscover based on extension
1602 if image_dict["location"].endswith(".qcow2"):
1603 disk_format = "qcow2"
1604 elif image_dict["location"].endswith(".vhd"):
1605 disk_format = "vhd"
1606 elif image_dict["location"].endswith(".vmdk"):
1607 disk_format = "vmdk"
1608 elif image_dict["location"].endswith(".vdi"):
1609 disk_format = "vdi"
1610 elif image_dict["location"].endswith(".iso"):
1611 disk_format = "iso"
1612 elif image_dict["location"].endswith(".aki"):
1613 disk_format = "aki"
1614 elif image_dict["location"].endswith(".ari"):
1615 disk_format = "ari"
1616 elif image_dict["location"].endswith(".ami"):
1617 disk_format = "ami"
1618 else:
1619 disk_format = "raw"
1620
1621 self.logger.debug(
1622 "new_image: '%s' loading from '%s'",
1623 image_dict["name"],
1624 image_dict["location"],
1625 )
1626 if self.vim_type == "VIO":
1627 container_format = "bare"
1628 if "container_format" in image_dict:
1629 container_format = image_dict["container_format"]
1630
1631 new_image = self.glance.images.create(
1632 name=image_dict["name"],
1633 container_format=container_format,
1634 disk_format=disk_format,
1635 )
1636 else:
1637 new_image = self.glance.images.create(name=image_dict["name"])
1638
1639 if image_dict["location"].startswith("http"):
1640 # TODO there is not a method to direct download. It must be downloaded locally with requests
1641 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1642 else: # local path
1643 with open(image_dict["location"]) as fimage:
1644 self.glance.images.upload(new_image.id, fimage)
1645 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1646 # image_dict.get("public","yes")=="yes",
1647 # container_format="bare", data=fimage, disk_format=disk_format)
1648
1649 metadata_to_load = image_dict.get("metadata")
1650
1651 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1652 # for openstack
1653 if self.vim_type == "VIO":
1654 metadata_to_load["upload_location"] = image_dict["location"]
1655 else:
1656 metadata_to_load["location"] = image_dict["location"]
1657
1658 self.glance.images.update(new_image.id, **metadata_to_load)
1659
1660 return new_image.id
1661 except (
1662 nvExceptions.Conflict,
1663 ksExceptions.ClientException,
1664 nvExceptions.ClientException,
1665 ) as e:
1666 self._format_exception(e)
1667 except (
1668 HTTPException,
1669 gl1Exceptions.HTTPException,
1670 gl1Exceptions.CommunicationError,
1671 ConnectionError,
1672 ) as e:
1673 if retry == max_retries:
1674 continue
1675
1676 self._format_exception(e)
1677 except IOError as e: # can not open the file
1678 raise vimconn.VimConnConnectionException(
1679 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1680 http_code=vimconn.HTTP_Bad_Request,
1681 )
1682
    def delete_image(self, image_id):
        """Deletes a tenant image from openstack VIM. Returns the old id

        :param image_id: VIM identifier of the image to remove
        """
        try:
            self._reload_connection()
            self.glance.images.delete(image_id)

            return image_id
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            gl1Exceptions.HTTPNotFound,
            ConnectionError,
        ) as e:  # TODO remove
            # translate the OpenStack error into the corresponding vimconn exception
            self._format_exception(e)
1699
1700 def get_image_id_from_path(self, path):
1701 """Get the image id from image path in the VIM database. Returns the image_id"""
1702 try:
1703 self._reload_connection()
1704 images = self.glance.images.list()
1705
1706 for image in images:
1707 if image.metadata.get("location") == path:
1708 return image.id
1709
1710 raise vimconn.VimConnNotFoundException(
1711 "image with location '{}' not found".format(path)
1712 )
1713 except (
1714 ksExceptions.ClientException,
1715 nvExceptions.ClientException,
1716 gl1Exceptions.CommunicationError,
1717 ConnectionError,
1718 ) as e:
1719 self._format_exception(e)
1720
1721 def get_image_list(self, filter_dict={}):
1722 """Obtain tenant images from VIM
1723 Filter_dict can be:
1724 id: image id
1725 name: image name
1726 checksum: image checksum
1727 Returns the image list of dictionaries:
1728 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1729 List can be empty
1730 """
1731 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1732
1733 try:
1734 self._reload_connection()
1735 # filter_dict_os = filter_dict.copy()
1736 # First we filter by the available filter fields: name, id. The others are removed.
1737 image_list = self.glance.images.list()
1738 filtered_list = []
1739
1740 for image in image_list:
1741 try:
1742 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1743 continue
1744
1745 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1746 continue
1747
1748 if (
1749 filter_dict.get("checksum")
1750 and image["checksum"] != filter_dict["checksum"]
1751 ):
1752 continue
1753
1754 filtered_list.append(image.copy())
1755 except gl1Exceptions.HTTPNotFound:
1756 pass
1757
1758 return filtered_list
1759 except (
1760 ksExceptions.ClientException,
1761 nvExceptions.ClientException,
1762 gl1Exceptions.CommunicationError,
1763 ConnectionError,
1764 ) as e:
1765 self._format_exception(e)
1766
1767 def __wait_for_vm(self, vm_id, status):
1768 """wait until vm is in the desired status and return True.
1769 If the VM gets in ERROR status, return false.
1770 If the timeout is reached generate an exception"""
1771 elapsed_time = 0
1772 while elapsed_time < server_timeout:
1773 vm_status = self.nova.servers.get(vm_id).status
1774
1775 if vm_status == status:
1776 return True
1777
1778 if vm_status == "ERROR":
1779 return False
1780
1781 time.sleep(5)
1782 elapsed_time += 5
1783
1784 # if we exceeded the timeout rollback
1785 if elapsed_time >= server_timeout:
1786 raise vimconn.VimConnException(
1787 "Timeout waiting for instance " + vm_id + " to get " + status,
1788 http_code=vimconn.HTTP_Request_Timeout,
1789 )
1790
1791 def _get_openstack_availablity_zones(self):
1792 """
1793 Get from openstack availability zones available
1794 :return:
1795 """
1796 try:
1797 openstack_availability_zone = self.nova.availability_zones.list()
1798 openstack_availability_zone = [
1799 str(zone.zoneName)
1800 for zone in openstack_availability_zone
1801 if zone.zoneName != "internal"
1802 ]
1803
1804 return openstack_availability_zone
1805 except Exception:
1806 return None
1807
1808 def _set_availablity_zones(self):
1809 """
1810 Set vim availablity zone
1811 :return:
1812 """
1813 if "availability_zone" in self.config:
1814 vim_availability_zones = self.config.get("availability_zone")
1815
1816 if isinstance(vim_availability_zones, str):
1817 self.availability_zone = [vim_availability_zones]
1818 elif isinstance(vim_availability_zones, list):
1819 self.availability_zone = vim_availability_zones
1820 else:
1821 self.availability_zone = self._get_openstack_availablity_zones()
1822
    def _get_vm_availability_zone(
        self, availability_zone_index, availability_zone_list
    ):
        """
        Return the availability zone to be used by the created VM.

        :param availability_zone_index: index into availability_zone_list, or None
        :param availability_zone_list: zone names requested by the VNFD
        :return: The VIM availability zone to be used or None
        """
        # no index supplied: fall back to the configured default zone (or none at all)
        if availability_zone_index is None:
            if not self.config.get("availability_zone"):
                return None
            elif isinstance(self.config.get("availability_zone"), str):
                return self.config["availability_zone"]
            else:
                # TODO consider using a different parameter at config for default AV and AV list match
                return self.config["availability_zone"][0]

        vim_availability_zones = self.availability_zone
        # check if VIM offer enough availability zones describe in the VNFD
        if vim_availability_zones and len(availability_zone_list) <= len(
            vim_availability_zones
        ):
            # check if all the names of NFV AV match VIM AV names
            match_by_index = False
            for av in availability_zone_list:
                if av not in vim_availability_zones:
                    match_by_index = True
                    break

            # any mismatching name forces positional (index-based) selection
            if match_by_index:
                return vim_availability_zones[availability_zone_index]
            else:
                return availability_zone_list[availability_zone_index]
        else:
            raise vimconn.VimConnConflictException(
                "No enough availability zones at VIM for this deployment"
            )
1859
1860 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1861 """Fill up the security_groups in the port_dict.
1862
1863 Args:
1864 net (dict): Network details
1865 port_dict (dict): Port details
1866
1867 """
1868 if (
1869 self.config.get("security_groups")
1870 and net.get("port_security") is not False
1871 and not self.config.get("no_port_security_extension")
1872 ):
1873 if not self.security_groups_id:
1874 self._get_ids_from_name()
1875
1876 port_dict["security_groups"] = self.security_groups_id
1877
1878 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1879 """Fill up the network binding depending on network type in the port_dict.
1880
1881 Args:
1882 net (dict): Network details
1883 port_dict (dict): Port details
1884
1885 """
1886 if not net.get("type"):
1887 raise vimconn.VimConnException("Type is missing in the network details.")
1888
1889 if net["type"] == "virtual":
1890 pass
1891
1892 # For VF
1893 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1894 port_dict["binding:vnic_type"] = "direct"
1895
1896 # VIO specific Changes
1897 if self.vim_type == "VIO":
1898 # Need to create port with port_security_enabled = False and no-security-groups
1899 port_dict["port_security_enabled"] = False
1900 port_dict["provider_security_groups"] = []
1901 port_dict["security_groups"] = []
1902
1903 else:
1904 # For PT PCI-PASSTHROUGH
1905 port_dict["binding:vnic_type"] = "direct-physical"
1906
1907 @staticmethod
1908 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1909 """Set the "ip" parameter in net dictionary.
1910
1911 Args:
1912 new_port (dict): New created port
1913 net (dict): Network details
1914
1915 """
1916 fixed_ips = new_port["port"].get("fixed_ips")
1917
1918 if fixed_ips:
1919 net["ip"] = fixed_ips[0].get("ip_address")
1920 else:
1921 net["ip"] = None
1922
1923 @staticmethod
1924 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1925 """Fill up the mac_address and fixed_ips in port_dict.
1926
1927 Args:
1928 net (dict): Network details
1929 port_dict (dict): Port details
1930
1931 """
1932 if net.get("mac_address"):
1933 port_dict["mac_address"] = net["mac_address"]
1934
1935 if net.get("ip_address"):
1936 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1937 # TODO add "subnet_id": <subnet_id>
1938
1939 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1940 """Create new port using neutron.
1941
1942 Args:
1943 port_dict (dict): Port details
1944 created_items (dict): All created items
1945 net (dict): Network details
1946
1947 Returns:
1948 new_port (dict): New created port
1949
1950 """
1951 new_port = self.neutron.create_port({"port": port_dict})
1952 created_items["port:" + str(new_port["port"]["id"])] = True
1953 net["mac_adress"] = new_port["port"]["mac_address"]
1954 net["vim_id"] = new_port["port"]["id"]
1955
1956 return new_port
1957
    def _create_port(
        self, net: dict, name: str, created_items: dict
    ) -> Tuple[dict, dict]:
        """Create port using net details.

        Args:
            net (dict): Network details
            name (str): Name to be used as port name if net dict does not include one
            created_items (dict): All created items

        Returns:
            new_port, port   New created port, port dictionary

        """

        port_dict = {
            "network_id": net["net_id"],
            "name": net.get("name"),
            "admin_state_up": True,
        }

        # fall back to the given name when the net does not carry one
        if not port_dict["name"]:
            port_dict["name"] = name

        self._prepare_port_dict_security_groups(net, port_dict)

        self._prepare_port_dict_binding(net, port_dict)

        vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)

        new_port = self._create_new_port(port_dict, created_items, net)

        # propagate the assigned fixed ip (or None) back into the net dict
        vimconnector._set_fixed_ip(new_port, net)

        port = {"port-id": new_port["port"]["id"]}

        # port tags are supported from nova API microversion 2.32
        # NOTE(review): float() on the microversion string mis-orders versions
        # such as "2.100" (parsed as 2.1) — confirm microversions stay below x.100
        if float(self.nova.api_version.get_string()) >= 2.32:
            port["tag"] = new_port["port"]["name"]

        return new_port, port
1998
    def _prepare_network_for_vminstance(
        self,
        name: str,
        net_list: list,
        created_items: dict,
        net_list_vim: list,
        external_network: list,
        no_secured_ports: list,
    ) -> None:
        """Create port and fill up net dictionary for new VM instance creation.

        Args:
            name (str): Name of network
            net_list (list): List of networks
            created_items (dict): All created items belongs to a VM
            net_list_vim (list): List of ports
            external_network (list): List of external-networks
            no_secured_ports (list): Port security disabled ports
        """

        self._reload_connection()

        for net in net_list:
            # Skip non-connected iface
            if not net.get("net_id"):
                continue

            new_port, port = self._create_port(net, name, created_items)

            net_list_vim.append(port)

            # nets explicitly requesting a floating ip must fail the deploy
            # if the ip cannot be assigned
            if net.get("floating_ip", False):
                net["exit_on_floating_ip_error"] = True
                external_network.append(net)

            # mgmt nets get a best-effort floating ip when configured at VIM level
            elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
                net["exit_on_floating_ip_error"] = False
                external_network.append(net)
                net["floating_ip"] = self.config.get("use_floating_ip")

            # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
            # is dropped. As a workaround we wait until the VM is active and then disable the port-security
            if net.get("port_security") is False and not self.config.get(
                "no_port_security_extension"
            ):
                no_secured_ports.append(
                    (
                        new_port["port"]["id"],
                        net.get("port_security_disable_strategy"),
                    )
                )
2050
2051 def _prepare_persistent_root_volumes(
2052 self,
2053 name: str,
2054 vm_av_zone: list,
2055 disk: dict,
2056 base_disk_index: int,
2057 block_device_mapping: dict,
2058 existing_vim_volumes: list,
2059 created_items: dict,
2060 ) -> Optional[str]:
2061 """Prepare persistent root volumes for new VM instance.
2062
2063 Args:
2064 name (str): Name of VM instance
2065 vm_av_zone (list): List of availability zones
2066 disk (dict): Disk details
2067 base_disk_index (int): Disk index
2068 block_device_mapping (dict): Block device details
2069 existing_vim_volumes (list): Existing disk details
2070 created_items (dict): All created items belongs to VM
2071
2072 Returns:
2073 boot_volume_id (str): ID of boot volume
2074
2075 """
2076 # Disk may include only vim_volume_id or only vim_id."
2077 # Use existing persistent root volume finding with volume_id or vim_id
2078 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2079
2080 if disk.get(key_id):
2081 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2082 existing_vim_volumes.append({"id": disk[key_id]})
2083
2084 else:
2085 # Create persistent root volume
2086 volume = self.cinder.volumes.create(
2087 size=disk["size"],
2088 name=name + "vd" + chr(base_disk_index),
2089 imageRef=disk["image_id"],
2090 # Make sure volume is in the same AZ as the VM to be attached to
2091 availability_zone=vm_av_zone,
2092 )
2093 boot_volume_id = volume.id
2094 created_items["volume:" + str(volume.id)] = True
2095 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2096
2097 return boot_volume_id
2098
2099 def _prepare_non_root_persistent_volumes(
2100 self,
2101 name: str,
2102 disk: dict,
2103 vm_av_zone: list,
2104 block_device_mapping: dict,
2105 base_disk_index: int,
2106 existing_vim_volumes: list,
2107 created_items: dict,
2108 ) -> None:
2109 """Prepare persistent volumes for new VM instance.
2110
2111 Args:
2112 name (str): Name of VM instance
2113 disk (dict): Disk details
2114 vm_av_zone (list): List of availability zones
2115 block_device_mapping (dict): Block device details
2116 base_disk_index (int): Disk index
2117 existing_vim_volumes (list): Existing disk details
2118 created_items (dict): All created items belongs to VM
2119 """
2120 # Non-root persistent volumes
2121 # Disk may include only vim_volume_id or only vim_id."
2122 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2123
2124 if disk.get(key_id):
2125 # Use existing persistent volume
2126 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2127 existing_vim_volumes.append({"id": disk[key_id]})
2128
2129 else:
2130 # Create persistent volume
2131 volume = self.cinder.volumes.create(
2132 size=disk["size"],
2133 name=name + "vd" + chr(base_disk_index),
2134 # Make sure volume is in the same AZ as the VM to be attached to
2135 availability_zone=vm_av_zone,
2136 )
2137 created_items["volume:" + str(volume.id)] = True
2138 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2139
2140 def _wait_for_created_volumes_availability(
2141 self, elapsed_time: int, created_items: dict
2142 ) -> Optional[int]:
2143 """Wait till created volumes become available.
2144
2145 Args:
2146 elapsed_time (int): Passed time while waiting
2147 created_items (dict): All created items belongs to VM
2148
2149 Returns:
2150 elapsed_time (int): Time spent while waiting
2151
2152 """
2153
2154 while elapsed_time < volume_timeout:
2155 for created_item in created_items:
2156 v, _, volume_id = created_item.partition(":")
2157 if v == "volume":
2158 if self.cinder.volumes.get(volume_id).status != "available":
2159 break
2160 else:
2161 # All ready: break from while
2162 break
2163
2164 time.sleep(5)
2165 elapsed_time += 5
2166
2167 return elapsed_time
2168
2169 def _wait_for_existing_volumes_availability(
2170 self, elapsed_time: int, existing_vim_volumes: list
2171 ) -> Optional[int]:
2172 """Wait till existing volumes become available.
2173
2174 Args:
2175 elapsed_time (int): Passed time while waiting
2176 existing_vim_volumes (list): Existing volume details
2177
2178 Returns:
2179 elapsed_time (int): Time spent while waiting
2180
2181 """
2182
2183 while elapsed_time < volume_timeout:
2184 for volume in existing_vim_volumes:
2185 if self.cinder.volumes.get(volume["id"]).status != "available":
2186 break
2187 else: # all ready: break from while
2188 break
2189
2190 time.sleep(5)
2191 elapsed_time += 5
2192
2193 return elapsed_time
2194
2195 def _prepare_disk_for_vminstance(
2196 self,
2197 name: str,
2198 existing_vim_volumes: list,
2199 created_items: dict,
2200 vm_av_zone: list,
2201 disk_list: list = None,
2202 ) -> None:
2203 """Prepare all volumes for new VM instance.
2204
2205 Args:
2206 name (str): Name of Instance
2207 existing_vim_volumes (list): List of existing volumes
2208 created_items (dict): All created items belongs to VM
2209 vm_av_zone (list): VM availability zone
2210 disk_list (list): List of disks
2211
2212 """
2213 # Create additional volumes in case these are present in disk_list
2214 base_disk_index = ord("b")
2215 boot_volume_id = None
2216 elapsed_time = 0
2217
2218 block_device_mapping = {}
2219 for disk in disk_list:
2220 if "image_id" in disk:
2221 # Root persistent volume
2222 base_disk_index = ord("a")
2223 boot_volume_id = self._prepare_persistent_root_volumes(
2224 name=name,
2225 vm_av_zone=vm_av_zone,
2226 disk=disk,
2227 base_disk_index=base_disk_index,
2228 block_device_mapping=block_device_mapping,
2229 existing_vim_volumes=existing_vim_volumes,
2230 created_items=created_items,
2231 )
2232 else:
2233 # Non-root persistent volume
2234 self._prepare_non_root_persistent_volumes(
2235 name=name,
2236 disk=disk,
2237 vm_av_zone=vm_av_zone,
2238 block_device_mapping=block_device_mapping,
2239 base_disk_index=base_disk_index,
2240 existing_vim_volumes=existing_vim_volumes,
2241 created_items=created_items,
2242 )
2243 base_disk_index += 1
2244
2245 # Wait until created volumes are with status available
2246 elapsed_time = self._wait_for_created_volumes_availability(
2247 elapsed_time, created_items
2248 )
2249 # Wait until existing volumes in vim are with status available
2250 elapsed_time = self._wait_for_existing_volumes_availability(
2251 elapsed_time, existing_vim_volumes
2252 )
2253 # If we exceeded the timeout rollback
2254 if elapsed_time >= volume_timeout:
2255 raise vimconn.VimConnException(
2256 "Timeout creating volumes for instance " + name,
2257 http_code=vimconn.HTTP_Request_Timeout,
2258 )
2259 if boot_volume_id:
2260 self.cinder.volumes.set_bootable(boot_volume_id, True)
2261
2262 def _find_the_external_network_for_floating_ip(self):
2263 """Get the external network ip in order to create floating IP.
2264
2265 Returns:
2266 pool_id (str): External network pool ID
2267
2268 """
2269
2270 # Find the external network
2271 external_nets = list()
2272
2273 for net in self.neutron.list_networks()["networks"]:
2274 if net["router:external"]:
2275 external_nets.append(net)
2276
2277 if len(external_nets) == 0:
2278 raise vimconn.VimConnException(
2279 "Cannot create floating_ip automatically since "
2280 "no external network is present",
2281 http_code=vimconn.HTTP_Conflict,
2282 )
2283
2284 if len(external_nets) > 1:
2285 raise vimconn.VimConnException(
2286 "Cannot create floating_ip automatically since "
2287 "multiple external networks are present",
2288 http_code=vimconn.HTTP_Conflict,
2289 )
2290
2291 # Pool ID
2292 return external_nets[0].get("id")
2293
2294 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2295 """Trigger neutron to create a new floating IP using external network ID.
2296
2297 Args:
2298 param (dict): Input parameters to create a floating IP
2299 created_items (dict): All created items belongs to new VM instance
2300
2301 Raises:
2302
2303 VimConnException
2304 """
2305 try:
2306 self.logger.debug("Creating floating IP")
2307 new_floating_ip = self.neutron.create_floatingip(param)
2308 free_floating_ip = new_floating_ip["floatingip"]["id"]
2309 created_items["floating_ip:" + str(free_floating_ip)] = True
2310
2311 except Exception as e:
2312 raise vimconn.VimConnException(
2313 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2314 http_code=vimconn.HTTP_Conflict,
2315 )
2316
2317 def _create_floating_ip(
2318 self, floating_network: dict, server: object, created_items: dict
2319 ) -> None:
2320 """Get the available Pool ID and create a new floating IP.
2321
2322 Args:
2323 floating_network (dict): Dict including external network ID
2324 server (object): Server object
2325 created_items (dict): All created items belongs to new VM instance
2326
2327 """
2328
2329 # Pool_id is available
2330 if (
2331 isinstance(floating_network["floating_ip"], str)
2332 and floating_network["floating_ip"].lower() != "true"
2333 ):
2334 pool_id = floating_network["floating_ip"]
2335
2336 # Find the Pool_id
2337 else:
2338 pool_id = self._find_the_external_network_for_floating_ip()
2339
2340 param = {
2341 "floatingip": {
2342 "floating_network_id": pool_id,
2343 "tenant_id": server.tenant_id,
2344 }
2345 }
2346
2347 self._neutron_create_float_ip(param, created_items)
2348
2349 def _find_floating_ip(
2350 self,
2351 server: object,
2352 floating_ips: list,
2353 floating_network: dict,
2354 ) -> Optional[str]:
2355 """Find the available free floating IPs if there are.
2356
2357 Args:
2358 server (object): Server object
2359 floating_ips (list): List of floating IPs
2360 floating_network (dict): Details of floating network such as ID
2361
2362 Returns:
2363 free_floating_ip (str): Free floating ip address
2364
2365 """
2366 for fip in floating_ips:
2367 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2368 continue
2369
2370 if isinstance(floating_network["floating_ip"], str):
2371 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2372 continue
2373
2374 return fip["id"]
2375
2376 def _assign_floating_ip(
2377 self, free_floating_ip: str, floating_network: dict
2378 ) -> Dict:
2379 """Assign the free floating ip address to port.
2380
2381 Args:
2382 free_floating_ip (str): Floating IP to be assigned
2383 floating_network (dict): ID of floating network
2384
2385 Returns:
2386 fip (dict) (dict): Floating ip details
2387
2388 """
2389 # The vim_id key contains the neutron.port_id
2390 self.neutron.update_floatingip(
2391 free_floating_ip,
2392 {"floatingip": {"port_id": floating_network["vim_id"]}},
2393 )
2394 # For race condition ensure not re-assigned to other VM after 5 seconds
2395 time.sleep(5)
2396
2397 return self.neutron.show_floatingip(free_floating_ip)
2398
2399 def _get_free_floating_ip(
2400 self, server: object, floating_network: dict
2401 ) -> Optional[str]:
2402 """Get the free floating IP address.
2403
2404 Args:
2405 server (object): Server Object
2406 floating_network (dict): Floating network details
2407
2408 Returns:
2409 free_floating_ip (str): Free floating ip addr
2410
2411 """
2412
2413 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2414
2415 # Randomize
2416 random.shuffle(floating_ips)
2417
2418 return self._find_floating_ip(server, floating_ips, floating_network)
2419
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        Args:
            external_network (list): ID of External network
            server (object): Server Object
            created_items (dict): All created items belongs to new VM instance
            vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException

        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network
                    )

                    # No reusable IP found: create one. It is not used directly
                    # here; show_floatingip(None) below raises, the exception
                    # handler retries, and the next loop iteration picks the
                    # newly created IP up via _get_free_floating_ip.
                    if not free_floating_ip:
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        # Another RO may have stolen the IP during the grace
                        # period inside _assign_floating_ip: verify ownership
                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            # VM still building: keep retrying until server_timeout
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # Best-effort unless the net was explicitly marked to fail hard
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2508
2509 def _update_port_security_for_vminstance(
2510 self,
2511 no_secured_ports: list,
2512 server: object,
2513 ) -> None:
2514 """Updates the port security according to no_secured_ports list.
2515
2516 Args:
2517 no_secured_ports (list): List of ports that security will be disabled
2518 server (object): Server Object
2519
2520 Raises:
2521 VimConnException
2522
2523 """
2524 # Wait until the VM is active and then disable the port-security
2525 if no_secured_ports:
2526 self.__wait_for_vm(server.id, "ACTIVE")
2527
2528 for port in no_secured_ports:
2529 port_update = {
2530 "port": {"port_security_enabled": False, "security_groups": None}
2531 }
2532
2533 if port[1] == "allow-address-pairs":
2534 port_update = {
2535 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2536 }
2537
2538 try:
2539 self.neutron.update_port(port[0], port_update)
2540
2541 except Exception:
2542 raise vimconn.VimConnException(
2543 "It was not possible to disable port security for port {}".format(
2544 port[0]
2545 )
2546 )
2547
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name (str): name of VM
            description (str): description
            start (bool): indicates if VM must start or boot in pause mode. Ignored
            image_id (str) image uuid
            flavor_id (str) flavor uuid
            affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
            net_list (list): list of interfaces, each one is a dictionary with:
                name: name of network
                net_id: network uuid to connect
                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
                model: interface model, ignored #TODO
                mac_address: used for SR-IOV ifaces #TODO for other types
                use: 'data', 'bridge',  'mgmt'
                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id: filled/added by this function
                floating_ip: True/False (or it can be None)
                port_security: True/False
            cloud_config (dict): (optional) dictionary with:
                key-pairs: (optional) list of strings with the public key to be inserted to the default user
                users: (optional) list of users to be inserted, each item is a dict with:
                    name: (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data: (optional) string is a text script to be passed directly to cloud-init
                config-files: (optional). List of files to be transferred. Each item is a dict with:
                    dest: (mandatory) string with the destination absolute path
                    encoding: (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content : (mandatory) string with the content of the file
                    permissions: (optional) string with file permissions, typically octal notation '0644'
                    owner: (optional) file owner, string with the format 'owner:group'
                boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
                image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size: (mandatory) string with the size of the disk in GB
                vim_id: (optional) should use this existing volume id
            availability_zone_index: Index of availability_zone_list to use for this VM. None if not AV required
            availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
                availability_zone_index is None
                #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )

        try:
            server = None
            created_items = {}
            net_list_vim = []
            # list of external networks to be connected to instance, later on used to create floating_ip
            external_network = []
            # List of ports with port-security disabled
            no_secured_ports = []
            # NOTE(review): block_device_mapping stays None for the whole
            # method — _prepare_disk_for_vminstance builds its own local dict,
            # so nova.servers.create() below always receives
            # block_device_mapping=None even when persistent volumes were
            # created. Confirm whether created volumes are ever attached.
            block_device_mapping = None
            existing_vim_volumes = []
            server_group_id = None
            scheduller_hints = {}

            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            if disk_list:
                # Prepare disks
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    vm_av_zone=vm_av_zone,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )

            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            vm_start_time = time.time()

            # Port security can only be disabled once the VM is ACTIVE
            self._update_port_security_for_vminstance(no_secured_ports, server)

            # Assign floating IPs for nets marked as external
            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items

        except Exception as e:
            server_id = None
            if server:
                server_id = server.id

            try:
                # Roll back everything created so far (ports, volumes, IPs, VM)
                self.delete_vminstance(server_id, created_items)

            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
2727
2728 def get_vminstance(self, vm_id):
2729 """Returns the VM instance information from VIM"""
2730 # self.logger.debug("Getting VM from VIM")
2731 try:
2732 self._reload_connection()
2733 server = self.nova.servers.find(id=vm_id)
2734 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2735
2736 return server.to_dict()
2737 except (
2738 ksExceptions.ClientException,
2739 nvExceptions.ClientException,
2740 nvExceptions.NotFound,
2741 ConnectionError,
2742 ) as e:
2743 self._format_exception(e)
2744
2745 def get_vminstance_console(self, vm_id, console_type="vnc"):
2746 """
2747 Get a console for the virtual machine
2748 Params:
2749 vm_id: uuid of the VM
2750 console_type, can be:
2751 "novnc" (by default), "xvpvnc" for VNC types,
2752 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2753 Returns dict with the console parameters:
2754 protocol: ssh, ftp, http, https, ...
2755 server: usually ip address
2756 port: the http, ssh, ... port
2757 suffix: extra text, e.g. the http path and query string
2758 """
2759 self.logger.debug("Getting VM CONSOLE from VIM")
2760
2761 try:
2762 self._reload_connection()
2763 server = self.nova.servers.find(id=vm_id)
2764
2765 if console_type is None or console_type == "novnc":
2766 console_dict = server.get_vnc_console("novnc")
2767 elif console_type == "xvpvnc":
2768 console_dict = server.get_vnc_console(console_type)
2769 elif console_type == "rdp-html5":
2770 console_dict = server.get_rdp_console(console_type)
2771 elif console_type == "spice-html5":
2772 console_dict = server.get_spice_console(console_type)
2773 else:
2774 raise vimconn.VimConnException(
2775 "console type '{}' not allowed".format(console_type),
2776 http_code=vimconn.HTTP_Bad_Request,
2777 )
2778
2779 console_dict1 = console_dict.get("console")
2780
2781 if console_dict1:
2782 console_url = console_dict1.get("url")
2783
2784 if console_url:
2785 # parse console_url
2786 protocol_index = console_url.find("//")
2787 suffix_index = (
2788 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2789 )
2790 port_index = (
2791 console_url[protocol_index + 2 : suffix_index].find(":")
2792 + protocol_index
2793 + 2
2794 )
2795
2796 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2797 return (
2798 -vimconn.HTTP_Internal_Server_Error,
2799 "Unexpected response from VIM",
2800 )
2801
2802 console_dict = {
2803 "protocol": console_url[0:protocol_index],
2804 "server": console_url[protocol_index + 2 : port_index],
2805 "port": console_url[port_index:suffix_index],
2806 "suffix": console_url[suffix_index + 1 :],
2807 }
2808 protocol_index += 2
2809
2810 return console_dict
2811 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2812 except (
2813 nvExceptions.NotFound,
2814 ksExceptions.ClientException,
2815 nvExceptions.ClientException,
2816 nvExceptions.BadRequest,
2817 ConnectionError,
2818 ) as e:
2819 self._format_exception(e)
2820
2821 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2822 """Neutron delete ports by id.
2823 Args:
2824 k_id (str): Port id in the VIM
2825 """
2826 try:
2827 port_dict = self.neutron.list_ports()
2828 existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
2829
2830 if k_id in existing_ports:
2831 self.neutron.delete_port(k_id)
2832
2833 except Exception as e:
2834 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2835
2836 def _delete_volumes_by_id_wth_cinder(
2837 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2838 ) -> bool:
2839 """Cinder delete volume by id.
2840 Args:
2841 k (str): Full item name in created_items
2842 k_id (str): ID of floating ip in VIM
2843 volumes_to_hold (list): Volumes not to delete
2844 created_items (dict): All created items belongs to VM
2845 """
2846 try:
2847 if k_id in volumes_to_hold:
2848 return
2849
2850 if self.cinder.volumes.get(k_id).status != "available":
2851 return True
2852
2853 else:
2854 self.cinder.volumes.delete(k_id)
2855 created_items[k] = None
2856
2857 except Exception as e:
2858 self.logger.error(
2859 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2860 )
2861
2862 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2863 """Neutron delete floating ip by id.
2864 Args:
2865 k (str): Full item name in created_items
2866 k_id (str): ID of floating ip in VIM
2867 created_items (dict): All created items belongs to VM
2868 """
2869 try:
2870 self.neutron.delete_floatingip(k_id)
2871 created_items[k] = None
2872
2873 except Exception as e:
2874 self.logger.error(
2875 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2876 )
2877
2878 @staticmethod
2879 def _get_item_name_id(k: str) -> Tuple[str, str]:
2880 k_item, _, k_id = k.partition(":")
2881 return k_item, k_id
2882
2883 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2884 """Delete VM ports attached to the networks before deleting virtual machine.
2885 Args:
2886 created_items (dict): All created items belongs to VM
2887 """
2888
2889 for k, v in created_items.items():
2890 if not v: # skip already deleted
2891 continue
2892
2893 try:
2894 k_item, k_id = self._get_item_name_id(k)
2895 if k_item == "port":
2896 self._delete_ports_by_id_wth_neutron(k_id)
2897
2898 except Exception as e:
2899 self.logger.error(
2900 "Error deleting port: {}: {}".format(type(e).__name__, e)
2901 )
2902
2903 def _delete_created_items(
2904 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2905 ) -> bool:
2906 """Delete Volumes and floating ip if they exist in created_items."""
2907 for k, v in created_items.items():
2908 if not v: # skip already deleted
2909 continue
2910
2911 try:
2912 k_item, k_id = self._get_item_name_id(k)
2913
2914 if k_item == "volume":
2915 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2916 k, k_id, volumes_to_hold, created_items
2917 )
2918
2919 if unavailable_vol:
2920 keep_waiting = True
2921
2922 elif k_item == "floating_ip":
2923 self._delete_floating_ip_by_id(k, k_id, created_items)
2924
2925 except Exception as e:
2926 self.logger.error("Error deleting {}: {}".format(k, e))
2927
2928 return keep_waiting
2929
2930 def delete_vminstance(
2931 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
2932 ) -> None:
2933 """Removes a VM instance from VIM. Returns the old identifier.
2934 Args:
2935 vm_id (str): Identifier of VM instance
2936 created_items (dict): All created items belongs to VM
2937 volumes_to_hold (list): Volumes_to_hold
2938 """
2939 if created_items is None:
2940 created_items = {}
2941 if volumes_to_hold is None:
2942 volumes_to_hold = []
2943
2944 try:
2945 self._reload_connection()
2946
2947 # Delete VM ports attached to the networks before the virtual machine
2948 if created_items:
2949 self._delete_vm_ports_attached_to_network(created_items)
2950
2951 if vm_id:
2952 self.nova.servers.delete(vm_id)
2953
2954 # Although having detached, volumes should have in active status before deleting.
2955 # We ensure in this loop
2956 keep_waiting = True
2957 elapsed_time = 0
2958
2959 while keep_waiting and elapsed_time < volume_timeout:
2960 keep_waiting = False
2961
2962 # Delete volumes and floating IP.
2963 keep_waiting = self._delete_created_items(
2964 created_items, volumes_to_hold, keep_waiting
2965 )
2966
2967 if keep_waiting:
2968 time.sleep(1)
2969 elapsed_time += 1
2970
2971 except (
2972 nvExceptions.NotFound,
2973 ksExceptions.ClientException,
2974 nvExceptions.ClientException,
2975 ConnectionError,
2976 ) as e:
2977 self._format_exception(e)
2978
def refresh_vms_status(self, vm_list):
    """Get the status of the virtual machines and their interfaces/ports
    Params: the list of VM identifiers
    Returns a dictionary with:
        vm_id: #VIM id of this Virtual Machine
            status: #Mandatory. Text with one of:
                # DELETED (not found at vim)
                # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                # OTHER (Vim reported other status not understood)
                # ERROR (VIM indicates an ERROR status)
                # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                # CREATING (on building process), ERROR
                # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                #
            error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
            interfaces:
             -  vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
                mac_address: #Text format XX:XX:XX:XX:XX:XX
                vim_net_id: #network id where this interface is connected
                vim_interface_id: #interface/port VIM id
                ip_address: #null, or text with IPv4, IPv6 address
                compute_node: #identification of compute node where PF,VF interface is allocated
                pci: #PCI address of the NIC that hosts the PF,VF
                vlan: #physical VLAN used for VF
    """
    vm_dict = {}
    self.logger.debug(
        "refresh_vms status: Getting tenant VM instance information from VIM"
    )

    # Each VM is refreshed independently; a failure on one VM id does not
    # abort the loop — the error is recorded in that VM's entry instead.
    for vm_id in vm_list:
        vm = {}

        try:
            vm_vim = self.get_vminstance(vm_id)

            # Map the raw VIM status to the mano vocabulary; anything
            # unknown is reported as OTHER with the raw text preserved.
            if vm_vim["status"] in vmStatus2manoFormat:
                vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
            else:
                vm["status"] = "OTHER"
                vm["error_msg"] = "VIM status reported " + vm_vim["status"]

            # Drop user_data before serializing so it is not included in
            # vim_info (it can be large and is not status information).
            vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
            vm_vim.pop("user_data", None)
            vm["vim_info"] = self.serialize(vm_vim)

            vm["interfaces"] = []
            if vm_vim.get("fault"):
                vm["error_msg"] = str(vm_vim["fault"])

            # get interfaces
            try:
                self._reload_connection()
                port_dict = self.neutron.list_ports(device_id=vm_id)

                for port in port_dict["ports"]:
                    interface = {}
                    interface["vim_info"] = self.serialize(port)
                    interface["mac_address"] = port.get("mac_address")
                    interface["vim_net_id"] = port["network_id"]
                    interface["vim_interface_id"] = port["id"]
                    # check if OS-EXT-SRV-ATTR:host is there,
                    # in case of non-admin credentials, it will be missing

                    if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                        interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                    interface["pci"] = None

                    # check if binding:profile is there,
                    # in case of non-admin credentials, it will be missing
                    if port.get("binding:profile"):
                        if port["binding:profile"].get("pci_slot"):
                            # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                            # the slot to 0x00
                            # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                            #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
                            pci = port["binding:profile"]["pci_slot"]
                            # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                            interface["pci"] = pci

                    interface["vlan"] = None

                    if port.get("binding:vif_details"):
                        interface["vlan"] = port["binding:vif_details"].get("vlan")

                    # Get vlan from network in case not present in port for those old openstacks and cases where
                    # it is needed vlan at PT
                    if not interface["vlan"]:
                        # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                        network = self.neutron.show_network(port["network_id"])

                        if (
                            network["network"].get("provider:network_type")
                            == "vlan"
                        ):
                            # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                            interface["vlan"] = network["network"].get(
                                "provider:segmentation_id"
                            )

                    ips = []
                    # look for floating ip address
                    try:
                        floating_ip_dict = self.neutron.list_floatingips(
                            port_id=port["id"]
                        )

                        if floating_ip_dict.get("floatingips"):
                            ips.append(
                                floating_ip_dict["floatingips"][0].get(
                                    "floating_ip_address"
                                )
                            )
                    except Exception:
                        # Best-effort: missing floating IP info must not
                        # prevent reporting the rest of the interface data.
                        pass

                    for subnet in port["fixed_ips"]:
                        ips.append(subnet["ip_address"])

                    interface["ip_address"] = ";".join(ips)
                    vm["interfaces"].append(interface)
            except Exception as e:
                # Interface retrieval is best-effort: log and keep the VM
                # status already collected above.
                self.logger.error(
                    "Error getting vm interface information {}: {}".format(
                        type(e).__name__, e
                    ),
                    exc_info=True,
                )
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting vm status: %s", str(e))
            vm["status"] = "DELETED"
            vm["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting vm status: %s", str(e))
            vm["status"] = "VIM_ERROR"
            vm["error_msg"] = str(e)

        vm_dict[vm_id] = vm

    return vm_dict
3121
def action_vminstance(self, vm_id, action_dict, created_items={}):
    """Send an action over a VM instance from VIM.

    action_dict carries exactly one action key: "start" (value may be
    "rebuild"), "pause", "resume", "shutoff"/"shutdown", "forceOff",
    "terminate", "createImage", "rebuild", "reboot" or "console".
    Returns None, or the console dict if the action was successfully sent
    to the VIM. created_items is accepted for interface compatibility and
    is not used here.
    """
    self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))

    try:
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)

        if "start" in action_dict:
            if action_dict["start"] == "rebuild":
                server.rebuild()
            else:
                # "start" is only meaningful from a non-active state; pick
                # the nova call that matches the current server status.
                if server.status == "PAUSED":
                    server.unpause()
                elif server.status == "SUSPENDED":
                    server.resume()
                elif server.status == "SHUTOFF":
                    server.start()
                else:
                    self.logger.debug(
                        "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                    )
                    raise vimconn.VimConnException(
                        "Cannot 'start' instance while it is in active state",
                        http_code=vimconn.HTTP_Bad_Request,
                    )

        elif "pause" in action_dict:
            server.pause()
        elif "resume" in action_dict:
            server.resume()
        elif "shutoff" in action_dict or "shutdown" in action_dict:
            self.logger.debug("server status %s", server.status)
            if server.status == "ACTIVE":
                server.stop()
            else:
                self.logger.debug("ERROR: VM is not in Active state")
                raise vimconn.VimConnException(
                    "VM is not in active state, stop operation is not allowed",
                    http_code=vimconn.HTTP_Bad_Request,
                )
        elif "forceOff" in action_dict:
            server.stop()  # TODO
        elif "terminate" in action_dict:
            server.delete()
        elif "createImage" in action_dict:
            server.create_image()
            # "path":path_schema,
            # "description":description_schema,
            # "name":name_schema,
            # "metadata":metadata_schema,
            # "imageRef": id_schema,
            # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
        elif "rebuild" in action_dict:
            server.rebuild(server.image["id"])
        elif "reboot" in action_dict:
            server.reboot()  # reboot_type="SOFT"
        elif "console" in action_dict:
            console_type = action_dict["console"]

            # Each console type maps to a different nova console getter.
            if console_type is None or console_type == "novnc":
                console_dict = server.get_vnc_console("novnc")
            elif console_type == "xvpvnc":
                console_dict = server.get_vnc_console(console_type)
            elif console_type == "rdp-html5":
                console_dict = server.get_rdp_console(console_type)
            elif console_type == "spice-html5":
                console_dict = server.get_spice_console(console_type)
            else:
                raise vimconn.VimConnException(
                    "console type '{}' not allowed".format(console_type),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            try:
                # Split the returned URL of the form
                # protocol://server:port/suffix into its components.
                console_url = console_dict["console"]["url"]
                # parse console_url
                protocol_index = console_url.find("//")
                suffix_index = (
                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                )
                port_index = (
                    console_url[protocol_index + 2 : suffix_index].find(":")
                    + protocol_index
                    + 2
                )

                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                    raise vimconn.VimConnException(
                        "Unexpected response from VIM " + str(console_dict)
                    )

                console_dict2 = {
                    "protocol": console_url[0:protocol_index],
                    "server": console_url[protocol_index + 2 : port_index],
                    "port": int(console_url[port_index + 1 : suffix_index]),
                    "suffix": console_url[suffix_index + 1 :],
                }

                return console_dict2
            except Exception:
                # Any parsing failure (missing keys, non-numeric port...)
                # is reported as an unexpected VIM response.
                raise vimconn.VimConnException(
                    "Unexpected response from VIM " + str(console_dict)
                )

        return None
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
        ConnectionError,
    ) as e:
        self._format_exception(e)
    # TODO insert exception vimconn.HTTP_Unauthorized
3238
3239 # ###### VIO Specific Changes #########
3240 def _generate_vlanID(self):
3241 """
3242 Method to get unused vlanID
3243 Args:
3244 None
3245 Returns:
3246 vlanID
3247 """
3248 # Get used VLAN IDs
3249 usedVlanIDs = []
3250 networks = self.get_network_list()
3251
3252 for net in networks:
3253 if net.get("provider:segmentation_id"):
3254 usedVlanIDs.append(net.get("provider:segmentation_id"))
3255
3256 used_vlanIDs = set(usedVlanIDs)
3257
3258 # find unused VLAN ID
3259 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3260 try:
3261 start_vlanid, end_vlanid = map(
3262 int, vlanID_range.replace(" ", "").split("-")
3263 )
3264
3265 for vlanID in range(start_vlanid, end_vlanid + 1):
3266 if vlanID not in used_vlanIDs:
3267 return vlanID
3268 except Exception as exp:
3269 raise vimconn.VimConnException(
3270 "Exception {} occurred while generating VLAN ID.".format(exp)
3271 )
3272 else:
3273 raise vimconn.VimConnConflictException(
3274 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3275 self.config.get("dataplane_net_vlan_range")
3276 )
3277 )
3278
3279 def _generate_multisegment_vlanID(self):
3280 """
3281 Method to get unused vlanID
3282 Args:
3283 None
3284 Returns:
3285 vlanID
3286 """
3287 # Get used VLAN IDs
3288 usedVlanIDs = []
3289 networks = self.get_network_list()
3290 for net in networks:
3291 if net.get("provider:network_type") == "vlan" and net.get(
3292 "provider:segmentation_id"
3293 ):
3294 usedVlanIDs.append(net.get("provider:segmentation_id"))
3295 elif net.get("segments"):
3296 for segment in net.get("segments"):
3297 if segment.get("provider:network_type") == "vlan" and segment.get(
3298 "provider:segmentation_id"
3299 ):
3300 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3301
3302 used_vlanIDs = set(usedVlanIDs)
3303
3304 # find unused VLAN ID
3305 for vlanID_range in self.config.get("multisegment_vlan_range"):
3306 try:
3307 start_vlanid, end_vlanid = map(
3308 int, vlanID_range.replace(" ", "").split("-")
3309 )
3310
3311 for vlanID in range(start_vlanid, end_vlanid + 1):
3312 if vlanID not in used_vlanIDs:
3313 return vlanID
3314 except Exception as exp:
3315 raise vimconn.VimConnException(
3316 "Exception {} occurred while generating VLAN ID.".format(exp)
3317 )
3318 else:
3319 raise vimconn.VimConnConflictException(
3320 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3321 self.config.get("multisegment_vlan_range")
3322 )
3323 )
3324
3325 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3326 """
3327 Method to validate user given vlanID ranges
3328 Args: None
3329 Returns: None
3330 """
3331 for vlanID_range in input_vlan_range:
3332 vlan_range = vlanID_range.replace(" ", "")
3333 # validate format
3334 vlanID_pattern = r"(\d)*-(\d)*$"
3335 match_obj = re.match(vlanID_pattern, vlan_range)
3336 if not match_obj:
3337 raise vimconn.VimConnConflictException(
3338 "Invalid VLAN range for {}: {}.You must provide "
3339 "'{}' in format [start_ID - end_ID].".format(
3340 text_vlan_range, vlanID_range, text_vlan_range
3341 )
3342 )
3343
3344 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3345 if start_vlanid <= 0:
3346 raise vimconn.VimConnConflictException(
3347 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3348 "networks valid IDs are 1 to 4094 ".format(
3349 text_vlan_range, vlanID_range
3350 )
3351 )
3352
3353 if end_vlanid > 4094:
3354 raise vimconn.VimConnConflictException(
3355 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3356 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3357 text_vlan_range, vlanID_range
3358 )
3359 )
3360
3361 if start_vlanid > end_vlanid:
3362 raise vimconn.VimConnConflictException(
3363 "Invalid VLAN range for {}: {}. You must provide '{}'"
3364 " in format start_ID - end_ID and start_ID < end_ID ".format(
3365 text_vlan_range, vlanID_range, text_vlan_range
3366 )
3367 )
3368
3369 # NOT USED FUNCTIONS
3370
def new_external_port(self, port_data):
    """Add an external port to the VIM and return its identifier.

    Not implemented for openstack: always returns an error tuple.
    """
    # TODO openstack if needed
    message = "osconnector.new_external_port() not implemented"

    return -vimconn.HTTP_Internal_Server_Error, message
3379
def connect_port_network(self, port_id, network_id, admin=False):
    """Connect an external port to a network.

    Not implemented for openstack: always returns an error tuple.
    """
    # TODO openstack if needed
    message = "osconnector.connect_port_network() not implemented"

    return -vimconn.HTTP_Internal_Server_Error, message
3388
def new_user(self, user_name, user_passwd, tenant_id=None):
    """Add a new user to the openstack VIM.

    Returns the keystone user id on success, or a tuple
    (negative_error_code, error_text) on failure.
    """
    self.logger.debug("osconnector: Adding a new user to VIM")

    try:
        self._reload_connection()
        created = self.keystone.users.create(
            user_name, password=user_passwd, default_project=tenant_id
        )

        return created.id
    except ksExceptions.ConnectionError as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = "{}: {}".format(
            type(e).__name__, str(e) if len(e.args) == 0 else str(e.args[0])
        )
    except ksExceptions.ClientException as e:  # TODO remove
        error_value = -vimconn.HTTP_Bad_Request
        error_text = "{}: {}".format(
            type(e).__name__, str(e) if len(e.args) == 0 else str(e.args[0])
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("new_user " + error_text)

    return error_value, error_text
3422
def delete_user(self, user_id):
    """Delete a user from the openstack VIM.

    Returns (1, user_id) on success, or a tuple
    (negative_error_code, error_text) on failure.
    """
    if self.debug:
        print("osconnector: Deleting a user from VIM")

    try:
        self._reload_connection()
        self.keystone.users.delete(user_id)

        return 1, user_id
    except ksExceptions.ConnectionError as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )
    except ksExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )
    except ksExceptions.ClientException as e:  # TODO remove
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    # Fix: log under "delete_user" (was mislabeled "delete_tenant",
    # a copy-paste slip that made failures hard to attribute).
    self.logger.debug("delete_user " + error_text)

    return error_value, error_text
3461
def get_hosts_info(self):
    """Get information about the deployed hypervisor hosts.

    Returns (1, {"hosts": [host_dict, ...]}) on success, or a tuple
    (negative_error_code, error_text) on failure.
    """
    if self.debug:
        print("osconnector: Getting Host info from VIM")

    try:
        self._reload_connection()
        host_list = [hype.to_dict() for hype in self.nova.hypervisors.list()]

        return 1, {"hosts": host_list}
    except nvExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = str(e) if len(e.args) == 0 else str(e.args[0])
    except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("get_hosts_info " + error_text)

    return error_value, error_text
3493
def get_hosts(self, vim_tenant):
    """Get the hypervisor hosts together with the server ids deployed
    on each of them.

    Returns (1, hosts_dict) on success, where each host entry gains a
    "vm" list, or a (negative_error_code, error_text) tuple on failure.
    """
    result, hype_dict = self.get_hosts_info()

    if result < 0:
        return result, hype_dict

    hypervisors = hype_dict["hosts"]

    try:
        servers = self.nova.servers.list()

        for hype in hypervisors:
            for server in servers:
                server_host = server.to_dict()[
                    "OS-EXT-SRV-ATTR:hypervisor_hostname"
                ]

                if server_host == hype["hypervisor_hostname"]:
                    hype.setdefault("vm", []).append(server.id)

        return 1, hype_dict
    except nvExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = str(e) if len(e.args) == 0 else str(e.args[0])
    except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("get_hosts " + error_text)

    return error_value, error_text
3534
def new_classification(self, name, ctype, definition):
    """Create a (traffic) Classification in the VIM as a Neutron SFC
    flow classifier.

    Returns the id of the created flow classifier.
    """
    self.logger.debug(
        "Adding a new (Traffic) Classification to VIM, named %s", name
    )

    try:
        self._reload_connection()

        # Only the classification types in supportedClassificationTypes
        # can be mapped to Neutron flow classifiers.
        if ctype not in supportedClassificationTypes:
            raise vimconn.VimConnNotSupportedException(
                "OpenStack VIM connector does not support provided "
                "Classification Type {}, supported ones are: {}".format(
                    ctype, supportedClassificationTypes
                )
            )

        if not self._validate_classification(ctype, definition):
            raise vimconn.VimConnException(
                "Incorrect Classification definition for the type specified."
            )

        body = definition
        body["name"] = name
        created = self.neutron.create_sfc_flow_classifier(
            {"flow_classifier": body}
        )

        return created["flow_classifier"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self.logger.error("Creation of Classification failed.")
        self._format_exception(e)
3572
def get_classification(self, class_id):
    """Look up a single Classification by its VIM id.

    Raises VimConnNotFoundException when no Classification matches and
    VimConnConflictException when the id is ambiguous.
    """
    self.logger.debug(" Getting Classification %s from VIM", class_id)
    matches = self.get_classification_list({"id": class_id})

    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Classification '{}' not found".format(class_id)
        )

    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Classification with this criteria"
        )

    return matches[0]
3590
def get_classification_list(self, filter_dict={}):
    """List Classifications (Neutron flow classifiers) matching
    filter_dict. "tenant_id" is translated to "project_id" when
    keystone v3 is in use; filter_dict itself is not modified.
    """
    self.logger.debug(
        "Getting Classifications from VIM filter: '%s'", str(filter_dict)
    )

    try:
        os_filter = filter_dict.copy()
        self._reload_connection()

        if self.api_version3 and "tenant_id" in os_filter:
            os_filter["project_id"] = os_filter.pop("tenant_id")

        response = self.neutron.list_sfc_flow_classifiers(**os_filter)
        classifier_list = response["flow_classifiers"]
        self.__classification_os2mano(classifier_list)

        return classifier_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
3617
def delete_classification(self, class_id):
    """Delete the given Classification (flow classifier) from the VIM.

    Returns the deleted classifier id.
    """
    self.logger.debug("Deleting Classification '%s' from VIM", class_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_flow_classifier(class_id)

        return class_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        # Fix: neExceptions.NeutronException was listed twice in this
        # tuple; the duplicate is removed (no behavior change).
        self._format_exception(e)
3634
def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
    """Create a Service Function Instance in the VIM as a Neutron SFC
    port pair and return its id.

    Exactly one ingress and one egress port are supported per SFI.
    """
    self.logger.debug(
        "Adding a new Service Function Instance to VIM, named '%s'", name
    )

    try:
        new_sfi = None
        self._reload_connection()
        correlation = "nsh" if sfc_encap else None

        if len(ingress_ports) != 1:
            raise vimconn.VimConnNotSupportedException(
                "OpenStack VIM connector can only have 1 ingress port per SFI"
            )

        if len(egress_ports) != 1:
            raise vimconn.VimConnNotSupportedException(
                "OpenStack VIM connector can only have 1 egress port per SFI"
            )

        port_pair = {
            "name": name,
            "ingress": ingress_ports[0],
            "egress": egress_ports[0],
            "service_function_parameters": {"correlation": correlation},
        }
        new_sfi = self.neutron.create_sfc_port_pair({"port_pair": port_pair})

        return new_sfi["port_pair"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        # Roll back the port pair if it was created before the failure.
        if new_sfi:
            try:
                self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
            except Exception:
                self.logger.error(
                    "Creation of Service Function Instance failed, with "
                    "subsequent deletion failure as well."
                )

        self._format_exception(e)
3683
def get_sfi(self, sfi_id):
    """Look up a single Service Function Instance (port pair) by VIM id.

    Raises VimConnNotFoundException when no SFI matches and
    VimConnConflictException when the id is ambiguous.
    """
    self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
    matches = self.get_sfi_list({"id": sfi_id})

    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Service Function Instance '{}' not found".format(sfi_id)
        )

    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Service Function Instance with this criteria"
        )

    return matches[0]
3701
def get_sfi_list(self, filter_dict={}):
    """List Service Function Instances (Neutron port pairs) matching
    filter_dict. "tenant_id" is translated to "project_id" when
    keystone v3 is in use; filter_dict itself is not modified.
    """
    self.logger.debug(
        "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
    )

    try:
        self._reload_connection()
        os_filter = filter_dict.copy()

        if self.api_version3 and "tenant_id" in os_filter:
            os_filter["project_id"] = os_filter.pop("tenant_id")

        response = self.neutron.list_sfc_port_pairs(**os_filter)
        sfi_list = response["port_pairs"]
        self.__sfi_os2mano(sfi_list)

        return sfi_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
3726
def delete_sfi(self, sfi_id):
    """Delete the Service Function Instance (port pair) with the given id.

    Returns the deleted SFI id.
    """
    self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_port_pair(sfi_id)

        return sfi_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        # Fix: neExceptions.NeutronException was listed twice in this
        # tuple; the duplicate is removed (no behavior change).
        self._format_exception(e)
3743
def new_sf(self, name, sfis, sfc_encap=True):
    """Create a Service Function in the VIM as a Neutron SFC port pair
    group built from the given SFIs, and return its id.
    """
    self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)

    try:
        new_sf = None
        self._reload_connection()

        # Every member SFI must agree with the requested SFC encapsulation.
        for instance in sfis:
            sfi = self.get_sfi(instance)

            if sfi.get("sfc_encap") != sfc_encap:
                raise vimconn.VimConnNotSupportedException(
                    "OpenStack VIM connector requires all SFIs of the "
                    "same SF to share the same SFC Encapsulation"
                )

        new_sf = self.neutron.create_sfc_port_pair_group(
            {"port_pair_group": {"name": name, "port_pairs": sfis}}
        )

        return new_sf["port_pair_group"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        # Roll back the port pair group if it was created before the failure.
        if new_sf:
            try:
                self.neutron.delete_sfc_port_pair_group(
                    new_sf["port_pair_group"]["id"]
                )
            except Exception:
                self.logger.error(
                    "Creation of Service Function failed, with "
                    "subsequent deletion failure as well."
                )

        self._format_exception(e)
3787
def get_sf(self, sf_id):
    """Look up a single Service Function (port pair group) by VIM id.

    Raises VimConnNotFoundException when no SF matches and
    VimConnConflictException when the id is ambiguous.
    """
    self.logger.debug("Getting Service Function %s from VIM", sf_id)
    matches = self.get_sf_list({"id": sf_id})

    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Service Function '{}' not found".format(sf_id)
        )

    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Service Function with this criteria"
        )

    return matches[0]
3805
def get_sf_list(self, filter_dict={}):
    """List Service Functions (Neutron port pair groups) matching
    filter_dict. "tenant_id" is translated to "project_id" when
    keystone v3 is in use; filter_dict itself is not modified.
    """
    self.logger.debug(
        "Getting Service Function from VIM filter: '%s'", str(filter_dict)
    )

    try:
        self._reload_connection()
        os_filter = filter_dict.copy()

        if self.api_version3 and "tenant_id" in os_filter:
            os_filter["project_id"] = os_filter.pop("tenant_id")

        response = self.neutron.list_sfc_port_pair_groups(**os_filter)
        sf_list = response["port_pair_groups"]
        self.__sf_os2mano(sf_list)

        return sf_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
3830
def delete_sf(self, sf_id):
    """Delete the Service Function (port pair group) with the given id.

    Returns the deleted SF id.
    """
    self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_port_pair_group(sf_id)

        return sf_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        # Fix: neExceptions.NeutronException was listed twice in this
        # tuple; the duplicate is removed (no behavior change).
        self._format_exception(e)
3847
def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
    """Create a Service Function Path in the VIM as a Neutron SFC port
    chain and return its id.

    spi, when given, is set as the chain_id of the port chain.
    """
    self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)

    try:
        new_sfp = None
        self._reload_connection()
        # In networking-sfc the MPLS encapsulation is legacy and is used
        # when no full SFC Encapsulation is intended.
        correlation = "nsh" if sfc_encap else "mpls"

        port_chain = {
            "name": name,
            "flow_classifiers": classifications,
            "port_pair_groups": sfs,
            "chain_parameters": {"correlation": correlation},
        }

        if spi:
            port_chain["chain_id"] = spi

        new_sfp = self.neutron.create_sfc_port_chain({"port_chain": port_chain})

        return new_sfp["port_chain"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        # Roll back the port chain if it was created before the failure.
        if new_sfp:
            try:
                self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
            except Exception:
                self.logger.error(
                    "Creation of Service Function Path failed, with "
                    "subsequent deletion failure as well."
                )

        self._format_exception(e)
3890
def get_sfp(self, sfp_id):
    """Look up a single Service Function Path (port chain) by VIM id.

    Raises VimConnNotFoundException when no SFP matches and
    VimConnConflictException when the id is ambiguous.
    """
    self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)

    matches = self.get_sfp_list({"id": sfp_id})

    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Service Function Path '{}' not found".format(sfp_id)
        )

    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Service Function Path with this criteria"
        )

    return matches[0]
3909
def get_sfp_list(self, filter_dict={}):
    """List Service Function Paths (Neutron port chains) matching
    filter_dict. "tenant_id" is translated to "project_id" when
    keystone v3 is in use; filter_dict itself is not modified.
    """
    self.logger.debug(
        "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
    )

    try:
        self._reload_connection()
        os_filter = filter_dict.copy()

        if self.api_version3 and "tenant_id" in os_filter:
            os_filter["project_id"] = os_filter.pop("tenant_id")

        response = self.neutron.list_sfc_port_chains(**os_filter)
        sfp_list = response["port_chains"]
        self.__sfp_os2mano(sfp_list)

        return sfp_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
3934
def delete_sfp(self, sfp_id):
    """Delete the Service Function Path (port chain) with the given id.

    Returns the deleted SFP id.
    """
    self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_port_chain(sfp_id)

        return sfp_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        # Fix: neExceptions.NeutronException was listed twice in this
        # tuple; the duplicate is removed (no behavior change).
        self._format_exception(e)
3951
def refresh_sfps_status(self, sfp_list):
    """Get the status of the service function paths.
    Params: the list of sfp identifiers
    Returns a dictionary mapping each sfp id to:
        status: DELETED / VIM_ERROR / OTHER / ERROR / ACTIVE / CREATING
        error_msg: VIM error message, if any
        vim_info: plain information obtained from vim (serialized)
    """
    result = {}
    self.logger.debug(
        "refresh_sfps status: Getting tenant SFP information from VIM"
    )

    for sfp_id in sfp_list:
        entry = {}

        try:
            vim_sfp = self.get_sfp(sfp_id)

            # A chain with an assigned SPI is reported as ACTIVE.
            if vim_sfp["spi"]:
                entry["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                entry["status"] = "OTHER"
                entry["error_msg"] = "VIM status reported " + entry["status"]

            entry["vim_info"] = self.serialize(vim_sfp)

            if vim_sfp.get("fault"):
                entry["error_msg"] = str(vim_sfp["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting sfp status: %s", str(e))
            entry["status"] = "DELETED"
            entry["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting sfp status: %s", str(e))
            entry["status"] = "VIM_ERROR"
            entry["error_msg"] = str(e)

        result[sfp_id] = entry

    return result
4000
def refresh_sfis_status(self, sfi_list):
    """Get the status of the service function instances.
    Params: the list of sfi identifiers
    Returns a dictionary mapping each sfi id to:
        status: DELETED / VIM_ERROR / OTHER / ERROR / ACTIVE / CREATING
        error_msg: VIM error message, if any
        vim_info: plain information obtained from vim (serialized)
    """
    result = {}
    self.logger.debug(
        "refresh_sfis status: Getting tenant sfi information from VIM"
    )

    for sfi_id in sfi_list:
        entry = {}

        try:
            vim_sfi = self.get_sfi(sfi_id)

            # Any non-empty SFI record is reported as ACTIVE.
            if vim_sfi:
                entry["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                entry["status"] = "OTHER"
                entry["error_msg"] = "VIM status reported " + entry["status"]

            entry["vim_info"] = self.serialize(vim_sfi)

            if vim_sfi.get("fault"):
                entry["error_msg"] = str(vim_sfi["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting sfi status: %s", str(e))
            entry["status"] = "DELETED"
            entry["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting sfi status: %s", str(e))
            entry["status"] = "VIM_ERROR"
            entry["error_msg"] = str(e)

        result[sfi_id] = entry

    return result
4049
def refresh_sfs_status(self, sf_list):
    """Get the status of the service functions.
    Params: the list of sf identifiers
    Returns a dictionary mapping each sf id to:
        status: DELETED / VIM_ERROR / OTHER / ERROR / ACTIVE / CREATING
        error_msg: VIM error message, if any
        vim_info: plain information obtained from vim (serialized)
    """
    sf_dict = {}
    self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")

    for sf_id in sf_list:
        sf = {}

        try:
            sf_vim = self.get_sf(sf_id)

            if sf_vim:
                sf["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                sf["status"] = "OTHER"
                # Fix: report the status just set above. This branch only
                # runs when sf_vim is empty/None, so the previous
                # sf_vim["status"] lookup could never succeed here
                # (KeyError/TypeError); sibling refresh_sfis_status
                # already uses its own just-set status.
                sf["error_msg"] = "VIM status reported " + sf["status"]

            sf["vim_info"] = self.serialize(sf_vim)

            if sf_vim.get("fault"):
                sf["error_msg"] = str(sf_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting sf status: %s", str(e))
            sf["status"] = "DELETED"
            sf["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting sf status: %s", str(e))
            sf["status"] = "VIM_ERROR"
            sf["error_msg"] = str(e)

        sf_dict[sf_id] = sf

    return sf_dict
4096
4097 def refresh_classifications_status(self, classification_list):
4098 """Get the status of the classifications
4099 Params: the list of classification identifiers
4100 Returns a dictionary with:
4101 vm_id: #VIM id of this classifier
4102 status: #Mandatory. Text with one of:
4103 # DELETED (not found at vim)
4104 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4105 # OTHER (Vim reported other status not understood)
4106 # ERROR (VIM indicates an ERROR status)
4107 # ACTIVE,
4108 # CREATING (on building process)
4109 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4110 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4111 """
4112 classification_dict = {}
4113 self.logger.debug(
4114 "refresh_classifications status: Getting tenant classification information from VIM"
4115 )
4116
4117 for classification_id in classification_list:
4118 classification = {}
4119
4120 try:
4121 classification_vim = self.get_classification(classification_id)
4122
4123 if classification_vim:
4124 classification["status"] = vmStatus2manoFormat["ACTIVE"]
4125 else:
4126 classification["status"] = "OTHER"
4127 classification["error_msg"] = (
4128 "VIM status reported " + classification["status"]
4129 )
4130
4131 classification["vim_info"] = self.serialize(classification_vim)
4132
4133 if classification_vim.get("fault"):
4134 classification["error_msg"] = str(classification_vim["fault"])
4135 except vimconn.VimConnNotFoundException as e:
4136 self.logger.error("Exception getting classification status: %s", str(e))
4137 classification["status"] = "DELETED"
4138 classification["error_msg"] = str(e)
4139 except vimconn.VimConnException as e:
4140 self.logger.error("Exception getting classification status: %s", str(e))
4141 classification["status"] = "VIM_ERROR"
4142 classification["error_msg"] = str(e)
4143
4144 classification_dict[classification_id] = classification
4145
4146 return classification_dict
4147
4148 def new_affinity_group(self, affinity_group_data):
4149 """Adds a server group to VIM
4150 affinity_group_data contains a dictionary with information, keys:
4151 name: name in VIM for the server group
4152 type: affinity or anti-affinity
4153 scope: Only nfvi-node allowed
4154 Returns the server group identifier"""
4155 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
4156
4157 try:
4158 name = affinity_group_data["name"]
4159 policy = affinity_group_data["type"]
4160
4161 self._reload_connection()
4162 new_server_group = self.nova.server_groups.create(name, policy)
4163
4164 return new_server_group.id
4165 except (
4166 ksExceptions.ClientException,
4167 nvExceptions.ClientException,
4168 ConnectionError,
4169 KeyError,
4170 ) as e:
4171 self._format_exception(e)
4172
4173 def get_affinity_group(self, affinity_group_id):
4174 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4175 self.logger.debug("Getting flavor '%s'", affinity_group_id)
4176 try:
4177 self._reload_connection()
4178 server_group = self.nova.server_groups.find(id=affinity_group_id)
4179
4180 return server_group.to_dict()
4181 except (
4182 nvExceptions.NotFound,
4183 nvExceptions.ClientException,
4184 ksExceptions.ClientException,
4185 ConnectionError,
4186 ) as e:
4187 self._format_exception(e)
4188
4189 def delete_affinity_group(self, affinity_group_id):
4190 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4191 self.logger.debug("Getting server group '%s'", affinity_group_id)
4192 try:
4193 self._reload_connection()
4194 self.nova.server_groups.delete(affinity_group_id)
4195
4196 return affinity_group_id
4197 except (
4198 nvExceptions.NotFound,
4199 ksExceptions.ClientException,
4200 nvExceptions.ClientException,
4201 ConnectionError,
4202 ) as e:
4203 self._format_exception(e)
4204
4205 def get_vdu_state(self, vm_id):
4206 """
4207 Getting the state of a vdu
4208 param:
4209 vm_id: ID of an instance
4210 """
4211 self.logger.debug("Getting the status of VM")
4212 self.logger.debug("VIM VM ID %s", vm_id)
4213 self._reload_connection()
4214 server = self.nova.servers.find(id=vm_id)
4215 server_dict = server.to_dict()
4216 vdu_data = [
4217 server_dict["status"],
4218 server_dict["flavor"]["id"],
4219 server_dict["OS-EXT-SRV-ATTR:host"],
4220 server_dict["OS-EXT-AZ:availability_zone"],
4221 ]
4222 self.logger.debug("vdu_data %s", vdu_data)
4223 return vdu_data
4224
4225 def check_compute_availability(self, host, server_flavor_details):
4226 self._reload_connection()
4227 hypervisor_search = self.nova.hypervisors.search(
4228 hypervisor_match=host, servers=True
4229 )
4230 for hypervisor in hypervisor_search:
4231 hypervisor_id = hypervisor.to_dict()["id"]
4232 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4233 hypervisor_dict = hypervisor_details.to_dict()
4234 hypervisor_temp = json.dumps(hypervisor_dict)
4235 hypervisor_json = json.loads(hypervisor_temp)
4236 resources_available = [
4237 hypervisor_json["free_ram_mb"],
4238 hypervisor_json["disk_available_least"],
4239 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
4240 ]
4241 compute_available = all(
4242 x > y for x, y in zip(resources_available, server_flavor_details)
4243 )
4244 if compute_available:
4245 return host
4246
4247 def check_availability_zone(
4248 self, old_az, server_flavor_details, old_host, host=None
4249 ):
4250 self._reload_connection()
4251 az_check = {"zone_check": False, "compute_availability": None}
4252 aggregates_list = self.nova.aggregates.list()
4253 for aggregate in aggregates_list:
4254 aggregate_details = aggregate.to_dict()
4255 aggregate_temp = json.dumps(aggregate_details)
4256 aggregate_json = json.loads(aggregate_temp)
4257 if aggregate_json["availability_zone"] == old_az:
4258 hosts_list = aggregate_json["hosts"]
4259 if host is not None:
4260 if host in hosts_list:
4261 az_check["zone_check"] = True
4262 available_compute_id = self.check_compute_availability(
4263 host, server_flavor_details
4264 )
4265 if available_compute_id is not None:
4266 az_check["compute_availability"] = available_compute_id
4267 else:
4268 for check_host in hosts_list:
4269 if check_host != old_host:
4270 available_compute_id = self.check_compute_availability(
4271 check_host, server_flavor_details
4272 )
4273 if available_compute_id is not None:
4274 az_check["zone_check"] = True
4275 az_check["compute_availability"] = available_compute_id
4276 break
4277 else:
4278 az_check["zone_check"] = True
4279 return az_check
4280
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu
        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to

        Returns (state, target_host) on success; raises VimConnException
        (HTTP_Bad_Request) when the target equals the current host, when the
        availability-zone check fails, when no suitable compute is found, or
        when the VM does not end up on the expected host.
        """
        self._reload_connection()
        vm_state = False
        # get_vdu_state returns [status, flavor_id, hypervisor_host, az]
        instance_state = self.get_vdu_state(vm_id)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]
        try:
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            # Resource demand of the instance, in the order expected by
            # check_compute_availability: [ram, disk, vcpus].
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]
            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            # Verify the target (or any other host of the same AZ) can take
            # the instance; see check_availability_zone for the dict shape.
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            if available_compute_id is not None:
                # Trigger the live migration towards the selected compute.
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )
                state = "MIGRATING"
                changed_compute_host = ""
                # NOTE: state is always "MIGRATING" here, so this branch
                # always runs; kept as-is to preserve the original flow.
                if state == "MIGRATING":
                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                    changed_compute_host = self.get_vdu_state(vm_id)[2]
                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )
                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                        available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
4363
4364 def resize_instance(self, vm_id, new_flavor_id):
4365 """
4366 For resizing the vm based on the given
4367 flavor details
4368 param:
4369 vm_id : ID of an instance
4370 new_flavor_id : Flavor id to be resized
4371 Return the status of a resized instance
4372 """
4373 self._reload_connection()
4374 self.logger.debug("resize the flavor of an instance")
4375 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
4376 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
4377 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
4378 try:
4379 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
4380 if old_flavor_disk > new_flavor_disk:
4381 raise nvExceptions.BadRequest(
4382 400,
4383 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
4384 )
4385 else:
4386 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
4387 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
4388 if vm_state:
4389 instance_resized_status = self.confirm_resize(vm_id)
4390 return instance_resized_status
4391 else:
4392 raise nvExceptions.BadRequest(
4393 409,
4394 message="Cannot 'resize' vm_state is in ERROR",
4395 )
4396
4397 else:
4398 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
4399 raise nvExceptions.BadRequest(
4400 409,
4401 message="Cannot 'resize' instance while it is in vm_state resized",
4402 )
4403 except (
4404 nvExceptions.BadRequest,
4405 nvExceptions.ClientException,
4406 nvExceptions.NotFound,
4407 ) as e:
4408 self._format_exception(e)
4409
4410 def confirm_resize(self, vm_id):
4411 """
4412 Confirm the resize of an instance
4413 param:
4414 vm_id: ID of an instance
4415 """
4416 self._reload_connection()
4417 self.nova.servers.confirm_resize(server=vm_id)
4418 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4419 self.__wait_for_vm(vm_id, "ACTIVE")
4420 instance_status = self.get_vdu_state(vm_id)[0]
4421 return instance_status