Reformat files according to new black validation
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41
42 from cinderclient import client as cClient
43 from glanceclient import client as glClient
44 import glanceclient.exc as gl1Exceptions
45 from keystoneauth1 import session
46 from keystoneauth1.identity import v2, v3
47 import keystoneclient.exceptions as ksExceptions
48 import keystoneclient.v2_0.client as ksClient_v2
49 import keystoneclient.v3.client as ksClient_v3
50 import netaddr
51 from neutronclient.common import exceptions as neExceptions
52 from neutronclient.neutron import client as neClient
53 from novaclient import client as nClient, exceptions as nvExceptions
54 from osm_ro_plugin import vimconn
55 from requests.exceptions import ConnectionError
56 import yaml
57
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Mapping: nova server status -> MANO VM status
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Mapping: neutron network status -> MANO network status
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# SFC classification types this connector can validate (see _validate_classification)
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
# timeout (seconds) for server (VM) create/delete operations
server_timeout = 1800
86
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that tolerates the dict subclasses returned by Openstack APIs."""

    def represent_data(self, data):
        # yaml's SafeDumper is designed not to handle custom subclasses of
        # dict (reference issue 142 of pyyaml); downgrading such objects to a
        # plain dict lets them be represented normally.
        if isinstance(data, dict) and type(data) is not dict:
            data = dict(data.items())

        return super().represent_data(data)
97
98 class vimconnector(vimconn.VimConnector):
99 def __init__(
100 self,
101 uuid,
102 name,
103 tenant_id,
104 tenant_name,
105 url,
106 url_admin=None,
107 user=None,
108 passwd=None,
109 log_level=None,
110 config={},
111 persistent_info={},
112 ):
113 """using common constructor parameters. In this case
114 'url' is the keystone authorization url,
115 'url_admin' is not use
116 """
117 api_version = config.get("APIversion")
118
119 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
120 raise vimconn.VimConnException(
121 "Invalid value '{}' for config:APIversion. "
122 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
123 )
124
125 vim_type = config.get("vim_type")
126
127 if vim_type and vim_type not in ("vio", "VIO"):
128 raise vimconn.VimConnException(
129 "Invalid value '{}' for config:vim_type."
130 "Allowed values are 'vio' or 'VIO'".format(vim_type)
131 )
132
133 if config.get("dataplane_net_vlan_range") is not None:
134 # validate vlan ranges provided by user
135 self._validate_vlan_ranges(
136 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
137 )
138
139 if config.get("multisegment_vlan_range") is not None:
140 # validate vlan ranges provided by user
141 self._validate_vlan_ranges(
142 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
143 )
144
145 vimconn.VimConnector.__init__(
146 self,
147 uuid,
148 name,
149 tenant_id,
150 tenant_name,
151 url,
152 url_admin,
153 user,
154 passwd,
155 log_level,
156 config,
157 )
158
159 if self.config.get("insecure") and self.config.get("ca_cert"):
160 raise vimconn.VimConnException(
161 "options insecure and ca_cert are mutually exclusive"
162 )
163
164 self.verify = True
165
166 if self.config.get("insecure"):
167 self.verify = False
168
169 if self.config.get("ca_cert"):
170 self.verify = self.config.get("ca_cert")
171
172 if not url:
173 raise TypeError("url param can not be NoneType")
174
175 self.persistent_info = persistent_info
176 self.availability_zone = persistent_info.get("availability_zone", None)
177 self.session = persistent_info.get("session", {"reload_client": True})
178 self.my_tenant_id = self.session.get("my_tenant_id")
179 self.nova = self.session.get("nova")
180 self.neutron = self.session.get("neutron")
181 self.cinder = self.session.get("cinder")
182 self.glance = self.session.get("glance")
183 # self.glancev1 = self.session.get("glancev1")
184 self.keystone = self.session.get("keystone")
185 self.api_version3 = self.session.get("api_version3")
186 self.vim_type = self.config.get("vim_type")
187
188 if self.vim_type:
189 self.vim_type = self.vim_type.upper()
190
191 if self.config.get("use_internal_endpoint"):
192 self.endpoint_type = "internalURL"
193 else:
194 self.endpoint_type = None
195
196 logging.getLogger("urllib3").setLevel(logging.WARNING)
197 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
198 logging.getLogger("novaclient").setLevel(logging.WARNING)
199 self.logger = logging.getLogger("ro.vim.openstack")
200
201 # allow security_groups to be a list or a single string
202 if isinstance(self.config.get("security_groups"), str):
203 self.config["security_groups"] = [self.config["security_groups"]]
204
205 self.security_groups_id = None
206
207 # ###### VIO Specific Changes #########
208 if self.vim_type == "VIO":
209 self.logger = logging.getLogger("ro.vim.vio")
210
211 if log_level:
212 self.logger.setLevel(getattr(logging, log_level))
213
214 def __getitem__(self, index):
215 """Get individuals parameters.
216 Throw KeyError"""
217 if index == "project_domain_id":
218 return self.config.get("project_domain_id")
219 elif index == "user_domain_id":
220 return self.config.get("user_domain_id")
221 else:
222 return vimconn.VimConnector.__getitem__(self, index)
223
224 def __setitem__(self, index, value):
225 """Set individuals parameters and it is marked as dirty so to force connection reload.
226 Throw KeyError"""
227 if index == "project_domain_id":
228 self.config["project_domain_id"] = value
229 elif index == "user_domain_id":
230 self.config["user_domain_id"] = value
231 else:
232 vimconn.VimConnector.__setitem__(self, index, value)
233
234 self.session["reload_client"] = True
235
236 def serialize(self, value):
237 """Serialization of python basic types.
238
239 In the case value is not serializable a message will be logged and a
240 simple representation of the data that cannot be converted back to
241 python is returned.
242 """
243 if isinstance(value, str):
244 return value
245
246 try:
247 return yaml.dump(
248 value, Dumper=SafeDumper, default_flow_style=True, width=256
249 )
250 except yaml.representer.RepresenterError:
251 self.logger.debug(
252 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
253 pformat(value),
254 exc_info=True,
255 )
256
257 return str(value)
258
    def _reload_connection(self):
        """Called before any operation, it check if credentials has changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure

        Rebuilds the keystone/nova/neutron/cinder/glance clients only when
        self.session["reload_client"] is set (initially, or after a parameter
        change via __setitem__); otherwise the cached clients are kept.
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.session["reload_client"]:
            # decide identity API version: explicit config wins, else guess
            # from the auth_url suffix
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # keystone v3: the "default" domain id is applied only when the
                # user supplied neither a domain id nor a domain name
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            # self.verify is True / False / CA-bundle path (set in __init__)
            sess = session.Session(auth=auth, verify=self.verify)
            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                # best effort: a missing project id is reported later by
                # _get_ids_from_name when it is actually needed
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                # glance has no endpoint_type argument: resolve the internal
                # endpoint manually through the keystone catalog
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availablity zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
390
391 def __net_os2mano(self, net_list_dict):
392 """Transform the net openstack format to mano format
393 net_list_dict can be a list of dict or a single dict"""
394 if type(net_list_dict) is dict:
395 net_list_ = (net_list_dict,)
396 elif type(net_list_dict) is list:
397 net_list_ = net_list_dict
398 else:
399 raise TypeError("param net_list_dict must be a list or a dictionary")
400 for net in net_list_:
401 if net.get("provider:network_type") == "vlan":
402 net["type"] = "data"
403 else:
404 net["type"] = "bridge"
405
406 def __classification_os2mano(self, class_list_dict):
407 """Transform the openstack format (Flow Classifier) to mano format
408 (Classification) class_list_dict can be a list of dict or a single dict
409 """
410 if isinstance(class_list_dict, dict):
411 class_list_ = [class_list_dict]
412 elif isinstance(class_list_dict, list):
413 class_list_ = class_list_dict
414 else:
415 raise TypeError("param class_list_dict must be a list or a dictionary")
416 for classification in class_list_:
417 id = classification.pop("id")
418 name = classification.pop("name")
419 description = classification.pop("description")
420 project_id = classification.pop("project_id")
421 tenant_id = classification.pop("tenant_id")
422 original_classification = copy.deepcopy(classification)
423 classification.clear()
424 classification["ctype"] = "legacy_flow_classifier"
425 classification["definition"] = original_classification
426 classification["id"] = id
427 classification["name"] = name
428 classification["description"] = description
429 classification["project_id"] = project_id
430 classification["tenant_id"] = tenant_id
431
432 def __sfi_os2mano(self, sfi_list_dict):
433 """Transform the openstack format (Port Pair) to mano format (SFI)
434 sfi_list_dict can be a list of dict or a single dict
435 """
436 if isinstance(sfi_list_dict, dict):
437 sfi_list_ = [sfi_list_dict]
438 elif isinstance(sfi_list_dict, list):
439 sfi_list_ = sfi_list_dict
440 else:
441 raise TypeError("param sfi_list_dict must be a list or a dictionary")
442
443 for sfi in sfi_list_:
444 sfi["ingress_ports"] = []
445 sfi["egress_ports"] = []
446
447 if sfi.get("ingress"):
448 sfi["ingress_ports"].append(sfi["ingress"])
449
450 if sfi.get("egress"):
451 sfi["egress_ports"].append(sfi["egress"])
452
453 del sfi["ingress"]
454 del sfi["egress"]
455 params = sfi.get("service_function_parameters")
456 sfc_encap = False
457
458 if params:
459 correlation = params.get("correlation")
460
461 if correlation:
462 sfc_encap = True
463
464 sfi["sfc_encap"] = sfc_encap
465 del sfi["service_function_parameters"]
466
467 def __sf_os2mano(self, sf_list_dict):
468 """Transform the openstack format (Port Pair Group) to mano format (SF)
469 sf_list_dict can be a list of dict or a single dict
470 """
471 if isinstance(sf_list_dict, dict):
472 sf_list_ = [sf_list_dict]
473 elif isinstance(sf_list_dict, list):
474 sf_list_ = sf_list_dict
475 else:
476 raise TypeError("param sf_list_dict must be a list or a dictionary")
477
478 for sf in sf_list_:
479 del sf["port_pair_group_parameters"]
480 sf["sfis"] = sf["port_pairs"]
481 del sf["port_pairs"]
482
483 def __sfp_os2mano(self, sfp_list_dict):
484 """Transform the openstack format (Port Chain) to mano format (SFP)
485 sfp_list_dict can be a list of dict or a single dict
486 """
487 if isinstance(sfp_list_dict, dict):
488 sfp_list_ = [sfp_list_dict]
489 elif isinstance(sfp_list_dict, list):
490 sfp_list_ = sfp_list_dict
491 else:
492 raise TypeError("param sfp_list_dict must be a list or a dictionary")
493
494 for sfp in sfp_list_:
495 params = sfp.pop("chain_parameters")
496 sfc_encap = False
497
498 if params:
499 correlation = params.get("correlation")
500
501 if correlation:
502 sfc_encap = True
503
504 sfp["sfc_encap"] = sfc_encap
505 sfp["spi"] = sfp.pop("chain_id")
506 sfp["classifications"] = sfp.pop("flow_classifiers")
507 sfp["service_functions"] = sfp.pop("port_pair_groups")
508
509 # placeholder for now; read TODO note below
510 def _validate_classification(self, type, definition):
511 # only legacy_flow_classifier Type is supported at this point
512 return True
513 # TODO(igordcard): this method should be an abstract method of an
514 # abstract Classification class to be implemented by the specific
515 # Types. Also, abstract vimconnector should call the validation
516 # method before the implemented VIM connectors are called.
517
518 def _format_exception(self, exception):
519 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
520 message_error = str(exception)
521 tip = ""
522
523 if isinstance(
524 exception,
525 (
526 neExceptions.NetworkNotFoundClient,
527 nvExceptions.NotFound,
528 ksExceptions.NotFound,
529 gl1Exceptions.HTTPNotFound,
530 ),
531 ):
532 raise vimconn.VimConnNotFoundException(
533 type(exception).__name__ + ": " + message_error
534 )
535 elif isinstance(
536 exception,
537 (
538 HTTPException,
539 gl1Exceptions.HTTPException,
540 gl1Exceptions.CommunicationError,
541 ConnectionError,
542 ksExceptions.ConnectionError,
543 neExceptions.ConnectionFailed,
544 ),
545 ):
546 if type(exception).__name__ == "SSLError":
547 tip = " (maybe option 'insecure' must be added to the VIM)"
548
549 raise vimconn.VimConnConnectionException(
550 "Invalid URL or credentials{}: {}".format(tip, message_error)
551 )
552 elif isinstance(
553 exception,
554 (
555 KeyError,
556 nvExceptions.BadRequest,
557 ksExceptions.BadRequest,
558 ),
559 ):
560 raise vimconn.VimConnException(
561 type(exception).__name__ + ": " + message_error
562 )
563 elif isinstance(
564 exception,
565 (
566 nvExceptions.ClientException,
567 ksExceptions.ClientException,
568 neExceptions.NeutronException,
569 ),
570 ):
571 raise vimconn.VimConnUnexpectedResponse(
572 type(exception).__name__ + ": " + message_error
573 )
574 elif isinstance(exception, nvExceptions.Conflict):
575 raise vimconn.VimConnConflictException(
576 type(exception).__name__ + ": " + message_error
577 )
578 elif isinstance(exception, vimconn.VimConnException):
579 raise exception
580 else: # ()
581 self.logger.error("General Exception " + message_error, exc_info=True)
582
583 raise vimconn.VimConnConnectionException(
584 type(exception).__name__ + ": " + message_error
585 )
586
587 def _get_ids_from_name(self):
588 """
589 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
590 :return: None
591 """
592 # get tenant_id if only tenant_name is supplied
593 self._reload_connection()
594
595 if not self.my_tenant_id:
596 raise vimconn.VimConnConnectionException(
597 "Error getting tenant information from name={} id={}".format(
598 self.tenant_name, self.tenant_id
599 )
600 )
601
602 if self.config.get("security_groups") and not self.security_groups_id:
603 # convert from name to id
604 neutron_sg_list = self.neutron.list_security_groups(
605 tenant_id=self.my_tenant_id
606 )["security_groups"]
607
608 self.security_groups_id = []
609 for sg in self.config.get("security_groups"):
610 for neutron_sg in neutron_sg_list:
611 if sg in (neutron_sg["id"], neutron_sg["name"]):
612 self.security_groups_id.append(neutron_sg["id"])
613 break
614 else:
615 self.security_groups_id = None
616
617 raise vimconn.VimConnConnectionException(
618 "Not found security group {} for this tenant".format(sg)
619 )
620
    def check_vim_connectivity(self):
        """Verify the VIM is reachable with the current credentials.

        Raises a vimconn exception (propagated from get_network_list) on failure.
        """
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})
624
625 def get_tenant_list(self, filter_dict={}):
626 """Obtain tenants of VIM
627 filter_dict can contain the following keys:
628 name: filter by tenant name
629 id: filter by tenant uuid/id
630 <other VIM specific>
631 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
632 """
633 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
634
635 try:
636 self._reload_connection()
637
638 if self.api_version3:
639 project_class_list = self.keystone.projects.list(
640 name=filter_dict.get("name")
641 )
642 else:
643 project_class_list = self.keystone.tenants.findall(**filter_dict)
644
645 project_list = []
646
647 for project in project_class_list:
648 if filter_dict.get("id") and filter_dict["id"] != project.id:
649 continue
650
651 project_list.append(project.to_dict())
652
653 return project_list
654 except (
655 ksExceptions.ConnectionError,
656 ksExceptions.ClientException,
657 ConnectionError,
658 ) as e:
659 self._format_exception(e)
660
661 def new_tenant(self, tenant_name, tenant_description):
662 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
663 self.logger.debug("Adding a new tenant name: %s", tenant_name)
664
665 try:
666 self._reload_connection()
667
668 if self.api_version3:
669 project = self.keystone.projects.create(
670 tenant_name,
671 self.config.get("project_domain_id", "default"),
672 description=tenant_description,
673 is_domain=False,
674 )
675 else:
676 project = self.keystone.tenants.create(tenant_name, tenant_description)
677
678 return project.id
679 except (
680 ksExceptions.ConnectionError,
681 ksExceptions.ClientException,
682 ksExceptions.BadRequest,
683 ConnectionError,
684 ) as e:
685 self._format_exception(e)
686
687 def delete_tenant(self, tenant_id):
688 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
689 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
690
691 try:
692 self._reload_connection()
693
694 if self.api_version3:
695 self.keystone.projects.delete(tenant_id)
696 else:
697 self.keystone.tenants.delete(tenant_id)
698
699 return tenant_id
700 except (
701 ksExceptions.ConnectionError,
702 ksExceptions.ClientException,
703 ksExceptions.NotFound,
704 ConnectionError,
705 ) as e:
706 self._format_exception(e)
707
708 def new_network(
709 self,
710 net_name,
711 net_type,
712 ip_profile=None,
713 shared=False,
714 provider_network_profile=None,
715 ):
716 """Adds a tenant network to VIM
717 Params:
718 'net_name': name of the network
719 'net_type': one of:
720 'bridge': overlay isolated network
721 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
722 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
723 'ip_profile': is a dict containing the IP parameters of the network
724 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
725 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
726 'gateway_address': (Optional) ip_schema, that is X.X.X.X
727 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
728 'dhcp_enabled': True or False
729 'dhcp_start_address': ip_schema, first IP to grant
730 'dhcp_count': number of IPs to grant.
731 'shared': if this network can be seen/use by other tenants/organization
732 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
733 physical-network: physnet-label}
734 Returns a tuple with the network identifier and created_items, or raises an exception on error
735 created_items can be None or a dictionary where this method can include key-values that will be passed to
736 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
737 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
738 as not present.
739 """
740 self.logger.debug(
741 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
742 )
743 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
744
745 try:
746 vlan = None
747
748 if provider_network_profile:
749 vlan = provider_network_profile.get("segmentation-id")
750
751 new_net = None
752 created_items = {}
753 self._reload_connection()
754 network_dict = {"name": net_name, "admin_state_up": True}
755
756 if net_type in ("data", "ptp") or provider_network_profile:
757 provider_physical_network = None
758
759 if provider_network_profile and provider_network_profile.get(
760 "physical-network"
761 ):
762 provider_physical_network = provider_network_profile.get(
763 "physical-network"
764 )
765
766 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
767 # or not declared, just ignore the checking
768 if (
769 isinstance(
770 self.config.get("dataplane_physical_net"), (tuple, list)
771 )
772 and provider_physical_network
773 not in self.config["dataplane_physical_net"]
774 ):
775 raise vimconn.VimConnConflictException(
776 "Invalid parameter 'provider-network:physical-network' "
777 "for network creation. '{}' is not one of the declared "
778 "list at VIM_config:dataplane_physical_net".format(
779 provider_physical_network
780 )
781 )
782
783 # use the default dataplane_physical_net
784 if not provider_physical_network:
785 provider_physical_network = self.config.get(
786 "dataplane_physical_net"
787 )
788
789 # if it is non empty list, use the first value. If it is a string use the value directly
790 if (
791 isinstance(provider_physical_network, (tuple, list))
792 and provider_physical_network
793 ):
794 provider_physical_network = provider_physical_network[0]
795
796 if not provider_physical_network:
797 raise vimconn.VimConnConflictException(
798 "missing information needed for underlay networks. Provide "
799 "'dataplane_physical_net' configuration at VIM or use the NS "
800 "instantiation parameter 'provider-network.physical-network'"
801 " for the VLD"
802 )
803
804 if not self.config.get("multisegment_support"):
805 network_dict[
806 "provider:physical_network"
807 ] = provider_physical_network
808
809 if (
810 provider_network_profile
811 and "network-type" in provider_network_profile
812 ):
813 network_dict[
814 "provider:network_type"
815 ] = provider_network_profile["network-type"]
816 else:
817 network_dict["provider:network_type"] = self.config.get(
818 "dataplane_network_type", "vlan"
819 )
820
821 if vlan:
822 network_dict["provider:segmentation_id"] = vlan
823 else:
824 # Multi-segment case
825 segment_list = []
826 segment1_dict = {
827 "provider:physical_network": "",
828 "provider:network_type": "vxlan",
829 }
830 segment_list.append(segment1_dict)
831 segment2_dict = {
832 "provider:physical_network": provider_physical_network,
833 "provider:network_type": "vlan",
834 }
835
836 if vlan:
837 segment2_dict["provider:segmentation_id"] = vlan
838 elif self.config.get("multisegment_vlan_range"):
839 vlanID = self._generate_multisegment_vlanID()
840 segment2_dict["provider:segmentation_id"] = vlanID
841
842 # else
843 # raise vimconn.VimConnConflictException(
844 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
845 # network")
846 segment_list.append(segment2_dict)
847 network_dict["segments"] = segment_list
848
849 # VIO Specific Changes. It needs a concrete VLAN
850 if self.vim_type == "VIO" and vlan is None:
851 if self.config.get("dataplane_net_vlan_range") is None:
852 raise vimconn.VimConnConflictException(
853 "You must provide 'dataplane_net_vlan_range' in format "
854 "[start_ID - end_ID] at VIM_config for creating underlay "
855 "networks"
856 )
857
858 network_dict["provider:segmentation_id"] = self._generate_vlanID()
859
860 network_dict["shared"] = shared
861
862 if self.config.get("disable_network_port_security"):
863 network_dict["port_security_enabled"] = False
864
865 if self.config.get("neutron_availability_zone_hints"):
866 hints = self.config.get("neutron_availability_zone_hints")
867
868 if isinstance(hints, str):
869 hints = [hints]
870
871 network_dict["availability_zone_hints"] = hints
872
873 new_net = self.neutron.create_network({"network": network_dict})
874 # print new_net
875 # create subnetwork, even if there is no profile
876
877 if not ip_profile:
878 ip_profile = {}
879
880 if not ip_profile.get("subnet_address"):
881 # Fake subnet is required
882 subnet_rand = random.randint(0, 255)
883 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
884
885 if "ip_version" not in ip_profile:
886 ip_profile["ip_version"] = "IPv4"
887
888 subnet = {
889 "name": net_name + "-subnet",
890 "network_id": new_net["network"]["id"],
891 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
892 "cidr": ip_profile["subnet_address"],
893 }
894
895 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
896 if ip_profile.get("gateway_address"):
897 subnet["gateway_ip"] = ip_profile["gateway_address"]
898 else:
899 subnet["gateway_ip"] = None
900
901 if ip_profile.get("dns_address"):
902 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
903
904 if "dhcp_enabled" in ip_profile:
905 subnet["enable_dhcp"] = (
906 False
907 if ip_profile["dhcp_enabled"] == "false"
908 or ip_profile["dhcp_enabled"] is False
909 else True
910 )
911
912 if ip_profile.get("dhcp_start_address"):
913 subnet["allocation_pools"] = []
914 subnet["allocation_pools"].append(dict())
915 subnet["allocation_pools"][0]["start"] = ip_profile[
916 "dhcp_start_address"
917 ]
918
919 if ip_profile.get("dhcp_count"):
920 # parts = ip_profile["dhcp_start_address"].split(".")
921 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
922 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
923 ip_int += ip_profile["dhcp_count"] - 1
924 ip_str = str(netaddr.IPAddress(ip_int))
925 subnet["allocation_pools"][0]["end"] = ip_str
926
927 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
928 self.neutron.create_subnet({"subnet": subnet})
929
930 if net_type == "data" and self.config.get("multisegment_support"):
931 if self.config.get("l2gw_support"):
932 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
933 for l2gw in l2gw_list:
934 l2gw_conn = {
935 "l2_gateway_id": l2gw["id"],
936 "network_id": new_net["network"]["id"],
937 "segmentation_id": str(vlanID),
938 }
939 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
940 {"l2_gateway_connection": l2gw_conn}
941 )
942 created_items[
943 "l2gwconn:"
944 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
945 ] = True
946
947 return new_net["network"]["id"], created_items
948 except Exception as e:
949 # delete l2gw connections (if any) before deleting the network
950 for k, v in created_items.items():
951 if not v: # skip already deleted
952 continue
953
954 try:
955 k_item, _, k_id = k.partition(":")
956
957 if k_item == "l2gwconn":
958 self.neutron.delete_l2_gateway_connection(k_id)
959 except Exception as e2:
960 self.logger.error(
961 "Error deleting l2 gateway connection: {}: {}".format(
962 type(e2).__name__, e2
963 )
964 )
965
966 if new_net:
967 self.neutron.delete_network(new_net["network"]["id"])
968
969 self._format_exception(e)
970
971 def get_network_list(self, filter_dict={}):
972 """Obtain tenant networks of VIM
973 Filter_dict can be:
974 name: network name
975 id: network uuid
976 shared: boolean
977 tenant_id: tenant
978 admin_state_up: boolean
979 status: 'ACTIVE'
980 Returns the network list of dictionaries
981 """
982 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
983
984 try:
985 self._reload_connection()
986 filter_dict_os = filter_dict.copy()
987
988 if self.api_version3 and "tenant_id" in filter_dict_os:
989 # TODO check
990 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
991
992 net_dict = self.neutron.list_networks(**filter_dict_os)
993 net_list = net_dict["networks"]
994 self.__net_os2mano(net_list)
995
996 return net_list
997 except (
998 neExceptions.ConnectionFailed,
999 ksExceptions.ClientException,
1000 neExceptions.NeutronException,
1001 ConnectionError,
1002 ) as e:
1003 self._format_exception(e)
1004
1005 def get_network(self, net_id):
1006 """Obtain details of network from VIM
1007 Returns the network information from a network id"""
1008 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1009 filter_dict = {"id": net_id}
1010 net_list = self.get_network_list(filter_dict)
1011
1012 if len(net_list) == 0:
1013 raise vimconn.VimConnNotFoundException(
1014 "Network '{}' not found".format(net_id)
1015 )
1016 elif len(net_list) > 1:
1017 raise vimconn.VimConnConflictException(
1018 "Found more than one network with this criteria"
1019 )
1020
1021 net = net_list[0]
1022 subnets = []
1023 for subnet_id in net.get("subnets", ()):
1024 try:
1025 subnet = self.neutron.show_subnet(subnet_id)
1026 except Exception as e:
1027 self.logger.error(
1028 "osconnector.get_network(): Error getting subnet %s %s"
1029 % (net_id, str(e))
1030 )
1031 subnet = {"id": subnet_id, "fault": str(e)}
1032
1033 subnets.append(subnet)
1034
1035 net["subnets"] = subnets
1036 net["encapsulation"] = net.get("provider:network_type")
1037 net["encapsulation_type"] = net.get("provider:network_type")
1038 net["segmentation_id"] = net.get("provider:segmentation_id")
1039 net["encapsulation_id"] = net.get("provider:segmentation_id")
1040
1041 return net
1042
1043 def delete_network(self, net_id, created_items=None):
1044 """
1045 Removes a tenant network from VIM and its associated elements
1046 :param net_id: VIM identifier of the network, provided by method new_network
1047 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1048 Returns the network identifier or raises an exception upon error or when network is not found
1049 """
1050 self.logger.debug("Deleting network '%s' from VIM", net_id)
1051
1052 if created_items is None:
1053 created_items = {}
1054
1055 try:
1056 self._reload_connection()
1057 # delete l2gw connections (if any) before deleting the network
1058 for k, v in created_items.items():
1059 if not v: # skip already deleted
1060 continue
1061
1062 try:
1063 k_item, _, k_id = k.partition(":")
1064 if k_item == "l2gwconn":
1065 self.neutron.delete_l2_gateway_connection(k_id)
1066 except Exception as e:
1067 self.logger.error(
1068 "Error deleting l2 gateway connection: {}: {}".format(
1069 type(e).__name__, e
1070 )
1071 )
1072
1073 # delete VM ports attached to this networks before the network
1074 ports = self.neutron.list_ports(network_id=net_id)
1075 for p in ports["ports"]:
1076 try:
1077 self.neutron.delete_port(p["id"])
1078 except Exception as e:
1079 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1080
1081 self.neutron.delete_network(net_id)
1082
1083 return net_id
1084 except (
1085 neExceptions.ConnectionFailed,
1086 neExceptions.NetworkNotFoundClient,
1087 neExceptions.NeutronException,
1088 ksExceptions.ClientException,
1089 neExceptions.NeutronException,
1090 ConnectionError,
1091 ) as e:
1092 self._format_exception(e)
1093
1094 def refresh_nets_status(self, net_list):
1095 """Get the status of the networks
1096 Params: the list of network identifiers
1097 Returns a dictionary with:
1098 net_id: #VIM id of this network
1099 status: #Mandatory. Text with one of:
1100 # DELETED (not found at vim)
1101 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1102 # OTHER (Vim reported other status not understood)
1103 # ERROR (VIM indicates an ERROR status)
1104 # ACTIVE, INACTIVE, DOWN (admin down),
1105 # BUILD (on building process)
1106 #
1107 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1108 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1109 """
1110 net_dict = {}
1111
1112 for net_id in net_list:
1113 net = {}
1114
1115 try:
1116 net_vim = self.get_network(net_id)
1117
1118 if net_vim["status"] in netStatus2manoFormat:
1119 net["status"] = netStatus2manoFormat[net_vim["status"]]
1120 else:
1121 net["status"] = "OTHER"
1122 net["error_msg"] = "VIM status reported " + net_vim["status"]
1123
1124 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1125 net["status"] = "DOWN"
1126
1127 net["vim_info"] = self.serialize(net_vim)
1128
1129 if net_vim.get("fault"): # TODO
1130 net["error_msg"] = str(net_vim["fault"])
1131 except vimconn.VimConnNotFoundException as e:
1132 self.logger.error("Exception getting net status: %s", str(e))
1133 net["status"] = "DELETED"
1134 net["error_msg"] = str(e)
1135 except vimconn.VimConnException as e:
1136 self.logger.error("Exception getting net status: %s", str(e))
1137 net["status"] = "VIM_ERROR"
1138 net["error_msg"] = str(e)
1139 net_dict[net_id] = net
1140 return net_dict
1141
1142 def get_flavor(self, flavor_id):
1143 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1144 self.logger.debug("Getting flavor '%s'", flavor_id)
1145
1146 try:
1147 self._reload_connection()
1148 flavor = self.nova.flavors.find(id=flavor_id)
1149 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1150
1151 return flavor.to_dict()
1152 except (
1153 nvExceptions.NotFound,
1154 nvExceptions.ClientException,
1155 ksExceptions.ClientException,
1156 ConnectionError,
1157 ) as e:
1158 self._format_exception(e)
1159
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that match the flavor description
        Returns the flavor_id or raises a vimconnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
            and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
            vimconnNotFoundException is raised
        """
        # exact match by default; "use_existing_flavors" relaxes it to "same or more"
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            # NOTE(review): 3-element sentinel compared below against 5-element
            # tuples; tuple comparison is lexicographic, so this acts as an
            # upper bound dominated by the first element (ram)
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
            # if len(numas) > 1:
            #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
            # numa=numas[0]
            # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                # flavors carrying extra specs (EPA) are skipped entirely
                if epa:
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                # NOTE(review): lexicographic tuple comparison, not element-wise:
                # ram dominates, so a "closer" candidate may still offer less
                # disk/vcpus than requested — confirm this is the intended policy
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
1228
1229 def process_resource_quota(self, quota, prefix, extra_specs):
1230 """
1231 :param prefix:
1232 :param extra_specs:
1233 :return:
1234 """
1235 if "limit" in quota:
1236 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1237
1238 if "reserve" in quota:
1239 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1240
1241 if "shares" in quota:
1242 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1243 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1244
    def new_flavor(self, flavor_data, change_name_if_used=True):
        """Adds a tenant flavor to openstack VIM
        if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
        repetition
        Returns the flavor identifier

        :param flavor_data: dict with at least "name"; optional "ram", "vcpus", "disk",
            "ephemeral", "swap", "is_public" and an "extended" section (numas,
            cpu/mem/vif/disk-io quotas, mempage-size and pinning/mem policies)
        :param change_name_if_used: when True, a "-<n>" suffix is appended until the
            name is unused at the VIM
        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            # retry loop: a name Conflict reported by nova is retried (with a new
            # name when change_name_if_used) up to max_retries times
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        # get used names
                        fl_names = []
                        fl = self.nova.flavors.list()

                        for f in fl:
                            fl_names.append(f.name)

                        # increase the numeric suffix until the name is free
                        while name in fl_names:
                            name_suffix += 1
                            name = flavor_data["name"] + "-" + str(name_suffix)

                    ram = flavor_data.get("ram", 64)
                    vcpus = flavor_data.get("vcpus", 1)
                    extra_specs = {}

                    extended = flavor_data.get("extended")
                    if extended:
                        numas = extended.get("numas")

                        if numas:
                            numa_nodes = len(numas)

                            extra_specs["hw:numa_nodes"] = str(numa_nodes)

                            # VIO (VMware Integrated OpenStack) needs extra vmware:* specs
                            if self.vim_type == "VIO":
                                extra_specs[
                                    "vmware:extra_config"
                                ] = '{"numa.nodeAffinity":"0"}'
                                extra_specs["vmware:latency_sensitivity_level"] = "high"

                            for numa in numas:
                                # per-node specs are only set for entries carrying an "id"
                                if "id" in numa:
                                    node_id = numa["id"]

                                    if "memory" in numa:
                                        # x1024 conversion for hw:numa_mem (descriptor value
                                        # presumably GiB, extra spec MiB — TODO confirm units)
                                        memory_mb = numa["memory"] * 1024
                                        memory = "hw:numa_mem.{}".format(node_id)
                                        extra_specs[memory] = int(memory_mb)

                                    if "vcpu" in numa:
                                        vcpu = numa["vcpu"]
                                        cpu = "hw:numa_cpus.{}".format(node_id)
                                        vcpu = ",".join(map(str, vcpu))
                                        extra_specs[cpu] = vcpu

                                    # overwrite ram and vcpus
                                    # check if key "memory" is present in numa else use ram value at flavor
                                    # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
                                    # implemented/virt-driver-cpu-thread-pinning.html
                                    extra_specs["hw:cpu_sockets"] = str(numa_nodes)

                                    if "paired-threads" in numa:
                                        vcpus = numa["paired-threads"] * 2
                                        # cpu_thread_policy "require" implies that the compute node must have an
                                        # STM architecture
                                        extra_specs["hw:cpu_thread_policy"] = "require"
                                        extra_specs["hw:cpu_policy"] = "dedicated"
                                    elif "cores" in numa:
                                        vcpus = numa["cores"]
                                        # cpu_thread_policy "prefer" implies that the host must not have an SMT
                                        # architecture, or a non-SMT architecture will be emulated
                                        extra_specs["hw:cpu_thread_policy"] = "isolate"
                                        extra_specs["hw:cpu_policy"] = "dedicated"
                                    elif "threads" in numa:
                                        vcpus = numa["threads"]
                                        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
                                        # architecture
                                        extra_specs["hw:cpu_thread_policy"] = "prefer"
                                        extra_specs["hw:cpu_policy"] = "dedicated"
                                    # for interface in numa.get("interfaces",() ):
                                    # if interface["dedicated"]=="yes":
                                    # raise vimconn.VimConnException("Passthrough interfaces are not supported
                                    # for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
                                    # #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
                                    # when a way to connect it is available
                        # NOTE(review): this elif chains to "if numas:", so cpu-quota
                        # is only applied when no numa description is present, while
                        # the quotas below are applied unconditionally — confirm intended
                        elif extended.get("cpu-quota"):
                            self.process_resource_quota(
                                extended.get("cpu-quota"), "cpu", extra_specs
                            )

                        if extended.get("mem-quota"):
                            self.process_resource_quota(
                                extended.get("mem-quota"), "memory", extra_specs
                            )

                        if extended.get("vif-quota"):
                            self.process_resource_quota(
                                extended.get("vif-quota"), "vif", extra_specs
                            )

                        if extended.get("disk-io-quota"):
                            self.process_resource_quota(
                                extended.get("disk-io-quota"), "disk_io", extra_specs
                            )

                        # Set the mempage size as specified in the descriptor
                        if extended.get("mempage-size"):
                            if extended.get("mempage-size") == "LARGE":
                                extra_specs["hw:mem_page_size"] = "large"
                            elif extended.get("mempage-size") == "SMALL":
                                extra_specs["hw:mem_page_size"] = "small"
                            elif extended.get("mempage-size") == "SIZE_2MB":
                                extra_specs["hw:mem_page_size"] = "2MB"
                            elif extended.get("mempage-size") == "SIZE_1GB":
                                extra_specs["hw:mem_page_size"] = "1GB"
                            elif extended.get("mempage-size") == "PREFER_LARGE":
                                extra_specs["hw:mem_page_size"] = "any"
                            else:
                                # The validations in NBI should make reaching here not possible.
                                # If this message is shown, check validations
                                self.logger.debug(
                                    "Invalid mempage-size %s. Will be ignored",
                                    extended.get("mempage-size"),
                                )
                        if extended.get("cpu-pinning-policy"):
                            extra_specs["hw:cpu_policy"] = extended.get(
                                "cpu-pinning-policy"
                            ).lower()

                        # Set the cpu thread pinning policy as specified in the descriptor
                        if extended.get("cpu-thread-pinning-policy"):
                            extra_specs["hw:cpu_thread_policy"] = extended.get(
                                "cpu-thread-pinning-policy"
                            ).lower()

                        # Set the mem policy as specified in the descriptor
                        if extended.get("mem-policy"):
                            extra_specs["hw:numa_mempolicy"] = extended.get(
                                "mem-policy"
                            ).lower()

                    # create flavor
                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )
                    # add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id
                except nvExceptions.Conflict as e:
                    # name already used at the VIM: retry (a new name is picked above)
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)
        # except nvExceptions.BadRequest as e:
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
1423
1424 def delete_flavor(self, flavor_id):
1425 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1426 try:
1427 self._reload_connection()
1428 self.nova.flavors.delete(flavor_id)
1429
1430 return flavor_id
1431 # except nvExceptions.BadRequest as e:
1432 except (
1433 nvExceptions.NotFound,
1434 ksExceptions.ClientException,
1435 nvExceptions.ClientException,
1436 ConnectionError,
1437 ) as e:
1438 self._format_exception(e)
1439
1440 def new_image(self, image_dict):
1441 """
1442 Adds a tenant image to VIM. imge_dict is a dictionary with:
1443 name: name
1444 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1445 location: path or URI
1446 public: "yes" or "no"
1447 metadata: metadata of the image
1448 Returns the image_id
1449 """
1450 retry = 0
1451 max_retries = 3
1452
1453 while retry < max_retries:
1454 retry += 1
1455 try:
1456 self._reload_connection()
1457
1458 # determine format http://docs.openstack.org/developer/glance/formats.html
1459 if "disk_format" in image_dict:
1460 disk_format = image_dict["disk_format"]
1461 else: # autodiscover based on extension
1462 if image_dict["location"].endswith(".qcow2"):
1463 disk_format = "qcow2"
1464 elif image_dict["location"].endswith(".vhd"):
1465 disk_format = "vhd"
1466 elif image_dict["location"].endswith(".vmdk"):
1467 disk_format = "vmdk"
1468 elif image_dict["location"].endswith(".vdi"):
1469 disk_format = "vdi"
1470 elif image_dict["location"].endswith(".iso"):
1471 disk_format = "iso"
1472 elif image_dict["location"].endswith(".aki"):
1473 disk_format = "aki"
1474 elif image_dict["location"].endswith(".ari"):
1475 disk_format = "ari"
1476 elif image_dict["location"].endswith(".ami"):
1477 disk_format = "ami"
1478 else:
1479 disk_format = "raw"
1480
1481 self.logger.debug(
1482 "new_image: '%s' loading from '%s'",
1483 image_dict["name"],
1484 image_dict["location"],
1485 )
1486 if self.vim_type == "VIO":
1487 container_format = "bare"
1488 if "container_format" in image_dict:
1489 container_format = image_dict["container_format"]
1490
1491 new_image = self.glance.images.create(
1492 name=image_dict["name"],
1493 container_format=container_format,
1494 disk_format=disk_format,
1495 )
1496 else:
1497 new_image = self.glance.images.create(name=image_dict["name"])
1498
1499 if image_dict["location"].startswith("http"):
1500 # TODO there is not a method to direct download. It must be downloaded locally with requests
1501 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1502 else: # local path
1503 with open(image_dict["location"]) as fimage:
1504 self.glance.images.upload(new_image.id, fimage)
1505 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1506 # image_dict.get("public","yes")=="yes",
1507 # container_format="bare", data=fimage, disk_format=disk_format)
1508
1509 metadata_to_load = image_dict.get("metadata")
1510
1511 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1512 # for openstack
1513 if self.vim_type == "VIO":
1514 metadata_to_load["upload_location"] = image_dict["location"]
1515 else:
1516 metadata_to_load["location"] = image_dict["location"]
1517
1518 self.glance.images.update(new_image.id, **metadata_to_load)
1519
1520 return new_image.id
1521 except (
1522 nvExceptions.Conflict,
1523 ksExceptions.ClientException,
1524 nvExceptions.ClientException,
1525 ) as e:
1526 self._format_exception(e)
1527 except (
1528 HTTPException,
1529 gl1Exceptions.HTTPException,
1530 gl1Exceptions.CommunicationError,
1531 ConnectionError,
1532 ) as e:
1533 if retry == max_retries:
1534 continue
1535
1536 self._format_exception(e)
1537 except IOError as e: # can not open the file
1538 raise vimconn.VimConnConnectionException(
1539 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1540 http_code=vimconn.HTTP_Bad_Request,
1541 )
1542
1543 def delete_image(self, image_id):
1544 """Deletes a tenant image from openstack VIM. Returns the old id"""
1545 try:
1546 self._reload_connection()
1547 self.glance.images.delete(image_id)
1548
1549 return image_id
1550 except (
1551 nvExceptions.NotFound,
1552 ksExceptions.ClientException,
1553 nvExceptions.ClientException,
1554 gl1Exceptions.CommunicationError,
1555 gl1Exceptions.HTTPNotFound,
1556 ConnectionError,
1557 ) as e: # TODO remove
1558 self._format_exception(e)
1559
1560 def get_image_id_from_path(self, path):
1561 """Get the image id from image path in the VIM database. Returns the image_id"""
1562 try:
1563 self._reload_connection()
1564 images = self.glance.images.list()
1565
1566 for image in images:
1567 if image.metadata.get("location") == path:
1568 return image.id
1569
1570 raise vimconn.VimConnNotFoundException(
1571 "image with location '{}' not found".format(path)
1572 )
1573 except (
1574 ksExceptions.ClientException,
1575 nvExceptions.ClientException,
1576 gl1Exceptions.CommunicationError,
1577 ConnectionError,
1578 ) as e:
1579 self._format_exception(e)
1580
1581 def get_image_list(self, filter_dict={}):
1582 """Obtain tenant images from VIM
1583 Filter_dict can be:
1584 id: image id
1585 name: image name
1586 checksum: image checksum
1587 Returns the image list of dictionaries:
1588 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1589 List can be empty
1590 """
1591 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1592
1593 try:
1594 self._reload_connection()
1595 # filter_dict_os = filter_dict.copy()
1596 # First we filter by the available filter fields: name, id. The others are removed.
1597 image_list = self.glance.images.list()
1598 filtered_list = []
1599
1600 for image in image_list:
1601 try:
1602 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1603 continue
1604
1605 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1606 continue
1607
1608 if (
1609 filter_dict.get("checksum")
1610 and image["checksum"] != filter_dict["checksum"]
1611 ):
1612 continue
1613
1614 filtered_list.append(image.copy())
1615 except gl1Exceptions.HTTPNotFound:
1616 pass
1617
1618 return filtered_list
1619 except (
1620 ksExceptions.ClientException,
1621 nvExceptions.ClientException,
1622 gl1Exceptions.CommunicationError,
1623 ConnectionError,
1624 ) as e:
1625 self._format_exception(e)
1626
1627 def __wait_for_vm(self, vm_id, status):
1628 """wait until vm is in the desired status and return True.
1629 If the VM gets in ERROR status, return false.
1630 If the timeout is reached generate an exception"""
1631 elapsed_time = 0
1632 while elapsed_time < server_timeout:
1633 vm_status = self.nova.servers.get(vm_id).status
1634
1635 if vm_status == status:
1636 return True
1637
1638 if vm_status == "ERROR":
1639 return False
1640
1641 time.sleep(5)
1642 elapsed_time += 5
1643
1644 # if we exceeded the timeout rollback
1645 if elapsed_time >= server_timeout:
1646 raise vimconn.VimConnException(
1647 "Timeout waiting for instance " + vm_id + " to get " + status,
1648 http_code=vimconn.HTTP_Request_Timeout,
1649 )
1650
1651 def _get_openstack_availablity_zones(self):
1652 """
1653 Get from openstack availability zones available
1654 :return:
1655 """
1656 try:
1657 openstack_availability_zone = self.nova.availability_zones.list()
1658 openstack_availability_zone = [
1659 str(zone.zoneName)
1660 for zone in openstack_availability_zone
1661 if zone.zoneName != "internal"
1662 ]
1663
1664 return openstack_availability_zone
1665 except Exception:
1666 return None
1667
1668 def _set_availablity_zones(self):
1669 """
1670 Set vim availablity zone
1671 :return:
1672 """
1673 if "availability_zone" in self.config:
1674 vim_availability_zones = self.config.get("availability_zone")
1675
1676 if isinstance(vim_availability_zones, str):
1677 self.availability_zone = [vim_availability_zones]
1678 elif isinstance(vim_availability_zones, list):
1679 self.availability_zone = vim_availability_zones
1680 else:
1681 self.availability_zone = self._get_openstack_availablity_zones()
1682
1683 def _get_vm_availability_zone(
1684 self, availability_zone_index, availability_zone_list
1685 ):
1686 """
1687 Return thge availability zone to be used by the created VM.
1688 :return: The VIM availability zone to be used or None
1689 """
1690 if availability_zone_index is None:
1691 if not self.config.get("availability_zone"):
1692 return None
1693 elif isinstance(self.config.get("availability_zone"), str):
1694 return self.config["availability_zone"]
1695 else:
1696 # TODO consider using a different parameter at config for default AV and AV list match
1697 return self.config["availability_zone"][0]
1698
1699 vim_availability_zones = self.availability_zone
1700 # check if VIM offer enough availability zones describe in the VNFD
1701 if vim_availability_zones and len(availability_zone_list) <= len(
1702 vim_availability_zones
1703 ):
1704 # check if all the names of NFV AV match VIM AV names
1705 match_by_index = False
1706 for av in availability_zone_list:
1707 if av not in vim_availability_zones:
1708 match_by_index = True
1709 break
1710
1711 if match_by_index:
1712 return vim_availability_zones[availability_zone_index]
1713 else:
1714 return availability_zone_list[availability_zone_index]
1715 else:
1716 raise vimconn.VimConnConflictException(
1717 "No enough availability zones at VIM for this deployment"
1718 )
1719
1720 def new_vminstance(
1721 self,
1722 name,
1723 description,
1724 start,
1725 image_id,
1726 flavor_id,
1727 affinity_group_list,
1728 net_list,
1729 cloud_config=None,
1730 disk_list=None,
1731 availability_zone_index=None,
1732 availability_zone_list=None,
1733 ):
1734 """Adds a VM instance to VIM
1735 Params:
1736 start: indicates if VM must start or boot in pause mode. Ignored
1737 image_id,flavor_id: image and flavor uuid
1738 affinity_group_list: list of affinity groups, each one is a dictionary.
1739 Ignore if empty.
1740 net_list: list of interfaces, each one is a dictionary with:
1741 name:
1742 net_id: network uuid to connect
1743 vpci: virtual vcpi to assign, ignored because openstack lack #TODO
1744 model: interface model, ignored #TODO
1745 mac_address: used for SR-IOV ifaces #TODO for other types
1746 use: 'data', 'bridge', 'mgmt'
1747 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
1748 vim_id: filled/added by this function
1749 floating_ip: True/False (or it can be None)
1750 port_security: True/False
1751 'cloud_config': (optional) dictionary with:
1752 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1753 'users': (optional) list of users to be inserted, each item is a dict with:
1754 'name': (mandatory) user name,
1755 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1756 'user-data': (optional) string is a text script to be passed directly to cloud-init
1757 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1758 'dest': (mandatory) string with the destination absolute path
1759 'encoding': (optional, by default text). Can be one of:
1760 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1761 'content' (mandatory): string with the content of the file
1762 'permissions': (optional) string with file permissions, typically octal notation '0644'
1763 'owner': (optional) file owner, string with the format 'owner:group'
1764 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1765 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1766 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1767 'size': (mandatory) string with the size of the disk in GB
1768 'vim_id' (optional) should use this existing volume id
1769 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1770 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1771 availability_zone_index is None
1772 #TODO ip, security groups
1773 Returns a tuple with the instance identifier and created_items or raises an exception on error
1774 created_items can be None or a dictionary where this method can include key-values that will be passed to
1775 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1776 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1777 as not present.
1778 """
1779 self.logger.debug(
1780 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
1781 image_id,
1782 flavor_id,
1783 str(net_list),
1784 )
1785
1786 try:
1787 server = None
1788 created_items = {}
1789 # metadata = {}
1790 net_list_vim = []
1791 external_network = []
1792 # ^list of external networks to be connected to instance, later on used to create floating_ip
1793 no_secured_ports = [] # List of port-is with port-security disabled
1794 self._reload_connection()
1795 # metadata_vpci = {} # For a specific neutron plugin
1796 block_device_mapping = None
1797
1798 for net in net_list:
1799 if not net.get("net_id"): # skip non connected iface
1800 continue
1801
1802 port_dict = {
1803 "network_id": net["net_id"],
1804 "name": net.get("name"),
1805 "admin_state_up": True,
1806 }
1807
1808 if (
1809 self.config.get("security_groups")
1810 and net.get("port_security") is not False
1811 and not self.config.get("no_port_security_extension")
1812 ):
1813 if not self.security_groups_id:
1814 self._get_ids_from_name()
1815
1816 port_dict["security_groups"] = self.security_groups_id
1817
1818 if net["type"] == "virtual":
1819 pass
1820 # if "vpci" in net:
1821 # metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
1822 elif net["type"] == "VF" or net["type"] == "SR-IOV": # for VF
1823 # if "vpci" in net:
1824 # if "VF" not in metadata_vpci:
1825 # metadata_vpci["VF"]=[]
1826 # metadata_vpci["VF"].append([ net["vpci"], "" ])
1827 port_dict["binding:vnic_type"] = "direct"
1828
1829 # VIO specific Changes
1830 if self.vim_type == "VIO":
1831 # Need to create port with port_security_enabled = False and no-security-groups
1832 port_dict["port_security_enabled"] = False
1833 port_dict["provider_security_groups"] = []
1834 port_dict["security_groups"] = []
1835 else: # For PT PCI-PASSTHROUGH
1836 # if "vpci" in net:
1837 # if "PF" not in metadata_vpci:
1838 # metadata_vpci["PF"]=[]
1839 # metadata_vpci["PF"].append([ net["vpci"], "" ])
1840 port_dict["binding:vnic_type"] = "direct-physical"
1841
1842 if not port_dict["name"]:
1843 port_dict["name"] = name
1844
1845 if net.get("mac_address"):
1846 port_dict["mac_address"] = net["mac_address"]
1847
1848 if net.get("ip_address"):
1849 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1850 # TODO add "subnet_id": <subnet_id>
1851
1852 new_port = self.neutron.create_port({"port": port_dict})
1853 created_items["port:" + str(new_port["port"]["id"])] = True
1854 net["mac_adress"] = new_port["port"]["mac_address"]
1855 net["vim_id"] = new_port["port"]["id"]
1856 # if try to use a network without subnetwork, it will return a emtpy list
1857 fixed_ips = new_port["port"].get("fixed_ips")
1858
1859 if fixed_ips:
1860 net["ip"] = fixed_ips[0].get("ip_address")
1861 else:
1862 net["ip"] = None
1863
1864 port = {"port-id": new_port["port"]["id"]}
1865 if float(self.nova.api_version.get_string()) >= 2.32:
1866 port["tag"] = new_port["port"]["name"]
1867
1868 net_list_vim.append(port)
1869
1870 if net.get("floating_ip", False):
1871 net["exit_on_floating_ip_error"] = True
1872 external_network.append(net)
1873 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
1874 net["exit_on_floating_ip_error"] = False
1875 external_network.append(net)
1876 net["floating_ip"] = self.config.get("use_floating_ip")
1877
1878 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
1879 # is dropped.
1880 # As a workaround we wait until the VM is active and then disable the port-security
1881 if net.get("port_security") is False and not self.config.get(
1882 "no_port_security_extension"
1883 ):
1884 no_secured_ports.append(
1885 (
1886 new_port["port"]["id"],
1887 net.get("port_security_disable_strategy"),
1888 )
1889 )
1890
1891 # if metadata_vpci:
1892 # metadata = {"pci_assignement": json.dumps(metadata_vpci)}
1893 # if len(metadata["pci_assignement"]) >255:
1894 # #limit the metadata size
1895 # #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
1896 # self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
1897 # metadata = {}
1898
1899 self.logger.debug(
1900 "name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
1901 name,
1902 image_id,
1903 flavor_id,
1904 str(net_list_vim),
1905 description,
1906 )
1907
1908 # cloud config
1909 config_drive, userdata = self._create_user_data(cloud_config)
1910
1911 # get availability Zone
1912 vm_av_zone = self._get_vm_availability_zone(
1913 availability_zone_index, availability_zone_list
1914 )
1915
1916 # Create additional volumes in case these are present in disk_list
1917 existing_vim_volumes = []
1918 base_disk_index = ord("b")
1919 boot_volume_id = None
1920 if disk_list:
1921 block_device_mapping = {}
1922 for disk in disk_list:
1923 if "image_id" in disk:
1924 # persistent root volume
1925 base_disk_index = ord("a")
1926 image_id = ""
1927 # use existing persistent root volume
1928 if disk.get("vim_volume_id"):
1929 block_device_mapping["vd" + chr(base_disk_index)] = disk[
1930 "vim_volume_id"
1931 ]
1932 existing_vim_volumes.append({"id": disk["vim_volume_id"]})
1933 # use existing persistent root volume
1934 elif disk.get("vim_id"):
1935 block_device_mapping["vd" + chr(base_disk_index)] = disk[
1936 "vim_id"
1937 ]
1938 existing_vim_volumes.append({"id": disk["vim_id"]})
1939 else:
1940 # create persistent root volume
1941 volume = self.cinder.volumes.create(
1942 size=disk["size"],
1943 name=name + "vd" + chr(base_disk_index),
1944 imageRef=disk["image_id"],
1945 # Make sure volume is in the same AZ as the VM to be attached to
1946 availability_zone=vm_av_zone,
1947 )
1948 boot_volume_id = volume.id
1949 created_items["volume:" + str(volume.id)] = True
1950 block_device_mapping[
1951 "vd" + chr(base_disk_index)
1952 ] = volume.id
1953 else:
1954 # non-root persistent volume
1955 key_id = (
1956 "vim_volume_id"
1957 if "vim_volume_id" in disk.keys()
1958 else "vim_id"
1959 )
1960 if disk.get(key_id):
1961 # use existing persistent volume
1962 block_device_mapping["vd" + chr(base_disk_index)] = disk[
1963 key_id
1964 ]
1965 existing_vim_volumes.append({"id": disk[key_id]})
1966 else:
1967 # create persistent volume
1968 volume = self.cinder.volumes.create(
1969 size=disk["size"],
1970 name=name + "vd" + chr(base_disk_index),
1971 # Make sure volume is in the same AZ as the VM to be attached to
1972 availability_zone=vm_av_zone,
1973 )
1974 created_items["volume:" + str(volume.id)] = True
1975 block_device_mapping[
1976 "vd" + chr(base_disk_index)
1977 ] = volume.id
1978
1979 base_disk_index += 1
1980
1981 # Wait until created volumes are with status available
1982 elapsed_time = 0
1983 while elapsed_time < volume_timeout:
1984 for created_item in created_items:
1985 v, _, volume_id = created_item.partition(":")
1986 if v == "volume":
1987 if self.cinder.volumes.get(volume_id).status != "available":
1988 break
1989 else: # all ready: break from while
1990 break
1991
1992 time.sleep(5)
1993 elapsed_time += 5
1994
1995 # Wait until existing volumes in vim are with status available
1996 while elapsed_time < volume_timeout:
1997 for volume in existing_vim_volumes:
1998 if self.cinder.volumes.get(volume["id"]).status != "available":
1999 break
2000 else: # all ready: break from while
2001 break
2002
2003 time.sleep(5)
2004 elapsed_time += 5
2005
2006 # If we exceeded the timeout rollback
2007 if elapsed_time >= volume_timeout:
2008 raise vimconn.VimConnException(
2009 "Timeout creating volumes for instance " + name,
2010 http_code=vimconn.HTTP_Request_Timeout,
2011 )
2012 if boot_volume_id:
2013 self.cinder.volumes.set_bootable(boot_volume_id, True)
2014
2015 # Manage affinity groups/server groups
2016 server_group_id = None
2017 scheduller_hints = {}
2018
2019 if affinity_group_list:
2020 # Only first id on the list will be used. Openstack restriction
2021 server_group_id = affinity_group_list[0]["affinity_group_id"]
2022 scheduller_hints["group"] = server_group_id
2023
2024 self.logger.debug(
2025 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2026 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2027 "block_device_mapping={}, server_group={})".format(
2028 name,
2029 image_id,
2030 flavor_id,
2031 net_list_vim,
2032 self.config.get("security_groups"),
2033 vm_av_zone,
2034 self.config.get("keypair"),
2035 userdata,
2036 config_drive,
2037 block_device_mapping,
2038 server_group_id,
2039 )
2040 )
2041 server = self.nova.servers.create(
2042 name=name,
2043 image=image_id,
2044 flavor=flavor_id,
2045 nics=net_list_vim,
2046 security_groups=self.config.get("security_groups"),
2047 # TODO remove security_groups in future versions. Already at neutron port
2048 availability_zone=vm_av_zone,
2049 key_name=self.config.get("keypair"),
2050 userdata=userdata,
2051 config_drive=config_drive,
2052 block_device_mapping=block_device_mapping,
2053 scheduler_hints=scheduller_hints,
2054 ) # , description=description)
2055
2056 vm_start_time = time.time()
2057 # Previously mentioned workaround to wait until the VM is active and then disable the port-security
2058 if no_secured_ports:
2059 self.__wait_for_vm(server.id, "ACTIVE")
2060
2061 for port in no_secured_ports:
2062 port_update = {
2063 "port": {"port_security_enabled": False, "security_groups": None}
2064 }
2065
2066 if port[1] == "allow-address-pairs":
2067 port_update = {
2068 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2069 }
2070
2071 try:
2072 self.neutron.update_port(port[0], port_update)
2073 except Exception:
2074 raise vimconn.VimConnException(
2075 "It was not possible to disable port security for port {}".format(
2076 port[0]
2077 )
2078 )
2079
2080 # print "DONE :-)", server
2081
2082 # pool_id = None
2083 for floating_network in external_network:
2084 try:
2085 assigned = False
2086 floating_ip_retries = 3
2087 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2088 # several times
2089 while not assigned:
2090 floating_ips = self.neutron.list_floatingips().get(
2091 "floatingips", ()
2092 )
2093 random.shuffle(floating_ips) # randomize
2094 for fip in floating_ips:
2095 if (
2096 fip.get("port_id")
2097 or fip.get("tenant_id") != server.tenant_id
2098 ):
2099 continue
2100
2101 if isinstance(floating_network["floating_ip"], str):
2102 if (
2103 fip.get("floating_network_id")
2104 != floating_network["floating_ip"]
2105 ):
2106 continue
2107
2108 free_floating_ip = fip["id"]
2109 break
2110 else:
2111 if (
2112 isinstance(floating_network["floating_ip"], str)
2113 and floating_network["floating_ip"].lower() != "true"
2114 ):
2115 pool_id = floating_network["floating_ip"]
2116 else:
2117 # Find the external network
2118 external_nets = list()
2119
2120 for net in self.neutron.list_networks()["networks"]:
2121 if net["router:external"]:
2122 external_nets.append(net)
2123
2124 if len(external_nets) == 0:
2125 raise vimconn.VimConnException(
2126 "Cannot create floating_ip automatically since "
2127 "no external network is present",
2128 http_code=vimconn.HTTP_Conflict,
2129 )
2130
2131 if len(external_nets) > 1:
2132 raise vimconn.VimConnException(
2133 "Cannot create floating_ip automatically since "
2134 "multiple external networks are present",
2135 http_code=vimconn.HTTP_Conflict,
2136 )
2137
2138 pool_id = external_nets[0].get("id")
2139
2140 param = {
2141 "floatingip": {
2142 "floating_network_id": pool_id,
2143 "tenant_id": server.tenant_id,
2144 }
2145 }
2146
2147 try:
2148 # self.logger.debug("Creating floating IP")
2149 new_floating_ip = self.neutron.create_floatingip(param)
2150 free_floating_ip = new_floating_ip["floatingip"]["id"]
2151 created_items[
2152 "floating_ip:" + str(free_floating_ip)
2153 ] = True
2154 except Exception as e:
2155 raise vimconn.VimConnException(
2156 type(e).__name__
2157 + ": Cannot create new floating_ip "
2158 + str(e),
2159 http_code=vimconn.HTTP_Conflict,
2160 )
2161
2162 try:
2163 # for race condition ensure not already assigned
2164 fip = self.neutron.show_floatingip(free_floating_ip)
2165
2166 if fip["floatingip"]["port_id"]:
2167 continue
2168
2169 # the vim_id key contains the neutron.port_id
2170 self.neutron.update_floatingip(
2171 free_floating_ip,
2172 {"floatingip": {"port_id": floating_network["vim_id"]}},
2173 )
2174 # for race condition ensure not re-assigned to other VM after 5 seconds
2175 time.sleep(5)
2176 fip = self.neutron.show_floatingip(free_floating_ip)
2177
2178 if (
2179 fip["floatingip"]["port_id"]
2180 != floating_network["vim_id"]
2181 ):
2182 self.logger.error(
2183 "floating_ip {} re-assigned to other port".format(
2184 free_floating_ip
2185 )
2186 )
2187 continue
2188
2189 self.logger.debug(
2190 "Assigned floating_ip {} to VM {}".format(
2191 free_floating_ip, server.id
2192 )
2193 )
2194 assigned = True
2195 except Exception as e:
2196 # openstack need some time after VM creation to assign an IP. So retry if fails
2197 vm_status = self.nova.servers.get(server.id).status
2198
2199 if vm_status not in ("ACTIVE", "ERROR"):
2200 if time.time() - vm_start_time < server_timeout:
2201 time.sleep(5)
2202 continue
2203 elif floating_ip_retries > 0:
2204 floating_ip_retries -= 1
2205 continue
2206
2207 raise vimconn.VimConnException(
2208 "Cannot create floating_ip: {} {}".format(
2209 type(e).__name__, e
2210 ),
2211 http_code=vimconn.HTTP_Conflict,
2212 )
2213
2214 except Exception as e:
2215 if not floating_network["exit_on_floating_ip_error"]:
2216 self.logger.error("Cannot create floating_ip. %s", str(e))
2217 continue
2218
2219 raise
2220
2221 return server.id, created_items
2222 # except nvExceptions.NotFound as e:
2223 # error_value=-vimconn.HTTP_Not_Found
2224 # error_text= "vm instance %s not found" % vm_id
2225 # except TypeError as e:
2226 # raise vimconn.VimConnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request)
2227
2228 except Exception as e:
2229 server_id = None
2230 if server:
2231 server_id = server.id
2232
2233 try:
2234 self.delete_vminstance(server_id, created_items)
2235 except Exception as e2:
2236 self.logger.error("new_vminstance rollback fail {}".format(e2))
2237
2238 self._format_exception(e)
2239
2240 def get_vminstance(self, vm_id):
2241 """Returns the VM instance information from VIM"""
2242 # self.logger.debug("Getting VM from VIM")
2243 try:
2244 self._reload_connection()
2245 server = self.nova.servers.find(id=vm_id)
2246 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2247
2248 return server.to_dict()
2249 except (
2250 ksExceptions.ClientException,
2251 nvExceptions.ClientException,
2252 nvExceptions.NotFound,
2253 ConnectionError,
2254 ) as e:
2255 self._format_exception(e)
2256
2257 def get_vminstance_console(self, vm_id, console_type="vnc"):
2258 """
2259 Get a console for the virtual machine
2260 Params:
2261 vm_id: uuid of the VM
2262 console_type, can be:
2263 "novnc" (by default), "xvpvnc" for VNC types,
2264 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2265 Returns dict with the console parameters:
2266 protocol: ssh, ftp, http, https, ...
2267 server: usually ip address
2268 port: the http, ssh, ... port
2269 suffix: extra text, e.g. the http path and query string
2270 """
2271 self.logger.debug("Getting VM CONSOLE from VIM")
2272
2273 try:
2274 self._reload_connection()
2275 server = self.nova.servers.find(id=vm_id)
2276
2277 if console_type is None or console_type == "novnc":
2278 console_dict = server.get_vnc_console("novnc")
2279 elif console_type == "xvpvnc":
2280 console_dict = server.get_vnc_console(console_type)
2281 elif console_type == "rdp-html5":
2282 console_dict = server.get_rdp_console(console_type)
2283 elif console_type == "spice-html5":
2284 console_dict = server.get_spice_console(console_type)
2285 else:
2286 raise vimconn.VimConnException(
2287 "console type '{}' not allowed".format(console_type),
2288 http_code=vimconn.HTTP_Bad_Request,
2289 )
2290
2291 console_dict1 = console_dict.get("console")
2292
2293 if console_dict1:
2294 console_url = console_dict1.get("url")
2295
2296 if console_url:
2297 # parse console_url
2298 protocol_index = console_url.find("//")
2299 suffix_index = (
2300 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2301 )
2302 port_index = (
2303 console_url[protocol_index + 2 : suffix_index].find(":")
2304 + protocol_index
2305 + 2
2306 )
2307
2308 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2309 return (
2310 -vimconn.HTTP_Internal_Server_Error,
2311 "Unexpected response from VIM",
2312 )
2313
2314 console_dict = {
2315 "protocol": console_url[0:protocol_index],
2316 "server": console_url[protocol_index + 2 : port_index],
2317 "port": console_url[port_index:suffix_index],
2318 "suffix": console_url[suffix_index + 1 :],
2319 }
2320 protocol_index += 2
2321
2322 return console_dict
2323 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2324 except (
2325 nvExceptions.NotFound,
2326 ksExceptions.ClientException,
2327 nvExceptions.ClientException,
2328 nvExceptions.BadRequest,
2329 ConnectionError,
2330 ) as e:
2331 self._format_exception(e)
2332
2333 def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
2334 """Removes a VM instance from VIM. Returns the old identifier"""
2335 # print "osconnector: Getting VM from VIM"
2336 if created_items is None:
2337 created_items = {}
2338
2339 try:
2340 self._reload_connection()
2341 # delete VM ports attached to this networks before the virtual machine
2342 for k, v in created_items.items():
2343 if not v: # skip already deleted
2344 continue
2345
2346 try:
2347 k_item, _, k_id = k.partition(":")
2348 if k_item == "port":
2349 port_dict = self.neutron.list_ports()
2350 existing_ports = [
2351 port["id"] for port in port_dict["ports"] if port_dict
2352 ]
2353 if k_id in existing_ports:
2354 self.neutron.delete_port(k_id)
2355 except Exception as e:
2356 self.logger.error(
2357 "Error deleting port: {}: {}".format(type(e).__name__, e)
2358 )
2359
2360 # #commented because detaching the volumes makes the servers.delete not work properly ?!?
2361 # #dettach volumes attached
2362 # server = self.nova.servers.get(vm_id)
2363 # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"] #volume["id"]
2364 # #for volume in volumes_attached_dict:
2365 # # self.cinder.volumes.detach(volume["id"])
2366
2367 if vm_id:
2368 self.nova.servers.delete(vm_id)
2369
2370 # delete volumes. Although having detached, they should have in active status before deleting
2371 # we ensure in this loop
2372 keep_waiting = True
2373 elapsed_time = 0
2374
2375 while keep_waiting and elapsed_time < volume_timeout:
2376 keep_waiting = False
2377
2378 for k, v in created_items.items():
2379 if not v: # skip already deleted
2380 continue
2381
2382 try:
2383 k_item, _, k_id = k.partition(":")
2384 if k_item == "volume":
2385 if self.cinder.volumes.get(k_id).status != "available":
2386 keep_waiting = True
2387 else:
2388 if k_id not in volumes_to_hold:
2389 self.cinder.volumes.delete(k_id)
2390 created_items[k] = None
2391 elif k_item == "floating_ip": # floating ip
2392 self.neutron.delete_floatingip(k_id)
2393 created_items[k] = None
2394
2395 except Exception as e:
2396 self.logger.error("Error deleting {}: {}".format(k, e))
2397
2398 if keep_waiting:
2399 time.sleep(1)
2400 elapsed_time += 1
2401
2402 return None
2403 except (
2404 nvExceptions.NotFound,
2405 ksExceptions.ClientException,
2406 nvExceptions.ClientException,
2407 ConnectionError,
2408 ) as e:
2409 self._format_exception(e)
2410
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this Virtual Machine
                status:     #Mandatory. Text with one of:
                            # DELETED (not found at vim)
                            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            # OTHER (Vim reported other status not understood)
                            # ERROR (VIM indicates an ERROR status)
                            # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                            # CREATING (on building process), ERROR
                            # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
                    vim_net_id:       #network id where this interface is connected
                    vim_interface_id: #interface/port VIM id
                    ip_address:       #null, or text with IPv4, IPv6 address
                    compute_node:     #identification of compute node where PF,VF interface is allocated
                    pci:              #PCI address of the NIC that hosts the PF,VF
                    vlan:             #physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        # One entry per requested VM; a per-VM exception only marks that VM as
        # DELETED/VIM_ERROR and does not abort the whole refresh.
        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                # map the nova status to the MANO status vocabulary
                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # drop the user_data payload before serializing vim_info
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    # build one interface entry per neutron port of this VM
                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                # the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            # best effort: a floating IP is optional
                            pass

                        # fixed addresses come after the (optional) floating one
                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
2553
2554 def action_vminstance(self, vm_id, action_dict, created_items={}):
2555 """Send and action over a VM instance from VIM
2556 Returns None or the console dict if the action was successfully sent to the VIM
2557 """
2558 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
2559
2560 try:
2561 self._reload_connection()
2562 server = self.nova.servers.find(id=vm_id)
2563
2564 if "start" in action_dict:
2565 if action_dict["start"] == "rebuild":
2566 server.rebuild()
2567 else:
2568 if server.status == "PAUSED":
2569 server.unpause()
2570 elif server.status == "SUSPENDED":
2571 server.resume()
2572 elif server.status == "SHUTOFF":
2573 server.start()
2574 else:
2575 self.logger.debug(
2576 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
2577 )
2578 raise vimconn.VimConnException(
2579 "Cannot 'start' instance while it is in active state",
2580 http_code=vimconn.HTTP_Bad_Request,
2581 )
2582
2583 elif "pause" in action_dict:
2584 server.pause()
2585 elif "resume" in action_dict:
2586 server.resume()
2587 elif "shutoff" in action_dict or "shutdown" in action_dict:
2588 self.logger.debug("server status %s", server.status)
2589 if server.status == "ACTIVE":
2590 server.stop()
2591 else:
2592 self.logger.debug("ERROR: VM is not in Active state")
2593 raise vimconn.VimConnException(
2594 "VM is not in active state, stop operation is not allowed",
2595 http_code=vimconn.HTTP_Bad_Request,
2596 )
2597 elif "forceOff" in action_dict:
2598 server.stop() # TODO
2599 elif "terminate" in action_dict:
2600 server.delete()
2601 elif "createImage" in action_dict:
2602 server.create_image()
2603 # "path":path_schema,
2604 # "description":description_schema,
2605 # "name":name_schema,
2606 # "metadata":metadata_schema,
2607 # "imageRef": id_schema,
2608 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
2609 elif "rebuild" in action_dict:
2610 server.rebuild(server.image["id"])
2611 elif "reboot" in action_dict:
2612 server.reboot() # reboot_type="SOFT"
2613 elif "console" in action_dict:
2614 console_type = action_dict["console"]
2615
2616 if console_type is None or console_type == "novnc":
2617 console_dict = server.get_vnc_console("novnc")
2618 elif console_type == "xvpvnc":
2619 console_dict = server.get_vnc_console(console_type)
2620 elif console_type == "rdp-html5":
2621 console_dict = server.get_rdp_console(console_type)
2622 elif console_type == "spice-html5":
2623 console_dict = server.get_spice_console(console_type)
2624 else:
2625 raise vimconn.VimConnException(
2626 "console type '{}' not allowed".format(console_type),
2627 http_code=vimconn.HTTP_Bad_Request,
2628 )
2629
2630 try:
2631 console_url = console_dict["console"]["url"]
2632 # parse console_url
2633 protocol_index = console_url.find("//")
2634 suffix_index = (
2635 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2636 )
2637 port_index = (
2638 console_url[protocol_index + 2 : suffix_index].find(":")
2639 + protocol_index
2640 + 2
2641 )
2642
2643 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2644 raise vimconn.VimConnException(
2645 "Unexpected response from VIM " + str(console_dict)
2646 )
2647
2648 console_dict2 = {
2649 "protocol": console_url[0:protocol_index],
2650 "server": console_url[protocol_index + 2 : port_index],
2651 "port": int(console_url[port_index + 1 : suffix_index]),
2652 "suffix": console_url[suffix_index + 1 :],
2653 }
2654
2655 return console_dict2
2656 except Exception:
2657 raise vimconn.VimConnException(
2658 "Unexpected response from VIM " + str(console_dict)
2659 )
2660
2661 return None
2662 except (
2663 ksExceptions.ClientException,
2664 nvExceptions.ClientException,
2665 nvExceptions.NotFound,
2666 ConnectionError,
2667 ) as e:
2668 self._format_exception(e)
2669 # TODO insert exception vimconn.HTTP_Unauthorized
2670
2671 # ###### VIO Specific Changes #########
2672 def _generate_vlanID(self):
2673 """
2674 Method to get unused vlanID
2675 Args:
2676 None
2677 Returns:
2678 vlanID
2679 """
2680 # Get used VLAN IDs
2681 usedVlanIDs = []
2682 networks = self.get_network_list()
2683
2684 for net in networks:
2685 if net.get("provider:segmentation_id"):
2686 usedVlanIDs.append(net.get("provider:segmentation_id"))
2687
2688 used_vlanIDs = set(usedVlanIDs)
2689
2690 # find unused VLAN ID
2691 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
2692 try:
2693 start_vlanid, end_vlanid = map(
2694 int, vlanID_range.replace(" ", "").split("-")
2695 )
2696
2697 for vlanID in range(start_vlanid, end_vlanid + 1):
2698 if vlanID not in used_vlanIDs:
2699 return vlanID
2700 except Exception as exp:
2701 raise vimconn.VimConnException(
2702 "Exception {} occurred while generating VLAN ID.".format(exp)
2703 )
2704 else:
2705 raise vimconn.VimConnConflictException(
2706 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
2707 self.config.get("dataplane_net_vlan_range")
2708 )
2709 )
2710
2711 def _generate_multisegment_vlanID(self):
2712 """
2713 Method to get unused vlanID
2714 Args:
2715 None
2716 Returns:
2717 vlanID
2718 """
2719 # Get used VLAN IDs
2720 usedVlanIDs = []
2721 networks = self.get_network_list()
2722 for net in networks:
2723 if net.get("provider:network_type") == "vlan" and net.get(
2724 "provider:segmentation_id"
2725 ):
2726 usedVlanIDs.append(net.get("provider:segmentation_id"))
2727 elif net.get("segments"):
2728 for segment in net.get("segments"):
2729 if segment.get("provider:network_type") == "vlan" and segment.get(
2730 "provider:segmentation_id"
2731 ):
2732 usedVlanIDs.append(segment.get("provider:segmentation_id"))
2733
2734 used_vlanIDs = set(usedVlanIDs)
2735
2736 # find unused VLAN ID
2737 for vlanID_range in self.config.get("multisegment_vlan_range"):
2738 try:
2739 start_vlanid, end_vlanid = map(
2740 int, vlanID_range.replace(" ", "").split("-")
2741 )
2742
2743 for vlanID in range(start_vlanid, end_vlanid + 1):
2744 if vlanID not in used_vlanIDs:
2745 return vlanID
2746 except Exception as exp:
2747 raise vimconn.VimConnException(
2748 "Exception {} occurred while generating VLAN ID.".format(exp)
2749 )
2750 else:
2751 raise vimconn.VimConnConflictException(
2752 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
2753 self.config.get("multisegment_vlan_range")
2754 )
2755 )
2756
2757 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
2758 """
2759 Method to validate user given vlanID ranges
2760 Args: None
2761 Returns: None
2762 """
2763 for vlanID_range in input_vlan_range:
2764 vlan_range = vlanID_range.replace(" ", "")
2765 # validate format
2766 vlanID_pattern = r"(\d)*-(\d)*$"
2767 match_obj = re.match(vlanID_pattern, vlan_range)
2768 if not match_obj:
2769 raise vimconn.VimConnConflictException(
2770 "Invalid VLAN range for {}: {}.You must provide "
2771 "'{}' in format [start_ID - end_ID].".format(
2772 text_vlan_range, vlanID_range, text_vlan_range
2773 )
2774 )
2775
2776 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
2777 if start_vlanid <= 0:
2778 raise vimconn.VimConnConflictException(
2779 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
2780 "networks valid IDs are 1 to 4094 ".format(
2781 text_vlan_range, vlanID_range
2782 )
2783 )
2784
2785 if end_vlanid > 4094:
2786 raise vimconn.VimConnConflictException(
2787 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
2788 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
2789 text_vlan_range, vlanID_range
2790 )
2791 )
2792
2793 if start_vlanid > end_vlanid:
2794 raise vimconn.VimConnConflictException(
2795 "Invalid VLAN range for {}: {}. You must provide '{}'"
2796 " in format start_ID - end_ID and start_ID < end_ID ".format(
2797 text_vlan_range, vlanID_range, text_vlan_range
2798 )
2799 )
2800
2801 # NOT USED FUNCTIONS
2802
2803 def new_external_port(self, port_data):
2804 """Adds a external port to VIM
2805 Returns the port identifier"""
2806 # TODO openstack if needed
2807 return (
2808 -vimconn.HTTP_Internal_Server_Error,
2809 "osconnector.new_external_port() not implemented",
2810 )
2811
2812 def connect_port_network(self, port_id, network_id, admin=False):
2813 """Connects a external port to a network
2814 Returns status code of the VIM response"""
2815 # TODO openstack if needed
2816 return (
2817 -vimconn.HTTP_Internal_Server_Error,
2818 "osconnector.connect_port_network() not implemented",
2819 )
2820
2821 def new_user(self, user_name, user_passwd, tenant_id=None):
2822 """Adds a new user to openstack VIM
2823 Returns the user identifier"""
2824 self.logger.debug("osconnector: Adding a new user to VIM")
2825
2826 try:
2827 self._reload_connection()
2828 user = self.keystone.users.create(
2829 user_name, password=user_passwd, default_project=tenant_id
2830 )
2831 # self.keystone.tenants.add_user(self.k_creds["username"], #role)
2832
2833 return user.id
2834 except ksExceptions.ConnectionError as e:
2835 error_value = -vimconn.HTTP_Bad_Request
2836 error_text = (
2837 type(e).__name__
2838 + ": "
2839 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2840 )
2841 except ksExceptions.ClientException as e: # TODO remove
2842 error_value = -vimconn.HTTP_Bad_Request
2843 error_text = (
2844 type(e).__name__
2845 + ": "
2846 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2847 )
2848
2849 # TODO insert exception vimconn.HTTP_Unauthorized
2850 # if reaching here is because an exception
2851 self.logger.debug("new_user " + error_text)
2852
2853 return error_value, error_text
2854
2855 def delete_user(self, user_id):
2856 """Delete a user from openstack VIM
2857 Returns the user identifier"""
2858 if self.debug:
2859 print("osconnector: Deleting a user from VIM")
2860
2861 try:
2862 self._reload_connection()
2863 self.keystone.users.delete(user_id)
2864
2865 return 1, user_id
2866 except ksExceptions.ConnectionError as e:
2867 error_value = -vimconn.HTTP_Bad_Request
2868 error_text = (
2869 type(e).__name__
2870 + ": "
2871 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2872 )
2873 except ksExceptions.NotFound as e:
2874 error_value = -vimconn.HTTP_Not_Found
2875 error_text = (
2876 type(e).__name__
2877 + ": "
2878 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2879 )
2880 except ksExceptions.ClientException as e: # TODO remove
2881 error_value = -vimconn.HTTP_Bad_Request
2882 error_text = (
2883 type(e).__name__
2884 + ": "
2885 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2886 )
2887
2888 # TODO insert exception vimconn.HTTP_Unauthorized
2889 # if reaching here is because an exception
2890 self.logger.debug("delete_tenant " + error_text)
2891
2892 return error_value, error_text
2893
2894 def get_hosts_info(self):
2895 """Get the information of deployed hosts
2896 Returns the hosts content"""
2897 if self.debug:
2898 print("osconnector: Getting Host info from VIM")
2899
2900 try:
2901 h_list = []
2902 self._reload_connection()
2903 hypervisors = self.nova.hypervisors.list()
2904
2905 for hype in hypervisors:
2906 h_list.append(hype.to_dict())
2907
2908 return 1, {"hosts": h_list}
2909 except nvExceptions.NotFound as e:
2910 error_value = -vimconn.HTTP_Not_Found
2911 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
2912 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
2913 error_value = -vimconn.HTTP_Bad_Request
2914 error_text = (
2915 type(e).__name__
2916 + ": "
2917 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2918 )
2919
2920 # TODO insert exception vimconn.HTTP_Unauthorized
2921 # if reaching here is because an exception
2922 self.logger.debug("get_hosts_info " + error_text)
2923
2924 return error_value, error_text
2925
2926 def get_hosts(self, vim_tenant):
2927 """Get the hosts and deployed instances
2928 Returns the hosts content"""
2929 r, hype_dict = self.get_hosts_info()
2930
2931 if r < 0:
2932 return r, hype_dict
2933
2934 hypervisors = hype_dict["hosts"]
2935
2936 try:
2937 servers = self.nova.servers.list()
2938 for hype in hypervisors:
2939 for server in servers:
2940 if (
2941 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
2942 == hype["hypervisor_hostname"]
2943 ):
2944 if "vm" in hype:
2945 hype["vm"].append(server.id)
2946 else:
2947 hype["vm"] = [server.id]
2948
2949 return 1, hype_dict
2950 except nvExceptions.NotFound as e:
2951 error_value = -vimconn.HTTP_Not_Found
2952 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
2953 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
2954 error_value = -vimconn.HTTP_Bad_Request
2955 error_text = (
2956 type(e).__name__
2957 + ": "
2958 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2959 )
2960
2961 # TODO insert exception vimconn.HTTP_Unauthorized
2962 # if reaching here is because an exception
2963 self.logger.debug("get_hosts " + error_text)
2964
2965 return error_value, error_text
2966
2967 def new_classification(self, name, ctype, definition):
2968 self.logger.debug(
2969 "Adding a new (Traffic) Classification to VIM, named %s", name
2970 )
2971
2972 try:
2973 new_class = None
2974 self._reload_connection()
2975
2976 if ctype not in supportedClassificationTypes:
2977 raise vimconn.VimConnNotSupportedException(
2978 "OpenStack VIM connector does not support provided "
2979 "Classification Type {}, supported ones are: {}".format(
2980 ctype, supportedClassificationTypes
2981 )
2982 )
2983
2984 if not self._validate_classification(ctype, definition):
2985 raise vimconn.VimConnException(
2986 "Incorrect Classification definition for the type specified."
2987 )
2988
2989 classification_dict = definition
2990 classification_dict["name"] = name
2991 new_class = self.neutron.create_sfc_flow_classifier(
2992 {"flow_classifier": classification_dict}
2993 )
2994
2995 return new_class["flow_classifier"]["id"]
2996 except (
2997 neExceptions.ConnectionFailed,
2998 ksExceptions.ClientException,
2999 neExceptions.NeutronException,
3000 ConnectionError,
3001 ) as e:
3002 self.logger.error("Creation of Classification failed.")
3003 self._format_exception(e)
3004
3005 def get_classification(self, class_id):
3006 self.logger.debug(" Getting Classification %s from VIM", class_id)
3007 filter_dict = {"id": class_id}
3008 class_list = self.get_classification_list(filter_dict)
3009
3010 if len(class_list) == 0:
3011 raise vimconn.VimConnNotFoundException(
3012 "Classification '{}' not found".format(class_id)
3013 )
3014 elif len(class_list) > 1:
3015 raise vimconn.VimConnConflictException(
3016 "Found more than one Classification with this criteria"
3017 )
3018
3019 classification = class_list[0]
3020
3021 return classification
3022
3023 def get_classification_list(self, filter_dict={}):
3024 self.logger.debug(
3025 "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3026 )
3027
3028 try:
3029 filter_dict_os = filter_dict.copy()
3030 self._reload_connection()
3031
3032 if self.api_version3 and "tenant_id" in filter_dict_os:
3033 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3034
3035 classification_dict = self.neutron.list_sfc_flow_classifiers(
3036 **filter_dict_os
3037 )
3038 classification_list = classification_dict["flow_classifiers"]
3039 self.__classification_os2mano(classification_list)
3040
3041 return classification_list
3042 except (
3043 neExceptions.ConnectionFailed,
3044 ksExceptions.ClientException,
3045 neExceptions.NeutronException,
3046 ConnectionError,
3047 ) as e:
3048 self._format_exception(e)
3049
3050 def delete_classification(self, class_id):
3051 self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3052
3053 try:
3054 self._reload_connection()
3055 self.neutron.delete_sfc_flow_classifier(class_id)
3056
3057 return class_id
3058 except (
3059 neExceptions.ConnectionFailed,
3060 neExceptions.NeutronException,
3061 ksExceptions.ClientException,
3062 neExceptions.NeutronException,
3063 ConnectionError,
3064 ) as e:
3065 self._format_exception(e)
3066
3067 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3068 self.logger.debug(
3069 "Adding a new Service Function Instance to VIM, named '%s'", name
3070 )
3071
3072 try:
3073 new_sfi = None
3074 self._reload_connection()
3075 correlation = None
3076
3077 if sfc_encap:
3078 correlation = "nsh"
3079
3080 if len(ingress_ports) != 1:
3081 raise vimconn.VimConnNotSupportedException(
3082 "OpenStack VIM connector can only have 1 ingress port per SFI"
3083 )
3084
3085 if len(egress_ports) != 1:
3086 raise vimconn.VimConnNotSupportedException(
3087 "OpenStack VIM connector can only have 1 egress port per SFI"
3088 )
3089
3090 sfi_dict = {
3091 "name": name,
3092 "ingress": ingress_ports[0],
3093 "egress": egress_ports[0],
3094 "service_function_parameters": {"correlation": correlation},
3095 }
3096 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3097
3098 return new_sfi["port_pair"]["id"]
3099 except (
3100 neExceptions.ConnectionFailed,
3101 ksExceptions.ClientException,
3102 neExceptions.NeutronException,
3103 ConnectionError,
3104 ) as e:
3105 if new_sfi:
3106 try:
3107 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3108 except Exception:
3109 self.logger.error(
3110 "Creation of Service Function Instance failed, with "
3111 "subsequent deletion failure as well."
3112 )
3113
3114 self._format_exception(e)
3115
3116 def get_sfi(self, sfi_id):
3117 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3118 filter_dict = {"id": sfi_id}
3119 sfi_list = self.get_sfi_list(filter_dict)
3120
3121 if len(sfi_list) == 0:
3122 raise vimconn.VimConnNotFoundException(
3123 "Service Function Instance '{}' not found".format(sfi_id)
3124 )
3125 elif len(sfi_list) > 1:
3126 raise vimconn.VimConnConflictException(
3127 "Found more than one Service Function Instance with this criteria"
3128 )
3129
3130 sfi = sfi_list[0]
3131
3132 return sfi
3133
3134 def get_sfi_list(self, filter_dict={}):
3135 self.logger.debug(
3136 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3137 )
3138
3139 try:
3140 self._reload_connection()
3141 filter_dict_os = filter_dict.copy()
3142
3143 if self.api_version3 and "tenant_id" in filter_dict_os:
3144 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3145
3146 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3147 sfi_list = sfi_dict["port_pairs"]
3148 self.__sfi_os2mano(sfi_list)
3149
3150 return sfi_list
3151 except (
3152 neExceptions.ConnectionFailed,
3153 ksExceptions.ClientException,
3154 neExceptions.NeutronException,
3155 ConnectionError,
3156 ) as e:
3157 self._format_exception(e)
3158
3159 def delete_sfi(self, sfi_id):
3160 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3161
3162 try:
3163 self._reload_connection()
3164 self.neutron.delete_sfc_port_pair(sfi_id)
3165
3166 return sfi_id
3167 except (
3168 neExceptions.ConnectionFailed,
3169 neExceptions.NeutronException,
3170 ksExceptions.ClientException,
3171 neExceptions.NeutronException,
3172 ConnectionError,
3173 ) as e:
3174 self._format_exception(e)
3175
3176 def new_sf(self, name, sfis, sfc_encap=True):
3177 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3178
3179 try:
3180 new_sf = None
3181 self._reload_connection()
3182 # correlation = None
3183 # if sfc_encap:
3184 # correlation = "nsh"
3185
3186 for instance in sfis:
3187 sfi = self.get_sfi(instance)
3188
3189 if sfi.get("sfc_encap") != sfc_encap:
3190 raise vimconn.VimConnNotSupportedException(
3191 "OpenStack VIM connector requires all SFIs of the "
3192 "same SF to share the same SFC Encapsulation"
3193 )
3194
3195 sf_dict = {"name": name, "port_pairs": sfis}
3196 new_sf = self.neutron.create_sfc_port_pair_group(
3197 {"port_pair_group": sf_dict}
3198 )
3199
3200 return new_sf["port_pair_group"]["id"]
3201 except (
3202 neExceptions.ConnectionFailed,
3203 ksExceptions.ClientException,
3204 neExceptions.NeutronException,
3205 ConnectionError,
3206 ) as e:
3207 if new_sf:
3208 try:
3209 self.neutron.delete_sfc_port_pair_group(
3210 new_sf["port_pair_group"]["id"]
3211 )
3212 except Exception:
3213 self.logger.error(
3214 "Creation of Service Function failed, with "
3215 "subsequent deletion failure as well."
3216 )
3217
3218 self._format_exception(e)
3219
3220 def get_sf(self, sf_id):
3221 self.logger.debug("Getting Service Function %s from VIM", sf_id)
3222 filter_dict = {"id": sf_id}
3223 sf_list = self.get_sf_list(filter_dict)
3224
3225 if len(sf_list) == 0:
3226 raise vimconn.VimConnNotFoundException(
3227 "Service Function '{}' not found".format(sf_id)
3228 )
3229 elif len(sf_list) > 1:
3230 raise vimconn.VimConnConflictException(
3231 "Found more than one Service Function with this criteria"
3232 )
3233
3234 sf = sf_list[0]
3235
3236 return sf
3237
3238 def get_sf_list(self, filter_dict={}):
3239 self.logger.debug(
3240 "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3241 )
3242
3243 try:
3244 self._reload_connection()
3245 filter_dict_os = filter_dict.copy()
3246
3247 if self.api_version3 and "tenant_id" in filter_dict_os:
3248 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3249
3250 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3251 sf_list = sf_dict["port_pair_groups"]
3252 self.__sf_os2mano(sf_list)
3253
3254 return sf_list
3255 except (
3256 neExceptions.ConnectionFailed,
3257 ksExceptions.ClientException,
3258 neExceptions.NeutronException,
3259 ConnectionError,
3260 ) as e:
3261 self._format_exception(e)
3262
3263 def delete_sf(self, sf_id):
3264 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3265
3266 try:
3267 self._reload_connection()
3268 self.neutron.delete_sfc_port_pair_group(sf_id)
3269
3270 return sf_id
3271 except (
3272 neExceptions.ConnectionFailed,
3273 neExceptions.NeutronException,
3274 ksExceptions.ClientException,
3275 neExceptions.NeutronException,
3276 ConnectionError,
3277 ) as e:
3278 self._format_exception(e)
3279
3280 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3281 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3282
3283 try:
3284 new_sfp = None
3285 self._reload_connection()
3286 # In networking-sfc the MPLS encapsulation is legacy
3287 # should be used when no full SFC Encapsulation is intended
3288 correlation = "mpls"
3289
3290 if sfc_encap:
3291 correlation = "nsh"
3292
3293 sfp_dict = {
3294 "name": name,
3295 "flow_classifiers": classifications,
3296 "port_pair_groups": sfs,
3297 "chain_parameters": {"correlation": correlation},
3298 }
3299
3300 if spi:
3301 sfp_dict["chain_id"] = spi
3302
3303 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3304
3305 return new_sfp["port_chain"]["id"]
3306 except (
3307 neExceptions.ConnectionFailed,
3308 ksExceptions.ClientException,
3309 neExceptions.NeutronException,
3310 ConnectionError,
3311 ) as e:
3312 if new_sfp:
3313 try:
3314 self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3315 except Exception:
3316 self.logger.error(
3317 "Creation of Service Function Path failed, with "
3318 "subsequent deletion failure as well."
3319 )
3320
3321 self._format_exception(e)
3322
3323 def get_sfp(self, sfp_id):
3324 self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
3325
3326 filter_dict = {"id": sfp_id}
3327 sfp_list = self.get_sfp_list(filter_dict)
3328
3329 if len(sfp_list) == 0:
3330 raise vimconn.VimConnNotFoundException(
3331 "Service Function Path '{}' not found".format(sfp_id)
3332 )
3333 elif len(sfp_list) > 1:
3334 raise vimconn.VimConnConflictException(
3335 "Found more than one Service Function Path with this criteria"
3336 )
3337
3338 sfp = sfp_list[0]
3339
3340 return sfp
3341
3342 def get_sfp_list(self, filter_dict={}):
3343 self.logger.debug(
3344 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3345 )
3346
3347 try:
3348 self._reload_connection()
3349 filter_dict_os = filter_dict.copy()
3350
3351 if self.api_version3 and "tenant_id" in filter_dict_os:
3352 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3353
3354 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
3355 sfp_list = sfp_dict["port_chains"]
3356 self.__sfp_os2mano(sfp_list)
3357
3358 return sfp_list
3359 except (
3360 neExceptions.ConnectionFailed,
3361 ksExceptions.ClientException,
3362 neExceptions.NeutronException,
3363 ConnectionError,
3364 ) as e:
3365 self._format_exception(e)
3366
3367 def delete_sfp(self, sfp_id):
3368 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
3369
3370 try:
3371 self._reload_connection()
3372 self.neutron.delete_sfc_port_chain(sfp_id)
3373
3374 return sfp_id
3375 except (
3376 neExceptions.ConnectionFailed,
3377 neExceptions.NeutronException,
3378 ksExceptions.ClientException,
3379 neExceptions.NeutronException,
3380 ConnectionError,
3381 ) as e:
3382 self._format_exception(e)
3383
3384 def refresh_sfps_status(self, sfp_list):
3385 """Get the status of the service function path
3386 Params: the list of sfp identifiers
3387 Returns a dictionary with:
3388 vm_id: #VIM id of this service function path
3389 status: #Mandatory. Text with one of:
3390 # DELETED (not found at vim)
3391 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3392 # OTHER (Vim reported other status not understood)
3393 # ERROR (VIM indicates an ERROR status)
3394 # ACTIVE,
3395 # CREATING (on building process)
3396 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3397 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
3398 """
3399 sfp_dict = {}
3400 self.logger.debug(
3401 "refresh_sfps status: Getting tenant SFP information from VIM"
3402 )
3403
3404 for sfp_id in sfp_list:
3405 sfp = {}
3406
3407 try:
3408 sfp_vim = self.get_sfp(sfp_id)
3409
3410 if sfp_vim["spi"]:
3411 sfp["status"] = vmStatus2manoFormat["ACTIVE"]
3412 else:
3413 sfp["status"] = "OTHER"
3414 sfp["error_msg"] = "VIM status reported " + sfp["status"]
3415
3416 sfp["vim_info"] = self.serialize(sfp_vim)
3417
3418 if sfp_vim.get("fault"):
3419 sfp["error_msg"] = str(sfp_vim["fault"])
3420 except vimconn.VimConnNotFoundException as e:
3421 self.logger.error("Exception getting sfp status: %s", str(e))
3422 sfp["status"] = "DELETED"
3423 sfp["error_msg"] = str(e)
3424 except vimconn.VimConnException as e:
3425 self.logger.error("Exception getting sfp status: %s", str(e))
3426 sfp["status"] = "VIM_ERROR"
3427 sfp["error_msg"] = str(e)
3428
3429 sfp_dict[sfp_id] = sfp
3430
3431 return sfp_dict
3432
3433 def refresh_sfis_status(self, sfi_list):
3434 """Get the status of the service function instances
3435 Params: the list of sfi identifiers
3436 Returns a dictionary with:
3437 vm_id: #VIM id of this service function instance
3438 status: #Mandatory. Text with one of:
3439 # DELETED (not found at vim)
3440 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3441 # OTHER (Vim reported other status not understood)
3442 # ERROR (VIM indicates an ERROR status)
3443 # ACTIVE,
3444 # CREATING (on building process)
3445 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3446 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3447 """
3448 sfi_dict = {}
3449 self.logger.debug(
3450 "refresh_sfis status: Getting tenant sfi information from VIM"
3451 )
3452
3453 for sfi_id in sfi_list:
3454 sfi = {}
3455
3456 try:
3457 sfi_vim = self.get_sfi(sfi_id)
3458
3459 if sfi_vim:
3460 sfi["status"] = vmStatus2manoFormat["ACTIVE"]
3461 else:
3462 sfi["status"] = "OTHER"
3463 sfi["error_msg"] = "VIM status reported " + sfi["status"]
3464
3465 sfi["vim_info"] = self.serialize(sfi_vim)
3466
3467 if sfi_vim.get("fault"):
3468 sfi["error_msg"] = str(sfi_vim["fault"])
3469 except vimconn.VimConnNotFoundException as e:
3470 self.logger.error("Exception getting sfi status: %s", str(e))
3471 sfi["status"] = "DELETED"
3472 sfi["error_msg"] = str(e)
3473 except vimconn.VimConnException as e:
3474 self.logger.error("Exception getting sfi status: %s", str(e))
3475 sfi["status"] = "VIM_ERROR"
3476 sfi["error_msg"] = str(e)
3477
3478 sfi_dict[sfi_id] = sfi
3479
3480 return sfi_dict
3481
3482 def refresh_sfs_status(self, sf_list):
3483 """Get the status of the service functions
3484 Params: the list of sf identifiers
3485 Returns a dictionary with:
3486 vm_id: #VIM id of this service function
3487 status: #Mandatory. Text with one of:
3488 # DELETED (not found at vim)
3489 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3490 # OTHER (Vim reported other status not understood)
3491 # ERROR (VIM indicates an ERROR status)
3492 # ACTIVE,
3493 # CREATING (on building process)
3494 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3495 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3496 """
3497 sf_dict = {}
3498 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
3499
3500 for sf_id in sf_list:
3501 sf = {}
3502
3503 try:
3504 sf_vim = self.get_sf(sf_id)
3505
3506 if sf_vim:
3507 sf["status"] = vmStatus2manoFormat["ACTIVE"]
3508 else:
3509 sf["status"] = "OTHER"
3510 sf["error_msg"] = "VIM status reported " + sf_vim["status"]
3511
3512 sf["vim_info"] = self.serialize(sf_vim)
3513
3514 if sf_vim.get("fault"):
3515 sf["error_msg"] = str(sf_vim["fault"])
3516 except vimconn.VimConnNotFoundException as e:
3517 self.logger.error("Exception getting sf status: %s", str(e))
3518 sf["status"] = "DELETED"
3519 sf["error_msg"] = str(e)
3520 except vimconn.VimConnException as e:
3521 self.logger.error("Exception getting sf status: %s", str(e))
3522 sf["status"] = "VIM_ERROR"
3523 sf["error_msg"] = str(e)
3524
3525 sf_dict[sf_id] = sf
3526
3527 return sf_dict
3528
3529 def refresh_classifications_status(self, classification_list):
3530 """Get the status of the classifications
3531 Params: the list of classification identifiers
3532 Returns a dictionary with:
3533 vm_id: #VIM id of this classifier
3534 status: #Mandatory. Text with one of:
3535 # DELETED (not found at vim)
3536 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3537 # OTHER (Vim reported other status not understood)
3538 # ERROR (VIM indicates an ERROR status)
3539 # ACTIVE,
3540 # CREATING (on building process)
3541 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3542 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3543 """
3544 classification_dict = {}
3545 self.logger.debug(
3546 "refresh_classifications status: Getting tenant classification information from VIM"
3547 )
3548
3549 for classification_id in classification_list:
3550 classification = {}
3551
3552 try:
3553 classification_vim = self.get_classification(classification_id)
3554
3555 if classification_vim:
3556 classification["status"] = vmStatus2manoFormat["ACTIVE"]
3557 else:
3558 classification["status"] = "OTHER"
3559 classification["error_msg"] = (
3560 "VIM status reported " + classification["status"]
3561 )
3562
3563 classification["vim_info"] = self.serialize(classification_vim)
3564
3565 if classification_vim.get("fault"):
3566 classification["error_msg"] = str(classification_vim["fault"])
3567 except vimconn.VimConnNotFoundException as e:
3568 self.logger.error("Exception getting classification status: %s", str(e))
3569 classification["status"] = "DELETED"
3570 classification["error_msg"] = str(e)
3571 except vimconn.VimConnException as e:
3572 self.logger.error("Exception getting classification status: %s", str(e))
3573 classification["status"] = "VIM_ERROR"
3574 classification["error_msg"] = str(e)
3575
3576 classification_dict[classification_id] = classification
3577
3578 return classification_dict
3579
3580 def new_affinity_group(self, affinity_group_data):
3581 """Adds a server group to VIM
3582 affinity_group_data contains a dictionary with information, keys:
3583 name: name in VIM for the server group
3584 type: affinity or anti-affinity
3585 scope: Only nfvi-node allowed
3586 Returns the server group identifier"""
3587 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3588
3589 try:
3590 name = affinity_group_data["name"]
3591 policy = affinity_group_data["type"]
3592
3593 self._reload_connection()
3594 new_server_group = self.nova.server_groups.create(name, policy)
3595
3596 return new_server_group.id
3597 except (
3598 ksExceptions.ClientException,
3599 nvExceptions.ClientException,
3600 ConnectionError,
3601 KeyError,
3602 ) as e:
3603 self._format_exception(e)
3604
3605 def get_affinity_group(self, affinity_group_id):
3606 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3607 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3608 try:
3609 self._reload_connection()
3610 server_group = self.nova.server_groups.find(id=affinity_group_id)
3611
3612 return server_group.to_dict()
3613 except (
3614 nvExceptions.NotFound,
3615 nvExceptions.ClientException,
3616 ksExceptions.ClientException,
3617 ConnectionError,
3618 ) as e:
3619 self._format_exception(e)
3620
3621 def delete_affinity_group(self, affinity_group_id):
3622 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3623 self.logger.debug("Getting server group '%s'", affinity_group_id)
3624 try:
3625 self._reload_connection()
3626 self.nova.server_groups.delete(affinity_group_id)
3627
3628 return affinity_group_id
3629 except (
3630 nvExceptions.NotFound,
3631 ksExceptions.ClientException,
3632 nvExceptions.ClientException,
3633 ConnectionError,
3634 ) as e:
3635 self._format_exception(e)
3636
3637 def get_vdu_state(self, vm_id):
3638 """
3639 Getting the state of a vdu
3640 param:
3641 vm_id: ID of an instance
3642 """
3643 self.logger.debug("Getting the status of VM")
3644 self.logger.debug("VIM VM ID %s", vm_id)
3645 self._reload_connection()
3646 server = self.nova.servers.find(id=vm_id)
3647 server_dict = server.to_dict()
3648 vdu_data = [
3649 server_dict["status"],
3650 server_dict["flavor"]["id"],
3651 server_dict["OS-EXT-SRV-ATTR:host"],
3652 server_dict["OS-EXT-AZ:availability_zone"],
3653 ]
3654 self.logger.debug("vdu_data %s", vdu_data)
3655 return vdu_data
3656
3657 def check_compute_availability(self, host, server_flavor_details):
3658 self._reload_connection()
3659 hypervisor_search = self.nova.hypervisors.search(
3660 hypervisor_match=host, servers=True
3661 )
3662 for hypervisor in hypervisor_search:
3663 hypervisor_id = hypervisor.to_dict()["id"]
3664 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
3665 hypervisor_dict = hypervisor_details.to_dict()
3666 hypervisor_temp = json.dumps(hypervisor_dict)
3667 hypervisor_json = json.loads(hypervisor_temp)
3668 resources_available = [
3669 hypervisor_json["free_ram_mb"],
3670 hypervisor_json["disk_available_least"],
3671 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
3672 ]
3673 compute_available = all(
3674 x > y for x, y in zip(resources_available, server_flavor_details)
3675 )
3676 if compute_available:
3677 return host
3678
3679 def check_availability_zone(
3680 self, old_az, server_flavor_details, old_host, host=None
3681 ):
3682 self._reload_connection()
3683 az_check = {"zone_check": False, "compute_availability": None}
3684 aggregates_list = self.nova.aggregates.list()
3685 for aggregate in aggregates_list:
3686 aggregate_details = aggregate.to_dict()
3687 aggregate_temp = json.dumps(aggregate_details)
3688 aggregate_json = json.loads(aggregate_temp)
3689 if aggregate_json["availability_zone"] == old_az:
3690 hosts_list = aggregate_json["hosts"]
3691 if host is not None:
3692 if host in hosts_list:
3693 az_check["zone_check"] = True
3694 available_compute_id = self.check_compute_availability(
3695 host, server_flavor_details
3696 )
3697 if available_compute_id is not None:
3698 az_check["compute_availability"] = available_compute_id
3699 else:
3700 for check_host in hosts_list:
3701 if check_host != old_host:
3702 available_compute_id = self.check_compute_availability(
3703 check_host, server_flavor_details
3704 )
3705 if available_compute_id is not None:
3706 az_check["zone_check"] = True
3707 az_check["compute_availability"] = available_compute_id
3708 break
3709 else:
3710 az_check["zone_check"] = True
3711 return az_check
3712
3713 def migrate_instance(self, vm_id, compute_host=None):
3714 """
3715 Migrate a vdu
3716 param:
3717 vm_id: ID of an instance
3718 compute_host: Host to migrate the vdu to
3719 """
3720 self._reload_connection()
3721 vm_state = False
3722 instance_state = self.get_vdu_state(vm_id)
3723 server_flavor_id = instance_state[1]
3724 server_hypervisor_name = instance_state[2]
3725 server_availability_zone = instance_state[3]
3726 try:
3727 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3728 server_flavor_details = [
3729 server_flavor["ram"],
3730 server_flavor["disk"],
3731 server_flavor["vcpus"],
3732 ]
3733 if compute_host == server_hypervisor_name:
3734 raise vimconn.VimConnException(
3735 "Unable to migrate instance '{}' to the same host '{}'".format(
3736 vm_id, compute_host
3737 ),
3738 http_code=vimconn.HTTP_Bad_Request,
3739 )
3740 az_status = self.check_availability_zone(
3741 server_availability_zone,
3742 server_flavor_details,
3743 server_hypervisor_name,
3744 compute_host,
3745 )
3746 availability_zone_check = az_status["zone_check"]
3747 available_compute_id = az_status.get("compute_availability")
3748
3749 if availability_zone_check is False:
3750 raise vimconn.VimConnException(
3751 "Unable to migrate instance '{}' to a different availability zone".format(
3752 vm_id
3753 ),
3754 http_code=vimconn.HTTP_Bad_Request,
3755 )
3756 if available_compute_id is not None:
3757 self.nova.servers.live_migrate(
3758 server=vm_id,
3759 host=available_compute_id,
3760 block_migration=True,
3761 disk_over_commit=False,
3762 )
3763 state = "MIGRATING"
3764 changed_compute_host = ""
3765 if state == "MIGRATING":
3766 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
3767 changed_compute_host = self.get_vdu_state(vm_id)[2]
3768 if vm_state and changed_compute_host == available_compute_id:
3769 self.logger.debug(
3770 "Instance '{}' migrated to the new compute host '{}'".format(
3771 vm_id, changed_compute_host
3772 )
3773 )
3774 return state, available_compute_id
3775 else:
3776 raise vimconn.VimConnException(
3777 "Migration Failed. Instance '{}' not moved to the new host {}".format(
3778 vm_id, available_compute_id
3779 ),
3780 http_code=vimconn.HTTP_Bad_Request,
3781 )
3782 else:
3783 raise vimconn.VimConnException(
3784 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
3785 available_compute_id
3786 ),
3787 http_code=vimconn.HTTP_Bad_Request,
3788 )
3789 except (
3790 nvExceptions.BadRequest,
3791 nvExceptions.ClientException,
3792 nvExceptions.NotFound,
3793 ) as e:
3794 self._format_exception(e)
3795
3796 def resize_instance(self, vm_id, new_flavor_id):
3797 """
3798 For resizing the vm based on the given
3799 flavor details
3800 param:
3801 vm_id : ID of an instance
3802 new_flavor_id : Flavor id to be resized
3803 Return the status of a resized instance
3804 """
3805 self._reload_connection()
3806 self.logger.debug("resize the flavor of an instance")
3807 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
3808 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
3809 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
3810 try:
3811 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
3812 if old_flavor_disk > new_flavor_disk:
3813 raise nvExceptions.BadRequest(
3814 400,
3815 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
3816 )
3817 else:
3818 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
3819 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
3820 if vm_state:
3821 instance_resized_status = self.confirm_resize(vm_id)
3822 return instance_resized_status
3823 else:
3824 raise nvExceptions.BadRequest(
3825 409,
3826 message="Cannot 'resize' vm_state is in ERROR",
3827 )
3828
3829 else:
3830 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
3831 raise nvExceptions.BadRequest(
3832 409,
3833 message="Cannot 'resize' instance while it is in vm_state resized",
3834 )
3835 except (
3836 nvExceptions.BadRequest,
3837 nvExceptions.ClientException,
3838 nvExceptions.NotFound,
3839 ) as e:
3840 self._format_exception(e)
3841
3842 def confirm_resize(self, vm_id):
3843 """
3844 Confirm the resize of an instance
3845 param:
3846 vm_id: ID of an instance
3847 """
3848 self._reload_connection()
3849 self.nova.servers.confirm_resize(server=vm_id)
3850 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3851 self.__wait_for_vm(vm_id, "ACTIVE")
3852 instance_status = self.get_vdu_state(vm_id)[0]
3853 return instance_status