Fix Bug 2012: use existing volumes as instantiation parameters
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41
42 from cinderclient import client as cClient
43 from glanceclient import client as glClient
44 import glanceclient.exc as gl1Exceptions
45 from keystoneauth1 import session
46 from keystoneauth1.identity import v2, v3
47 import keystoneclient.exceptions as ksExceptions
48 import keystoneclient.v2_0.client as ksClient_v2
49 import keystoneclient.v3.client as ksClient_v3
50 import netaddr
51 from neutronclient.common import exceptions as neExceptions
52 from neutronclient.neutron import client as neClient
53 from novaclient import client as nClient, exceptions as nvExceptions
54 from osm_ro_plugin import vimconn
55 from requests.exceptions import ConnectionError
56 import yaml
57
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# OpenStack VM status -> openmano (RO) VM status.
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# OpenStack network status -> openmano (RO) network status.
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# SFC classification types this connector understands; only the legacy
# flow-classifier type exists today (see module docstring).
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
# NOTE(review): presumably seconds; server_timeout looks like the analogous
# limit for server (VM) operations — confirm against the polling loops.
volume_timeout = 1800
server_timeout = 1800
85
86
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that tolerates dict subclasses.

    OpenStack client libraries frequently return custom subclasses of
    ``dict``, which PyYAML's safe dumper refuses to represent (reference
    issue 142 of pyyaml), so such values are downgraded to plain dicts
    before being represented.
    """

    def represent_data(self, data):
        """Represent *data*, coercing dict subclasses to plain dicts first."""
        is_dict_subclass = isinstance(data, dict) and type(data) is not dict

        if is_dict_subclass:
            data = dict(data.items())

        return super().represent_data(data)
96
97
98 class vimconnector(vimconn.VimConnector):
99 def __init__(
100 self,
101 uuid,
102 name,
103 tenant_id,
104 tenant_name,
105 url,
106 url_admin=None,
107 user=None,
108 passwd=None,
109 log_level=None,
110 config={},
111 persistent_info={},
112 ):
113 """using common constructor parameters. In this case
114 'url' is the keystone authorization url,
115 'url_admin' is not use
116 """
117 api_version = config.get("APIversion")
118
119 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
120 raise vimconn.VimConnException(
121 "Invalid value '{}' for config:APIversion. "
122 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
123 )
124
125 vim_type = config.get("vim_type")
126
127 if vim_type and vim_type not in ("vio", "VIO"):
128 raise vimconn.VimConnException(
129 "Invalid value '{}' for config:vim_type."
130 "Allowed values are 'vio' or 'VIO'".format(vim_type)
131 )
132
133 if config.get("dataplane_net_vlan_range") is not None:
134 # validate vlan ranges provided by user
135 self._validate_vlan_ranges(
136 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
137 )
138
139 if config.get("multisegment_vlan_range") is not None:
140 # validate vlan ranges provided by user
141 self._validate_vlan_ranges(
142 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
143 )
144
145 vimconn.VimConnector.__init__(
146 self,
147 uuid,
148 name,
149 tenant_id,
150 tenant_name,
151 url,
152 url_admin,
153 user,
154 passwd,
155 log_level,
156 config,
157 )
158
159 if self.config.get("insecure") and self.config.get("ca_cert"):
160 raise vimconn.VimConnException(
161 "options insecure and ca_cert are mutually exclusive"
162 )
163
164 self.verify = True
165
166 if self.config.get("insecure"):
167 self.verify = False
168
169 if self.config.get("ca_cert"):
170 self.verify = self.config.get("ca_cert")
171
172 if not url:
173 raise TypeError("url param can not be NoneType")
174
175 self.persistent_info = persistent_info
176 self.availability_zone = persistent_info.get("availability_zone", None)
177 self.session = persistent_info.get("session", {"reload_client": True})
178 self.my_tenant_id = self.session.get("my_tenant_id")
179 self.nova = self.session.get("nova")
180 self.neutron = self.session.get("neutron")
181 self.cinder = self.session.get("cinder")
182 self.glance = self.session.get("glance")
183 # self.glancev1 = self.session.get("glancev1")
184 self.keystone = self.session.get("keystone")
185 self.api_version3 = self.session.get("api_version3")
186 self.vim_type = self.config.get("vim_type")
187
188 if self.vim_type:
189 self.vim_type = self.vim_type.upper()
190
191 if self.config.get("use_internal_endpoint"):
192 self.endpoint_type = "internalURL"
193 else:
194 self.endpoint_type = None
195
196 logging.getLogger("urllib3").setLevel(logging.WARNING)
197 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
198 logging.getLogger("novaclient").setLevel(logging.WARNING)
199 self.logger = logging.getLogger("ro.vim.openstack")
200
201 # allow security_groups to be a list or a single string
202 if isinstance(self.config.get("security_groups"), str):
203 self.config["security_groups"] = [self.config["security_groups"]]
204
205 self.security_groups_id = None
206
207 # ###### VIO Specific Changes #########
208 if self.vim_type == "VIO":
209 self.logger = logging.getLogger("ro.vim.vio")
210
211 if log_level:
212 self.logger.setLevel(getattr(logging, log_level))
213
214 def __getitem__(self, index):
215 """Get individuals parameters.
216 Throw KeyError"""
217 if index == "project_domain_id":
218 return self.config.get("project_domain_id")
219 elif index == "user_domain_id":
220 return self.config.get("user_domain_id")
221 else:
222 return vimconn.VimConnector.__getitem__(self, index)
223
224 def __setitem__(self, index, value):
225 """Set individuals parameters and it is marked as dirty so to force connection reload.
226 Throw KeyError"""
227 if index == "project_domain_id":
228 self.config["project_domain_id"] = value
229 elif index == "user_domain_id":
230 self.config["user_domain_id"] = value
231 else:
232 vimconn.VimConnector.__setitem__(self, index, value)
233
234 self.session["reload_client"] = True
235
    def serialize(self, value):
        """Serialization of python basic types.

        In the case value is not serializable a message will be logged and a
        simple representation of the data that cannot be converted back to
        python is returned.
        """
        # strings are already serial; return them untouched
        if isinstance(value, str):
            return value

        try:
            # SafeDumper (defined in this module) tolerates the dict
            # subclasses returned by the OpenStack clients
            return yaml.dump(
                value, Dumper=SafeDumper, default_flow_style=True, width=256
            )
        except yaml.representer.RepresenterError:
            self.logger.debug(
                "The following entity cannot be serialized in YAML:\n\n%s\n\n",
                pformat(value),
                exc_info=True,
            )

            # lossy fallback: not convertible back to python
            return str(value)
258
    def _reload_connection(self):
        """Called before any operation, it check if credentials has changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.session["reload_client"]:
            # decide keystone API version: explicit config wins, otherwise
            # infer it from the auth_url suffix
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # an explicit *_domain_id/_name in config wins; otherwise fall
                # back to the "default" domain id
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            # one keystoneauth session shared by every client below
            sess = session.Session(auth=auth, verify=self.verify)
            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            # best effort: a missing project id is logged but does not abort
            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            # glance needs the internal endpoint resolved by hand
            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availablity zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
390
391 def __net_os2mano(self, net_list_dict):
392 """Transform the net openstack format to mano format
393 net_list_dict can be a list of dict or a single dict"""
394 if type(net_list_dict) is dict:
395 net_list_ = (net_list_dict,)
396 elif type(net_list_dict) is list:
397 net_list_ = net_list_dict
398 else:
399 raise TypeError("param net_list_dict must be a list or a dictionary")
400 for net in net_list_:
401 if net.get("provider:network_type") == "vlan":
402 net["type"] = "data"
403 else:
404 net["type"] = "bridge"
405
406 def __classification_os2mano(self, class_list_dict):
407 """Transform the openstack format (Flow Classifier) to mano format
408 (Classification) class_list_dict can be a list of dict or a single dict
409 """
410 if isinstance(class_list_dict, dict):
411 class_list_ = [class_list_dict]
412 elif isinstance(class_list_dict, list):
413 class_list_ = class_list_dict
414 else:
415 raise TypeError("param class_list_dict must be a list or a dictionary")
416 for classification in class_list_:
417 id = classification.pop("id")
418 name = classification.pop("name")
419 description = classification.pop("description")
420 project_id = classification.pop("project_id")
421 tenant_id = classification.pop("tenant_id")
422 original_classification = copy.deepcopy(classification)
423 classification.clear()
424 classification["ctype"] = "legacy_flow_classifier"
425 classification["definition"] = original_classification
426 classification["id"] = id
427 classification["name"] = name
428 classification["description"] = description
429 classification["project_id"] = project_id
430 classification["tenant_id"] = tenant_id
431
432 def __sfi_os2mano(self, sfi_list_dict):
433 """Transform the openstack format (Port Pair) to mano format (SFI)
434 sfi_list_dict can be a list of dict or a single dict
435 """
436 if isinstance(sfi_list_dict, dict):
437 sfi_list_ = [sfi_list_dict]
438 elif isinstance(sfi_list_dict, list):
439 sfi_list_ = sfi_list_dict
440 else:
441 raise TypeError("param sfi_list_dict must be a list or a dictionary")
442
443 for sfi in sfi_list_:
444 sfi["ingress_ports"] = []
445 sfi["egress_ports"] = []
446
447 if sfi.get("ingress"):
448 sfi["ingress_ports"].append(sfi["ingress"])
449
450 if sfi.get("egress"):
451 sfi["egress_ports"].append(sfi["egress"])
452
453 del sfi["ingress"]
454 del sfi["egress"]
455 params = sfi.get("service_function_parameters")
456 sfc_encap = False
457
458 if params:
459 correlation = params.get("correlation")
460
461 if correlation:
462 sfc_encap = True
463
464 sfi["sfc_encap"] = sfc_encap
465 del sfi["service_function_parameters"]
466
467 def __sf_os2mano(self, sf_list_dict):
468 """Transform the openstack format (Port Pair Group) to mano format (SF)
469 sf_list_dict can be a list of dict or a single dict
470 """
471 if isinstance(sf_list_dict, dict):
472 sf_list_ = [sf_list_dict]
473 elif isinstance(sf_list_dict, list):
474 sf_list_ = sf_list_dict
475 else:
476 raise TypeError("param sf_list_dict must be a list or a dictionary")
477
478 for sf in sf_list_:
479 del sf["port_pair_group_parameters"]
480 sf["sfis"] = sf["port_pairs"]
481 del sf["port_pairs"]
482
483 def __sfp_os2mano(self, sfp_list_dict):
484 """Transform the openstack format (Port Chain) to mano format (SFP)
485 sfp_list_dict can be a list of dict or a single dict
486 """
487 if isinstance(sfp_list_dict, dict):
488 sfp_list_ = [sfp_list_dict]
489 elif isinstance(sfp_list_dict, list):
490 sfp_list_ = sfp_list_dict
491 else:
492 raise TypeError("param sfp_list_dict must be a list or a dictionary")
493
494 for sfp in sfp_list_:
495 params = sfp.pop("chain_parameters")
496 sfc_encap = False
497
498 if params:
499 correlation = params.get("correlation")
500
501 if correlation:
502 sfc_encap = True
503
504 sfp["sfc_encap"] = sfc_encap
505 sfp["spi"] = sfp.pop("chain_id")
506 sfp["classifications"] = sfp.pop("flow_classifiers")
507 sfp["service_functions"] = sfp.pop("port_pair_groups")
508
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        """Accept any classification request.

        Placeholder: only "legacy_flow_classifier" exists today, so every
        request is accepted without inspecting *definition*.
        """
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.
517
    def _format_exception(self, exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
        message_error = str(exception)
        tip = ""

        # NOTE: the isinstance chain below is checked in order, so the more
        # specific classes (NotFound, connection errors, bad requests) are
        # tested before the generic ClientException families.
        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
            ),
        ):
            # the requested resource does not exist at the VIM
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
            ),
        ):
            # transport-level failure: bad URL, credentials or TLS setup
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
            ),
        ):
            # malformed request or missing data supplied by the caller
            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
            ),
        ):
            # generic client errors not matched by the cases above
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, vimconn.VimConnException):
            # already a vimconn exception: re-raise untouched
            raise exception
        else:  # ()
            # anything unexpected: log the traceback and report as connection error
            self.logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnConnectionException(
                type(exception).__name__ + ": " + message_error
            )
586
    def _get_ids_from_name(self):
        """
        Obtain ids from name of tenant and security_groups. Store at self.security_groups_id
        :return: None
        """
        # get tenant_id if only tenant_name is supplied
        self._reload_connection()

        if not self.my_tenant_id:
            raise vimconn.VimConnConnectionException(
                "Error getting tenant information from name={} id={}".format(
                    self.tenant_name, self.tenant_id
                )
            )

        if self.config.get("security_groups") and not self.security_groups_id:
            # convert from name to id
            neutron_sg_list = self.neutron.list_security_groups(
                tenant_id=self.my_tenant_id
            )["security_groups"]

            self.security_groups_id = []
            for sg in self.config.get("security_groups"):
                for neutron_sg in neutron_sg_list:
                    # config entries may hold either the group id or its name
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    # for/else: no neutron group matched this entry; reset the
                    # cache so a later call retries the conversion, then fail
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )
620
    def check_vim_connectivity(self):
        """Raise (via get_network_list) if the VIM is unreachable or the credentials are wrong."""
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})
624
625 def get_tenant_list(self, filter_dict={}):
626 """Obtain tenants of VIM
627 filter_dict can contain the following keys:
628 name: filter by tenant name
629 id: filter by tenant uuid/id
630 <other VIM specific>
631 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
632 """
633 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
634
635 try:
636 self._reload_connection()
637
638 if self.api_version3:
639 project_class_list = self.keystone.projects.list(
640 name=filter_dict.get("name")
641 )
642 else:
643 project_class_list = self.keystone.tenants.findall(**filter_dict)
644
645 project_list = []
646
647 for project in project_class_list:
648 if filter_dict.get("id") and filter_dict["id"] != project.id:
649 continue
650
651 project_list.append(project.to_dict())
652
653 return project_list
654 except (
655 ksExceptions.ConnectionError,
656 ksExceptions.ClientException,
657 ConnectionError,
658 ) as e:
659 self._format_exception(e)
660
661 def new_tenant(self, tenant_name, tenant_description):
662 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
663 self.logger.debug("Adding a new tenant name: %s", tenant_name)
664
665 try:
666 self._reload_connection()
667
668 if self.api_version3:
669 project = self.keystone.projects.create(
670 tenant_name,
671 self.config.get("project_domain_id", "default"),
672 description=tenant_description,
673 is_domain=False,
674 )
675 else:
676 project = self.keystone.tenants.create(tenant_name, tenant_description)
677
678 return project.id
679 except (
680 ksExceptions.ConnectionError,
681 ksExceptions.ClientException,
682 ksExceptions.BadRequest,
683 ConnectionError,
684 ) as e:
685 self._format_exception(e)
686
687 def delete_tenant(self, tenant_id):
688 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
689 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
690
691 try:
692 self._reload_connection()
693
694 if self.api_version3:
695 self.keystone.projects.delete(tenant_id)
696 else:
697 self.keystone.tenants.delete(tenant_id)
698
699 return tenant_id
700 except (
701 ksExceptions.ConnectionError,
702 ksExceptions.ClientException,
703 ksExceptions.NotFound,
704 ConnectionError,
705 ) as e:
706 self._format_exception(e)
707
708 def new_network(
709 self,
710 net_name,
711 net_type,
712 ip_profile=None,
713 shared=False,
714 provider_network_profile=None,
715 ):
716 """Adds a tenant network to VIM
717 Params:
718 'net_name': name of the network
719 'net_type': one of:
720 'bridge': overlay isolated network
721 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
722 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
723 'ip_profile': is a dict containing the IP parameters of the network
724 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
725 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
726 'gateway_address': (Optional) ip_schema, that is X.X.X.X
727 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
728 'dhcp_enabled': True or False
729 'dhcp_start_address': ip_schema, first IP to grant
730 'dhcp_count': number of IPs to grant.
731 'shared': if this network can be seen/use by other tenants/organization
732 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
733 physical-network: physnet-label}
734 Returns a tuple with the network identifier and created_items, or raises an exception on error
735 created_items can be None or a dictionary where this method can include key-values that will be passed to
736 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
737 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
738 as not present.
739 """
740 self.logger.debug(
741 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
742 )
743 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
744
745 try:
746 vlan = None
747
748 if provider_network_profile:
749 vlan = provider_network_profile.get("segmentation-id")
750
751 new_net = None
752 created_items = {}
753 self._reload_connection()
754 network_dict = {"name": net_name, "admin_state_up": True}
755
756 if net_type in ("data", "ptp"):
757 provider_physical_network = None
758
759 if provider_network_profile and provider_network_profile.get(
760 "physical-network"
761 ):
762 provider_physical_network = provider_network_profile.get(
763 "physical-network"
764 )
765
766 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
767 # or not declared, just ignore the checking
768 if (
769 isinstance(
770 self.config.get("dataplane_physical_net"), (tuple, list)
771 )
772 and provider_physical_network
773 not in self.config["dataplane_physical_net"]
774 ):
775 raise vimconn.VimConnConflictException(
776 "Invalid parameter 'provider-network:physical-network' "
777 "for network creation. '{}' is not one of the declared "
778 "list at VIM_config:dataplane_physical_net".format(
779 provider_physical_network
780 )
781 )
782
783 # use the default dataplane_physical_net
784 if not provider_physical_network:
785 provider_physical_network = self.config.get(
786 "dataplane_physical_net"
787 )
788
789 # if it is non empty list, use the first value. If it is a string use the value directly
790 if (
791 isinstance(provider_physical_network, (tuple, list))
792 and provider_physical_network
793 ):
794 provider_physical_network = provider_physical_network[0]
795
796 if not provider_physical_network:
797 raise vimconn.VimConnConflictException(
798 "missing information needed for underlay networks. Provide "
799 "'dataplane_physical_net' configuration at VIM or use the NS "
800 "instantiation parameter 'provider-network.physical-network'"
801 " for the VLD"
802 )
803
804 if not self.config.get("multisegment_support"):
805 network_dict[
806 "provider:physical_network"
807 ] = provider_physical_network
808
809 if (
810 provider_network_profile
811 and "network-type" in provider_network_profile
812 ):
813 network_dict[
814 "provider:network_type"
815 ] = provider_network_profile["network-type"]
816 else:
817 network_dict["provider:network_type"] = self.config.get(
818 "dataplane_network_type", "vlan"
819 )
820
821 if vlan:
822 network_dict["provider:segmentation_id"] = vlan
823 else:
824 # Multi-segment case
825 segment_list = []
826 segment1_dict = {
827 "provider:physical_network": "",
828 "provider:network_type": "vxlan",
829 }
830 segment_list.append(segment1_dict)
831 segment2_dict = {
832 "provider:physical_network": provider_physical_network,
833 "provider:network_type": "vlan",
834 }
835
836 if vlan:
837 segment2_dict["provider:segmentation_id"] = vlan
838 elif self.config.get("multisegment_vlan_range"):
839 vlanID = self._generate_multisegment_vlanID()
840 segment2_dict["provider:segmentation_id"] = vlanID
841
842 # else
843 # raise vimconn.VimConnConflictException(
844 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
845 # network")
846 segment_list.append(segment2_dict)
847 network_dict["segments"] = segment_list
848
849 # VIO Specific Changes. It needs a concrete VLAN
850 if self.vim_type == "VIO" and vlan is None:
851 if self.config.get("dataplane_net_vlan_range") is None:
852 raise vimconn.VimConnConflictException(
853 "You must provide 'dataplane_net_vlan_range' in format "
854 "[start_ID - end_ID] at VIM_config for creating underlay "
855 "networks"
856 )
857
858 network_dict["provider:segmentation_id"] = self._generate_vlanID()
859
860 network_dict["shared"] = shared
861
862 if self.config.get("disable_network_port_security"):
863 network_dict["port_security_enabled"] = False
864
865 if self.config.get("neutron_availability_zone_hints"):
866 hints = self.config.get("neutron_availability_zone_hints")
867
868 if isinstance(hints, str):
869 hints = [hints]
870
871 network_dict["availability_zone_hints"] = hints
872
873 new_net = self.neutron.create_network({"network": network_dict})
874 # print new_net
875 # create subnetwork, even if there is no profile
876
877 if not ip_profile:
878 ip_profile = {}
879
880 if not ip_profile.get("subnet_address"):
881 # Fake subnet is required
882 subnet_rand = random.randint(0, 255)
883 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
884
885 if "ip_version" not in ip_profile:
886 ip_profile["ip_version"] = "IPv4"
887
888 subnet = {
889 "name": net_name + "-subnet",
890 "network_id": new_net["network"]["id"],
891 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
892 "cidr": ip_profile["subnet_address"],
893 }
894
895 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
896 if ip_profile.get("gateway_address"):
897 subnet["gateway_ip"] = ip_profile["gateway_address"]
898 else:
899 subnet["gateway_ip"] = None
900
901 if ip_profile.get("dns_address"):
902 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
903
904 if "dhcp_enabled" in ip_profile:
905 subnet["enable_dhcp"] = (
906 False
907 if ip_profile["dhcp_enabled"] == "false"
908 or ip_profile["dhcp_enabled"] is False
909 else True
910 )
911
912 if ip_profile.get("dhcp_start_address"):
913 subnet["allocation_pools"] = []
914 subnet["allocation_pools"].append(dict())
915 subnet["allocation_pools"][0]["start"] = ip_profile[
916 "dhcp_start_address"
917 ]
918
919 if ip_profile.get("dhcp_count"):
920 # parts = ip_profile["dhcp_start_address"].split(".")
921 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
922 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
923 ip_int += ip_profile["dhcp_count"] - 1
924 ip_str = str(netaddr.IPAddress(ip_int))
925 subnet["allocation_pools"][0]["end"] = ip_str
926
927 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
928 self.neutron.create_subnet({"subnet": subnet})
929
930 if net_type == "data" and self.config.get("multisegment_support"):
931 if self.config.get("l2gw_support"):
932 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
933 for l2gw in l2gw_list:
934 l2gw_conn = {
935 "l2_gateway_id": l2gw["id"],
936 "network_id": new_net["network"]["id"],
937 "segmentation_id": str(vlanID),
938 }
939 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
940 {"l2_gateway_connection": l2gw_conn}
941 )
942 created_items[
943 "l2gwconn:"
944 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
945 ] = True
946
947 return new_net["network"]["id"], created_items
948 except Exception as e:
949 # delete l2gw connections (if any) before deleting the network
950 for k, v in created_items.items():
951 if not v: # skip already deleted
952 continue
953
954 try:
955 k_item, _, k_id = k.partition(":")
956
957 if k_item == "l2gwconn":
958 self.neutron.delete_l2_gateway_connection(k_id)
959 except Exception as e2:
960 self.logger.error(
961 "Error deleting l2 gateway connection: {}: {}".format(
962 type(e2).__name__, e2
963 )
964 )
965
966 if new_net:
967 self.neutron.delete_network(new_net["network"]["id"])
968
969 self._format_exception(e)
970
971 def get_network_list(self, filter_dict={}):
972 """Obtain tenant networks of VIM
973 Filter_dict can be:
974 name: network name
975 id: network uuid
976 shared: boolean
977 tenant_id: tenant
978 admin_state_up: boolean
979 status: 'ACTIVE'
980 Returns the network list of dictionaries
981 """
982 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
983
984 try:
985 self._reload_connection()
986 filter_dict_os = filter_dict.copy()
987
988 if self.api_version3 and "tenant_id" in filter_dict_os:
989 # TODO check
990 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
991
992 net_dict = self.neutron.list_networks(**filter_dict_os)
993 net_list = net_dict["networks"]
994 self.__net_os2mano(net_list)
995
996 return net_list
997 except (
998 neExceptions.ConnectionFailed,
999 ksExceptions.ClientException,
1000 neExceptions.NeutronException,
1001 ConnectionError,
1002 ) as e:
1003 self._format_exception(e)
1004
1005 def get_network(self, net_id):
1006 """Obtain details of network from VIM
1007 Returns the network information from a network id"""
1008 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1009 filter_dict = {"id": net_id}
1010 net_list = self.get_network_list(filter_dict)
1011
1012 if len(net_list) == 0:
1013 raise vimconn.VimConnNotFoundException(
1014 "Network '{}' not found".format(net_id)
1015 )
1016 elif len(net_list) > 1:
1017 raise vimconn.VimConnConflictException(
1018 "Found more than one network with this criteria"
1019 )
1020
1021 net = net_list[0]
1022 subnets = []
1023 for subnet_id in net.get("subnets", ()):
1024 try:
1025 subnet = self.neutron.show_subnet(subnet_id)
1026 except Exception as e:
1027 self.logger.error(
1028 "osconnector.get_network(): Error getting subnet %s %s"
1029 % (net_id, str(e))
1030 )
1031 subnet = {"id": subnet_id, "fault": str(e)}
1032
1033 subnets.append(subnet)
1034
1035 net["subnets"] = subnets
1036 net["encapsulation"] = net.get("provider:network_type")
1037 net["encapsulation_type"] = net.get("provider:network_type")
1038 net["segmentation_id"] = net.get("provider:segmentation_id")
1039 net["encapsulation_id"] = net.get("provider:segmentation_id")
1040
1041 return net
1042
1043 def delete_network(self, net_id, created_items=None):
1044 """
1045 Removes a tenant network from VIM and its associated elements
1046 :param net_id: VIM identifier of the network, provided by method new_network
1047 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1048 Returns the network identifier or raises an exception upon error or when network is not found
1049 """
1050 self.logger.debug("Deleting network '%s' from VIM", net_id)
1051
1052 if created_items is None:
1053 created_items = {}
1054
1055 try:
1056 self._reload_connection()
1057 # delete l2gw connections (if any) before deleting the network
1058 for k, v in created_items.items():
1059 if not v: # skip already deleted
1060 continue
1061
1062 try:
1063 k_item, _, k_id = k.partition(":")
1064 if k_item == "l2gwconn":
1065 self.neutron.delete_l2_gateway_connection(k_id)
1066 except Exception as e:
1067 self.logger.error(
1068 "Error deleting l2 gateway connection: {}: {}".format(
1069 type(e).__name__, e
1070 )
1071 )
1072
1073 # delete VM ports attached to this networks before the network
1074 ports = self.neutron.list_ports(network_id=net_id)
1075 for p in ports["ports"]:
1076 try:
1077 self.neutron.delete_port(p["id"])
1078 except Exception as e:
1079 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1080
1081 self.neutron.delete_network(net_id)
1082
1083 return net_id
1084 except (
1085 neExceptions.ConnectionFailed,
1086 neExceptions.NetworkNotFoundClient,
1087 neExceptions.NeutronException,
1088 ksExceptions.ClientException,
1089 neExceptions.NeutronException,
1090 ConnectionError,
1091 ) as e:
1092 self._format_exception(e)
1093
1094 def refresh_nets_status(self, net_list):
1095 """Get the status of the networks
1096 Params: the list of network identifiers
1097 Returns a dictionary with:
1098 net_id: #VIM id of this network
1099 status: #Mandatory. Text with one of:
1100 # DELETED (not found at vim)
1101 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1102 # OTHER (Vim reported other status not understood)
1103 # ERROR (VIM indicates an ERROR status)
1104 # ACTIVE, INACTIVE, DOWN (admin down),
1105 # BUILD (on building process)
1106 #
1107 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1108 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1109 """
1110 net_dict = {}
1111
1112 for net_id in net_list:
1113 net = {}
1114
1115 try:
1116 net_vim = self.get_network(net_id)
1117
1118 if net_vim["status"] in netStatus2manoFormat:
1119 net["status"] = netStatus2manoFormat[net_vim["status"]]
1120 else:
1121 net["status"] = "OTHER"
1122 net["error_msg"] = "VIM status reported " + net_vim["status"]
1123
1124 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1125 net["status"] = "DOWN"
1126
1127 net["vim_info"] = self.serialize(net_vim)
1128
1129 if net_vim.get("fault"): # TODO
1130 net["error_msg"] = str(net_vim["fault"])
1131 except vimconn.VimConnNotFoundException as e:
1132 self.logger.error("Exception getting net status: %s", str(e))
1133 net["status"] = "DELETED"
1134 net["error_msg"] = str(e)
1135 except vimconn.VimConnException as e:
1136 self.logger.error("Exception getting net status: %s", str(e))
1137 net["status"] = "VIM_ERROR"
1138 net["error_msg"] = str(e)
1139 net_dict[net_id] = net
1140 return net_dict
1141
1142 def get_flavor(self, flavor_id):
1143 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1144 self.logger.debug("Getting flavor '%s'", flavor_id)
1145
1146 try:
1147 self._reload_connection()
1148 flavor = self.nova.flavors.find(id=flavor_id)
1149 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1150
1151 return flavor.to_dict()
1152 except (
1153 nvExceptions.NotFound,
1154 nvExceptions.ClientException,
1155 ksExceptions.ClientException,
1156 ConnectionError,
1157 ) as e:
1158 self._format_exception(e)
1159
1160 def get_flavor_id_from_data(self, flavor_dict):
1161 """Obtain flavor id that match the flavor description
1162 Returns the flavor_id or raises a vimconnNotFoundException
1163 flavor_dict: contains the required ram, vcpus, disk
1164 If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1165 and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1166 vimconnNotFoundException is raised
1167 """
1168 exact_match = False if self.config.get("use_existing_flavors") else True
1169
1170 try:
1171 self._reload_connection()
1172 flavor_candidate_id = None
1173 flavor_candidate_data = (10000, 10000, 10000)
1174 flavor_target = (
1175 flavor_dict["ram"],
1176 flavor_dict["vcpus"],
1177 flavor_dict["disk"],
1178 flavor_dict.get("ephemeral", 0),
1179 flavor_dict.get("swap", 0),
1180 )
1181 # numa=None
1182 extended = flavor_dict.get("extended", {})
1183 if extended:
1184 # TODO
1185 raise vimconn.VimConnNotFoundException(
1186 "Flavor with EPA still not implemented"
1187 )
1188 # if len(numas) > 1:
1189 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1190 # numa=numas[0]
1191 # numas = extended.get("numas")
1192 for flavor in self.nova.flavors.list():
1193 epa = flavor.get_keys()
1194
1195 if epa:
1196 continue
1197 # TODO
1198
1199 flavor_data = (
1200 flavor.ram,
1201 flavor.vcpus,
1202 flavor.disk,
1203 flavor.ephemeral,
1204 flavor.swap if isinstance(flavor.swap, int) else 0,
1205 )
1206 if flavor_data == flavor_target:
1207 return flavor.id
1208 elif (
1209 not exact_match
1210 and flavor_target < flavor_data < flavor_candidate_data
1211 ):
1212 flavor_candidate_id = flavor.id
1213 flavor_candidate_data = flavor_data
1214
1215 if not exact_match and flavor_candidate_id:
1216 return flavor_candidate_id
1217
1218 raise vimconn.VimConnNotFoundException(
1219 "Cannot find any flavor matching '{}'".format(flavor_dict)
1220 )
1221 except (
1222 nvExceptions.NotFound,
1223 nvExceptions.ClientException,
1224 ksExceptions.ClientException,
1225 ConnectionError,
1226 ) as e:
1227 self._format_exception(e)
1228
1229 def process_resource_quota(self, quota, prefix, extra_specs):
1230 """
1231 :param prefix:
1232 :param extra_specs:
1233 :return:
1234 """
1235 if "limit" in quota:
1236 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1237
1238 if "reserve" in quota:
1239 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1240
1241 if "shares" in quota:
1242 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1243 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1244
    def new_flavor(self, flavor_data, change_name_if_used=True):
        """Adds a tenant flavor to openstack VIM.

        if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
        repetition.
        flavor_data keys used here: name (mandatory), ram, vcpus, disk, ephemeral, swap, is_public and an
        optional "extended" dict with numas / cpu-quota / mem-quota / vif-quota / disk-io-quota / mempage-size.
        Returns the flavor identifier
        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        # up to max_retries attempts; a retry only happens on a nova name
        # Conflict while change_name_if_used is enabled
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        # get used names
                        fl_names = []
                        fl = self.nova.flavors.list()

                        for f in fl:
                            fl_names.append(f.name)

                        # append "-<n>" until the name is free at the VIM
                        while name in fl_names:
                            name_suffix += 1
                            name = flavor_data["name"] + "-" + str(name_suffix)

                    ram = flavor_data.get("ram", 64)
                    vcpus = flavor_data.get("vcpus", 1)
                    extra_specs = {}

                    extended = flavor_data.get("extended")
                    if extended:
                        numas = extended.get("numas")

                        if numas:
                            numa_nodes = len(numas)

                            if numa_nodes > 1:
                                # NOTE(review): returns a (code, message) tuple here
                                # instead of raising like every other error path
                                return -1, "Can not add flavor with more than one numa"

                            extra_specs["hw:numa_nodes"] = str(numa_nodes)
                            extra_specs["hw:mem_page_size"] = "large"
                            extra_specs["hw:cpu_policy"] = "dedicated"
                            extra_specs["hw:numa_mempolicy"] = "strict"

                            if self.vim_type == "VIO":
                                # VMware Integrated OpenStack needs its own keys
                                extra_specs[
                                    "vmware:extra_config"
                                ] = '{"numa.nodeAffinity":"0"}'
                                extra_specs["vmware:latency_sensitivity_level"] = "high"

                            for numa in numas:
                                # overwrite ram and vcpus
                                # check if key "memory" is present in numa else use ram value at flavor
                                if "memory" in numa:
                                    ram = numa["memory"] * 1024
                                # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
                                # implemented/virt-driver-cpu-thread-pinning.html
                                extra_specs["hw:cpu_sockets"] = 1

                                if "paired-threads" in numa:
                                    vcpus = numa["paired-threads"] * 2
                                    # cpu_thread_policy "require" implies that the compute node must have an
                                    # STM architecture
                                    extra_specs["hw:cpu_thread_policy"] = "require"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                elif "cores" in numa:
                                    vcpus = numa["cores"]
                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT
                                    # architecture, or a non-SMT architecture will be emulated
                                    extra_specs["hw:cpu_thread_policy"] = "isolate"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                elif "threads" in numa:
                                    vcpus = numa["threads"]
                                    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
                                    # architecture
                                    extra_specs["hw:cpu_thread_policy"] = "prefer"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                # for interface in numa.get("interfaces",() ):
                                # if interface["dedicated"]=="yes":
                                # raise vimconn.VimConnException("Passthrough interfaces are not supported
                                # for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
                                # #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
                                # when a way to connect it is available
                        # NOTE(review): because this is an elif of "if numas:",
                        # cpu-quota is only processed when no numas are defined —
                        # confirm this exclusion is intended
                        elif extended.get("cpu-quota"):
                            self.process_resource_quota(
                                extended.get("cpu-quota"), "cpu", extra_specs
                            )

                        if extended.get("mem-quota"):
                            self.process_resource_quota(
                                extended.get("mem-quota"), "memory", extra_specs
                            )

                        if extended.get("vif-quota"):
                            self.process_resource_quota(
                                extended.get("vif-quota"), "vif", extra_specs
                            )

                        if extended.get("disk-io-quota"):
                            self.process_resource_quota(
                                extended.get("disk-io-quota"), "disk_io", extra_specs
                            )

                        # Set the mempage size as specified in the descriptor
                        if extended.get("mempage-size"):
                            if extended.get("mempage-size") == "LARGE":
                                extra_specs["hw:mem_page_size"] = "large"
                            elif extended.get("mempage-size") == "SMALL":
                                extra_specs["hw:mem_page_size"] = "small"
                            elif extended.get("mempage-size") == "SIZE_2MB":
                                extra_specs["hw:mem_page_size"] = "2MB"
                            elif extended.get("mempage-size") == "SIZE_1GB":
                                extra_specs["hw:mem_page_size"] = "1GB"
                            elif extended.get("mempage-size") == "PREFER_LARGE":
                                extra_specs["hw:mem_page_size"] = "any"
                            else:
                                # The validations in NBI should make reaching here not possible.
                                # If this message is shown, check validations
                                self.logger.debug(
                                    "Invalid mempage-size %s. Will be ignored",
                                    extended.get("mempage-size"),
                                )
                    # create flavor
                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )
                    # add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id
                except nvExceptions.Conflict as e:
                    # name already used: loop again so a new suffix is computed
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)
        # except nvExceptions.BadRequest as e:
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
1400
1401 def delete_flavor(self, flavor_id):
1402 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1403 try:
1404 self._reload_connection()
1405 self.nova.flavors.delete(flavor_id)
1406
1407 return flavor_id
1408 # except nvExceptions.BadRequest as e:
1409 except (
1410 nvExceptions.NotFound,
1411 ksExceptions.ClientException,
1412 nvExceptions.ClientException,
1413 ConnectionError,
1414 ) as e:
1415 self._format_exception(e)
1416
1417 def new_image(self, image_dict):
1418 """
1419 Adds a tenant image to VIM. imge_dict is a dictionary with:
1420 name: name
1421 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1422 location: path or URI
1423 public: "yes" or "no"
1424 metadata: metadata of the image
1425 Returns the image_id
1426 """
1427 retry = 0
1428 max_retries = 3
1429
1430 while retry < max_retries:
1431 retry += 1
1432 try:
1433 self._reload_connection()
1434
1435 # determine format http://docs.openstack.org/developer/glance/formats.html
1436 if "disk_format" in image_dict:
1437 disk_format = image_dict["disk_format"]
1438 else: # autodiscover based on extension
1439 if image_dict["location"].endswith(".qcow2"):
1440 disk_format = "qcow2"
1441 elif image_dict["location"].endswith(".vhd"):
1442 disk_format = "vhd"
1443 elif image_dict["location"].endswith(".vmdk"):
1444 disk_format = "vmdk"
1445 elif image_dict["location"].endswith(".vdi"):
1446 disk_format = "vdi"
1447 elif image_dict["location"].endswith(".iso"):
1448 disk_format = "iso"
1449 elif image_dict["location"].endswith(".aki"):
1450 disk_format = "aki"
1451 elif image_dict["location"].endswith(".ari"):
1452 disk_format = "ari"
1453 elif image_dict["location"].endswith(".ami"):
1454 disk_format = "ami"
1455 else:
1456 disk_format = "raw"
1457
1458 self.logger.debug(
1459 "new_image: '%s' loading from '%s'",
1460 image_dict["name"],
1461 image_dict["location"],
1462 )
1463 if self.vim_type == "VIO":
1464 container_format = "bare"
1465 if "container_format" in image_dict:
1466 container_format = image_dict["container_format"]
1467
1468 new_image = self.glance.images.create(
1469 name=image_dict["name"],
1470 container_format=container_format,
1471 disk_format=disk_format,
1472 )
1473 else:
1474 new_image = self.glance.images.create(name=image_dict["name"])
1475
1476 if image_dict["location"].startswith("http"):
1477 # TODO there is not a method to direct download. It must be downloaded locally with requests
1478 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1479 else: # local path
1480 with open(image_dict["location"]) as fimage:
1481 self.glance.images.upload(new_image.id, fimage)
1482 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1483 # image_dict.get("public","yes")=="yes",
1484 # container_format="bare", data=fimage, disk_format=disk_format)
1485
1486 metadata_to_load = image_dict.get("metadata")
1487
1488 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1489 # for openstack
1490 if self.vim_type == "VIO":
1491 metadata_to_load["upload_location"] = image_dict["location"]
1492 else:
1493 metadata_to_load["location"] = image_dict["location"]
1494
1495 self.glance.images.update(new_image.id, **metadata_to_load)
1496
1497 return new_image.id
1498 except (
1499 nvExceptions.Conflict,
1500 ksExceptions.ClientException,
1501 nvExceptions.ClientException,
1502 ) as e:
1503 self._format_exception(e)
1504 except (
1505 HTTPException,
1506 gl1Exceptions.HTTPException,
1507 gl1Exceptions.CommunicationError,
1508 ConnectionError,
1509 ) as e:
1510 if retry == max_retries:
1511 continue
1512
1513 self._format_exception(e)
1514 except IOError as e: # can not open the file
1515 raise vimconn.VimConnConnectionException(
1516 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1517 http_code=vimconn.HTTP_Bad_Request,
1518 )
1519
1520 def delete_image(self, image_id):
1521 """Deletes a tenant image from openstack VIM. Returns the old id"""
1522 try:
1523 self._reload_connection()
1524 self.glance.images.delete(image_id)
1525
1526 return image_id
1527 except (
1528 nvExceptions.NotFound,
1529 ksExceptions.ClientException,
1530 nvExceptions.ClientException,
1531 gl1Exceptions.CommunicationError,
1532 gl1Exceptions.HTTPNotFound,
1533 ConnectionError,
1534 ) as e: # TODO remove
1535 self._format_exception(e)
1536
1537 def get_image_id_from_path(self, path):
1538 """Get the image id from image path in the VIM database. Returns the image_id"""
1539 try:
1540 self._reload_connection()
1541 images = self.glance.images.list()
1542
1543 for image in images:
1544 if image.metadata.get("location") == path:
1545 return image.id
1546
1547 raise vimconn.VimConnNotFoundException(
1548 "image with location '{}' not found".format(path)
1549 )
1550 except (
1551 ksExceptions.ClientException,
1552 nvExceptions.ClientException,
1553 gl1Exceptions.CommunicationError,
1554 ConnectionError,
1555 ) as e:
1556 self._format_exception(e)
1557
1558 def get_image_list(self, filter_dict={}):
1559 """Obtain tenant images from VIM
1560 Filter_dict can be:
1561 id: image id
1562 name: image name
1563 checksum: image checksum
1564 Returns the image list of dictionaries:
1565 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1566 List can be empty
1567 """
1568 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1569
1570 try:
1571 self._reload_connection()
1572 # filter_dict_os = filter_dict.copy()
1573 # First we filter by the available filter fields: name, id. The others are removed.
1574 image_list = self.glance.images.list()
1575 filtered_list = []
1576
1577 for image in image_list:
1578 try:
1579 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1580 continue
1581
1582 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1583 continue
1584
1585 if (
1586 filter_dict.get("checksum")
1587 and image["checksum"] != filter_dict["checksum"]
1588 ):
1589 continue
1590
1591 filtered_list.append(image.copy())
1592 except gl1Exceptions.HTTPNotFound:
1593 pass
1594
1595 return filtered_list
1596 except (
1597 ksExceptions.ClientException,
1598 nvExceptions.ClientException,
1599 gl1Exceptions.CommunicationError,
1600 ConnectionError,
1601 ) as e:
1602 self._format_exception(e)
1603
1604 def __wait_for_vm(self, vm_id, status):
1605 """wait until vm is in the desired status and return True.
1606 If the VM gets in ERROR status, return false.
1607 If the timeout is reached generate an exception"""
1608 elapsed_time = 0
1609 while elapsed_time < server_timeout:
1610 vm_status = self.nova.servers.get(vm_id).status
1611
1612 if vm_status == status:
1613 return True
1614
1615 if vm_status == "ERROR":
1616 return False
1617
1618 time.sleep(5)
1619 elapsed_time += 5
1620
1621 # if we exceeded the timeout rollback
1622 if elapsed_time >= server_timeout:
1623 raise vimconn.VimConnException(
1624 "Timeout waiting for instance " + vm_id + " to get " + status,
1625 http_code=vimconn.HTTP_Request_Timeout,
1626 )
1627
1628 def _get_openstack_availablity_zones(self):
1629 """
1630 Get from openstack availability zones available
1631 :return:
1632 """
1633 try:
1634 openstack_availability_zone = self.nova.availability_zones.list()
1635 openstack_availability_zone = [
1636 str(zone.zoneName)
1637 for zone in openstack_availability_zone
1638 if zone.zoneName != "internal"
1639 ]
1640
1641 return openstack_availability_zone
1642 except Exception:
1643 return None
1644
1645 def _set_availablity_zones(self):
1646 """
1647 Set vim availablity zone
1648 :return:
1649 """
1650 if "availability_zone" in self.config:
1651 vim_availability_zones = self.config.get("availability_zone")
1652
1653 if isinstance(vim_availability_zones, str):
1654 self.availability_zone = [vim_availability_zones]
1655 elif isinstance(vim_availability_zones, list):
1656 self.availability_zone = vim_availability_zones
1657 else:
1658 self.availability_zone = self._get_openstack_availablity_zones()
1659
1660 def _get_vm_availability_zone(
1661 self, availability_zone_index, availability_zone_list
1662 ):
1663 """
1664 Return thge availability zone to be used by the created VM.
1665 :return: The VIM availability zone to be used or None
1666 """
1667 if availability_zone_index is None:
1668 if not self.config.get("availability_zone"):
1669 return None
1670 elif isinstance(self.config.get("availability_zone"), str):
1671 return self.config["availability_zone"]
1672 else:
1673 # TODO consider using a different parameter at config for default AV and AV list match
1674 return self.config["availability_zone"][0]
1675
1676 vim_availability_zones = self.availability_zone
1677 # check if VIM offer enough availability zones describe in the VNFD
1678 if vim_availability_zones and len(availability_zone_list) <= len(
1679 vim_availability_zones
1680 ):
1681 # check if all the names of NFV AV match VIM AV names
1682 match_by_index = False
1683 for av in availability_zone_list:
1684 if av not in vim_availability_zones:
1685 match_by_index = True
1686 break
1687
1688 if match_by_index:
1689 return vim_availability_zones[availability_zone_index]
1690 else:
1691 return availability_zone_list[availability_zone_index]
1692 else:
1693 raise vimconn.VimConnConflictException(
1694 "No enough availability zones at VIM for this deployment"
1695 )
1696
1697 def new_vminstance(
1698 self,
1699 name,
1700 description,
1701 start,
1702 image_id,
1703 flavor_id,
1704 affinity_group_list,
1705 net_list,
1706 cloud_config=None,
1707 disk_list=None,
1708 availability_zone_index=None,
1709 availability_zone_list=None,
1710 ):
1711 """Adds a VM instance to VIM
1712 Params:
1713 start: indicates if VM must start or boot in pause mode. Ignored
1714 image_id,flavor_id: image and flavor uuid
1715 affinity_group_list: list of affinity groups, each one is a dictionary.
1716 Ignore if empty.
1717 net_list: list of interfaces, each one is a dictionary with:
1718 name:
1719 net_id: network uuid to connect
1720 vpci: virtual vcpi to assign, ignored because openstack lack #TODO
1721 model: interface model, ignored #TODO
1722 mac_address: used for SR-IOV ifaces #TODO for other types
1723 use: 'data', 'bridge', 'mgmt'
1724 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
1725 vim_id: filled/added by this function
1726 floating_ip: True/False (or it can be None)
1727 port_security: True/False
1728 'cloud_config': (optional) dictionary with:
1729 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1730 'users': (optional) list of users to be inserted, each item is a dict with:
1731 'name': (mandatory) user name,
1732 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1733 'user-data': (optional) string is a text script to be passed directly to cloud-init
1734 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1735 'dest': (mandatory) string with the destination absolute path
1736 'encoding': (optional, by default text). Can be one of:
1737 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1738 'content' (mandatory): string with the content of the file
1739 'permissions': (optional) string with file permissions, typically octal notation '0644'
1740 'owner': (optional) file owner, string with the format 'owner:group'
1741 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1742 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1743 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1744 'size': (mandatory) string with the size of the disk in GB
1745 'vim_id' (optional) should use this existing volume id
1746 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1747 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1748 availability_zone_index is None
1749 #TODO ip, security groups
1750 Returns a tuple with the instance identifier and created_items or raises an exception on error
1751 created_items can be None or a dictionary where this method can include key-values that will be passed to
1752 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1753 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1754 as not present.
1755 """
1756 self.logger.debug(
1757 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
1758 image_id,
1759 flavor_id,
1760 str(net_list),
1761 )
1762
1763 try:
1764 server = None
1765 created_items = {}
1766 # metadata = {}
1767 net_list_vim = []
1768 external_network = []
1769 # ^list of external networks to be connected to instance, later on used to create floating_ip
1770 no_secured_ports = [] # List of port-is with port-security disabled
1771 self._reload_connection()
1772 # metadata_vpci = {} # For a specific neutron plugin
1773 block_device_mapping = None
1774
1775 for net in net_list:
1776 if not net.get("net_id"): # skip non connected iface
1777 continue
1778
1779 port_dict = {
1780 "network_id": net["net_id"],
1781 "name": net.get("name"),
1782 "admin_state_up": True,
1783 }
1784
1785 if (
1786 self.config.get("security_groups")
1787 and net.get("port_security") is not False
1788 and not self.config.get("no_port_security_extension")
1789 ):
1790 if not self.security_groups_id:
1791 self._get_ids_from_name()
1792
1793 port_dict["security_groups"] = self.security_groups_id
1794
1795 if net["type"] == "virtual":
1796 pass
1797 # if "vpci" in net:
1798 # metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
1799 elif net["type"] == "VF" or net["type"] == "SR-IOV": # for VF
1800 # if "vpci" in net:
1801 # if "VF" not in metadata_vpci:
1802 # metadata_vpci["VF"]=[]
1803 # metadata_vpci["VF"].append([ net["vpci"], "" ])
1804 port_dict["binding:vnic_type"] = "direct"
1805
1806 # VIO specific Changes
1807 if self.vim_type == "VIO":
1808 # Need to create port with port_security_enabled = False and no-security-groups
1809 port_dict["port_security_enabled"] = False
1810 port_dict["provider_security_groups"] = []
1811 port_dict["security_groups"] = []
1812 else: # For PT PCI-PASSTHROUGH
1813 # if "vpci" in net:
1814 # if "PF" not in metadata_vpci:
1815 # metadata_vpci["PF"]=[]
1816 # metadata_vpci["PF"].append([ net["vpci"], "" ])
1817 port_dict["binding:vnic_type"] = "direct-physical"
1818
1819 if not port_dict["name"]:
1820 port_dict["name"] = name
1821
1822 if net.get("mac_address"):
1823 port_dict["mac_address"] = net["mac_address"]
1824
1825 if net.get("ip_address"):
1826 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1827 # TODO add "subnet_id": <subnet_id>
1828
1829 new_port = self.neutron.create_port({"port": port_dict})
1830 created_items["port:" + str(new_port["port"]["id"])] = True
1831 net["mac_adress"] = new_port["port"]["mac_address"]
1832 net["vim_id"] = new_port["port"]["id"]
1833 # if try to use a network without subnetwork, it will return a emtpy list
1834 fixed_ips = new_port["port"].get("fixed_ips")
1835
1836 if fixed_ips:
1837 net["ip"] = fixed_ips[0].get("ip_address")
1838 else:
1839 net["ip"] = None
1840
1841 port = {"port-id": new_port["port"]["id"]}
1842 if float(self.nova.api_version.get_string()) >= 2.32:
1843 port["tag"] = new_port["port"]["name"]
1844
1845 net_list_vim.append(port)
1846
1847 if net.get("floating_ip", False):
1848 net["exit_on_floating_ip_error"] = True
1849 external_network.append(net)
1850 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
1851 net["exit_on_floating_ip_error"] = False
1852 external_network.append(net)
1853 net["floating_ip"] = self.config.get("use_floating_ip")
1854
1855 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
1856 # is dropped.
1857 # As a workaround we wait until the VM is active and then disable the port-security
1858 if net.get("port_security") is False and not self.config.get(
1859 "no_port_security_extension"
1860 ):
1861 no_secured_ports.append(
1862 (
1863 new_port["port"]["id"],
1864 net.get("port_security_disable_strategy"),
1865 )
1866 )
1867
1868 # if metadata_vpci:
1869 # metadata = {"pci_assignement": json.dumps(metadata_vpci)}
1870 # if len(metadata["pci_assignement"]) >255:
1871 # #limit the metadata size
1872 # #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
1873 # self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
1874 # metadata = {}
1875
1876 self.logger.debug(
1877 "name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
1878 name,
1879 image_id,
1880 flavor_id,
1881 str(net_list_vim),
1882 description,
1883 )
1884
1885 # cloud config
1886 config_drive, userdata = self._create_user_data(cloud_config)
1887
1888 # get availability Zone
1889 vm_av_zone = self._get_vm_availability_zone(
1890 availability_zone_index, availability_zone_list
1891 )
1892
1893 # Create additional volumes in case these are present in disk_list
1894 existing_vim_volumes = []
1895 base_disk_index = ord("b")
1896 boot_volume_id = None
1897 if disk_list:
1898 block_device_mapping = {}
1899 for disk in disk_list:
1900 if "image_id" in disk:
1901 # persistent root volume
1902 base_disk_index = ord("a")
1903 image_id = ""
1904 if disk.get("vim_volume_id"):
1905
1906 # use existing persistent root volume
1907 block_device_mapping["vd" + chr(base_disk_index)] = disk[
1908 "vim_volume_id"
1909 ]
1910 existing_vim_volumes.append({"id": disk["vim_volume_id"]})
1911
1912 else:
1913 # create persistent root volume
1914 volume = self.cinder.volumes.create(
1915 size=disk["size"],
1916 name=name + "vd" + chr(base_disk_index),
1917 imageRef=disk["image_id"],
1918 # Make sure volume is in the same AZ as the VM to be attached to
1919 availability_zone=vm_av_zone,
1920 )
1921 boot_volume_id = volume.id
1922 created_items["volume:" + str(volume.id)] = True
1923 block_device_mapping[
1924 "vd" + chr(base_disk_index)
1925 ] = volume.id
1926 else:
1927 # non-root persistent volume
1928 if disk.get("vim_volume_id"):
1929
1930 # use existing persistent volume
1931 block_device_mapping["vd" + chr(base_disk_index)] = disk[
1932 "vim_volume_id"
1933 ]
1934 existing_vim_volumes.append({"id": disk["vim_volume_id"]})
1935
1936 else:
1937
1938 # create persistent volume
1939 volume = self.cinder.volumes.create(
1940 size=disk["size"],
1941 name=name + "vd" + chr(base_disk_index),
1942 # Make sure volume is in the same AZ as the VM to be attached to
1943 availability_zone=vm_av_zone,
1944 )
1945 created_items["volume:" + str(volume.id)] = True
1946 block_device_mapping[
1947 "vd" + chr(base_disk_index)
1948 ] = volume.id
1949
1950 base_disk_index += 1
1951
1952 # Wait until created volumes are with status available
1953 elapsed_time = 0
1954 while elapsed_time < volume_timeout:
1955 for created_item in created_items:
1956 v, _, volume_id = created_item.partition(":")
1957 if v == "volume":
1958 if self.cinder.volumes.get(volume_id).status != "available":
1959 break
1960 else: # all ready: break from while
1961 break
1962
1963 time.sleep(5)
1964 elapsed_time += 5
1965
1966 # Wait until existing volumes in vim are with status available
1967 while elapsed_time < volume_timeout:
1968 for volume in existing_vim_volumes:
1969 if self.cinder.volumes.get(volume["id"]).status != "available":
1970 break
1971 else: # all ready: break from while
1972 break
1973
1974 time.sleep(5)
1975 elapsed_time += 5
1976
1977 # If we exceeded the timeout rollback
1978 if elapsed_time >= volume_timeout:
1979 raise vimconn.VimConnException(
1980 "Timeout creating volumes for instance " + name,
1981 http_code=vimconn.HTTP_Request_Timeout,
1982 )
1983 if boot_volume_id:
1984 self.cinder.volumes.set_bootable(boot_volume_id, True)
1985
1986 # Manage affinity groups/server groups
1987 server_group_id = None
1988 scheduller_hints = {}
1989
1990 if affinity_group_list:
1991 # Only first id on the list will be used. Openstack restriction
1992 server_group_id = affinity_group_list[0]["affinity_group_id"]
1993 scheduller_hints["group"] = server_group_id
1994
1995 self.logger.debug(
1996 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
1997 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
1998 "block_device_mapping={}, server_group={})".format(
1999 name,
2000 image_id,
2001 flavor_id,
2002 net_list_vim,
2003 self.config.get("security_groups"),
2004 vm_av_zone,
2005 self.config.get("keypair"),
2006 userdata,
2007 config_drive,
2008 block_device_mapping,
2009 server_group_id,
2010 )
2011 )
2012 server = self.nova.servers.create(
2013 name=name,
2014 image=image_id,
2015 flavor=flavor_id,
2016 nics=net_list_vim,
2017 security_groups=self.config.get("security_groups"),
2018 # TODO remove security_groups in future versions. Already at neutron port
2019 availability_zone=vm_av_zone,
2020 key_name=self.config.get("keypair"),
2021 userdata=userdata,
2022 config_drive=config_drive,
2023 block_device_mapping=block_device_mapping,
2024 scheduler_hints=scheduller_hints,
2025 ) # , description=description)
2026
2027 vm_start_time = time.time()
2028 # Previously mentioned workaround to wait until the VM is active and then disable the port-security
2029 if no_secured_ports:
2030 self.__wait_for_vm(server.id, "ACTIVE")
2031
2032 for port in no_secured_ports:
2033 port_update = {
2034 "port": {"port_security_enabled": False, "security_groups": None}
2035 }
2036
2037 if port[1] == "allow-address-pairs":
2038 port_update = {
2039 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2040 }
2041
2042 try:
2043 self.neutron.update_port(port[0], port_update)
2044 except Exception:
2045 raise vimconn.VimConnException(
2046 "It was not possible to disable port security for port {}".format(
2047 port[0]
2048 )
2049 )
2050
2051 # print "DONE :-)", server
2052
2053 # pool_id = None
2054 for floating_network in external_network:
2055 try:
2056 assigned = False
2057 floating_ip_retries = 3
2058 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2059 # several times
2060 while not assigned:
2061 floating_ips = self.neutron.list_floatingips().get(
2062 "floatingips", ()
2063 )
2064 random.shuffle(floating_ips) # randomize
2065 for fip in floating_ips:
2066 if (
2067 fip.get("port_id")
2068 or fip.get("tenant_id") != server.tenant_id
2069 ):
2070 continue
2071
2072 if isinstance(floating_network["floating_ip"], str):
2073 if (
2074 fip.get("floating_network_id")
2075 != floating_network["floating_ip"]
2076 ):
2077 continue
2078
2079 free_floating_ip = fip["id"]
2080 break
2081 else:
2082 if (
2083 isinstance(floating_network["floating_ip"], str)
2084 and floating_network["floating_ip"].lower() != "true"
2085 ):
2086 pool_id = floating_network["floating_ip"]
2087 else:
2088 # Find the external network
2089 external_nets = list()
2090
2091 for net in self.neutron.list_networks()["networks"]:
2092 if net["router:external"]:
2093 external_nets.append(net)
2094
2095 if len(external_nets) == 0:
2096 raise vimconn.VimConnException(
2097 "Cannot create floating_ip automatically since "
2098 "no external network is present",
2099 http_code=vimconn.HTTP_Conflict,
2100 )
2101
2102 if len(external_nets) > 1:
2103 raise vimconn.VimConnException(
2104 "Cannot create floating_ip automatically since "
2105 "multiple external networks are present",
2106 http_code=vimconn.HTTP_Conflict,
2107 )
2108
2109 pool_id = external_nets[0].get("id")
2110
2111 param = {
2112 "floatingip": {
2113 "floating_network_id": pool_id,
2114 "tenant_id": server.tenant_id,
2115 }
2116 }
2117
2118 try:
2119 # self.logger.debug("Creating floating IP")
2120 new_floating_ip = self.neutron.create_floatingip(param)
2121 free_floating_ip = new_floating_ip["floatingip"]["id"]
2122 created_items[
2123 "floating_ip:" + str(free_floating_ip)
2124 ] = True
2125 except Exception as e:
2126 raise vimconn.VimConnException(
2127 type(e).__name__
2128 + ": Cannot create new floating_ip "
2129 + str(e),
2130 http_code=vimconn.HTTP_Conflict,
2131 )
2132
2133 try:
2134 # for race condition ensure not already assigned
2135 fip = self.neutron.show_floatingip(free_floating_ip)
2136
2137 if fip["floatingip"]["port_id"]:
2138 continue
2139
2140 # the vim_id key contains the neutron.port_id
2141 self.neutron.update_floatingip(
2142 free_floating_ip,
2143 {"floatingip": {"port_id": floating_network["vim_id"]}},
2144 )
2145 # for race condition ensure not re-assigned to other VM after 5 seconds
2146 time.sleep(5)
2147 fip = self.neutron.show_floatingip(free_floating_ip)
2148
2149 if (
2150 fip["floatingip"]["port_id"]
2151 != floating_network["vim_id"]
2152 ):
2153 self.logger.error(
2154 "floating_ip {} re-assigned to other port".format(
2155 free_floating_ip
2156 )
2157 )
2158 continue
2159
2160 self.logger.debug(
2161 "Assigned floating_ip {} to VM {}".format(
2162 free_floating_ip, server.id
2163 )
2164 )
2165 assigned = True
2166 except Exception as e:
2167 # openstack need some time after VM creation to assign an IP. So retry if fails
2168 vm_status = self.nova.servers.get(server.id).status
2169
2170 if vm_status not in ("ACTIVE", "ERROR"):
2171 if time.time() - vm_start_time < server_timeout:
2172 time.sleep(5)
2173 continue
2174 elif floating_ip_retries > 0:
2175 floating_ip_retries -= 1
2176 continue
2177
2178 raise vimconn.VimConnException(
2179 "Cannot create floating_ip: {} {}".format(
2180 type(e).__name__, e
2181 ),
2182 http_code=vimconn.HTTP_Conflict,
2183 )
2184
2185 except Exception as e:
2186 if not floating_network["exit_on_floating_ip_error"]:
2187 self.logger.error("Cannot create floating_ip. %s", str(e))
2188 continue
2189
2190 raise
2191
2192 return server.id, created_items
2193 # except nvExceptions.NotFound as e:
2194 # error_value=-vimconn.HTTP_Not_Found
2195 # error_text= "vm instance %s not found" % vm_id
2196 # except TypeError as e:
2197 # raise vimconn.VimConnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request)
2198
2199 except Exception as e:
2200 server_id = None
2201 if server:
2202 server_id = server.id
2203
2204 try:
2205 self.delete_vminstance(server_id, created_items)
2206 except Exception as e2:
2207 self.logger.error("new_vminstance rollback fail {}".format(e2))
2208
2209 self._format_exception(e)
2210
2211 def get_vminstance(self, vm_id):
2212 """Returns the VM instance information from VIM"""
2213 # self.logger.debug("Getting VM from VIM")
2214 try:
2215 self._reload_connection()
2216 server = self.nova.servers.find(id=vm_id)
2217 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2218
2219 return server.to_dict()
2220 except (
2221 ksExceptions.ClientException,
2222 nvExceptions.ClientException,
2223 nvExceptions.NotFound,
2224 ConnectionError,
2225 ) as e:
2226 self._format_exception(e)
2227
2228 def get_vminstance_console(self, vm_id, console_type="vnc"):
2229 """
2230 Get a console for the virtual machine
2231 Params:
2232 vm_id: uuid of the VM
2233 console_type, can be:
2234 "novnc" (by default), "xvpvnc" for VNC types,
2235 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2236 Returns dict with the console parameters:
2237 protocol: ssh, ftp, http, https, ...
2238 server: usually ip address
2239 port: the http, ssh, ... port
2240 suffix: extra text, e.g. the http path and query string
2241 """
2242 self.logger.debug("Getting VM CONSOLE from VIM")
2243
2244 try:
2245 self._reload_connection()
2246 server = self.nova.servers.find(id=vm_id)
2247
2248 if console_type is None or console_type == "novnc":
2249 console_dict = server.get_vnc_console("novnc")
2250 elif console_type == "xvpvnc":
2251 console_dict = server.get_vnc_console(console_type)
2252 elif console_type == "rdp-html5":
2253 console_dict = server.get_rdp_console(console_type)
2254 elif console_type == "spice-html5":
2255 console_dict = server.get_spice_console(console_type)
2256 else:
2257 raise vimconn.VimConnException(
2258 "console type '{}' not allowed".format(console_type),
2259 http_code=vimconn.HTTP_Bad_Request,
2260 )
2261
2262 console_dict1 = console_dict.get("console")
2263
2264 if console_dict1:
2265 console_url = console_dict1.get("url")
2266
2267 if console_url:
2268 # parse console_url
2269 protocol_index = console_url.find("//")
2270 suffix_index = (
2271 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2272 )
2273 port_index = (
2274 console_url[protocol_index + 2 : suffix_index].find(":")
2275 + protocol_index
2276 + 2
2277 )
2278
2279 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2280 return (
2281 -vimconn.HTTP_Internal_Server_Error,
2282 "Unexpected response from VIM",
2283 )
2284
2285 console_dict = {
2286 "protocol": console_url[0:protocol_index],
2287 "server": console_url[protocol_index + 2 : port_index],
2288 "port": console_url[port_index:suffix_index],
2289 "suffix": console_url[suffix_index + 1 :],
2290 }
2291 protocol_index += 2
2292
2293 return console_dict
2294 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2295 except (
2296 nvExceptions.NotFound,
2297 ksExceptions.ClientException,
2298 nvExceptions.ClientException,
2299 nvExceptions.BadRequest,
2300 ConnectionError,
2301 ) as e:
2302 self._format_exception(e)
2303
2304 def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
2305 """Removes a VM instance from VIM. Returns the old identifier"""
2306 # print "osconnector: Getting VM from VIM"
2307 if created_items is None:
2308 created_items = {}
2309
2310 try:
2311 self._reload_connection()
2312 # delete VM ports attached to this networks before the virtual machine
2313 for k, v in created_items.items():
2314 if not v: # skip already deleted
2315 continue
2316
2317 try:
2318 k_item, _, k_id = k.partition(":")
2319 if k_item == "port":
2320 port_dict = self.neutron.list_ports()
2321 existing_ports = [
2322 port["id"] for port in port_dict["ports"] if port_dict
2323 ]
2324 if k_id in existing_ports:
2325 self.neutron.delete_port(k_id)
2326 except Exception as e:
2327 self.logger.error(
2328 "Error deleting port: {}: {}".format(type(e).__name__, e)
2329 )
2330
2331 # #commented because detaching the volumes makes the servers.delete not work properly ?!?
2332 # #dettach volumes attached
2333 # server = self.nova.servers.get(vm_id)
2334 # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"] #volume["id"]
2335 # #for volume in volumes_attached_dict:
2336 # # self.cinder.volumes.detach(volume["id"])
2337
2338 if vm_id:
2339 self.nova.servers.delete(vm_id)
2340
2341 # delete volumes. Although having detached, they should have in active status before deleting
2342 # we ensure in this loop
2343 keep_waiting = True
2344 elapsed_time = 0
2345
2346 while keep_waiting and elapsed_time < volume_timeout:
2347 keep_waiting = False
2348
2349 for k, v in created_items.items():
2350 if not v: # skip already deleted
2351 continue
2352
2353 try:
2354 k_item, _, k_id = k.partition(":")
2355 if k_item == "volume":
2356 if self.cinder.volumes.get(k_id).status != "available":
2357 keep_waiting = True
2358 else:
2359 if k_id not in volumes_to_hold:
2360 self.cinder.volumes.delete(k_id)
2361 created_items[k] = None
2362 elif k_item == "floating_ip": # floating ip
2363 self.neutron.delete_floatingip(k_id)
2364 created_items[k] = None
2365
2366 except Exception as e:
2367 self.logger.error("Error deleting {}: {}".format(k, e))
2368
2369 if keep_waiting:
2370 time.sleep(1)
2371 elapsed_time += 1
2372
2373 return None
2374 except (
2375 nvExceptions.NotFound,
2376 ksExceptions.ClientException,
2377 nvExceptions.ClientException,
2378 ConnectionError,
2379 ) as e:
2380 self._format_exception(e)
2381
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this Virtual Machine
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                            #  CREATING (on building process), ERROR
                            #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
                    vim_net_id:       #network id where this interface is connected
                    vim_interface_id: #interface/port VIM id
                    ip_address:       #null, or text with IPv4, IPv6 address
                    compute_node:     #identification of compute node where PF,VF interface is allocated
                    pci:              #PCI address of the NIC that hosts the PF,VF
                    vlan:             #physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        # One entry per requested VM; errors are reported per-VM, never raised.
        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                # translate the VIM status into the MANO vocabulary
                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # strip user_data fields before serializing into vim_info
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                # the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            # best-effort: a floating IP lookup failure must not
                            # break the whole status refresh
                            pass

                        # fixed IPs go after the floating IP (if any)
                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
2524
2525 def action_vminstance(self, vm_id, action_dict, created_items={}):
2526 """Send and action over a VM instance from VIM
2527 Returns None or the console dict if the action was successfully sent to the VIM"""
2528 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
2529
2530 try:
2531 self._reload_connection()
2532 server = self.nova.servers.find(id=vm_id)
2533
2534 if "start" in action_dict:
2535 if action_dict["start"] == "rebuild":
2536 server.rebuild()
2537 else:
2538 if server.status == "PAUSED":
2539 server.unpause()
2540 elif server.status == "SUSPENDED":
2541 server.resume()
2542 elif server.status == "SHUTOFF":
2543 server.start()
2544 else:
2545 self.logger.debug(
2546 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
2547 )
2548 raise vimconn.VimConnException(
2549 "Cannot 'start' instance while it is in active state",
2550 http_code=vimconn.HTTP_Bad_Request,
2551 )
2552
2553 elif "pause" in action_dict:
2554 server.pause()
2555 elif "resume" in action_dict:
2556 server.resume()
2557 elif "shutoff" in action_dict or "shutdown" in action_dict:
2558 self.logger.debug("server status %s", server.status)
2559 if server.status == "ACTIVE":
2560 server.stop()
2561 else:
2562 self.logger.debug("ERROR: VM is not in Active state")
2563 raise vimconn.VimConnException(
2564 "VM is not in active state, stop operation is not allowed",
2565 http_code=vimconn.HTTP_Bad_Request,
2566 )
2567 elif "forceOff" in action_dict:
2568 server.stop() # TODO
2569 elif "terminate" in action_dict:
2570 server.delete()
2571 elif "createImage" in action_dict:
2572 server.create_image()
2573 # "path":path_schema,
2574 # "description":description_schema,
2575 # "name":name_schema,
2576 # "metadata":metadata_schema,
2577 # "imageRef": id_schema,
2578 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
2579 elif "rebuild" in action_dict:
2580 server.rebuild(server.image["id"])
2581 elif "reboot" in action_dict:
2582 server.reboot() # reboot_type="SOFT"
2583 elif "console" in action_dict:
2584 console_type = action_dict["console"]
2585
2586 if console_type is None or console_type == "novnc":
2587 console_dict = server.get_vnc_console("novnc")
2588 elif console_type == "xvpvnc":
2589 console_dict = server.get_vnc_console(console_type)
2590 elif console_type == "rdp-html5":
2591 console_dict = server.get_rdp_console(console_type)
2592 elif console_type == "spice-html5":
2593 console_dict = server.get_spice_console(console_type)
2594 else:
2595 raise vimconn.VimConnException(
2596 "console type '{}' not allowed".format(console_type),
2597 http_code=vimconn.HTTP_Bad_Request,
2598 )
2599
2600 try:
2601 console_url = console_dict["console"]["url"]
2602 # parse console_url
2603 protocol_index = console_url.find("//")
2604 suffix_index = (
2605 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2606 )
2607 port_index = (
2608 console_url[protocol_index + 2 : suffix_index].find(":")
2609 + protocol_index
2610 + 2
2611 )
2612
2613 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2614 raise vimconn.VimConnException(
2615 "Unexpected response from VIM " + str(console_dict)
2616 )
2617
2618 console_dict2 = {
2619 "protocol": console_url[0:protocol_index],
2620 "server": console_url[protocol_index + 2 : port_index],
2621 "port": int(console_url[port_index + 1 : suffix_index]),
2622 "suffix": console_url[suffix_index + 1 :],
2623 }
2624
2625 return console_dict2
2626 except Exception:
2627 raise vimconn.VimConnException(
2628 "Unexpected response from VIM " + str(console_dict)
2629 )
2630
2631 return None
2632 except (
2633 ksExceptions.ClientException,
2634 nvExceptions.ClientException,
2635 nvExceptions.NotFound,
2636 ConnectionError,
2637 ) as e:
2638 self._format_exception(e)
2639 # TODO insert exception vimconn.HTTP_Unauthorized
2640
2641 # ###### VIO Specific Changes #########
2642 def _generate_vlanID(self):
2643 """
2644 Method to get unused vlanID
2645 Args:
2646 None
2647 Returns:
2648 vlanID
2649 """
2650 # Get used VLAN IDs
2651 usedVlanIDs = []
2652 networks = self.get_network_list()
2653
2654 for net in networks:
2655 if net.get("provider:segmentation_id"):
2656 usedVlanIDs.append(net.get("provider:segmentation_id"))
2657
2658 used_vlanIDs = set(usedVlanIDs)
2659
2660 # find unused VLAN ID
2661 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
2662 try:
2663 start_vlanid, end_vlanid = map(
2664 int, vlanID_range.replace(" ", "").split("-")
2665 )
2666
2667 for vlanID in range(start_vlanid, end_vlanid + 1):
2668 if vlanID not in used_vlanIDs:
2669 return vlanID
2670 except Exception as exp:
2671 raise vimconn.VimConnException(
2672 "Exception {} occurred while generating VLAN ID.".format(exp)
2673 )
2674 else:
2675 raise vimconn.VimConnConflictException(
2676 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
2677 self.config.get("dataplane_net_vlan_range")
2678 )
2679 )
2680
2681 def _generate_multisegment_vlanID(self):
2682 """
2683 Method to get unused vlanID
2684 Args:
2685 None
2686 Returns:
2687 vlanID
2688 """
2689 # Get used VLAN IDs
2690 usedVlanIDs = []
2691 networks = self.get_network_list()
2692 for net in networks:
2693 if net.get("provider:network_type") == "vlan" and net.get(
2694 "provider:segmentation_id"
2695 ):
2696 usedVlanIDs.append(net.get("provider:segmentation_id"))
2697 elif net.get("segments"):
2698 for segment in net.get("segments"):
2699 if segment.get("provider:network_type") == "vlan" and segment.get(
2700 "provider:segmentation_id"
2701 ):
2702 usedVlanIDs.append(segment.get("provider:segmentation_id"))
2703
2704 used_vlanIDs = set(usedVlanIDs)
2705
2706 # find unused VLAN ID
2707 for vlanID_range in self.config.get("multisegment_vlan_range"):
2708 try:
2709 start_vlanid, end_vlanid = map(
2710 int, vlanID_range.replace(" ", "").split("-")
2711 )
2712
2713 for vlanID in range(start_vlanid, end_vlanid + 1):
2714 if vlanID not in used_vlanIDs:
2715 return vlanID
2716 except Exception as exp:
2717 raise vimconn.VimConnException(
2718 "Exception {} occurred while generating VLAN ID.".format(exp)
2719 )
2720 else:
2721 raise vimconn.VimConnConflictException(
2722 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
2723 self.config.get("multisegment_vlan_range")
2724 )
2725 )
2726
2727 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
2728 """
2729 Method to validate user given vlanID ranges
2730 Args: None
2731 Returns: None
2732 """
2733 for vlanID_range in input_vlan_range:
2734 vlan_range = vlanID_range.replace(" ", "")
2735 # validate format
2736 vlanID_pattern = r"(\d)*-(\d)*$"
2737 match_obj = re.match(vlanID_pattern, vlan_range)
2738 if not match_obj:
2739 raise vimconn.VimConnConflictException(
2740 "Invalid VLAN range for {}: {}.You must provide "
2741 "'{}' in format [start_ID - end_ID].".format(
2742 text_vlan_range, vlanID_range, text_vlan_range
2743 )
2744 )
2745
2746 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
2747 if start_vlanid <= 0:
2748 raise vimconn.VimConnConflictException(
2749 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
2750 "networks valid IDs are 1 to 4094 ".format(
2751 text_vlan_range, vlanID_range
2752 )
2753 )
2754
2755 if end_vlanid > 4094:
2756 raise vimconn.VimConnConflictException(
2757 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
2758 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
2759 text_vlan_range, vlanID_range
2760 )
2761 )
2762
2763 if start_vlanid > end_vlanid:
2764 raise vimconn.VimConnConflictException(
2765 "Invalid VLAN range for {}: {}. You must provide '{}'"
2766 " in format start_ID - end_ID and start_ID < end_ID ".format(
2767 text_vlan_range, vlanID_range, text_vlan_range
2768 )
2769 )
2770
2771 # NOT USED FUNCTIONS
2772
2773 def new_external_port(self, port_data):
2774 """Adds a external port to VIM
2775 Returns the port identifier"""
2776 # TODO openstack if needed
2777 return (
2778 -vimconn.HTTP_Internal_Server_Error,
2779 "osconnector.new_external_port() not implemented",
2780 )
2781
2782 def connect_port_network(self, port_id, network_id, admin=False):
2783 """Connects a external port to a network
2784 Returns status code of the VIM response"""
2785 # TODO openstack if needed
2786 return (
2787 -vimconn.HTTP_Internal_Server_Error,
2788 "osconnector.connect_port_network() not implemented",
2789 )
2790
2791 def new_user(self, user_name, user_passwd, tenant_id=None):
2792 """Adds a new user to openstack VIM
2793 Returns the user identifier"""
2794 self.logger.debug("osconnector: Adding a new user to VIM")
2795
2796 try:
2797 self._reload_connection()
2798 user = self.keystone.users.create(
2799 user_name, password=user_passwd, default_project=tenant_id
2800 )
2801 # self.keystone.tenants.add_user(self.k_creds["username"], #role)
2802
2803 return user.id
2804 except ksExceptions.ConnectionError as e:
2805 error_value = -vimconn.HTTP_Bad_Request
2806 error_text = (
2807 type(e).__name__
2808 + ": "
2809 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2810 )
2811 except ksExceptions.ClientException as e: # TODO remove
2812 error_value = -vimconn.HTTP_Bad_Request
2813 error_text = (
2814 type(e).__name__
2815 + ": "
2816 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2817 )
2818
2819 # TODO insert exception vimconn.HTTP_Unauthorized
2820 # if reaching here is because an exception
2821 self.logger.debug("new_user " + error_text)
2822
2823 return error_value, error_text
2824
2825 def delete_user(self, user_id):
2826 """Delete a user from openstack VIM
2827 Returns the user identifier"""
2828 if self.debug:
2829 print("osconnector: Deleting a user from VIM")
2830
2831 try:
2832 self._reload_connection()
2833 self.keystone.users.delete(user_id)
2834
2835 return 1, user_id
2836 except ksExceptions.ConnectionError as e:
2837 error_value = -vimconn.HTTP_Bad_Request
2838 error_text = (
2839 type(e).__name__
2840 + ": "
2841 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2842 )
2843 except ksExceptions.NotFound as e:
2844 error_value = -vimconn.HTTP_Not_Found
2845 error_text = (
2846 type(e).__name__
2847 + ": "
2848 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2849 )
2850 except ksExceptions.ClientException as e: # TODO remove
2851 error_value = -vimconn.HTTP_Bad_Request
2852 error_text = (
2853 type(e).__name__
2854 + ": "
2855 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2856 )
2857
2858 # TODO insert exception vimconn.HTTP_Unauthorized
2859 # if reaching here is because an exception
2860 self.logger.debug("delete_tenant " + error_text)
2861
2862 return error_value, error_text
2863
2864 def get_hosts_info(self):
2865 """Get the information of deployed hosts
2866 Returns the hosts content"""
2867 if self.debug:
2868 print("osconnector: Getting Host info from VIM")
2869
2870 try:
2871 h_list = []
2872 self._reload_connection()
2873 hypervisors = self.nova.hypervisors.list()
2874
2875 for hype in hypervisors:
2876 h_list.append(hype.to_dict())
2877
2878 return 1, {"hosts": h_list}
2879 except nvExceptions.NotFound as e:
2880 error_value = -vimconn.HTTP_Not_Found
2881 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
2882 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
2883 error_value = -vimconn.HTTP_Bad_Request
2884 error_text = (
2885 type(e).__name__
2886 + ": "
2887 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2888 )
2889
2890 # TODO insert exception vimconn.HTTP_Unauthorized
2891 # if reaching here is because an exception
2892 self.logger.debug("get_hosts_info " + error_text)
2893
2894 return error_value, error_text
2895
2896 def get_hosts(self, vim_tenant):
2897 """Get the hosts and deployed instances
2898 Returns the hosts content"""
2899 r, hype_dict = self.get_hosts_info()
2900
2901 if r < 0:
2902 return r, hype_dict
2903
2904 hypervisors = hype_dict["hosts"]
2905
2906 try:
2907 servers = self.nova.servers.list()
2908 for hype in hypervisors:
2909 for server in servers:
2910 if (
2911 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
2912 == hype["hypervisor_hostname"]
2913 ):
2914 if "vm" in hype:
2915 hype["vm"].append(server.id)
2916 else:
2917 hype["vm"] = [server.id]
2918
2919 return 1, hype_dict
2920 except nvExceptions.NotFound as e:
2921 error_value = -vimconn.HTTP_Not_Found
2922 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
2923 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
2924 error_value = -vimconn.HTTP_Bad_Request
2925 error_text = (
2926 type(e).__name__
2927 + ": "
2928 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2929 )
2930
2931 # TODO insert exception vimconn.HTTP_Unauthorized
2932 # if reaching here is because an exception
2933 self.logger.debug("get_hosts " + error_text)
2934
2935 return error_value, error_text
2936
    def new_classification(self, name, ctype, definition):
        """Create a (Traffic) Classification in the VIM.

        Maps to a networking-sfc "flow classifier" in Neutron.

        :param name: name to assign to the classifier
        :param ctype: classification type; must be in supportedClassificationTypes
        :param definition: dict with the classifier fields; it is used directly
            as the flow classifier body (the "name" key is added to it)
        :return: id of the created flow classifier
        :raises VimConnNotSupportedException: if ctype is not supported
        :raises VimConnException: if the definition fails validation
        """
        self.logger.debug(
            "Adding a new (Traffic) Classification to VIM, named %s", name
        )

        try:
            new_class = None
            self._reload_connection()

            # reject unsupported classification types before touching Neutron
            if ctype not in supportedClassificationTypes:
                raise vimconn.VimConnNotSupportedException(
                    "OpenStack VIM connector does not support provided "
                    "Classification Type {}, supported ones are: {}".format(
                        ctype, supportedClassificationTypes
                    )
                )

            if not self._validate_classification(ctype, definition):
                raise vimconn.VimConnException(
                    "Incorrect Classification definition for the type specified."
                )

            # NOTE: the caller's definition dict is reused (and mutated) here
            classification_dict = definition
            classification_dict["name"] = name
            new_class = self.neutron.create_sfc_flow_classifier(
                {"flow_classifier": classification_dict}
            )

            return new_class["flow_classifier"]["id"]
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self.logger.error("Creation of Classification failed.")
            self._format_exception(e)
2974
2975 def get_classification(self, class_id):
2976 self.logger.debug(" Getting Classification %s from VIM", class_id)
2977 filter_dict = {"id": class_id}
2978 class_list = self.get_classification_list(filter_dict)
2979
2980 if len(class_list) == 0:
2981 raise vimconn.VimConnNotFoundException(
2982 "Classification '{}' not found".format(class_id)
2983 )
2984 elif len(class_list) > 1:
2985 raise vimconn.VimConnConflictException(
2986 "Found more than one Classification with this criteria"
2987 )
2988
2989 classification = class_list[0]
2990
2991 return classification
2992
2993 def get_classification_list(self, filter_dict={}):
2994 self.logger.debug(
2995 "Getting Classifications from VIM filter: '%s'", str(filter_dict)
2996 )
2997
2998 try:
2999 filter_dict_os = filter_dict.copy()
3000 self._reload_connection()
3001
3002 if self.api_version3 and "tenant_id" in filter_dict_os:
3003 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3004
3005 classification_dict = self.neutron.list_sfc_flow_classifiers(
3006 **filter_dict_os
3007 )
3008 classification_list = classification_dict["flow_classifiers"]
3009 self.__classification_os2mano(classification_list)
3010
3011 return classification_list
3012 except (
3013 neExceptions.ConnectionFailed,
3014 ksExceptions.ClientException,
3015 neExceptions.NeutronException,
3016 ConnectionError,
3017 ) as e:
3018 self._format_exception(e)
3019
3020 def delete_classification(self, class_id):
3021 self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3022
3023 try:
3024 self._reload_connection()
3025 self.neutron.delete_sfc_flow_classifier(class_id)
3026
3027 return class_id
3028 except (
3029 neExceptions.ConnectionFailed,
3030 neExceptions.NeutronException,
3031 ksExceptions.ClientException,
3032 neExceptions.NeutronException,
3033 ConnectionError,
3034 ) as e:
3035 self._format_exception(e)
3036
3037 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3038 self.logger.debug(
3039 "Adding a new Service Function Instance to VIM, named '%s'", name
3040 )
3041
3042 try:
3043 new_sfi = None
3044 self._reload_connection()
3045 correlation = None
3046
3047 if sfc_encap:
3048 correlation = "nsh"
3049
3050 if len(ingress_ports) != 1:
3051 raise vimconn.VimConnNotSupportedException(
3052 "OpenStack VIM connector can only have 1 ingress port per SFI"
3053 )
3054
3055 if len(egress_ports) != 1:
3056 raise vimconn.VimConnNotSupportedException(
3057 "OpenStack VIM connector can only have 1 egress port per SFI"
3058 )
3059
3060 sfi_dict = {
3061 "name": name,
3062 "ingress": ingress_ports[0],
3063 "egress": egress_ports[0],
3064 "service_function_parameters": {"correlation": correlation},
3065 }
3066 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3067
3068 return new_sfi["port_pair"]["id"]
3069 except (
3070 neExceptions.ConnectionFailed,
3071 ksExceptions.ClientException,
3072 neExceptions.NeutronException,
3073 ConnectionError,
3074 ) as e:
3075 if new_sfi:
3076 try:
3077 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3078 except Exception:
3079 self.logger.error(
3080 "Creation of Service Function Instance failed, with "
3081 "subsequent deletion failure as well."
3082 )
3083
3084 self._format_exception(e)
3085
3086 def get_sfi(self, sfi_id):
3087 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3088 filter_dict = {"id": sfi_id}
3089 sfi_list = self.get_sfi_list(filter_dict)
3090
3091 if len(sfi_list) == 0:
3092 raise vimconn.VimConnNotFoundException(
3093 "Service Function Instance '{}' not found".format(sfi_id)
3094 )
3095 elif len(sfi_list) > 1:
3096 raise vimconn.VimConnConflictException(
3097 "Found more than one Service Function Instance with this criteria"
3098 )
3099
3100 sfi = sfi_list[0]
3101
3102 return sfi
3103
3104 def get_sfi_list(self, filter_dict={}):
3105 self.logger.debug(
3106 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3107 )
3108
3109 try:
3110 self._reload_connection()
3111 filter_dict_os = filter_dict.copy()
3112
3113 if self.api_version3 and "tenant_id" in filter_dict_os:
3114 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3115
3116 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3117 sfi_list = sfi_dict["port_pairs"]
3118 self.__sfi_os2mano(sfi_list)
3119
3120 return sfi_list
3121 except (
3122 neExceptions.ConnectionFailed,
3123 ksExceptions.ClientException,
3124 neExceptions.NeutronException,
3125 ConnectionError,
3126 ) as e:
3127 self._format_exception(e)
3128
3129 def delete_sfi(self, sfi_id):
3130 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3131
3132 try:
3133 self._reload_connection()
3134 self.neutron.delete_sfc_port_pair(sfi_id)
3135
3136 return sfi_id
3137 except (
3138 neExceptions.ConnectionFailed,
3139 neExceptions.NeutronException,
3140 ksExceptions.ClientException,
3141 neExceptions.NeutronException,
3142 ConnectionError,
3143 ) as e:
3144 self._format_exception(e)
3145
3146 def new_sf(self, name, sfis, sfc_encap=True):
3147 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3148
3149 try:
3150 new_sf = None
3151 self._reload_connection()
3152 # correlation = None
3153 # if sfc_encap:
3154 # correlation = "nsh"
3155
3156 for instance in sfis:
3157 sfi = self.get_sfi(instance)
3158
3159 if sfi.get("sfc_encap") != sfc_encap:
3160 raise vimconn.VimConnNotSupportedException(
3161 "OpenStack VIM connector requires all SFIs of the "
3162 "same SF to share the same SFC Encapsulation"
3163 )
3164
3165 sf_dict = {"name": name, "port_pairs": sfis}
3166 new_sf = self.neutron.create_sfc_port_pair_group(
3167 {"port_pair_group": sf_dict}
3168 )
3169
3170 return new_sf["port_pair_group"]["id"]
3171 except (
3172 neExceptions.ConnectionFailed,
3173 ksExceptions.ClientException,
3174 neExceptions.NeutronException,
3175 ConnectionError,
3176 ) as e:
3177 if new_sf:
3178 try:
3179 self.neutron.delete_sfc_port_pair_group(
3180 new_sf["port_pair_group"]["id"]
3181 )
3182 except Exception:
3183 self.logger.error(
3184 "Creation of Service Function failed, with "
3185 "subsequent deletion failure as well."
3186 )
3187
3188 self._format_exception(e)
3189
3190 def get_sf(self, sf_id):
3191 self.logger.debug("Getting Service Function %s from VIM", sf_id)
3192 filter_dict = {"id": sf_id}
3193 sf_list = self.get_sf_list(filter_dict)
3194
3195 if len(sf_list) == 0:
3196 raise vimconn.VimConnNotFoundException(
3197 "Service Function '{}' not found".format(sf_id)
3198 )
3199 elif len(sf_list) > 1:
3200 raise vimconn.VimConnConflictException(
3201 "Found more than one Service Function with this criteria"
3202 )
3203
3204 sf = sf_list[0]
3205
3206 return sf
3207
3208 def get_sf_list(self, filter_dict={}):
3209 self.logger.debug(
3210 "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3211 )
3212
3213 try:
3214 self._reload_connection()
3215 filter_dict_os = filter_dict.copy()
3216
3217 if self.api_version3 and "tenant_id" in filter_dict_os:
3218 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3219
3220 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3221 sf_list = sf_dict["port_pair_groups"]
3222 self.__sf_os2mano(sf_list)
3223
3224 return sf_list
3225 except (
3226 neExceptions.ConnectionFailed,
3227 ksExceptions.ClientException,
3228 neExceptions.NeutronException,
3229 ConnectionError,
3230 ) as e:
3231 self._format_exception(e)
3232
3233 def delete_sf(self, sf_id):
3234 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3235
3236 try:
3237 self._reload_connection()
3238 self.neutron.delete_sfc_port_pair_group(sf_id)
3239
3240 return sf_id
3241 except (
3242 neExceptions.ConnectionFailed,
3243 neExceptions.NeutronException,
3244 ksExceptions.ClientException,
3245 neExceptions.NeutronException,
3246 ConnectionError,
3247 ) as e:
3248 self._format_exception(e)
3249
3250 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3251 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3252
3253 try:
3254 new_sfp = None
3255 self._reload_connection()
3256 # In networking-sfc the MPLS encapsulation is legacy
3257 # should be used when no full SFC Encapsulation is intended
3258 correlation = "mpls"
3259
3260 if sfc_encap:
3261 correlation = "nsh"
3262
3263 sfp_dict = {
3264 "name": name,
3265 "flow_classifiers": classifications,
3266 "port_pair_groups": sfs,
3267 "chain_parameters": {"correlation": correlation},
3268 }
3269
3270 if spi:
3271 sfp_dict["chain_id"] = spi
3272
3273 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3274
3275 return new_sfp["port_chain"]["id"]
3276 except (
3277 neExceptions.ConnectionFailed,
3278 ksExceptions.ClientException,
3279 neExceptions.NeutronException,
3280 ConnectionError,
3281 ) as e:
3282 if new_sfp:
3283 try:
3284 self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3285 except Exception:
3286 self.logger.error(
3287 "Creation of Service Function Path failed, with "
3288 "subsequent deletion failure as well."
3289 )
3290
3291 self._format_exception(e)
3292
3293 def get_sfp(self, sfp_id):
3294 self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
3295
3296 filter_dict = {"id": sfp_id}
3297 sfp_list = self.get_sfp_list(filter_dict)
3298
3299 if len(sfp_list) == 0:
3300 raise vimconn.VimConnNotFoundException(
3301 "Service Function Path '{}' not found".format(sfp_id)
3302 )
3303 elif len(sfp_list) > 1:
3304 raise vimconn.VimConnConflictException(
3305 "Found more than one Service Function Path with this criteria"
3306 )
3307
3308 sfp = sfp_list[0]
3309
3310 return sfp
3311
3312 def get_sfp_list(self, filter_dict={}):
3313 self.logger.debug(
3314 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3315 )
3316
3317 try:
3318 self._reload_connection()
3319 filter_dict_os = filter_dict.copy()
3320
3321 if self.api_version3 and "tenant_id" in filter_dict_os:
3322 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3323
3324 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
3325 sfp_list = sfp_dict["port_chains"]
3326 self.__sfp_os2mano(sfp_list)
3327
3328 return sfp_list
3329 except (
3330 neExceptions.ConnectionFailed,
3331 ksExceptions.ClientException,
3332 neExceptions.NeutronException,
3333 ConnectionError,
3334 ) as e:
3335 self._format_exception(e)
3336
3337 def delete_sfp(self, sfp_id):
3338 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
3339
3340 try:
3341 self._reload_connection()
3342 self.neutron.delete_sfc_port_chain(sfp_id)
3343
3344 return sfp_id
3345 except (
3346 neExceptions.ConnectionFailed,
3347 neExceptions.NeutronException,
3348 ksExceptions.ClientException,
3349 neExceptions.NeutronException,
3350 ConnectionError,
3351 ) as e:
3352 self._format_exception(e)
3353
3354 def refresh_sfps_status(self, sfp_list):
3355 """Get the status of the service function path
3356 Params: the list of sfp identifiers
3357 Returns a dictionary with:
3358 vm_id: #VIM id of this service function path
3359 status: #Mandatory. Text with one of:
3360 # DELETED (not found at vim)
3361 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3362 # OTHER (Vim reported other status not understood)
3363 # ERROR (VIM indicates an ERROR status)
3364 # ACTIVE,
3365 # CREATING (on building process)
3366 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3367 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
3368 """
3369 sfp_dict = {}
3370 self.logger.debug(
3371 "refresh_sfps status: Getting tenant SFP information from VIM"
3372 )
3373
3374 for sfp_id in sfp_list:
3375 sfp = {}
3376
3377 try:
3378 sfp_vim = self.get_sfp(sfp_id)
3379
3380 if sfp_vim["spi"]:
3381 sfp["status"] = vmStatus2manoFormat["ACTIVE"]
3382 else:
3383 sfp["status"] = "OTHER"
3384 sfp["error_msg"] = "VIM status reported " + sfp["status"]
3385
3386 sfp["vim_info"] = self.serialize(sfp_vim)
3387
3388 if sfp_vim.get("fault"):
3389 sfp["error_msg"] = str(sfp_vim["fault"])
3390 except vimconn.VimConnNotFoundException as e:
3391 self.logger.error("Exception getting sfp status: %s", str(e))
3392 sfp["status"] = "DELETED"
3393 sfp["error_msg"] = str(e)
3394 except vimconn.VimConnException as e:
3395 self.logger.error("Exception getting sfp status: %s", str(e))
3396 sfp["status"] = "VIM_ERROR"
3397 sfp["error_msg"] = str(e)
3398
3399 sfp_dict[sfp_id] = sfp
3400
3401 return sfp_dict
3402
3403 def refresh_sfis_status(self, sfi_list):
3404 """Get the status of the service function instances
3405 Params: the list of sfi identifiers
3406 Returns a dictionary with:
3407 vm_id: #VIM id of this service function instance
3408 status: #Mandatory. Text with one of:
3409 # DELETED (not found at vim)
3410 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3411 # OTHER (Vim reported other status not understood)
3412 # ERROR (VIM indicates an ERROR status)
3413 # ACTIVE,
3414 # CREATING (on building process)
3415 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3416 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3417 """
3418 sfi_dict = {}
3419 self.logger.debug(
3420 "refresh_sfis status: Getting tenant sfi information from VIM"
3421 )
3422
3423 for sfi_id in sfi_list:
3424 sfi = {}
3425
3426 try:
3427 sfi_vim = self.get_sfi(sfi_id)
3428
3429 if sfi_vim:
3430 sfi["status"] = vmStatus2manoFormat["ACTIVE"]
3431 else:
3432 sfi["status"] = "OTHER"
3433 sfi["error_msg"] = "VIM status reported " + sfi["status"]
3434
3435 sfi["vim_info"] = self.serialize(sfi_vim)
3436
3437 if sfi_vim.get("fault"):
3438 sfi["error_msg"] = str(sfi_vim["fault"])
3439 except vimconn.VimConnNotFoundException as e:
3440 self.logger.error("Exception getting sfi status: %s", str(e))
3441 sfi["status"] = "DELETED"
3442 sfi["error_msg"] = str(e)
3443 except vimconn.VimConnException as e:
3444 self.logger.error("Exception getting sfi status: %s", str(e))
3445 sfi["status"] = "VIM_ERROR"
3446 sfi["error_msg"] = str(e)
3447
3448 sfi_dict[sfi_id] = sfi
3449
3450 return sfi_dict
3451
3452 def refresh_sfs_status(self, sf_list):
3453 """Get the status of the service functions
3454 Params: the list of sf identifiers
3455 Returns a dictionary with:
3456 vm_id: #VIM id of this service function
3457 status: #Mandatory. Text with one of:
3458 # DELETED (not found at vim)
3459 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3460 # OTHER (Vim reported other status not understood)
3461 # ERROR (VIM indicates an ERROR status)
3462 # ACTIVE,
3463 # CREATING (on building process)
3464 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3465 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3466 """
3467 sf_dict = {}
3468 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
3469
3470 for sf_id in sf_list:
3471 sf = {}
3472
3473 try:
3474 sf_vim = self.get_sf(sf_id)
3475
3476 if sf_vim:
3477 sf["status"] = vmStatus2manoFormat["ACTIVE"]
3478 else:
3479 sf["status"] = "OTHER"
3480 sf["error_msg"] = "VIM status reported " + sf_vim["status"]
3481
3482 sf["vim_info"] = self.serialize(sf_vim)
3483
3484 if sf_vim.get("fault"):
3485 sf["error_msg"] = str(sf_vim["fault"])
3486 except vimconn.VimConnNotFoundException as e:
3487 self.logger.error("Exception getting sf status: %s", str(e))
3488 sf["status"] = "DELETED"
3489 sf["error_msg"] = str(e)
3490 except vimconn.VimConnException as e:
3491 self.logger.error("Exception getting sf status: %s", str(e))
3492 sf["status"] = "VIM_ERROR"
3493 sf["error_msg"] = str(e)
3494
3495 sf_dict[sf_id] = sf
3496
3497 return sf_dict
3498
3499 def refresh_classifications_status(self, classification_list):
3500 """Get the status of the classifications
3501 Params: the list of classification identifiers
3502 Returns a dictionary with:
3503 vm_id: #VIM id of this classifier
3504 status: #Mandatory. Text with one of:
3505 # DELETED (not found at vim)
3506 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3507 # OTHER (Vim reported other status not understood)
3508 # ERROR (VIM indicates an ERROR status)
3509 # ACTIVE,
3510 # CREATING (on building process)
3511 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3512 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3513 """
3514 classification_dict = {}
3515 self.logger.debug(
3516 "refresh_classifications status: Getting tenant classification information from VIM"
3517 )
3518
3519 for classification_id in classification_list:
3520 classification = {}
3521
3522 try:
3523 classification_vim = self.get_classification(classification_id)
3524
3525 if classification_vim:
3526 classification["status"] = vmStatus2manoFormat["ACTIVE"]
3527 else:
3528 classification["status"] = "OTHER"
3529 classification["error_msg"] = (
3530 "VIM status reported " + classification["status"]
3531 )
3532
3533 classification["vim_info"] = self.serialize(classification_vim)
3534
3535 if classification_vim.get("fault"):
3536 classification["error_msg"] = str(classification_vim["fault"])
3537 except vimconn.VimConnNotFoundException as e:
3538 self.logger.error("Exception getting classification status: %s", str(e))
3539 classification["status"] = "DELETED"
3540 classification["error_msg"] = str(e)
3541 except vimconn.VimConnException as e:
3542 self.logger.error("Exception getting classification status: %s", str(e))
3543 classification["status"] = "VIM_ERROR"
3544 classification["error_msg"] = str(e)
3545
3546 classification_dict[classification_id] = classification
3547
3548 return classification_dict
3549
3550 def new_affinity_group(self, affinity_group_data):
3551 """Adds a server group to VIM
3552 affinity_group_data contains a dictionary with information, keys:
3553 name: name in VIM for the server group
3554 type: affinity or anti-affinity
3555 scope: Only nfvi-node allowed
3556 Returns the server group identifier"""
3557 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3558
3559 try:
3560 name = affinity_group_data["name"]
3561 policy = affinity_group_data["type"]
3562
3563 self._reload_connection()
3564 new_server_group = self.nova.server_groups.create(name, policy)
3565
3566 return new_server_group.id
3567 except (
3568 ksExceptions.ClientException,
3569 nvExceptions.ClientException,
3570 ConnectionError,
3571 KeyError,
3572 ) as e:
3573 self._format_exception(e)
3574
3575 def get_affinity_group(self, affinity_group_id):
3576 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3577 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3578 try:
3579 self._reload_connection()
3580 server_group = self.nova.server_groups.find(id=affinity_group_id)
3581
3582 return server_group.to_dict()
3583 except (
3584 nvExceptions.NotFound,
3585 nvExceptions.ClientException,
3586 ksExceptions.ClientException,
3587 ConnectionError,
3588 ) as e:
3589 self._format_exception(e)
3590
3591 def delete_affinity_group(self, affinity_group_id):
3592 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3593 self.logger.debug("Getting server group '%s'", affinity_group_id)
3594 try:
3595 self._reload_connection()
3596 self.nova.server_groups.delete(affinity_group_id)
3597
3598 return affinity_group_id
3599 except (
3600 nvExceptions.NotFound,
3601 ksExceptions.ClientException,
3602 nvExceptions.ClientException,
3603 ConnectionError,
3604 ) as e:
3605 self._format_exception(e)
3606
3607 def get_vdu_state(self, vm_id):
3608 """
3609 Getting the state of a vdu
3610 param:
3611 vm_id: ID of an instance
3612 """
3613 self.logger.debug("Getting the status of VM")
3614 self.logger.debug("VIM VM ID %s", vm_id)
3615 self._reload_connection()
3616 server = self.nova.servers.find(id=vm_id)
3617 server_dict = server.to_dict()
3618 vdu_data = [
3619 server_dict["status"],
3620 server_dict["flavor"]["id"],
3621 server_dict["OS-EXT-SRV-ATTR:host"],
3622 server_dict["OS-EXT-AZ:availability_zone"],
3623 ]
3624 self.logger.debug("vdu_data %s", vdu_data)
3625 return vdu_data
3626
3627 def check_compute_availability(self, host, server_flavor_details):
3628 self._reload_connection()
3629 hypervisor_search = self.nova.hypervisors.search(
3630 hypervisor_match=host, servers=True
3631 )
3632 for hypervisor in hypervisor_search:
3633 hypervisor_id = hypervisor.to_dict()["id"]
3634 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
3635 hypervisor_dict = hypervisor_details.to_dict()
3636 hypervisor_temp = json.dumps(hypervisor_dict)
3637 hypervisor_json = json.loads(hypervisor_temp)
3638 resources_available = [
3639 hypervisor_json["free_ram_mb"],
3640 hypervisor_json["disk_available_least"],
3641 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
3642 ]
3643 compute_available = all(
3644 x > y for x, y in zip(resources_available, server_flavor_details)
3645 )
3646 if compute_available:
3647 return host
3648
3649 def check_availability_zone(
3650 self, old_az, server_flavor_details, old_host, host=None
3651 ):
3652 self._reload_connection()
3653 az_check = {"zone_check": False, "compute_availability": None}
3654 aggregates_list = self.nova.aggregates.list()
3655 for aggregate in aggregates_list:
3656 aggregate_details = aggregate.to_dict()
3657 aggregate_temp = json.dumps(aggregate_details)
3658 aggregate_json = json.loads(aggregate_temp)
3659 if aggregate_json["availability_zone"] == old_az:
3660 hosts_list = aggregate_json["hosts"]
3661 if host is not None:
3662 if host in hosts_list:
3663 az_check["zone_check"] = True
3664 available_compute_id = self.check_compute_availability(
3665 host, server_flavor_details
3666 )
3667 if available_compute_id is not None:
3668 az_check["compute_availability"] = available_compute_id
3669 else:
3670 for check_host in hosts_list:
3671 if check_host != old_host:
3672 available_compute_id = self.check_compute_availability(
3673 check_host, server_flavor_details
3674 )
3675 if available_compute_id is not None:
3676 az_check["zone_check"] = True
3677 az_check["compute_availability"] = available_compute_id
3678 break
3679 else:
3680 az_check["zone_check"] = True
3681 return az_check
3682
3683 def migrate_instance(self, vm_id, compute_host=None):
3684 """
3685 Migrate a vdu
3686 param:
3687 vm_id: ID of an instance
3688 compute_host: Host to migrate the vdu to
3689 """
3690 self._reload_connection()
3691 vm_state = False
3692 instance_state = self.get_vdu_state(vm_id)
3693 server_flavor_id = instance_state[1]
3694 server_hypervisor_name = instance_state[2]
3695 server_availability_zone = instance_state[3]
3696 try:
3697 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3698 server_flavor_details = [
3699 server_flavor["ram"],
3700 server_flavor["disk"],
3701 server_flavor["vcpus"],
3702 ]
3703 if compute_host == server_hypervisor_name:
3704 raise vimconn.VimConnException(
3705 "Unable to migrate instance '{}' to the same host '{}'".format(
3706 vm_id, compute_host
3707 ),
3708 http_code=vimconn.HTTP_Bad_Request,
3709 )
3710 az_status = self.check_availability_zone(
3711 server_availability_zone,
3712 server_flavor_details,
3713 server_hypervisor_name,
3714 compute_host,
3715 )
3716 availability_zone_check = az_status["zone_check"]
3717 available_compute_id = az_status.get("compute_availability")
3718
3719 if availability_zone_check is False:
3720 raise vimconn.VimConnException(
3721 "Unable to migrate instance '{}' to a different availability zone".format(
3722 vm_id
3723 ),
3724 http_code=vimconn.HTTP_Bad_Request,
3725 )
3726 if available_compute_id is not None:
3727 self.nova.servers.live_migrate(
3728 server=vm_id,
3729 host=available_compute_id,
3730 block_migration=True,
3731 disk_over_commit=False,
3732 )
3733 state = "MIGRATING"
3734 changed_compute_host = ""
3735 if state == "MIGRATING":
3736 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
3737 changed_compute_host = self.get_vdu_state(vm_id)[2]
3738 if vm_state and changed_compute_host == available_compute_id:
3739 self.logger.debug(
3740 "Instance '{}' migrated to the new compute host '{}'".format(
3741 vm_id, changed_compute_host
3742 )
3743 )
3744 return state, available_compute_id
3745 else:
3746 raise vimconn.VimConnException(
3747 "Migration Failed. Instance '{}' not moved to the new host {}".format(
3748 vm_id, available_compute_id
3749 ),
3750 http_code=vimconn.HTTP_Bad_Request,
3751 )
3752 else:
3753 raise vimconn.VimConnException(
3754 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
3755 available_compute_id
3756 ),
3757 http_code=vimconn.HTTP_Bad_Request,
3758 )
3759 except (
3760 nvExceptions.BadRequest,
3761 nvExceptions.ClientException,
3762 nvExceptions.NotFound,
3763 ) as e:
3764 self._format_exception(e)
3765
3766 def resize_instance(self, vm_id, new_flavor_id):
3767 """
3768 For resizing the vm based on the given
3769 flavor details
3770 param:
3771 vm_id : ID of an instance
3772 new_flavor_id : Flavor id to be resized
3773 Return the status of a resized instance
3774 """
3775 self._reload_connection()
3776 self.logger.debug("resize the flavor of an instance")
3777 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
3778 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
3779 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
3780 try:
3781 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
3782 if old_flavor_disk > new_flavor_disk:
3783 raise nvExceptions.BadRequest(
3784 400,
3785 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
3786 )
3787 else:
3788 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
3789 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
3790 if vm_state:
3791 instance_resized_status = self.confirm_resize(vm_id)
3792 return instance_resized_status
3793 else:
3794 raise nvExceptions.BadRequest(
3795 409,
3796 message="Cannot 'resize' vm_state is in ERROR",
3797 )
3798
3799 else:
3800 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
3801 raise nvExceptions.BadRequest(
3802 409,
3803 message="Cannot 'resize' instance while it is in vm_state resized",
3804 )
3805 except (
3806 nvExceptions.BadRequest,
3807 nvExceptions.ClientException,
3808 nvExceptions.NotFound,
3809 ) as e:
3810 self._format_exception(e)
3811
3812 def confirm_resize(self, vm_id):
3813 """
3814 Confirm the resize of an instance
3815 param:
3816 vm_id: ID of an instance
3817 """
3818 self._reload_connection()
3819 self.nova.servers.confirm_resize(server=vm_id)
3820 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3821 self.__wait_for_vm(vm_id, "ACTIVE")
3822 instance_status = self.get_vdu_state(vm_id)[0]
3823 return instance_status