Coverage for RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py: 36%

1920 statements  


1# -*- coding: utf-8 -*- 

2 

3## 

4# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. 

5# This file is part of openmano 

6# All Rights Reserved. 

7# 

8# Licensed under the Apache License, Version 2.0 (the "License"); you may 

9# not use this file except in compliance with the License. You may obtain 

10# a copy of the License at 

11# 

12# http://www.apache.org/licenses/LICENSE-2.0 

13# 

14# Unless required by applicable law or agreed to in writing, software 

15# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 

16# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 

17# License for the specific language governing permissions and limitations 

18# under the License. 

19## 

20 

21""" 

22osconnector implements all the methods to interact with OpenStack using the Python clients for keystone, nova, neutron, cinder and glance. 

23 

24For the VNF forwarding graph, the OpenStack VIM connector calls the 

25networking-sfc Neutron extension methods, whose resources are mapped 

26to the VIM connector's SFC resources as follows: 

27- Classification (OSM) -> Flow Classifier (Neutron) 

28- Service Function Instance (OSM) -> Port Pair (Neutron) 

29- Service Function (OSM) -> Port Pair Group (Neutron) 

30- Service Function Path (OSM) -> Port Chain (Neutron) 

31""" 

32 

33import copy 

34from http.client import HTTPException 

35import json 

36import logging 

37from pprint import pformat 

38import random 

39import re 

40import time 

41from typing import Dict, List, Optional, Tuple 

42 

43from cinderclient import client as cClient 

44import cinderclient.exceptions as cExceptions 

45from glanceclient import client as glClient 

46import glanceclient.exc as gl1Exceptions 

47from keystoneauth1 import session 

48from keystoneauth1.identity import v2, v3 

49import keystoneclient.exceptions as ksExceptions 

50import keystoneclient.v2_0.client as ksClient_v2 

51import keystoneclient.v3.client as ksClient_v3 

52import netaddr 

53from neutronclient.common import exceptions as neExceptions 

54from neutronclient.neutron import client as neClient 

55from novaclient import client as nClient, exceptions as nvExceptions 

56from osm_ro_plugin import vimconn 

57from requests.exceptions import ConnectionError 

58import yaml 

59 

60__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa" 

61__date__ = "$22-sep-2017 23:59:59$" 

62 

63"""contain the openstack virtual machine status to openmano status""" 

64vmStatus2manoFormat = { 

65 "ACTIVE": "ACTIVE", 

66 "PAUSED": "PAUSED", 

67 "SUSPENDED": "SUSPENDED", 

68 "SHUTOFF": "INACTIVE", 

69 "BUILD": "BUILD", 

70 "ERROR": "ERROR", 

71 "DELETED": "DELETED", 

72} 

73netStatus2manoFormat = { 

74 "ACTIVE": "ACTIVE", 

75 "PAUSED": "PAUSED", 

76 "INACTIVE": "INACTIVE", 

77 "BUILD": "BUILD", 

78 "ERROR": "ERROR", 

79 "DELETED": "DELETED", 

80} 

81 

82supportedClassificationTypes = ["legacy_flow_classifier"] 

83 

84# global vars to have a timeout creating and deleting volumes and servers 

85volume_timeout = 1800 

86server_timeout = 1800 

87 

88 

89def catch_any_exception(func): 

90 def format_exception(*args, **kwargs): 

91 try: 

92 return func(*args, **kwargs) 

93 except Exception as e: 

94 vimconnector._format_exception(e) 

95 

96 return format_exception 
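The decorator is applied to connector methods further down (e.g. delete_network, new_flavor). A standalone sketch of the same wrap-and-translate pattern, with placeholder names:

    # Standalone sketch of the decorator pattern above (placeholder names).
    def catch_demo(func):
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                # The real decorator delegates to vimconnector._format_exception(e),
                # which re-raises a vimconn.* exception type.
                raise RuntimeError(f"{type(e).__name__}: {e}") from e
        return wrapper

    @catch_demo
    def flaky(x):
        return 1 / x

    flaky(2)   # -> 0.5
    # flaky(0) would raise RuntimeError wrapping the ZeroDivisionError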

97 

98 

99class SafeDumper(yaml.SafeDumper): 

100 def represent_data(self, data): 

101 # OpenStack APIs use custom subclasses of dict, which the YAML safe dumper 

102 # is not designed to handle (see pyyaml issue 142) 

103 if isinstance(data, dict) and data.__class__ != dict: 

104 # A simple solution is to convert those items back to dicts 

105 data = dict(data.items()) 

106 

107 return super(SafeDumper, self).represent_data(data) 

108 

109 

110class vimconnector(vimconn.VimConnector): 

111 def __init__( 

112 self, 

113 uuid, 

114 name, 

115 tenant_id, 

116 tenant_name, 

117 url, 

118 url_admin=None, 

119 user=None, 

120 passwd=None, 

121 log_level=None, 

122 config={}, 

123 persistent_info={}, 

124 ): 

125 """using common constructor parameters. In this case 

126 'url' is the keystone authorization url, 

127 'url_admin' is not used 

128 """ 

129 api_version = config.get("APIversion") 

130 

131 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"): 

132 raise vimconn.VimConnException( 

133 "Invalid value '{}' for config:APIversion. " 

134 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version) 

135 ) 

136 

137 vim_type = config.get("vim_type") 

138 

139 if vim_type and vim_type not in ("vio", "VIO"): 

140 raise vimconn.VimConnException( 

141 "Invalid value '{}' for config:vim_type." 

142 "Allowed values are 'vio' or 'VIO'".format(vim_type) 

143 ) 

144 

145 if config.get("dataplane_net_vlan_range") is not None: 

146 # validate vlan ranges provided by user 

147 self._validate_vlan_ranges( 

148 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range" 

149 ) 

150 

151 if config.get("multisegment_vlan_range") is not None: 

152 # validate vlan ranges provided by user 

153 self._validate_vlan_ranges( 

154 config.get("multisegment_vlan_range"), "multisegment_vlan_range" 

155 ) 

156 

157 vimconn.VimConnector.__init__( 

158 self, 

159 uuid, 

160 name, 

161 tenant_id, 

162 tenant_name, 

163 url, 

164 url_admin, 

165 user, 

166 passwd, 

167 log_level, 

168 config, 

169 ) 

170 

171 if self.config.get("insecure") and self.config.get("ca_cert"): 

172 raise vimconn.VimConnException( 

173 "options insecure and ca_cert are mutually exclusive" 

174 ) 

175 

176 self.verify = True 

177 

178 if self.config.get("insecure"): 

179 self.verify = False 

180 

181 if self.config.get("ca_cert"): 

182 self.verify = self.config.get("ca_cert") 

183 

184 if not url: 

185 raise TypeError("url param can not be NoneType") 

186 

187 self.persistent_info = persistent_info 

188 self.availability_zone = persistent_info.get("availability_zone", None) 

189 self.storage_availability_zone = None 

190 self.vm_av_zone = None 

191 self.session = persistent_info.get("session", {"reload_client": True}) 

192 self.my_tenant_id = self.session.get("my_tenant_id") 

193 self.nova = self.session.get("nova") 

194 self.neutron = self.session.get("neutron") 

195 self.cinder = self.session.get("cinder") 

196 self.glance = self.session.get("glance") 

197 # self.glancev1 = self.session.get("glancev1") 

198 self.keystone = self.session.get("keystone") 

199 self.api_version3 = self.session.get("api_version3") 

200 self.vim_type = self.config.get("vim_type") 

201 

202 if self.vim_type: 

203 self.vim_type = self.vim_type.upper() 

204 

205 if self.config.get("use_internal_endpoint"): 

206 self.endpoint_type = "internalURL" 

207 else: 

208 self.endpoint_type = None 

209 

210 logging.getLogger("urllib3").setLevel(logging.WARNING) 

211 logging.getLogger("keystoneauth").setLevel(logging.WARNING) 

212 logging.getLogger("novaclient").setLevel(logging.WARNING) 

213 self.logger = logging.getLogger("ro.vim.openstack") 

214 

215 # allow security_groups to be a list or a single string 

216 if isinstance(self.config.get("security_groups"), str): 

217 self.config["security_groups"] = [self.config["security_groups"]] 

218 

219 self.security_groups_id = None 

220 

221 # ###### VIO Specific Changes ######### 

222 if self.vim_type == "VIO": 

223 self.logger = logging.getLogger("ro.vim.vio") 

224 

225 if log_level: 

226 self.logger.setLevel(getattr(logging, log_level)) 

227 

228 def __getitem__(self, index): 

229 """Get individuals parameters. 

230 Throw KeyError""" 

231 if index == "project_domain_id": 

232 return self.config.get("project_domain_id") 

233 elif index == "user_domain_id": 

234 return self.config.get("user_domain_id") 

235 else: 

236 return vimconn.VimConnector.__getitem__(self, index) 

237 

238 def __setitem__(self, index, value): 

239 """Set individuals parameters and it is marked as dirty so to force connection reload. 

240 Throw KeyError""" 

241 if index == "project_domain_id": 

242 self.config["project_domain_id"] = value 

243 elif index == "user_domain_id": 

244 self.config["user_domain_id"] = value 

245 else: 

246 vimconn.VimConnector.__setitem__(self, index, value) 

247 

248 self.session["reload_client"] = True 

249 

250 def serialize(self, value): 

251 """Serialization of python basic types. 

252 

253 In the case value is not serializable a message will be logged and a 

254 simple representation of the data that cannot be converted back to 

255 python is returned. 

256 """ 

257 if isinstance(value, str): 

258 return value 

259 

260 try: 

261 return yaml.dump( 

262 value, Dumper=SafeDumper, default_flow_style=True, width=256 

263 ) 

264 except yaml.representer.RepresenterError: 

265 self.logger.debug( 

266 "The following entity cannot be serialized in YAML:\n\n%s\n\n", 

267 pformat(value), 

268 exc_info=True, 

269 ) 

270 

271 return str(value) 
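A short usage sketch of the SafeDumper defined above (FakeApiDict is an invented stand-in for the dict subclasses returned by the OpenStack clients):

    import yaml

    class FakeApiDict(dict):  # stand-in for an OpenStack client response type
        pass

    data = FakeApiDict(name="net0", status="ACTIVE")
    # Plain yaml.SafeDumper would refuse the subclass; the SafeDumper defined
    # above converts it back to a plain dict before representing it.
    print(yaml.dump(data, Dumper=SafeDumper, default_flow_style=True, width=256))
    # -> {name: net0, status: ACTIVE}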

272 

273 def _reload_connection(self): 

274 """Called before any operation, it check if credentials has changed 

275 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure 

276 """ 

277 # TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-) 

278 if self.session["reload_client"]: 

279 if self.config.get("APIversion"): 

280 self.api_version3 = ( 

281 self.config["APIversion"] == "v3.3" 

282 or self.config["APIversion"] == "3" 

283 ) 

284 else: # infer from the auth_url ending: v3 or v2.0 

285 self.api_version3 = self.url.endswith("/v3") or self.url.endswith( 

286 "/v3/" 

287 ) 

288 

289 self.session["api_version3"] = self.api_version3 

290 

291 if self.api_version3: 

292 if self.config.get("project_domain_id") or self.config.get( 

293 "project_domain_name" 

294 ): 

295 project_domain_id_default = None 

296 else: 

297 project_domain_id_default = "default" 

298 

299 if self.config.get("user_domain_id") or self.config.get( 

300 "user_domain_name" 

301 ): 

302 user_domain_id_default = None 

303 else: 

304 user_domain_id_default = "default" 

305 auth = v3.Password( 

306 auth_url=self.url, 

307 username=self.user, 

308 password=self.passwd, 

309 project_name=self.tenant_name, 

310 project_id=self.tenant_id, 

311 project_domain_id=self.config.get( 

312 "project_domain_id", project_domain_id_default 

313 ), 

314 user_domain_id=self.config.get( 

315 "user_domain_id", user_domain_id_default 

316 ), 

317 project_domain_name=self.config.get("project_domain_name"), 

318 user_domain_name=self.config.get("user_domain_name"), 

319 ) 

320 else: 

321 auth = v2.Password( 

322 auth_url=self.url, 

323 username=self.user, 

324 password=self.passwd, 

325 tenant_name=self.tenant_name, 

326 tenant_id=self.tenant_id, 

327 ) 

328 

329 sess = session.Session(auth=auth, verify=self.verify) 

330 # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River 

331 # Titanium cloud and StarlingX 

332 region_name = self.config.get("region_name") 

333 

334 if self.api_version3: 

335 self.keystone = ksClient_v3.Client( 

336 session=sess, 

337 endpoint_type=self.endpoint_type, 

338 region_name=region_name, 

339 ) 

340 else: 

341 self.keystone = ksClient_v2.Client( 

342 session=sess, endpoint_type=self.endpoint_type 

343 ) 

344 

345 self.session["keystone"] = self.keystone 

346 # In order to enable microversion functionality an explicit microversion must be specified in "config". 

347 # This implementation approach is due to the warning message in 

348 # https://developer.openstack.org/api-guide/compute/microversions.html 

349 # where it is stated that microversion backwards compatibility is not guaranteed and clients should 

350 # always require a specific microversion. 

351 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config 

352 version = self.config.get("microversion") 

353 

354 if not version: 

355 version = "2.60" 

356 

357 # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River 

358 # Titanium cloud and StarlingX 

359 self.nova = self.session["nova"] = nClient.Client( 

360 str(version), 

361 session=sess, 

362 endpoint_type=self.endpoint_type, 

363 region_name=region_name, 

364 ) 

365 self.neutron = self.session["neutron"] = neClient.Client( 

366 "2.0", 

367 session=sess, 

368 endpoint_type=self.endpoint_type, 

369 region_name=region_name, 

370 ) 

371 

372 if sess.get_all_version_data(service_type="volumev2"): 

373 self.cinder = self.session["cinder"] = cClient.Client( 

374 2, 

375 session=sess, 

376 endpoint_type=self.endpoint_type, 

377 region_name=region_name, 

378 ) 

379 else: 

380 self.cinder = self.session["cinder"] = cClient.Client( 

381 3, 

382 session=sess, 

383 endpoint_type=self.endpoint_type, 

384 region_name=region_name, 

385 ) 

386 

387 try: 

388 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id() 

389 except Exception: 

390 self.logger.error("Cannot get project_id from session", exc_info=True) 

391 

392 if self.endpoint_type == "internalURL": 

393 glance_service_id = self.keystone.services.list(name="glance")[0].id 

394 glance_endpoint = self.keystone.endpoints.list( 

395 glance_service_id, interface="internal" 

396 )[0].url 

397 else: 

398 glance_endpoint = None 

399 

400 self.glance = self.session["glance"] = glClient.Client( 

401 2, session=sess, endpoint=glance_endpoint 

402 ) 

403 # using version 1 of glance client in new_image() 

404 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess, 

405 # endpoint=glance_endpoint) 

406 self.session["reload_client"] = False 

407 self.persistent_info["session"] = self.session 

408 # add availability zone info inside self.persistent_info 

409 self._set_availablity_zones() 

410 self.persistent_info["availability_zone"] = self.availability_zone 

411 # force to get again security_groups_ids next time they are needed 

412 self.security_groups_id = None 
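The client bootstrap above, reduced to a standalone sketch (endpoint and credentials are placeholders):

    from keystoneauth1 import session
    from keystoneauth1.identity import v3
    from novaclient import client as nova_client

    auth = v3.Password(
        auth_url="https://keystone.example.com:5000/v3",  # placeholder URL
        username="osm",
        password="secret",
        project_name="admin",
        project_domain_id="default",
        user_domain_id="default",
    )
    sess = session.Session(auth=auth, verify=True)  # verify may be False or a CA cert path
    nova = nova_client.Client("2.60", session=sess)  # same default microversion as above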

413 

414 def __net_os2mano(self, net_list_dict): 

415 """Transform the net openstack format to mano format 

416 net_list_dict can be a list of dict or a single dict""" 

417 if type(net_list_dict) is dict: 

418 net_list_ = (net_list_dict,) 

419 elif type(net_list_dict) is list: 

420 net_list_ = net_list_dict 

421 else: 

422 raise TypeError("param net_list_dict must be a list or a dictionary") 

423 for net in net_list_: 

424 if net.get("provider:network_type") == "vlan": 

425 net["type"] = "data" 

426 else: 

427 net["type"] = "bridge" 

428 

429 def __classification_os2mano(self, class_list_dict): 

430 """Transform the openstack format (Flow Classifier) to mano format 

431 (Classification) class_list_dict can be a list of dict or a single dict 

432 """ 

433 if isinstance(class_list_dict, dict): 

434 class_list_ = [class_list_dict] 

435 elif isinstance(class_list_dict, list): 

436 class_list_ = class_list_dict 

437 else: 

438 raise TypeError("param class_list_dict must be a list or a dictionary") 

439 for classification in class_list_: 

440 id = classification.pop("id") 

441 name = classification.pop("name") 

442 description = classification.pop("description") 

443 project_id = classification.pop("project_id") 

444 tenant_id = classification.pop("tenant_id") 

445 original_classification = copy.deepcopy(classification) 

446 classification.clear() 

447 classification["ctype"] = "legacy_flow_classifier" 

448 classification["definition"] = original_classification 

449 classification["id"] = id 

450 classification["name"] = name 

451 classification["description"] = description 

452 classification["project_id"] = project_id 

453 classification["tenant_id"] = tenant_id 

454 

455 def __sfi_os2mano(self, sfi_list_dict): 

456 """Transform the openstack format (Port Pair) to mano format (SFI) 

457 sfi_list_dict can be a list of dict or a single dict 

458 """ 

459 if isinstance(sfi_list_dict, dict): 

460 sfi_list_ = [sfi_list_dict] 

461 elif isinstance(sfi_list_dict, list): 

462 sfi_list_ = sfi_list_dict 

463 else: 

464 raise TypeError("param sfi_list_dict must be a list or a dictionary") 

465 

466 for sfi in sfi_list_: 

467 sfi["ingress_ports"] = [] 

468 sfi["egress_ports"] = [] 

469 

470 if sfi.get("ingress"): 

471 sfi["ingress_ports"].append(sfi["ingress"]) 

472 

473 if sfi.get("egress"): 

474 sfi["egress_ports"].append(sfi["egress"]) 

475 

476 del sfi["ingress"] 

477 del sfi["egress"] 

478 params = sfi.get("service_function_parameters") 

479 sfc_encap = False 

480 

481 if params: 

482 correlation = params.get("correlation") 

483 

484 if correlation: 

485 sfc_encap = True 

486 

487 sfi["sfc_encap"] = sfc_encap 

488 del sfi["service_function_parameters"] 

489 

490 def __sf_os2mano(self, sf_list_dict): 

491 """Transform the openstack format (Port Pair Group) to mano format (SF) 

492 sf_list_dict can be a list of dict or a single dict 

493 """ 

494 if isinstance(sf_list_dict, dict): 

495 sf_list_ = [sf_list_dict] 

496 elif isinstance(sf_list_dict, list): 

497 sf_list_ = sf_list_dict 

498 else: 

499 raise TypeError("param sf_list_dict must be a list or a dictionary") 

500 

501 for sf in sf_list_: 

502 del sf["port_pair_group_parameters"] 

503 sf["sfis"] = sf["port_pairs"] 

504 del sf["port_pairs"] 

505 

506 def __sfp_os2mano(self, sfp_list_dict): 

507 """Transform the openstack format (Port Chain) to mano format (SFP) 

508 sfp_list_dict can be a list of dict or a single dict 

509 """ 

510 if isinstance(sfp_list_dict, dict): 

511 sfp_list_ = [sfp_list_dict] 

512 elif isinstance(sfp_list_dict, list): 

513 sfp_list_ = sfp_list_dict 

514 else: 

515 raise TypeError("param sfp_list_dict must be a list or a dictionary") 

516 

517 for sfp in sfp_list_: 

518 params = sfp.pop("chain_parameters") 

519 sfc_encap = False 

520 

521 if params: 

522 correlation = params.get("correlation") 

523 

524 if correlation: 

525 sfc_encap = True 

526 

527 sfp["sfc_encap"] = sfc_encap 

528 sfp["spi"] = sfp.pop("chain_id") 

529 sfp["classifications"] = sfp.pop("flow_classifiers") 

530 sfp["service_functions"] = sfp.pop("port_pair_groups") 

531 

532 # placeholder for now; read TODO note below 

533 def _validate_classification(self, type, definition): 

534 # only legacy_flow_classifier Type is supported at this point 

535 return True 

536 # TODO(igordcard): this method should be an abstract method of an 

537 # abstract Classification class to be implemented by the specific 

538 # Types. Also, abstract vimconnector should call the validation 

539 # method before the implemented VIM connectors are called. 

540 

541 @staticmethod 

542 def _format_exception(exception): 

543 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause""" 

544 message_error = str(exception) 

545 tip = "" 

546 

547 if isinstance( 

548 exception, 

549 ( 

550 neExceptions.NetworkNotFoundClient, 

551 nvExceptions.NotFound, 

552 nvExceptions.ResourceNotFound, 

553 ksExceptions.NotFound, 

554 gl1Exceptions.HTTPNotFound, 

555 cExceptions.NotFound, 

556 ), 

557 ): 

558 raise vimconn.VimConnNotFoundException( 

559 type(exception).__name__ + ": " + message_error 

560 ) 

561 elif isinstance( 

562 exception, 

563 ( 

564 HTTPException, 

565 gl1Exceptions.HTTPException, 

566 gl1Exceptions.CommunicationError, 

567 ConnectionError, 

568 ksExceptions.ConnectionError, 

569 neExceptions.ConnectionFailed, 

570 cExceptions.ConnectionError, 

571 ), 

572 ): 

573 if type(exception).__name__ == "SSLError": 

574 tip = " (maybe option 'insecure' must be added to the VIM)" 

575 

576 raise vimconn.VimConnConnectionException( 

577 "Invalid URL or credentials{}: {}".format(tip, message_error) 

578 ) 

579 elif isinstance( 

580 exception, 

581 ( 

582 KeyError, 

583 nvExceptions.BadRequest, 

584 ksExceptions.BadRequest, 

585 gl1Exceptions.BadRequest, 

586 cExceptions.BadRequest, 

587 ), 

588 ): 

589 if message_error == "OS-EXT-SRV-ATTR:host": 

590 tip = " (If the user does not have admin credentials, this attribute will be missing)" 

591 raise vimconn.VimConnInsufficientCredentials( 

592 type(exception).__name__ + ": " + message_error + tip 

593 ) 

594 raise vimconn.VimConnException( 

595 type(exception).__name__ + ": " + message_error 

596 ) 

597 

598 elif isinstance( 

599 exception, 

600 ( 

601 nvExceptions.ClientException, 

602 ksExceptions.ClientException, 

603 neExceptions.NeutronException, 

604 cExceptions.ClientException, 

605 ), 

606 ): 

607 raise vimconn.VimConnUnexpectedResponse( 

608 type(exception).__name__ + ": " + message_error 

609 ) 

610 elif isinstance(exception, nvExceptions.Conflict): 

611 raise vimconn.VimConnConflictException( 

612 type(exception).__name__ + ": " + message_error 

613 ) 

614 elif isinstance(exception, vimconn.VimConnException): 

615 raise exception 

616 else: # any other unexpected exception 

617 logger = logging.getLogger("ro.vim.openstack") 

618 logger.error("General Exception " + message_error, exc_info=True) 

619 

620 raise vimconn.VimConnException( 

621 type(exception).__name__ + ": " + message_error 

622 ) 

623 

624 def _get_ids_from_name(self): 

625 """ 

626 Obtain ids from names of tenant and security_groups. Store them at self.security_groups_id 

627 :return: None 

628 """ 

629 # get tenant_id if only tenant_name is supplied 

630 self._reload_connection() 

631 

632 if not self.my_tenant_id: 

633 raise vimconn.VimConnConnectionException( 

634 "Error getting tenant information from name={} id={}".format( 

635 self.tenant_name, self.tenant_id 

636 ) 

637 ) 

638 

639 if self.config.get("security_groups") and not self.security_groups_id: 

640 # convert from name to id 

641 neutron_sg_list = self.neutron.list_security_groups( 

642 tenant_id=self.my_tenant_id 

643 )["security_groups"] 

644 

645 self.security_groups_id = [] 

646 for sg in self.config.get("security_groups"): 

647 for neutron_sg in neutron_sg_list: 

648 if sg in (neutron_sg["id"], neutron_sg["name"]): 

649 self.security_groups_id.append(neutron_sg["id"]) 

650 break 

651 else: 

652 self.security_groups_id = None 

653 

654 raise vimconn.VimConnConnectionException( 

655 "Not found security group {} for this tenant".format(sg) 

656 ) 

657 

658 def _find_nova_server(self, vm_id): 

659 """ 

660 Returns the VM instance from Openstack and completes it with flavor ID 

661 Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47 

662 """ 

663 try: 

664 self._reload_connection() 

665 server = self.nova.servers.find(id=vm_id) 

666 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema) 

667 server_dict = server.to_dict() 

668 try: 

669 if server_dict["flavor"].get("original_name"): 

670 server_dict["flavor"]["id"] = self.nova.flavors.find( 

671 name=server_dict["flavor"]["original_name"] 

672 ).id 

673 except nClient.exceptions.NotFound as e: 

674 self.logger.warning(str(e.message)) 

675 return server_dict 

676 except ( 

677 ksExceptions.ClientException, 

678 nvExceptions.ClientException, 

679 nvExceptions.NotFound, 

680 ConnectionError, 

681 ) as e: 

682 self._format_exception(e) 

683 

684 def check_vim_connectivity(self): 

685 # just get network list to check connectivity and credentials 

686 self.get_network_list(filter_dict={}) 

687 

688 def get_tenant_list(self, filter_dict={}): 

689 """Obtain tenants of VIM 

690 filter_dict can contain the following keys: 

691 name: filter by tenant name 

692 id: filter by tenant uuid/id 

693 <other VIM specific> 

694 Returns the tenant list of dictionaries: [{'name': '<name>', 'id': '<id>', ...}, ...] 

695 """ 

696 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict)) 

697 try: 

698 self._reload_connection() 

699 

700 if self.api_version3: 

701 project_class_list = self.keystone.projects.list( 

702 name=filter_dict.get("name") 

703 ) 

704 else: 

705 project_class_list = self.keystone.tenants.findall(**filter_dict) 

706 

707 project_list = [] 

708 

709 for project in project_class_list: 

710 if filter_dict.get("id") and filter_dict["id"] != project.id: 

711 continue 

712 

713 project_list.append(project.to_dict()) 

714 

715 return project_list 

716 except ( 

717 ksExceptions.ConnectionError, 

718 ksExceptions.ClientException, 

719 ConnectionError, 

720 ) as e: 

721 self._format_exception(e) 

722 

723 def new_tenant(self, tenant_name, tenant_description): 

724 """Adds a new tenant to openstack VIM. Returns the tenant identifier""" 

725 self.logger.debug("Adding a new tenant name: %s", tenant_name) 

726 try: 

727 self._reload_connection() 

728 

729 if self.api_version3: 

730 project = self.keystone.projects.create( 

731 tenant_name, 

732 self.config.get("project_domain_id", "default"), 

733 description=tenant_description, 

734 is_domain=False, 

735 ) 

736 else: 

737 project = self.keystone.tenants.create(tenant_name, tenant_description) 

738 

739 return project.id 

740 except ( 

741 ksExceptions.ConnectionError, 

742 ksExceptions.ClientException, 

743 ksExceptions.BadRequest, 

744 ConnectionError, 

745 ) as e: 

746 self._format_exception(e) 

747 

748 def delete_tenant(self, tenant_id): 

749 """Delete a tenant from openstack VIM. Returns the old tenant identifier""" 

750 self.logger.debug("Deleting tenant %s from VIM", tenant_id) 

751 try: 

752 self._reload_connection() 

753 

754 if self.api_version3: 

755 self.keystone.projects.delete(tenant_id) 

756 else: 

757 self.keystone.tenants.delete(tenant_id) 

758 

759 return tenant_id 

760 

761 except ( 

762 ksExceptions.ConnectionError, 

763 ksExceptions.ClientException, 

764 ksExceptions.NotFound, 

765 ConnectionError, 

766 ) as e: 

767 self._format_exception(e) 

768 

769 def new_network( 

770 self, 

771 net_name, 

772 net_type, 

773 ip_profile=None, 

774 shared=False, 

775 provider_network_profile=None, 

776 ): 

777 """Adds a tenant network to VIM 

778 Params: 

779 'net_name': name of the network 

780 'net_type': one of: 

781 'bridge': overlay isolated network 

782 'data': underlay E-LAN network for Passthrough and SRIOV interfaces 

783 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces. 

784 'ip_profile': is a dict containing the IP parameters of the network 

785 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented) 

786 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y 

787 'gateway_address': (Optional) ip_schema, that is X.X.X.X 

788 'dns_address': (Optional) semicolon separated list of ip_schema, e.g. X.X.X.X[;X.X.X.X] 

789 'dhcp_enabled': True or False 

790 'dhcp_start_address': ip_schema, first IP to grant 

791 'dhcp_count': number of IPs to grant. 

792 'shared': if this network can be seen/used by other tenants/organizations 

793 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan, 

794 physical-network: physnet-label} 

795 Returns a tuple with the network identifier and created_items, or raises an exception on error 

796 created_items can be None or a dictionary where this method can include key-values that will be passed to 

797 the method delete_network. Can be used to store created segments, created l2gw connections, etc. 

798 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same 

799 as not present. 

800 """ 

801 self.logger.debug( 

802 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type 

803 ) 

804 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile)) 

805 

806 try: 

807 vlan = None 

808 

809 if provider_network_profile: 

810 vlan = provider_network_profile.get("segmentation-id") 

811 

812 new_net = None 

813 created_items = {} 

814 self._reload_connection() 

815 network_dict = {"name": net_name, "admin_state_up": True} 

816 

817 if net_type in ("data", "ptp") or provider_network_profile: 

818 provider_physical_network = None 

819 

820 if provider_network_profile and provider_network_profile.get( 

821 "physical-network" 

822 ): 

823 provider_physical_network = provider_network_profile.get( 

824 "physical-network" 

825 ) 

826 

827 # provider-network must be one of dataplane_physical_net if this is a list. If it is a string 

828 # or not declared, just skip the check 

829 if ( 

830 isinstance( 

831 self.config.get("dataplane_physical_net"), (tuple, list) 

832 ) 

833 and provider_physical_network 

834 not in self.config["dataplane_physical_net"] 

835 ): 

836 raise vimconn.VimConnConflictException( 

837 "Invalid parameter 'provider-network:physical-network' " 

838 "for network creation. '{}' is not one of the declared " 

839 "list at VIM_config:dataplane_physical_net".format( 

840 provider_physical_network 

841 ) 

842 ) 

843 

844 # use the default dataplane_physical_net 

845 if not provider_physical_network: 

846 provider_physical_network = self.config.get( 

847 "dataplane_physical_net" 

848 ) 

849 

850 # if it is non-empty list, use the first value. If it is a string use the value directly 

851 if ( 

852 isinstance(provider_physical_network, (tuple, list)) 

853 and provider_physical_network 

854 ): 

855 provider_physical_network = provider_physical_network[0] 

856 

857 if not provider_physical_network: 

858 raise vimconn.VimConnConflictException( 

859 "missing information needed for underlay networks. Provide " 

860 "'dataplane_physical_net' configuration at VIM or use the NS " 

861 "instantiation parameter 'provider-network.physical-network'" 

862 " for the VLD" 

863 ) 

864 

865 if not self.config.get("multisegment_support"): 

866 network_dict["provider:physical_network"] = ( 

867 provider_physical_network 

868 ) 

869 

870 if ( 

871 provider_network_profile 

872 and "network-type" in provider_network_profile 

873 ): 

874 network_dict["provider:network_type"] = ( 

875 provider_network_profile["network-type"] 

876 ) 

877 else: 

878 network_dict["provider:network_type"] = self.config.get( 

879 "dataplane_network_type", "vlan" 

880 ) 

881 

882 if vlan: 

883 network_dict["provider:segmentation_id"] = vlan 

884 else: 

885 # Multi-segment case 

886 segment_list = [] 

887 segment1_dict = { 

888 "provider:physical_network": "", 

889 "provider:network_type": "vxlan", 

890 } 

891 segment_list.append(segment1_dict) 

892 segment2_dict = { 

893 "provider:physical_network": provider_physical_network, 

894 "provider:network_type": "vlan", 

895 } 

896 

897 if vlan: 

898 segment2_dict["provider:segmentation_id"] = vlan 

899 elif self.config.get("multisegment_vlan_range"): 

900 vlanID = self._generate_multisegment_vlanID() 

901 segment2_dict["provider:segmentation_id"] = vlanID 

902 

903 # else 

904 # raise vimconn.VimConnConflictException( 

905 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment 

906 # network") 

907 segment_list.append(segment2_dict) 

908 network_dict["segments"] = segment_list 

909 

910 # VIO Specific Changes. It needs a concrete VLAN 

911 if self.vim_type == "VIO" and vlan is None: 

912 if self.config.get("dataplane_net_vlan_range") is None: 

913 raise vimconn.VimConnConflictException( 

914 "You must provide 'dataplane_net_vlan_range' in format " 

915 "[start_ID - end_ID] at VIM_config for creating underlay " 

916 "networks" 

917 ) 

918 

919 network_dict["provider:segmentation_id"] = self._generate_vlanID() 

920 

921 network_dict["shared"] = shared 

922 

923 if self.config.get("disable_network_port_security"): 

924 network_dict["port_security_enabled"] = False 

925 

926 if self.config.get("neutron_availability_zone_hints"): 

927 hints = self.config.get("neutron_availability_zone_hints") 

928 

929 if isinstance(hints, str): 

930 hints = [hints] 

931 

932 network_dict["availability_zone_hints"] = hints 

933 

934 new_net = self.neutron.create_network({"network": network_dict}) 

935 # print new_net 

936 # create subnetwork, even if there is no profile 

937 

938 if not ip_profile: 

939 ip_profile = {} 

940 

941 if not ip_profile.get("subnet_address"): 

942 # Fake subnet is required 

943 subnet_rand = random.SystemRandom().randint(0, 255) 

944 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand) 

945 

946 if "ip_version" not in ip_profile: 

947 ip_profile["ip_version"] = "IPv4" 

948 

949 subnet = { 

950 "name": net_name + "-subnet", 

951 "network_id": new_net["network"]["id"], 

952 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6, 

953 "cidr": ip_profile["subnet_address"], 

954 } 

955 

956 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default 

957 if ip_profile.get("gateway_address"): 

958 subnet["gateway_ip"] = ip_profile["gateway_address"] 

959 else: 

960 subnet["gateway_ip"] = None 

961 

962 if ip_profile.get("dns_address"): 

963 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";") 

964 

965 if "dhcp_enabled" in ip_profile: 

966 subnet["enable_dhcp"] = ( 

967 False 

968 if ip_profile["dhcp_enabled"] == "false" 

969 or ip_profile["dhcp_enabled"] is False 

970 else True 

971 ) 

972 

973 if ip_profile.get("dhcp_start_address"): 

974 subnet["allocation_pools"] = [] 

975 subnet["allocation_pools"].append(dict()) 

976 subnet["allocation_pools"][0]["start"] = ip_profile[ 

977 "dhcp_start_address" 

978 ] 

979 

980 if ip_profile.get("dhcp_count"): 

981 # parts = ip_profile["dhcp_start_address"].split(".") 

982 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3]) 

983 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"])) 

984 ip_int += ip_profile["dhcp_count"] - 1 

985 ip_str = str(netaddr.IPAddress(ip_int)) 

986 subnet["allocation_pools"][0]["end"] = ip_str 

987 

988 if ( 

989 ip_profile.get("ipv6_address_mode") 

990 and ip_profile["ip_version"] != "IPv4" 

991 ): 

992 subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"] 

993 # ipv6_ra_mode can be set to the same value for most use cases, see documentation: 

994 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations 

995 subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"] 

996 

997 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet)) 

998 self.neutron.create_subnet({"subnet": subnet}) 

999 

1000 if net_type == "data" and self.config.get("multisegment_support"): 

1001 if self.config.get("l2gw_support"): 

1002 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ()) 

1003 for l2gw in l2gw_list: 

1004 l2gw_conn = { 

1005 "l2_gateway_id": l2gw["id"], 

1006 "network_id": new_net["network"]["id"], 

1007 "segmentation_id": str(vlanID), 

1008 } 

1009 new_l2gw_conn = self.neutron.create_l2_gateway_connection( 

1010 {"l2_gateway_connection": l2gw_conn} 

1011 ) 

1012 created_items[ 

1013 "l2gwconn:" 

1014 + str(new_l2gw_conn["l2_gateway_connection"]["id"]) 

1015 ] = True 

1016 

1017 return new_net["network"]["id"], created_items 

1018 except Exception as e: 

1019 # delete l2gw connections (if any) before deleting the network 

1020 for k, v in created_items.items(): 

1021 if not v: # skip already deleted 

1022 continue 

1023 

1024 try: 

1025 k_item, _, k_id = k.partition(":") 

1026 

1027 if k_item == "l2gwconn": 

1028 self.neutron.delete_l2_gateway_connection(k_id) 

1029 

1030 except (neExceptions.ConnectionFailed, ConnectionError) as e2: 

1031 self.logger.error( 

1032 "Error deleting l2 gateway connection: {}: {}".format( 

1033 type(e2).__name__, e2 

1034 ) 

1035 ) 

1036 self._format_exception(e2) 

1037 except Exception as e2: 

1038 self.logger.error( 

1039 "Error deleting l2 gateway connection: {}: {}".format( 

1040 type(e2).__name__, e2 

1041 ) 

1042 ) 

1043 

1044 if new_net: 

1045 self.neutron.delete_network(new_net["network"]["id"]) 

1046 

1047 self._format_exception(e) 
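An illustrative call matching the parameter contract documented above (values invented; conn is an already constructed vimconnector):

    ip_profile = {
        "ip_version": "IPv4",
        "subnet_address": "10.10.0.0/24",
        "gateway_address": "10.10.0.1",
        "dns_address": "8.8.8.8;8.8.4.4",  # parsed with split(";") above
        "dhcp_enabled": True,
        "dhcp_start_address": "10.10.0.10",
        "dhcp_count": 100,                 # allocation pool ends at 10.10.0.109
    }
    net_id, created_items = conn.new_network(
        "mgmt-net", "bridge", ip_profile=ip_profile, shared=False
    )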

1048 

1049 def get_network_list(self, filter_dict={}): 

1050 """Obtain tenant networks of VIM 

1051 Filter_dict can be: 

1052 name: network name 

1053 id: network uuid 

1054 shared: boolean 

1055 tenant_id: tenant 

1056 admin_state_up: boolean 

1057 status: 'ACTIVE' 

1058 Returns the network list of dictionaries 

1059 """ 

1060 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict)) 

1061 try: 

1062 self._reload_connection() 

1063 filter_dict_os = filter_dict.copy() 

1064 

1065 if self.api_version3 and "tenant_id" in filter_dict_os: 

1066 # TODO check 

1067 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id") 

1068 

1069 net_dict = self.neutron.list_networks(**filter_dict_os) 

1070 net_list = net_dict["networks"] 

1071 self.__net_os2mano(net_list) 

1072 

1073 return net_list 

1074 except ( 

1075 neExceptions.ConnectionFailed, 

1076 ksExceptions.ClientException, 

1077 neExceptions.NeutronException, 

1078 ConnectionError, 

1079 ) as e: 

1080 self._format_exception(e) 

1081 

1082 def get_network(self, net_id): 

1083 """Obtain details of network from VIM 

1084 Returns the network information from a network id""" 

1085 self.logger.debug(" Getting tenant network %s from VIM", net_id) 

1086 filter_dict = {"id": net_id} 

1087 net_list = self.get_network_list(filter_dict) 

1088 

1089 if len(net_list) == 0: 

1090 raise vimconn.VimConnNotFoundException( 

1091 "Network '{}' not found".format(net_id) 

1092 ) 

1093 elif len(net_list) > 1: 

1094 raise vimconn.VimConnConflictException( 

1095 "Found more than one network with this criteria" 

1096 ) 

1097 

1098 net = net_list[0] 

1099 subnets = [] 

1100 for subnet_id in net.get("subnets", ()): 

1101 try: 

1102 subnet = self.neutron.show_subnet(subnet_id) 

1103 except Exception as e: 

1104 self.logger.error( 

1105 "osconnector.get_network(): Error getting subnet %s %s" 

1106 % (net_id, str(e)) 

1107 ) 

1108 subnet = {"id": subnet_id, "fault": str(e)} 

1109 

1110 subnets.append(subnet) 

1111 

1112 net["subnets"] = subnets 

1113 net["encapsulation"] = net.get("provider:network_type") 

1114 net["encapsulation_type"] = net.get("provider:network_type") 

1115 net["segmentation_id"] = net.get("provider:segmentation_id") 

1116 net["encapsulation_id"] = net.get("provider:segmentation_id") 

1117 

1118 return net 

1119 

1120 @catch_any_exception 

1121 def delete_network(self, net_id, created_items=None): 

1122 """ 

1123 Removes a tenant network from VIM and its associated elements 

1124 :param net_id: VIM identifier of the network, provided by method new_network 

1125 :param created_items: dictionary with extra items to be deleted. provided by method new_network 

1126 Returns the network identifier or raises an exception upon error or when network is not found 

1127 """ 

1128 self.logger.debug("Deleting network '%s' from VIM", net_id) 

1129 

1130 if created_items is None: 

1131 created_items = {} 

1132 

1133 try: 

1134 self._reload_connection() 

1135 # delete l2gw connections (if any) before deleting the network 

1136 for k, v in created_items.items(): 

1137 if not v: # skip already deleted 

1138 continue 

1139 

1140 try: 

1141 k_item, _, k_id = k.partition(":") 

1142 if k_item == "l2gwconn": 

1143 self.neutron.delete_l2_gateway_connection(k_id) 

1144 

1145 except (neExceptions.ConnectionFailed, ConnectionError) as e: 

1146 self.logger.error( 

1147 "Error deleting l2 gateway connection: {}: {}".format( 

1148 type(e).__name__, e 

1149 ) 

1150 ) 

1151 self._format_exception(e) 

1152 except Exception as e: 

1153 self.logger.error( 

1154 "Error deleting l2 gateway connection: {}: {}".format( 

1155 type(e).__name__, e 

1156 ) 

1157 ) 

1158 

1159 # delete VM ports attached to this networks before the network 

1160 ports = self.neutron.list_ports(network_id=net_id) 

1161 for p in ports["ports"]: 

1162 try: 

1163 self.neutron.delete_port(p["id"]) 

1164 

1165 except (neExceptions.ConnectionFailed, ConnectionError) as e: 

1166 self.logger.error("Error deleting port %s: %s", p["id"], str(e)) 

1167 # If there is connection error, it raises. 

1168 self._format_exception(e) 

1169 except Exception as e: 

1170 self.logger.error("Error deleting port %s: %s", p["id"], str(e)) 

1171 

1172 self.neutron.delete_network(net_id) 

1173 

1174 return net_id 

1175 except (neExceptions.NetworkNotFoundClient, neExceptions.NotFound) as e: 

1176 # If network to be deleted is not found, it does not raise. 

1177 self.logger.warning( 

1178 f"Error deleting network: {net_id} is not found, {str(e)}" 

1179 ) 

1180 

1181 def refresh_nets_status(self, net_list): 

1182 """Get the status of the networks 

1183 Params: the list of network identifiers 

1184 Returns a dictionary with: 

1185 net_id: #VIM id of this network 

1186 status: #Mandatory. Text with one of: 

1187 # DELETED (not found at vim) 

1188 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 

1189 # OTHER (Vim reported other status not understood) 

1190 # ERROR (VIM indicates an ERROR status) 

1191 # ACTIVE, INACTIVE, DOWN (admin down), 

1192 # BUILD (on building process) 

1193 # 

1194 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR 

1195 vim_info: #Text with plain information obtained from vim (yaml.safe_dump) 

1196 """ 

1197 net_dict = {} 

1198 

1199 for net_id in net_list: 

1200 net = {} 

1201 

1202 try: 

1203 net_vim = self.get_network(net_id) 

1204 

1205 if net_vim["status"] in netStatus2manoFormat: 

1206 net["status"] = netStatus2manoFormat[net_vim["status"]] 

1207 else: 

1208 net["status"] = "OTHER" 

1209 net["error_msg"] = "VIM status reported " + net_vim["status"] 

1210 

1211 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]: 

1212 net["status"] = "DOWN" 

1213 

1214 net["vim_info"] = self.serialize(net_vim) 

1215 

1216 if net_vim.get("fault"): # TODO 

1217 net["error_msg"] = str(net_vim["fault"]) 

1218 except vimconn.VimConnNotFoundException as e: 

1219 self.logger.error("Exception getting net status: %s", str(e)) 

1220 net["status"] = "DELETED" 

1221 net["error_msg"] = str(e) 

1222 except vimconn.VimConnException as e: 

1223 self.logger.error("Exception getting net status: %s", str(e)) 

1224 net["status"] = "VIM_ERROR" 

1225 net["error_msg"] = str(e) 

1226 net_dict[net_id] = net 

1227 return net_dict 
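The returned structure, sketched for one healthy and one missing network (ids and messages invented):

    # {
    #     "net-a": {"status": "ACTIVE", "vim_info": "<yaml dump of the network>"},
    #     "net-b": {"status": "DELETED",
    #               "error_msg": "Network 'net-b' not found"},
    # }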

1228 

1229 def get_flavor(self, flavor_id): 

1230 """Obtain flavor details from the VIM. Returns the flavor dict details""" 

1231 self.logger.debug("Getting flavor '%s'", flavor_id) 

1232 try: 

1233 self._reload_connection() 

1234 flavor = self.nova.flavors.find(id=flavor_id) 

1235 return flavor.to_dict() 

1236 

1237 except ( 

1238 nvExceptions.NotFound, 

1239 nvExceptions.ClientException, 

1240 ksExceptions.ClientException, 

1241 ConnectionError, 

1242 ) as e: 

1243 self._format_exception(e) 

1244 

1245 def get_flavor_id_from_data(self, flavor_dict): 

1246 """Obtain flavor id that match the flavor description 

1247 Returns the flavor_id or raises a vimconnNotFoundException 

1248 flavor_dict: contains the required ram, vcpus, disk 

1249 If 'use_existing_flavors' is set to True at config, the closest flavor that provides the same or more ram, vcpus 

1250 and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a 

1251 vimconnNotFoundException is raised 

1252 """ 

1253 exact_match = not self.config.get("use_existing_flavors") 

1254 

1255 try: 

1256 self._reload_connection() 

1257 flavor_candidate_id = None 

1258 flavor_candidate_data = (10000, 10000, 10000) 

1259 flavor_target = ( 

1260 flavor_dict["ram"], 

1261 flavor_dict["vcpus"], 

1262 flavor_dict["disk"], 

1263 flavor_dict.get("ephemeral", 0), 

1264 flavor_dict.get("swap", 0), 

1265 ) 

1266 # numa=None 

1267 extended = flavor_dict.get("extended", {}) 

1268 if extended: 

1269 # TODO 

1270 raise vimconn.VimConnNotFoundException( 

1271 "Flavor with EPA still not implemented" 

1272 ) 

1273 # if len(numas) > 1: 

1274 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa") 

1275 # numa=numas[0] 

1276 # numas = extended.get("numas") 

1277 for flavor in self.nova.flavors.list(): 

1278 epa = flavor.get_keys() 

1279 

1280 if epa: 

1281 continue 

1282 # TODO 

1283 

1284 flavor_data = ( 

1285 flavor.ram, 

1286 flavor.vcpus, 

1287 flavor.disk, 

1288 flavor.ephemeral, 

1289 flavor.swap if isinstance(flavor.swap, int) else 0, 

1290 ) 

1291 if flavor_data == flavor_target: 

1292 return flavor.id 

1293 elif ( 

1294 not exact_match 

1295 and flavor_target < flavor_data < flavor_candidate_data 

1296 ): 

1297 flavor_candidate_id = flavor.id 

1298 flavor_candidate_data = flavor_data 

1299 

1300 if not exact_match and flavor_candidate_id: 

1301 return flavor_candidate_id 

1302 

1303 raise vimconn.VimConnNotFoundException( 

1304 "Cannot find any flavor matching '{}'".format(flavor_dict) 

1305 ) 

1306 except ( 

1307 nvExceptions.NotFound, 

1308 nvExceptions.BadRequest, 

1309 nvExceptions.ClientException, 

1310 ksExceptions.ClientException, 

1311 ConnectionError, 

1312 ) as e: 

1313 self._format_exception(e) 
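The closest-match branch relies on Python's lexicographic tuple comparison, so candidates are ranked by ram first, then vcpus, then disk (not by a true distance metric); a worked illustration with invented numbers:

    flavor_target = (2048, 2, 20, 0, 0)   # ram, vcpus, disk, ephemeral, swap
    candidate_a = (2048, 2, 40, 0, 0)     # same ram/vcpus, more disk
    candidate_b = (4096, 2, 20, 0, 0)     # more ram
    sentinel = (10000, 10000, 10000)      # initial flavor_candidate_data above

    print(flavor_target < candidate_a < sentinel)  # True: acceptable candidate
    print(flavor_target < candidate_b < sentinel)  # True: also acceptable
    print(candidate_a < candidate_b)  # True: a is kept, since ram compares first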

1314 

1315 @staticmethod 

1316 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None: 

1317 """Process resource quota and fill up extra_specs. 

1318 Args: 

1319 quota (dict): Keeping the quota of resources 

1320 prefix (str): Prefix 

1321 extra_specs (dict): Dict to be filled and used during flavor creation 

1322 

1323 """ 

1324 if "limit" in quota: 

1325 extra_specs["quota:" + prefix + "_limit"] = quota["limit"] 

1326 

1327 if "reserve" in quota: 

1328 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"] 

1329 

1330 if "shares" in quota: 

1331 extra_specs["quota:" + prefix + "_shares_level"] = "custom" 

1332 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"] 
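For example (invented numbers), a CPU quota produces the following keys:

    extra_specs = {}
    vimconnector.process_resource_quota(
        {"limit": 4000, "reserve": 2000, "shares": 16}, "cpu", extra_specs
    )
    # extra_specs == {"quota:cpu_limit": 4000,
    #                 "quota:cpu_reservation": 2000,
    #                 "quota:cpu_shares_level": "custom",
    #                 "quota:cpu_shares_share": 16}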

1333 

1334 @staticmethod 

1335 def process_numa_memory( 

1336 numa: dict, node_id: Optional[int], extra_specs: dict 

1337 ) -> None: 

1338 """Set the memory in extra_specs. 

1339 Args: 

1340 numa (dict): A dictionary which includes numa information 

1341 node_id (int): ID of numa node 

1342 extra_specs (dict): To be filled. 

1343 

1344 """ 

1345 if not numa.get("memory"): 

1346 return 

1347 memory_mb = numa["memory"] * 1024 

1348 memory = "hw:numa_mem.{}".format(node_id) 

1349 extra_specs[memory] = int(memory_mb) 

1350 

1351 @staticmethod 

1352 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None: 

1353 """Set the cpu in extra_specs. 

1354 Args: 

1355 numa (dict): A dictionary which includes numa information 

1356 node_id (int): ID of numa node 

1357 extra_specs (dict): To be filled. 

1358 

1359 """ 

1360 if not numa.get("vcpu"): 

1361 return 

1362 vcpu = numa["vcpu"] 

1363 cpu = "hw:numa_cpus.{}".format(node_id) 

1364 vcpu = ",".join(map(str, vcpu)) 

1365 extra_specs[cpu] = vcpu 

1366 

1367 @staticmethod 

1368 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]: 

1369 """Fill up extra_specs if numa has paired-threads. 

1370 Args: 

1371 numa (dict): A dictionary which includes numa information 

1372 extra_specs (dict): To be filled. 

1373 

1374 Returns: 

1375 threads (int) Number of virtual cpus 

1376 

1377 """ 

1378 if not numa.get("paired-threads"): 

1379 return 

1380 

1381 # cpu_thread_policy "require" implies that compute node must have an STM architecture 

1382 threads = numa["paired-threads"] * 2 

1383 extra_specs["hw:cpu_thread_policy"] = "require" 

1384 extra_specs["hw:cpu_policy"] = "dedicated" 

1385 return threads 

1386 

1387 @staticmethod 

1388 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]: 

1389 """Fill up extra_specs if numa has cores. 

1390 Args: 

1391 numa (dict): A dictionary which includes numa information 

1392 extra_specs (dict): To be filled. 

1393 

1394 Returns: 

1395 cores (int) Number of virtual cpus 

1396 

1397 """ 

1398 # cpu_thread_policy "isolate" implies that the host must not have an SMT 

1399 # architecture, or a non-SMT architecture will be emulated 

1400 if not numa.get("cores"): 

1401 return 

1402 cores = numa["cores"] 

1403 extra_specs["hw:cpu_thread_policy"] = "isolate" 

1404 extra_specs["hw:cpu_policy"] = "dedicated" 

1405 return cores 

1406 

1407 @staticmethod 

1408 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]: 

1409 """Fill up extra_specs if numa has threads. 

1410 Args: 

1411 numa (dict): A dictionary which includes numa information 

1412 extra_specs (dict): To be filled. 

1413 

1414 Returns: 

1415 threads (int) Number of virtual cpus 

1416 

1417 """ 

1418 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture 

1419 if not numa.get("threads"): 

1420 return 

1421 threads = numa["threads"] 

1422 extra_specs["hw:cpu_thread_policy"] = "prefer" 

1423 extra_specs["hw:cpu_policy"] = "dedicated" 

1424 return threads 

1425 

1426 def _process_numa_parameters_of_flavor( 

1427 self, numas: List, extra_specs: Dict 

1428 ) -> None: 

1429 """Process numa parameters and fill up extra_specs. 

1430 

1431 Args: 

1432 numas (list): List of dictionary which includes numa information 

1433 extra_specs (dict): To be filled. 

1434 

1435 """ 

1436 numa_nodes = len(numas) 

1437 extra_specs["hw:numa_nodes"] = str(numa_nodes) 

1438 cpu_cores, cpu_threads = 0, 0 

1439 

1440 if self.vim_type == "VIO": 

1441 self.process_vio_numa_nodes(numa_nodes, extra_specs) 

1442 

1443 for numa in numas: 

1444 if "id" in numa: 

1445 node_id = numa["id"] 

1446 # overwrite ram and vcpus 

1447 # check if key "memory" is present in numa else use ram value at flavor 

1448 self.process_numa_memory(numa, node_id, extra_specs) 

1449 self.process_numa_vcpu(numa, node_id, extra_specs) 

1450 

1451 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html 

1452 extra_specs["hw:cpu_sockets"] = str(numa_nodes) 

1453 

1454 if "paired-threads" in numa: 

1455 threads = self.process_numa_paired_threads(numa, extra_specs) 

1456 cpu_threads += threads 

1457 

1458 elif "cores" in numa: 

1459 cores = self.process_numa_cores(numa, extra_specs) 

1460 cpu_cores += cores 

1461 

1462 elif "threads" in numa: 

1463 threads = self.process_numa_threads(numa, extra_specs) 

1464 cpu_threads += threads 

1465 

1466 if cpu_cores: 

1467 extra_specs["hw:cpu_cores"] = str(cpu_cores) 

1468 if cpu_threads: 

1469 extra_specs["hw:cpu_threads"] = str(cpu_threads) 
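An end-to-end illustration with an invented single-node NUMA descriptor (non-VIO case):

    numas = [{"id": 0, "memory": 2, "vcpu": [0, 1], "paired-threads": 2}]
    # _process_numa_parameters_of_flavor(numas, extra_specs) yields:
    # {"hw:numa_nodes": "1",
    #  "hw:numa_mem.0": 2048,              # memory given in GB, stored in MB
    #  "hw:numa_cpus.0": "0,1",
    #  "hw:cpu_sockets": "1",
    #  "hw:cpu_thread_policy": "require",  # paired-threads requires SMT
    #  "hw:cpu_policy": "dedicated",
    #  "hw:cpu_threads": "4"}              # 2 paired-threads -> 4 virtual cpus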

1470 

1471 @staticmethod 

1472 def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None: 

1473 """According to number of numa nodes, updates the extra_specs for VIO. 

1474 

1475 Args: 

1476 

1477 numa_nodes (int): Number of numa nodes 

1478 extra_specs (dict): Extra specs dict to be updated 

1479 

1480 """ 

1481 # If there are several numas, we do not define specific affinity. 

1482 extra_specs["vmware:latency_sensitivity_level"] = "high" 

1483 

1484 def _change_flavor_name( 

1485 self, name: str, name_suffix: int, flavor_data: dict 

1486 ) -> str: 

1487 """Change the flavor name if the name already exists. 

1488 

1489 Args: 

1490 name (str): Flavor name to be checked 

1491 name_suffix (int): Suffix to be appended to name 

1492 flavor_data (dict): Flavor dict 

1493 

1494 Returns: 

1495 name (str): New flavor name to be used 

1496 

1497 """ 

1498 # Get used names 

1499 fl = self.nova.flavors.list() 

1500 fl_names = [f.name for f in fl] 

1501 

1502 while name in fl_names: 

1503 name_suffix += 1 

1504 name = flavor_data["name"] + "-" + str(name_suffix) 

1505 

1506 return name 

1507 

1508 def _process_extended_config_of_flavor( 

1509 self, extended: dict, extra_specs: dict 

1510 ) -> None: 

1511 """Process the extended dict to fill up extra_specs. 

1512 Args: 

1513 

1514 extended (dict): Keeping the extra specification of flavor 

1515 extra_specs (dict) Dict to be filled to be used during flavor creation 

1516 

1517 """ 

1518 quotas = { 

1519 "cpu-quota": "cpu", 

1520 "mem-quota": "memory", 

1521 "vif-quota": "vif", 

1522 "disk-io-quota": "disk_io", 

1523 } 

1524 

1525 page_sizes = { 

1526 "LARGE": "large", 

1527 "SMALL": "small", 

1528 "SIZE_2MB": "2MB", 

1529 "SIZE_1GB": "1GB", 

1530 "PREFER_LARGE": "any", 

1531 } 

1532 

1533 policies = { 

1534 "cpu-pinning-policy": "hw:cpu_policy", 

1535 "cpu-thread-pinning-policy": "hw:cpu_thread_policy", 

1536 "mem-policy": "hw:numa_mempolicy", 

1537 } 

1538 

1539 numas = extended.get("numas") 

1540 if numas: 

1541 self._process_numa_parameters_of_flavor(numas, extra_specs) 

1542 

1543 for quota, item in quotas.items(): 

1544 if quota in extended.keys(): 

1545 self.process_resource_quota(extended.get(quota), item, extra_specs) 

1546 

1547 # Set the mempage size as specified in the descriptor 

1548 if extended.get("mempage-size"): 

1549 if extended["mempage-size"] in page_sizes.keys(): 

1550 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]] 

1551 else: 

1552 # Normally, validations in NBI should not allow this condition. 

1553 self.logger.debug( 

1554 "Invalid mempage-size %s. Will be ignored", 

1555 extended.get("mempage-size"), 

1556 ) 

1557 

1558 for policy, hw_policy in policies.items(): 

1559 if extended.get(policy): 

1560 extra_specs[hw_policy] = extended[policy].lower() 

1561 

1562 @staticmethod 

1563 def _get_flavor_details(flavor_data: dict) -> Tuple: 

1564 """Returns the details of flavor 

1565 Args: 

1566 flavor_data (dict): Dictionary that includes required flavor details 

1567 

1568 Returns: 

1569 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor 

1570 

1571 """ 

1572 return ( 

1573 flavor_data.get("ram", 64), 

1574 flavor_data.get("vcpus", 1), 

1575 {}, 

1576 flavor_data.get("extended"), 

1577 ) 

1578 

1579 @catch_any_exception 

1580 def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str: 

1581 """Adds a tenant flavor to openstack VIM. 

1582 if change_name_if_used is True, it will change name in case of conflict, 

1583 because it is not supported name repetition. 

1584 

1585 Args: 

1586 flavor_data (dict): Flavor details to be processed 

1587 change_name_if_used (bool): Change name in case of conflict 

1588 

1589 Returns: 

1590 flavor_id (str): flavor identifier 

1591 

1592 """ 

1593 self.logger.debug("Adding flavor '%s'", str(flavor_data)) 

1594 retry = 0 

1595 max_retries = 3 

1596 name_suffix = 0 

1597 name = flavor_data["name"] 

1598 while retry < max_retries: 

1599 retry += 1 

1600 try: 

1601 self._reload_connection() 

1602 

1603 if change_name_if_used: 

1604 name = self._change_flavor_name(name, name_suffix, flavor_data) 

1605 

1606 ram, vcpus, extra_specs, extended = self._get_flavor_details( 

1607 flavor_data 

1608 ) 

1609 if extended: 

1610 self._process_extended_config_of_flavor(extended, extra_specs) 

1611 

1612 # Create flavor 

1613 

1614 new_flavor = self.nova.flavors.create( 

1615 name=name, 

1616 ram=ram, 

1617 vcpus=vcpus, 

1618 disk=flavor_data.get("disk", 0), 

1619 ephemeral=flavor_data.get("ephemeral", 0), 

1620 swap=flavor_data.get("swap", 0), 

1621 is_public=flavor_data.get("is_public", True), 

1622 ) 

1623 

1624 # Add metadata 

1625 if extra_specs: 

1626 new_flavor.set_keys(extra_specs) 

1627 

1628 return new_flavor.id 

1629 

1630 except nvExceptions.Conflict as e: 

1631 if change_name_if_used and retry < max_retries: 

1632 continue 

1633 

1634 self._format_exception(e) 
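An illustrative flavor request (values invented; conn is a connector instance):

    flavor_data = {
        "name": "vnf-small",
        "ram": 2048,   # MB
        "vcpus": 2,
        "disk": 20,    # GB
        "extended": {
            "mempage-size": "LARGE",       # -> hw:mem_page_size = "large"
            "cpu-quota": {"limit": 4000},  # -> quota:cpu_limit = 4000
        },
    }
    flavor_id = conn.new_flavor(flavor_data)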

1635 

1636 @catch_any_exception 

1637 def delete_flavor(self, flavor_id): 

1638 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id""" 

1639 try: 

1640 self._reload_connection() 

1641 self.nova.flavors.delete(flavor_id) 

1642 return flavor_id 

1643 

1644 except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e: 

1645 # If flavor is not found, it does not raise. 

1646 self.logger.warning( 

1647 f"Error deleting flavor: {flavor_id} is not found, {str(e.message)}" 

1648 ) 

1649 

1650 def new_image(self, image_dict): 

1651 """ 

1652 Adds a tenant image to VIM. image_dict is a dictionary with:

1653 name: name 

1654 disk_format: qcow2, vhd, vmdk, raw (by default), ... 

1655 location: path or URI 

1656 public: "yes" or "no" 

1657 metadata: metadata of the image 

1658 Returns the image_id 

1659 """ 

1660 retry = 0 

1661 max_retries = 3 

1662 

1663 while retry < max_retries: 

1664 retry += 1 

1665 try: 

1666 self._reload_connection() 

1667 

1668 # determine format http://docs.openstack.org/developer/glance/formats.html 

1669 if "disk_format" in image_dict: 

1670 disk_format = image_dict["disk_format"] 

1671 else: # autodiscover based on extension 

1672 if image_dict["location"].endswith(".qcow2"): 

1673 disk_format = "qcow2" 

1674 elif image_dict["location"].endswith(".vhd"): 

1675 disk_format = "vhd" 

1676 elif image_dict["location"].endswith(".vmdk"): 

1677 disk_format = "vmdk" 

1678 elif image_dict["location"].endswith(".vdi"): 

1679 disk_format = "vdi" 

1680 elif image_dict["location"].endswith(".iso"): 

1681 disk_format = "iso" 

1682 elif image_dict["location"].endswith(".aki"): 

1683 disk_format = "aki" 

1684 elif image_dict["location"].endswith(".ari"): 

1685 disk_format = "ari" 

1686 elif image_dict["location"].endswith(".ami"): 

1687 disk_format = "ami" 

1688 else: 

1689 disk_format = "raw" 

1690 

1691 self.logger.debug( 

1692 "new_image: '%s' loading from '%s'", 

1693 image_dict["name"], 

1694 image_dict["location"], 

1695 ) 

1696 if self.vim_type == "VIO": 

1697 container_format = "bare" 

1698 if "container_format" in image_dict: 

1699 container_format = image_dict["container_format"] 

1700 

1701 new_image = self.glance.images.create( 

1702 name=image_dict["name"], 

1703 container_format=container_format, 

1704 disk_format=disk_format, 

1705 ) 

1706 else: 

1707 new_image = self.glance.images.create(name=image_dict["name"]) 

1708 

1709 if image_dict["location"].startswith("http"): 

1710 # TODO there is no method for direct download. It must be downloaded locally with requests

1711 raise vimconn.VimConnNotImplemented("Cannot create image from URL") 

1712 else: # local path 

1713 with open(image_dict["location"]) as fimage: 

1714 self.glance.images.upload(new_image.id, fimage) 

1715 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public= 

1716 # image_dict.get("public","yes")=="yes", 

1717 # container_format="bare", data=fimage, disk_format=disk_format) 

1718 

1719 metadata_to_load = image_dict.get("metadata") or {}  # guard against a missing "metadata" key

1720 

1721 # TODO "location" is a reserved word in current openstack versions. Fixed for VIO;

1722 # please check for openstack

1723 if self.vim_type == "VIO": 

1724 metadata_to_load["upload_location"] = image_dict["location"] 

1725 else: 

1726 metadata_to_load["location"] = image_dict["location"] 

1727 

1728 self.glance.images.update(new_image.id, **metadata_to_load) 

1729 

1730 return new_image.id 

1731 except ( 

1732 HTTPException, 

1733 gl1Exceptions.HTTPException, 

1734 gl1Exceptions.CommunicationError, 

1735 ConnectionError, 

1736 ) as e: 

1737 if retry < max_retries:  # retry transient errors; raise only after the last attempt

1738 continue 

1739 

1740 self._format_exception(e) 

1741 except IOError as e: # can not open the file 

1742 raise vimconn.VimConnConnectionException( 

1743 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]), 

1744 http_code=vimconn.HTTP_Bad_Request, 

1745 ) 

1746 except Exception as e: 

1747 self._format_exception(e) 
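
# Illustrative sketch (paths and values hypothetical): a minimal image_dict
# for new_image(). With no "disk_format", the format is autodiscovered from
# the file extension (".qcow2" here); "metadata" is stored on the image under
# "location" (or "upload_location" on VIO).
example_image_dict = {
    "name": "ubuntu-20.04",
    "location": "/var/images/ubuntu-20.04.qcow2",
    "metadata": {"use": "test"},
}
# image_id = vim.new_image(example_image_dict)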

1748 

1749 @catch_any_exception 

1750 def delete_image(self, image_id): 

1751 """Deletes a tenant image from openstack VIM. Returns the old id""" 

1752 try: 

1753 self._reload_connection() 

1754 self.glance.images.delete(image_id) 

1755 

1756 return image_id 

1757 except gl1Exceptions.NotFound as e: 

1758 # If image is not found, it does not raise. 

1759 self.logger.warning( 

1760 f"Error deleting image: {image_id} is not found, {str(e)}" 

1761 ) 

1762 

1763 @catch_any_exception 

1764 def get_image_id_from_path(self, path): 

1765 """Get the image id from image path in the VIM database. Returns the image_id""" 

1766 self._reload_connection() 

1767 images = self.glance.images.list() 

1768 

1769 for image in images: 

1770 if image.metadata.get("location") == path: 

1771 return image.id 

1772 

1773 raise vimconn.VimConnNotFoundException( 

1774 "image with location '{}' not found".format(path) 

1775 ) 

1776 

1777 def get_image_list(self, filter_dict={}): 

1778 """Obtain tenant images from VIM 

1779 Filter_dict can be: 

1780 id: image id 

1781 name: image name 

1782 checksum: image checksum 

1783 Returns the image list of dictionaries: 

1784 [{<the fields at Filter_dict plus some VIM specific>}, ...] 

1785 List can be empty 

1786 """ 

1787 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict)) 

1788 try: 

1789 self._reload_connection() 

1790 # filter_dict_os = filter_dict.copy() 

1791 # First we filter by the available filter fields: name, id. The others are removed. 

1792 image_list = self.glance.images.list() 

1793 filtered_list = [] 

1794 

1795 for image in image_list: 

1796 try: 

1797 if filter_dict.get("name") and image["name"] != filter_dict["name"]: 

1798 continue 

1799 

1800 if filter_dict.get("id") and image["id"] != filter_dict["id"]: 

1801 continue 

1802 

1803 if ( 

1804 filter_dict.get("checksum") 

1805 and image["checksum"] != filter_dict["checksum"] 

1806 ): 

1807 continue 

1808 

1809 filtered_list.append(image.copy()) 

1810 except gl1Exceptions.HTTPNotFound: 

1811 pass 

1812 

1813 return filtered_list 

1814 

1815 except ( 

1816 ksExceptions.ClientException, 

1817 nvExceptions.ClientException, 

1818 gl1Exceptions.CommunicationError, 

1819 ConnectionError, 

1820 ) as e: 

1821 self._format_exception(e) 
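
# Illustrative usage (values hypothetical): filtering is applied client-side
# over the full glance listing, so any combination of the three supported
# keys (id, name, checksum) can be passed.
# images = vim.get_image_list({"name": "ubuntu-20.04"})
# images = vim.get_image_list({"id": "7b9f0b36", "checksum": "2dad8f11"})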

1822 

1823 def __wait_for_vm(self, vm_id, status): 

1824 """wait until vm is in the desired status and return True. 

1825 If the VM gets in ERROR status, return false. 

1826 If the timeout is reached generate an exception""" 

1827 elapsed_time = 0 

1828 while elapsed_time < server_timeout: 

1829 vm_status = self.nova.servers.get(vm_id).status 

1830 

1831 if vm_status == status: 

1832 return True 

1833 

1834 if vm_status == "ERROR": 

1835 return False 

1836 

1837 time.sleep(5) 

1838 elapsed_time += 5 

1839 

1840 # if we exceeded the timeout rollback 

1841 if elapsed_time >= server_timeout: 

1842 raise vimconn.VimConnException( 

1843 "Timeout waiting for instance " + vm_id + " to get " + status, 

1844 http_code=vimconn.HTTP_Request_Timeout, 

1845 ) 

1846 

1847 def _get_openstack_availablity_zones(self): 

1848 """ 

1849 Get the available availability zones from openstack

1850 :return: 

1851 """ 

1852 try: 

1853 openstack_availability_zone = self.nova.availability_zones.list() 

1854 openstack_availability_zone = [ 

1855 str(zone.zoneName) 

1856 for zone in openstack_availability_zone 

1857 if zone.zoneName != "internal" 

1858 ] 

1859 

1860 return openstack_availability_zone 

1861 except Exception: 

1862 return None 

1863 

1864 def _set_availablity_zones(self): 

1865 """ 

1866 Set the vim availability zone

1867 :return: 

1868 """ 

1869 if "availability_zone" in self.config: 

1870 vim_availability_zones = self.config.get("availability_zone") 

1871 

1872 if isinstance(vim_availability_zones, str): 

1873 self.availability_zone = [vim_availability_zones] 

1874 elif isinstance(vim_availability_zones, list): 

1875 self.availability_zone = vim_availability_zones 

1876 else: 

1877 self.availability_zone = self._get_openstack_availablity_zones() 

1878 if "storage_availability_zone" in self.config: 

1879 self.storage_availability_zone = self.config.get( 

1880 "storage_availability_zone" 

1881 ) 

1882 

1883 def _get_vm_availability_zone( 

1884 self, availability_zone_index, availability_zone_list 

1885 ): 

1886 """ 

1887 Return the availability zone to be used by the created VM.

1888 :return: The VIM availability zone to be used or None 

1889 """ 

1890 if availability_zone_index is None: 

1891 if not self.config.get("availability_zone"): 

1892 return None 

1893 elif isinstance(self.config.get("availability_zone"), str): 

1894 return self.config["availability_zone"] 

1895 else: 

1896 # TODO consider using a different parameter at config for default AV and AV list match 

1897 return self.config["availability_zone"][0] 

1898 

1899 vim_availability_zones = self.availability_zone 

1900 # check if the VIM offers enough availability zones as described in the VNFD

1901 if vim_availability_zones and len(availability_zone_list) <= len( 

1902 vim_availability_zones 

1903 ): 

1904 # check if all the names of NFV AV match VIM AV names 

1905 match_by_index = False 

1906 for av in availability_zone_list: 

1907 if av not in vim_availability_zones: 

1908 match_by_index = True 

1909 break 

1910 

1911 if match_by_index: 

1912 return vim_availability_zones[availability_zone_index] 

1913 else: 

1914 return availability_zone_list[availability_zone_index] 

1915 else: 

1916 raise vimconn.VimConnConflictException( 

1917 "No enough availability zones at VIM for this deployment" 

1918 ) 
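
# Worked example (illustrative) of the matching rule above: with VIM zones
# ["az1", "az2", "az3"] and a VNFD list ["az2", "az1"], every VNFD name
# exists at the VIM, so selection is by name and index 0 yields "az2".
# With a VNFD list ["zoneA", "zoneB"], not all names match, so selection
# falls back to the VIM list by position and index 0 yields "az1".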

1919 

1920 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None: 

1921 """Fill up the security_groups in the port_dict. 

1922 

1923 Args: 

1924 net (dict): Network details 

1925 port_dict (dict): Port details 

1926 

1927 """ 

1928 if ( 

1929 self.config.get("security_groups") 

1930 and net.get("port_security") is not False 

1931 and not self.config.get("no_port_security_extension") 

1932 ): 

1933 if not self.security_groups_id: 

1934 self._get_ids_from_name() 

1935 

1936 port_dict["security_groups"] = self.security_groups_id 

1937 

1938 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None: 

1939 """Fill up the network binding depending on network type in the port_dict. 

1940 

1941 Args: 

1942 net (dict): Network details 

1943 port_dict (dict): Port details 

1944 

1945 """ 

1946 if not net.get("type"): 

1947 raise vimconn.VimConnException("Type is missing in the network details.") 

1948 

1949 if net["type"] == "virtual": 

1950 pass 

1951 

1952 # For VF 

1953 elif net["type"] == "VF" or net["type"] == "SR-IOV": 

1954 port_dict["binding:vnic_type"] = "direct" 

1955 

1956 # VIO specific Changes 

1957 if self.vim_type == "VIO": 

1958 # Need to create port with port_security_enabled = False and no-security-groups 

1959 port_dict["port_security_enabled"] = False 

1960 port_dict["provider_security_groups"] = [] 

1961 port_dict["security_groups"] = [] 

1962 

1963 else: 

1964 # For PT PCI-PASSTHROUGH 

1965 port_dict["binding:vnic_type"] = "direct-physical" 

1966 

1967 @staticmethod 

1968 def _set_fixed_ip(new_port: dict, net: dict) -> None: 

1969 """Set the "ip" parameter in net dictionary. 

1970 

1971 Args: 

1972 new_port (dict): New created port 

1973 net (dict): Network details 

1974 

1975 """ 

1976 fixed_ips = new_port["port"].get("fixed_ips") 

1977 

1978 if fixed_ips: 

1979 net["ip"] = fixed_ips[0].get("ip_address") 

1980 else: 

1981 net["ip"] = None 

1982 

1983 @staticmethod 

1984 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None: 

1985 """Fill up the mac_address and fixed_ips in port_dict. 

1986 

1987 Args: 

1988 net (dict): Network details 

1989 port_dict (dict): Port details 

1990 

1991 """ 

1992 if net.get("mac_address"): 

1993 port_dict["mac_address"] = net["mac_address"] 

1994 

1995 ip_dual_list = [] 

1996 if ip_list := net.get("ip_address"): 

1997 if not isinstance(ip_list, list): 

1998 ip_list = [ip_list] 

1999 for ip in ip_list: 

2000 ip_dict = {"ip_address": ip} 

2001 ip_dual_list.append(ip_dict) 

2002 port_dict["fixed_ips"] = ip_dual_list 

2003 # TODO add "subnet_id": <subnet_id> 

2004 

2005 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict: 

2006 """Create new port using neutron. 

2007 

2008 Args: 

2009 port_dict (dict): Port details 

2010 created_items (dict): All created items 

2011 net (dict): Network details 

2012 

2013 Returns: 

2014 new_port (dict): New created port 

2015 

2016 """ 

2017 new_port = self.neutron.create_port({"port": port_dict}) 

2018 created_items["port:" + str(new_port["port"]["id"])] = True 

2019 net["mac_address"] = new_port["port"]["mac_address"] 

2020 net["vim_id"] = new_port["port"]["id"] 

2021 

2022 return new_port 

2023 

2024 def _create_port( 

2025 self, net: dict, name: str, created_items: dict 

2026 ) -> Tuple[dict, dict]: 

2027 """Create port using net details. 

2028 

2029 Args: 

2030 net (dict): Network details 

2031 name (str): Name to be used as network name if net dict does not include name 

2032 created_items (dict): All created items 

2033 

2034 Returns: 

2035 new_port, port New created port, port dictionary 

2036 

2037 """ 

2038 

2039 port_dict = { 

2040 "network_id": net["net_id"], 

2041 "name": net.get("name"), 

2042 "admin_state_up": True, 

2043 } 

2044 

2045 if not port_dict["name"]: 

2046 port_dict["name"] = name 

2047 

2048 self._prepare_port_dict_security_groups(net, port_dict) 

2049 

2050 self._prepare_port_dict_binding(net, port_dict) 

2051 

2052 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict) 

2053 

2054 new_port = self._create_new_port(port_dict, created_items, net) 

2055 

2056 vimconnector._set_fixed_ip(new_port, net) 

2057 

2058 port = {"port-id": new_port["port"]["id"]} 

2059 

2060 if float(self.nova.api_version.get_string()) >= 2.32: 

2061 port["tag"] = new_port["port"]["name"] 

2062 

2063 return new_port, port 
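
# Illustrative result (UUIDs hypothetical): for a connected net entry,
# _create_port creates the neutron port, records it as
# created_items["port:<port-uuid>"] = True, writes the port's MAC address and
# id back into net, and returns a nova-friendly {"port-id": "<port-uuid>"}
# dict, with a "tag" key added only when the compute API version is >= 2.32.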

2064 

2065 def _prepare_network_for_vminstance( 

2066 self, 

2067 name: str, 

2068 net_list: list, 

2069 created_items: dict, 

2070 net_list_vim: list, 

2071 external_network: list, 

2072 no_secured_ports: list, 

2073 ) -> None: 

2074 """Create port and fill up net dictionary for new VM instance creation. 

2075 

2076 Args: 

2077 name (str): Name of network 

2078 net_list (list): List of networks 

2079 created_items (dict): All created items belongs to a VM 

2080 net_list_vim (list): List of ports 

2081 external_network (list): List of external-networks 

2082 no_secured_ports (list): Port security disabled ports 

2083 """ 

2084 

2085 self._reload_connection() 

2086 

2087 for net in net_list: 

2088 # Skip non-connected iface 

2089 if not net.get("net_id"): 

2090 continue 

2091 

2092 new_port, port = self._create_port(net, name, created_items) 

2093 

2094 net_list_vim.append(port) 

2095 

2096 if net.get("floating_ip", False): 

2097 net["exit_on_floating_ip_error"] = True 

2098 external_network.append(net) 

2099 

2100 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"): 

2101 net["exit_on_floating_ip_error"] = False 

2102 external_network.append(net) 

2103 net["floating_ip"] = self.config.get("use_floating_ip") 

2104 

2105 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic 

2106 # is dropped. As a workaround we wait until the VM is active and then disable the port-security 

2107 if net.get("port_security") is False and not self.config.get( 

2108 "no_port_security_extension" 

2109 ): 

2110 no_secured_ports.append( 

2111 ( 

2112 new_port["port"]["id"], 

2113 net.get("port_security_disable_strategy"), 

2114 ) 

2115 ) 

2116 

2117 def _prepare_persistent_root_volumes( 

2118 self, 

2119 name: str, 

2120 storage_av_zone: list, 

2121 disk: dict, 

2122 base_disk_index: int, 

2123 block_device_mapping: dict, 

2124 existing_vim_volumes: list, 

2125 created_items: dict, 

2126 ) -> Optional[str]: 

2127 """Prepare persistent root volumes for new VM instance. 

2128 

2129 Args: 

2130 name (str): Name of VM instance 

2131 storage_av_zone (list): Storage of availability zones 

2132 disk (dict): Disk details 

2133 base_disk_index (int): Disk index 

2134 block_device_mapping (dict): Block device details 

2135 existing_vim_volumes (list): Existing disk details 

2136 created_items (dict): All created items belongs to VM 

2137 

2138 Returns: 

2139 boot_volume_id (str): ID of boot volume 

2140 

2141 """ 

2142 # Disk may include only vim_volume_id or only vim_id.

2143 # Use an existing persistent root volume, found via vim_volume_id or vim_id

2144 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id" 

2145 if disk.get(key_id): 

2146 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id] 

2147 existing_vim_volumes.append({"id": disk[key_id]}) 

2148 else: 

2149 # Create persistent root volume 

2150 volume = self.cinder.volumes.create( 

2151 size=disk["size"], 

2152 name=name + "vd" + chr(base_disk_index), 

2153 imageRef=disk["image_id"], 

2154 # Make sure volume is in the same AZ as the VM to be attached to 

2155 availability_zone=storage_av_zone, 

2156 ) 

2157 boot_volume_id = volume.id 

2158 self.update_block_device_mapping( 

2159 volume=volume, 

2160 block_device_mapping=block_device_mapping, 

2161 base_disk_index=base_disk_index, 

2162 disk=disk, 

2163 created_items=created_items, 

2164 ) 

2165 

2166 return boot_volume_id 

2167 

2168 @staticmethod 

2169 def update_block_device_mapping( 

2170 volume: object, 

2171 block_device_mapping: dict, 

2172 base_disk_index: int, 

2173 disk: dict, 

2174 created_items: dict, 

2175 ) -> None: 

2176 """Add volume information to block device mapping dict. 

2177 Args: 

2178 volume (object): Created volume object 

2179 block_device_mapping (dict): Block device details 

2180 base_disk_index (int): Disk index 

2181 disk (dict): Disk details 

2182 created_items (dict): All created items belongs to VM 

2183 """ 

2184 if not volume: 

2185 raise vimconn.VimConnException("Volume is empty.") 

2186 

2187 if not hasattr(volume, "id"): 

2188 raise vimconn.VimConnException( 

2189 "Created volume is not valid, does not have id attribute." 

2190 ) 

2191 

2192 block_device_mapping["vd" + chr(base_disk_index)] = volume.id 

2193 if disk.get("multiattach"): # multiattach volumes do not belong to VDUs 

2194 return 

2195 volume_txt = "volume:" + str(volume.id) 

2196 if disk.get("keep"): 

2197 volume_txt += ":keep" 

2198 created_items[volume_txt] = True  # e.g. "volume:<id>" or "volume:<id>:keep"
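
# Illustrative created_items entries produced here (UUIDs hypothetical):
#   created_items["volume:0a1b2c3d"] = True        # ordinary persistent volume
#   created_items["volume:0a1b2c3d:keep"] = True   # volume flagged to survive VM deletion
# Multiattach volumes are deliberately not recorded, since they do not belong
# to a single VDU.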

2199 

2200 @catch_any_exception 

2201 def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:

2202 availability_zone = ( 

2203 self.storage_availability_zone 

2204 if self.storage_availability_zone 

2205 else self.vm_av_zone 

2206 ) 

2207 volume = self.cinder.volumes.create( 

2208 size=shared_volume_data["size"], 

2209 name=shared_volume_data["name"], 

2210 volume_type="multiattach", 

2211 availability_zone=availability_zone, 

2212 ) 

2213 return volume.name, volume.id 
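
# Illustrative usage (values hypothetical); note the code requests a cinder
# volume type literally named "multiattach", which is assumed to exist at the VIM:
# name, volume_id = vim.new_shared_volumes({"name": "shared-1", "size": 10})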

2214 

2215 def _prepare_shared_volumes( 

2216 self, 

2217 name: str, 

2218 disk: dict, 

2219 base_disk_index: int, 

2220 block_device_mapping: dict, 

2221 existing_vim_volumes: list, 

2222 created_items: dict, 

2223 ): 

2224 volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()} 

2225 if volumes.get(disk["name"]): 

2226 sv_id = volumes[disk["name"]] 

2227 max_retries = 3 

2228 vol_status = "" 

2229 # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time 

2230 while max_retries: 

2231 max_retries -= 1 

2232 volume = self.cinder.volumes.get(sv_id) 

2233 vol_status = volume.status 

2234 if volume.status not in ("in-use", "available"): 

2235 time.sleep(5) 

2236 continue 

2237 self.update_block_device_mapping( 

2238 volume=volume, 

2239 block_device_mapping=block_device_mapping, 

2240 base_disk_index=base_disk_index, 

2241 disk=disk, 

2242 created_items=created_items, 

2243 ) 

2244 return 

2245 raise vimconn.VimConnException( 

2246 "Shared volume is not prepared, status is: {}".format(vol_status), 

2247 http_code=vimconn.HTTP_Internal_Server_Error, 

2248 ) 

2249 

2250 def _prepare_non_root_persistent_volumes( 

2251 self, 

2252 name: str, 

2253 disk: dict, 

2254 storage_av_zone: list, 

2255 block_device_mapping: dict, 

2256 base_disk_index: int, 

2257 existing_vim_volumes: list, 

2258 created_items: dict, 

2259 ) -> None: 

2260 """Prepare persistent volumes for new VM instance. 

2261 

2262 Args: 

2263 name (str): Name of VM instance 

2264 disk (dict): Disk details 

2265 storage_av_zone (list): Storage of availability zones 

2266 block_device_mapping (dict): Block device details 

2267 base_disk_index (int): Disk index 

2268 existing_vim_volumes (list): Existing disk details 

2269 created_items (dict): All created items belongs to VM 

2270 """ 

2271 # Non-root persistent volumes 

2272 # Disk may include only vim_volume_id or only vim_id.

2273 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id" 

2274 if disk.get(key_id): 

2275 # Use existing persistent volume 

2276 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id] 

2277 existing_vim_volumes.append({"id": disk[key_id]}) 

2278 else: 

2279 volume_name = f"{name}vd{chr(base_disk_index)}" 

2280 volume = self.cinder.volumes.create( 

2281 size=disk["size"], 

2282 name=volume_name, 

2283 # Make sure volume is in the same AZ as the VM to be attached to 

2284 availability_zone=storage_av_zone, 

2285 ) 

2286 self.update_block_device_mapping( 

2287 volume=volume, 

2288 block_device_mapping=block_device_mapping, 

2289 base_disk_index=base_disk_index, 

2290 disk=disk, 

2291 created_items=created_items, 

2292 ) 

2293 

2294 def _wait_for_created_volumes_availability( 

2295 self, elapsed_time: int, created_items: dict 

2296 ) -> Optional[int]: 

2297 """Wait till created volumes become available. 

2298 

2299 Args: 

2300 elapsed_time (int): Passed time while waiting 

2301 created_items (dict): All created items belongs to VM 

2302 

2303 Returns: 

2304 elapsed_time (int): Time spent while waiting 

2305 

2306 """ 

2307 while elapsed_time < volume_timeout: 

2308 for created_item in created_items: 

2309 v, volume_id = ( 

2310 created_item.split(":")[0], 

2311 created_item.split(":")[1], 

2312 ) 

2313 if v == "volume": 

2314 volume = self.cinder.volumes.get(volume_id) 

2315 if ( 

2316 volume.volume_type == "multiattach" 

2317 and volume.status == "in-use" 

2318 ): 

2319 return elapsed_time 

2320 elif volume.status != "available": 

2321 break 

2322 else: 

2323 # All ready: break from while 

2324 break 

2325 

2326 time.sleep(5) 

2327 elapsed_time += 5 

2328 

2329 return elapsed_time 

2330 

2331 def _wait_for_existing_volumes_availability( 

2332 self, elapsed_time: int, existing_vim_volumes: list 

2333 ) -> Optional[int]: 

2334 """Wait till existing volumes become available. 

2335 

2336 Args: 

2337 elapsed_time (int): Passed time while waiting 

2338 existing_vim_volumes (list): Existing volume details 

2339 

2340 Returns: 

2341 elapsed_time (int): Time spent while waiting 

2342 

2343 """ 

2344 

2345 while elapsed_time < volume_timeout: 

2346 for volume in existing_vim_volumes: 

2347 v = self.cinder.volumes.get(volume["id"]) 

2348 if v.volume_type == "multiattach" and v.status == "in-use": 

2349 return elapsed_time 

2350 elif v.status != "available": 

2351 break 

2352 else: # all ready: break from while 

2353 break 

2354 

2355 time.sleep(5) 

2356 elapsed_time += 5 

2357 

2358 return elapsed_time 

2359 

2360 def _prepare_disk_for_vminstance( 

2361 self, 

2362 name: str, 

2363 existing_vim_volumes: list, 

2364 created_items: dict, 

2365 storage_av_zone: list, 

2366 block_device_mapping: dict, 

2367 disk_list: list = None, 

2368 ) -> None: 

2369 """Prepare all volumes for new VM instance. 

2370 

2371 Args: 

2372 name (str): Name of Instance 

2373 existing_vim_volumes (list): List of existing volumes 

2374 created_items (dict): All created items belongs to VM 

2375 storage_av_zone (list): Storage availability zone 

2376 block_device_mapping (dict): Block devices to be attached to VM 

2377 disk_list (list): List of disks 

2378 

2379 """ 

2380 # Create additional volumes in case these are present in disk_list 

2381 base_disk_index = ord("b") 

2382 boot_volume_id = None 

2383 elapsed_time = 0 

2384 for disk in disk_list: 

2385 if "image_id" in disk: 

2386 # Root persistent volume 

2387 base_disk_index = ord("a") 

2388 boot_volume_id = self._prepare_persistent_root_volumes( 

2389 name=name, 

2390 storage_av_zone=storage_av_zone, 

2391 disk=disk, 

2392 base_disk_index=base_disk_index, 

2393 block_device_mapping=block_device_mapping, 

2394 existing_vim_volumes=existing_vim_volumes, 

2395 created_items=created_items, 

2396 ) 

2397 elif disk.get("multiattach"): 

2398 self._prepare_shared_volumes( 

2399 name=name, 

2400 disk=disk, 

2401 base_disk_index=base_disk_index, 

2402 block_device_mapping=block_device_mapping, 

2403 existing_vim_volumes=existing_vim_volumes, 

2404 created_items=created_items, 

2405 ) 

2406 else: 

2407 # Non-root persistent volume 

2408 self._prepare_non_root_persistent_volumes( 

2409 name=name, 

2410 disk=disk, 

2411 storage_av_zone=storage_av_zone, 

2412 block_device_mapping=block_device_mapping, 

2413 base_disk_index=base_disk_index, 

2414 existing_vim_volumes=existing_vim_volumes, 

2415 created_items=created_items, 

2416 ) 

2417 base_disk_index += 1 

2418 

2419 # Wait until created volumes are with status available 

2420 elapsed_time = self._wait_for_created_volumes_availability( 

2421 elapsed_time, created_items 

2422 ) 

2423 # Wait until existing volumes in vim are with status available 

2424 elapsed_time = self._wait_for_existing_volumes_availability( 

2425 elapsed_time, existing_vim_volumes 

2426 ) 

2427 # If we exceeded the timeout rollback 

2428 if elapsed_time >= volume_timeout: 

2429 raise vimconn.VimConnException( 

2430 "Timeout creating volumes for instance " + name, 

2431 http_code=vimconn.HTTP_Request_Timeout, 

2432 ) 

2433 if boot_volume_id: 

2434 self.cinder.volumes.set_bootable(boot_volume_id, True) 
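
# Illustrative disk_list (values hypothetical) exercising the three branches above:
# disk_list = [
#     {"image_id": "img-uuid", "size": 10},  # persistent root volume, mapped to "vda"
#     {"size": 20},                          # non-root persistent volume, mapped to "vdb"
#     {"name": "shared-vol", "multiattach": True},  # existing shared volume, looked up by name
# ]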

2435 

2436 def _find_the_external_network_for_floating_ip(self): 

2437 """Get the external network ip in order to create floating IP. 

2438 

2439 Returns: 

2440 pool_id (str): External network pool ID 

2441 

2442 """ 

2443 

2444 # Find the external network 

2445 external_nets = list() 

2446 

2447 for net in self.neutron.list_networks()["networks"]: 

2448 if net["router:external"]: 

2449 external_nets.append(net) 

2450 

2451 if len(external_nets) == 0: 

2452 raise vimconn.VimConnException( 

2453 "Cannot create floating_ip automatically since " 

2454 "no external network is present", 

2455 http_code=vimconn.HTTP_Conflict, 

2456 ) 

2457 

2458 if len(external_nets) > 1: 

2459 raise vimconn.VimConnException( 

2460 "Cannot create floating_ip automatically since " 

2461 "multiple external networks are present", 

2462 http_code=vimconn.HTTP_Conflict, 

2463 ) 

2464 

2465 # Pool ID 

2466 return external_nets[0].get("id") 

2467 

2468 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None: 

2469 """Trigger neutron to create a new floating IP using external network ID. 

2470 

2471 Args: 

2472 param (dict): Input parameters to create a floating IP 

2473 created_items (dict): All created items belongs to new VM instance 

2474 

2475 Raises: 

2476 

2477 VimConnException 

2478 """ 

2479 try: 

2480 self.logger.debug("Creating floating IP") 

2481 new_floating_ip = self.neutron.create_floatingip(param) 

2482 free_floating_ip = new_floating_ip["floatingip"]["id"] 

2483 created_items["floating_ip:" + str(free_floating_ip)] = True 

2484 

2485 except Exception as e: 

2486 raise vimconn.VimConnException( 

2487 type(e).__name__ + ": Cannot create new floating_ip " + str(e), 

2488 http_code=vimconn.HTTP_Conflict, 

2489 ) 

2490 

2491 def _create_floating_ip( 

2492 self, floating_network: dict, server: object, created_items: dict 

2493 ) -> None: 

2494 """Get the available Pool ID and create a new floating IP. 

2495 

2496 Args: 

2497 floating_network (dict): Dict including external network ID 

2498 server (object): Server object 

2499 created_items (dict): All created items belongs to new VM instance 

2500 

2501 """ 

2502 

2503 # Pool_id is available 

2504 if ( 

2505 isinstance(floating_network["floating_ip"], str) 

2506 and floating_network["floating_ip"].lower() != "true" 

2507 ): 

2508 pool_id = floating_network["floating_ip"] 

2509 

2510 # Find the Pool_id 

2511 else: 

2512 pool_id = self._find_the_external_network_for_floating_ip() 

2513 

2514 param = { 

2515 "floatingip": { 

2516 "floating_network_id": pool_id, 

2517 "tenant_id": server.tenant_id, 

2518 } 

2519 } 

2520 

2521 self._neutron_create_float_ip(param, created_items) 

2522 

2523 def _find_floating_ip( 

2524 self, 

2525 server: object, 

2526 floating_ips: list, 

2527 floating_network: dict, 

2528 ) -> Optional[str]: 

2529 """Find the available free floating IPs if there are. 

2530 

2531 Args: 

2532 server (object): Server object 

2533 floating_ips (list): List of floating IPs 

2534 floating_network (dict): Details of floating network such as ID 

2535 

2536 Returns: 

2537 free_floating_ip (str): Free floating ip address 

2538 

2539 """ 

2540 for fip in floating_ips: 

2541 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id: 

2542 continue 

2543 

2544 if isinstance(floating_network["floating_ip"], str): 

2545 if fip.get("floating_network_id") != floating_network["floating_ip"]: 

2546 continue 

2547 

2548 return fip["id"] 

2549 

2550 def _assign_floating_ip( 

2551 self, free_floating_ip: str, floating_network: dict 

2552 ) -> Dict: 

2553 """Assign the free floating ip address to port. 

2554 

2555 Args: 

2556 free_floating_ip (str): Floating IP to be assigned 

2557 floating_network (dict): ID of floating network 

2558 

2559 Returns: 

2560 fip (dict): Floating ip details

2561 

2562 """ 

2563 # The vim_id key contains the neutron.port_id 

2564 self.neutron.update_floatingip( 

2565 free_floating_ip, 

2566 {"floatingip": {"port_id": floating_network["vim_id"]}}, 

2567 ) 

2568 # To avoid a race condition, wait 5 seconds and verify it is not re-assigned to another VM

2569 time.sleep(5) 

2570 

2571 return self.neutron.show_floatingip(free_floating_ip) 

2572 

2573 def _get_free_floating_ip( 

2574 self, server: object, floating_network: dict 

2575 ) -> Optional[str]: 

2576 """Get the free floating IP address. 

2577 

2578 Args: 

2579 server (object): Server Object 

2580 floating_network (dict): Floating network details 

2581 

2582 Returns: 

2583 free_floating_ip (str): Free floating ip addr 

2584 

2585 """ 

2586 

2587 floating_ips = self.neutron.list_floatingips().get("floatingips", ()) 

2588 

2589 # Randomize 

2590 random.shuffle(floating_ips) 

2591 

2592 return self._find_floating_ip(server, floating_ips, floating_network) 

2593 

2594 def _prepare_external_network_for_vminstance( 

2595 self, 

2596 external_network: list, 

2597 server: object, 

2598 created_items: dict, 

2599 vm_start_time: float, 

2600 ) -> None: 

2601 """Assign floating IP address for VM instance. 

2602 

2603 Args: 

2604 external_network (list): ID of External network 

2605 server (object): Server Object 

2606 created_items (dict): All created items belongs to new VM instance 

2607 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC 

2608 

2609 Raises: 

2610 VimConnException 

2611 

2612 """ 

2613 for floating_network in external_network: 

2614 try: 

2615 assigned = False 

2616 floating_ip_retries = 3 

2617 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry 

2618 # several times 

2619 while not assigned: 

2620 free_floating_ip = self._get_free_floating_ip( 

2621 server, floating_network 

2622 ) 

2623 

2624 if not free_floating_ip: 

2625 self._create_floating_ip( 

2626 floating_network, server, created_items 

2627 ) 

2628 

2629 try: 

2630 # For race condition ensure not already assigned 

2631 fip = self.neutron.show_floatingip(free_floating_ip) 

2632 

2633 if fip["floatingip"].get("port_id"): 

2634 continue 

2635 

2636 # Assign floating ip 

2637 fip = self._assign_floating_ip( 

2638 free_floating_ip, floating_network 

2639 ) 

2640 

2641 if fip["floatingip"]["port_id"] != floating_network["vim_id"]: 

2642 self.logger.warning( 

2643 "floating_ip {} re-assigned to other port".format( 

2644 free_floating_ip 

2645 ) 

2646 ) 

2647 continue 

2648 

2649 self.logger.debug( 

2650 "Assigned floating_ip {} to VM {}".format( 

2651 free_floating_ip, server.id 

2652 ) 

2653 ) 

2654 

2655 assigned = True 

2656 

2657 except Exception as e: 

2658 # Openstack needs some time after VM creation to assign an IP, so retry if it fails

2659 vm_status = self.nova.servers.get(server.id).status 

2660 

2661 if vm_status not in ("ACTIVE", "ERROR"): 

2662 if time.time() - vm_start_time < server_timeout: 

2663 time.sleep(5) 

2664 continue 

2665 elif floating_ip_retries > 0: 

2666 floating_ip_retries -= 1 

2667 continue 

2668 

2669 raise vimconn.VimConnException( 

2670 "Cannot create floating_ip: {} {}".format( 

2671 type(e).__name__, e 

2672 ), 

2673 http_code=vimconn.HTTP_Conflict, 

2674 ) 

2675 

2676 except Exception as e: 

2677 if not floating_network["exit_on_floating_ip_error"]: 

2678 self.logger.error("Cannot create floating_ip. %s", str(e)) 

2679 continue 

2680 

2681 raise 

2682 

2683 def _update_port_security_for_vminstance( 

2684 self, 

2685 no_secured_ports: list, 

2686 server: object, 

2687 ) -> None: 

2688 """Updates the port security according to no_secured_ports list. 

2689 

2690 Args: 

2691 no_secured_ports (list): List of ports that security will be disabled 

2692 server (object): Server Object 

2693 

2694 Raises: 

2695 VimConnException 

2696 

2697 """ 

2698 # Wait until the VM is active and then disable the port-security 

2699 if no_secured_ports: 

2700 self.__wait_for_vm(server.id, "ACTIVE") 

2701 

2702 for port in no_secured_ports: 

2703 port_update = { 

2704 "port": {"port_security_enabled": False, "security_groups": None} 

2705 } 

2706 

2707 if port[1] == "allow-address-pairs": 

2708 port_update = { 

2709 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]} 

2710 } 

2711 

2712 try: 

2713 self.neutron.update_port(port[0], port_update) 

2714 

2715 except Exception: 

2716 raise vimconn.VimConnException( 

2717 "It was not possible to disable port security for port {}".format( 

2718 port[0] 

2719 ) 

2720 ) 

2721 

2722 def new_vminstance( 

2723 self, 

2724 name: str, 

2725 description: str, 

2726 start: bool, 

2727 image_id: str, 

2728 flavor_id: str, 

2729 affinity_group_list: list, 

2730 net_list: list, 

2731 cloud_config=None, 

2732 disk_list=None, 

2733 availability_zone_index=None, 

2734 availability_zone_list=None, 

2735 ) -> tuple: 

2736 """Adds a VM instance to VIM. 

2737 

2738 Args: 

2739 name (str): name of VM 

2740 description (str): description 

2741 start (bool): indicates if VM must start or boot in pause mode. Ignored 

2742 image_id (str) image uuid 

2743 flavor_id (str) flavor uuid 

2744 affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty. 

2745 net_list (list): list of interfaces, each one is a dictionary with: 

2746 name: name of network 

2747 net_id: network uuid to connect 

2748 vpci: virtual pci address to assign, ignored because openstack lacks it #TODO

2749 model: interface model, ignored #TODO 

2750 mac_address: used for SR-IOV ifaces #TODO for other types 

2751 use: 'data', 'bridge', 'mgmt' 

2752 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared' 

2753 vim_id: filled/added by this function 

2754 floating_ip: True/False (or it can be None) 

2755 port_security: True/False 

2756 cloud_config (dict): (optional) dictionary with: 

2757 key-pairs: (optional) list of strings with the public key to be inserted to the default user 

2758 users: (optional) list of users to be inserted, each item is a dict with: 

2759 name: (mandatory) user name, 

2760 key-pairs: (optional) list of strings with the public key to be inserted to the user 

2761 user-data: (optional) string is a text script to be passed directly to cloud-init 

2762 config-files: (optional). List of files to be transferred. Each item is a dict with: 

2763 dest: (mandatory) string with the destination absolute path 

2764 encoding: (optional, by default text). Can be one of: 

2765 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64' 

2766 content : (mandatory) string with the content of the file 

2767 permissions: (optional) string with file permissions, typically octal notation '0644' 

2768 owner: (optional) file owner, string with the format 'owner:group' 

2769 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk) 

2770 disk_list: (optional) list with additional disks to the VM. Each item is a dict with: 

2771 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted 

2772 size: (mandatory) string with the size of the disk in GB 

2773 vim_id: (optional) should use this existing volume id 

2774 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AV is required

2775 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if 

2776 availability_zone_index is None 

2777 #TODO ip, security groups 

2778 

2779 Returns: 

2780 A tuple with the instance identifier and created_items or raises an exception on error 

2781 created_items can be None or a dictionary where this method can include key-values that will be passed to 

2782 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc. 

2783 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same 

2784 as not present. 

2785 

2786 """ 

2787 self.logger.debug( 

2788 "new_vminstance input: image='%s' flavor='%s' nics='%s'", 

2789 image_id, 

2790 flavor_id, 

2791 str(net_list), 

2792 ) 

2793 server = None 

2794 created_items = {} 

2795 net_list_vim = [] 

2796 # list of external networks to be connected to instance, later on used to create floating_ip 

2797 external_network = [] 

2798 # List of ports with port-security disabled 

2799 no_secured_ports = [] 

2800 block_device_mapping = {} 

2801 existing_vim_volumes = [] 

2802 server_group_id = None 

2803 scheduler_hints = {}

2804 

2805 try: 

2806 # Check the Openstack Connection 

2807 self._reload_connection() 

2808 

2809 # Prepare network list 

2810 self._prepare_network_for_vminstance( 

2811 name=name, 

2812 net_list=net_list, 

2813 created_items=created_items, 

2814 net_list_vim=net_list_vim, 

2815 external_network=external_network, 

2816 no_secured_ports=no_secured_ports, 

2817 ) 

2818 

2819 # Cloud config 

2820 config_drive, userdata = self._create_user_data(cloud_config) 

2821 

2822 # Get availability Zone 

2823 self.vm_av_zone = self._get_vm_availability_zone( 

2824 availability_zone_index, availability_zone_list 

2825 ) 

2826 

2827 storage_av_zone = ( 

2828 self.storage_availability_zone 

2829 if self.storage_availability_zone 

2830 else self.vm_av_zone 

2831 ) 

2832 

2833 if disk_list: 

2834 # Prepare disks 

2835 self._prepare_disk_for_vminstance( 

2836 name=name, 

2837 existing_vim_volumes=existing_vim_volumes, 

2838 created_items=created_items, 

2839 storage_av_zone=storage_av_zone, 

2840 block_device_mapping=block_device_mapping, 

2841 disk_list=disk_list, 

2842 ) 

2843 

2844 if affinity_group_list: 

2845 # Only first id on the list will be used. Openstack restriction 

2846 server_group_id = affinity_group_list[0]["affinity_group_id"] 

2847 scheduler_hints["group"] = server_group_id

2848 

2849 self.logger.debug( 

2850 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, " 

2851 "availability_zone={}, key_name={}, userdata={}, config_drive={}, " 

2852 "block_device_mapping={}, server_group={})".format( 

2853 name, 

2854 image_id, 

2855 flavor_id, 

2856 net_list_vim, 

2857 self.config.get("security_groups"), 

2858 self.vm_av_zone, 

2859 self.config.get("keypair"), 

2860 userdata, 

2861 config_drive, 

2862 block_device_mapping, 

2863 server_group_id, 

2864 ) 

2865 ) 

2866 # Create VM 

2867 server = self.nova.servers.create( 

2868 name=name, 

2869 image=image_id, 

2870 flavor=flavor_id, 

2871 nics=net_list_vim, 

2872 security_groups=self.config.get("security_groups"), 

2873 # TODO remove security_groups in future versions. Already at neutron port 

2874 availability_zone=self.vm_av_zone, 

2875 key_name=self.config.get("keypair"), 

2876 userdata=userdata, 

2877 config_drive=config_drive, 

2878 block_device_mapping=block_device_mapping, 

2879 scheduler_hints=scheduler_hints,

2880 ) 

2881 

2882 vm_start_time = time.time() 

2883 

2884 self._update_port_security_for_vminstance(no_secured_ports, server) 

2885 

2886 self._prepare_external_network_for_vminstance( 

2887 external_network=external_network, 

2888 server=server, 

2889 created_items=created_items, 

2890 vm_start_time=vm_start_time, 

2891 ) 

2892 

2893 return server.id, created_items 

2894 

2895 except Exception as e: 

2896 server_id = None 

2897 if server: 

2898 server_id = server.id 

2899 

2900 try: 

2901 created_items = self.remove_keep_tag_from_persistent_volumes( 

2902 created_items 

2903 ) 

2904 

2905 self.delete_vminstance(server_id, created_items) 

2906 

2907 except Exception as e2: 

2908 self.logger.error("new_vminstance rollback fail {}".format(e2)) 

2909 

2910 self._format_exception(e) 
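
# Illustrative minimal call (all identifiers hypothetical), showing the
# net_list shape new_vminstance expects:
# vm_id, created_items = vim.new_vminstance(
#     name="vnf1-vdu1",
#     description="test VDU",
#     start=True,
#     image_id="img-uuid",
#     flavor_id="flavor-uuid",
#     affinity_group_list=[],
#     net_list=[{"name": "mgmt", "net_id": "net-uuid", "use": "mgmt", "type": "virtual"}],
# )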

2911 

2912 @staticmethod 

2913 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict: 

2914 """Removes the keep flag from persistent volumes. So, those volumes could be removed. 

2915 

2916 Args: 

2917 created_items (dict): All created items belongs to VM 

2918 

2919 Returns: 

2920 updated_created_items (dict): Dict which does not include keep flag for volumes. 

2921 

2922 """ 

2923 return { 

2924 key.replace(":keep", ""): value for (key, value) in created_items.items() 

2925 } 
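
# Example (illustrative): {"volume:abc:keep": True, "port:def": True}
# becomes {"volume:abc": True, "port:def": True}, so the rollback in
# new_vminstance can delete even the volumes originally flagged to be kept.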

2926 

2927 def get_vminstance(self, vm_id): 

2928 """Returns the VM instance information from VIM""" 

2929 return self._find_nova_server(vm_id) 

2930 

2931 @catch_any_exception 

2932 def get_vminstance_console(self, vm_id, console_type="vnc"): 

2933 """ 

2934 Get a console for the virtual machine 

2935 Params: 

2936 vm_id: uuid of the VM 

2937 console_type, can be: 

2938 "novnc" (by default), "xvpvnc" for VNC types, 

2939 "rdp-html5" for RDP types, "spice-html5" for SPICE types 

2940 Returns dict with the console parameters: 

2941 protocol: ssh, ftp, http, https, ... 

2942 server: usually ip address 

2943 port: the http, ssh, ... port 

2944 suffix: extra text, e.g. the http path and query string 

2945 """ 

2946 self.logger.debug("Getting VM CONSOLE from VIM") 

2947 self._reload_connection() 

2948 server = self.nova.servers.find(id=vm_id) 

2949 

2950 if console_type is None or console_type == "novnc": 

2951 console_dict = server.get_vnc_console("novnc") 

2952 elif console_type == "xvpvnc": 

2953 console_dict = server.get_vnc_console(console_type) 

2954 elif console_type == "rdp-html5": 

2955 console_dict = server.get_rdp_console(console_type) 

2956 elif console_type == "spice-html5": 

2957 console_dict = server.get_spice_console(console_type) 

2958 else: 

2959 raise vimconn.VimConnException( 

2960 "console type '{}' not allowed".format(console_type), 

2961 http_code=vimconn.HTTP_Bad_Request, 

2962 ) 

2963 

2964 console_dict1 = console_dict.get("console") 

2965 

2966 if console_dict1: 

2967 console_url = console_dict1.get("url") 

2968 

2969 if console_url: 

2970 # parse console_url 

2971 protocol_index = console_url.find("//") 

2972 suffix_index = ( 

2973 console_url[protocol_index + 2 :].find("/") + protocol_index + 2 

2974 ) 

2975 port_index = ( 

2976 console_url[protocol_index + 2 : suffix_index].find(":") 

2977 + protocol_index 

2978 + 2 

2979 ) 

2980 

2981 if protocol_index < 0 or port_index < 0 or suffix_index < 0: 

2982 return ( 

2983 -vimconn.HTTP_Internal_Server_Error, 

2984 "Unexpected response from VIM", 

2985 ) 

2986 

2987 console_dict = { 

2988 "protocol": console_url[0:protocol_index], 

2989 "server": console_url[protocol_index + 2 : port_index], 

2990 "port": console_url[port_index:suffix_index], 

2991 "suffix": console_url[suffix_index + 1 :], 

2992 } 

2993 protocol_index += 2 

2994 

2995 return console_dict 

2996 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM") 
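
# Worked example (URL hypothetical) of the slicing above:
#   console_url = "http://vim.example:6080/vnc_auto.html?token=abc"
# yields
#   {"protocol": "http", "server": "vim.example",
#    "port": "6080", "suffix": "vnc_auto.html?token=abc"}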

2997 

2998 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None: 

2999 """Neutron delete ports by id. 

3000 Args: 

3001 k_id (str): Port id in the VIM 

3002 """ 

3003 try: 

3004 self.neutron.delete_port(k_id) 

3005 

3006 except (neExceptions.ConnectionFailed, ConnectionError) as e: 

3007 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e)) 

3008 # If there is connection error, raise. 

3009 self._format_exception(e) 

3010 except Exception as e: 

3011 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e)) 

3012 

3013 def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool: 

3014 """Cinder delete volume by id. 

3015 Args: 

3016 shared_volume_vim_id (str): ID of shared volume in VIM 

3017 """ 

3018 elapsed_time = 0 

3019 try: 

3020 while elapsed_time < server_timeout: 

3021 vol_status = self.cinder.volumes.get(shared_volume_vim_id).status 

3022 if vol_status == "available": 

3023 self.cinder.volumes.delete(shared_volume_vim_id) 

3024 return True 

3025 

3026 time.sleep(5) 

3027 elapsed_time += 5 

3028 

3029 if elapsed_time >= server_timeout: 

3030 raise vimconn.VimConnException( 

3031 "Timeout waiting for volume " 

3032 + shared_volume_vim_id 

3033 + " to be available", 

3034 http_code=vimconn.HTTP_Request_Timeout, 

3035 ) 

3036 

3037 except Exception as e: 

3038 self.logger.error( 

3039 "Error deleting volume: {}: {}".format(type(e).__name__, e) 

3040 ) 

3041 self._format_exception(e) 

3042 

3043 def _delete_volumes_by_id_wth_cinder( 

3044 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict 

3045 ) -> bool: 

3046 """Cinder delete volume by id. 

3047 Args: 

3048 k (str): Full item name in created_items 

3049 k_id (str): ID of the volume in VIM

3050 volumes_to_hold (list): Volumes not to delete 

3051 created_items (dict): All created items belongs to VM 

3052 """ 

3053 try: 

3054 if k_id in volumes_to_hold: 

3055 return False 

3056 

3057 if self.cinder.volumes.get(k_id).status != "available": 

3058 return True 

3059 

3060 else: 

3061 self.cinder.volumes.delete(k_id) 

3062 created_items[k] = None 

3063 

3064 except (cExceptions.ConnectionError, ConnectionError) as e: 

3065 self.logger.error( 

3066 "Error deleting volume: {}: {}".format(type(e).__name__, e) 

3067 ) 

3068 self._format_exception(e) 

3069 except Exception as e: 

3070 self.logger.error( 

3071 "Error deleting volume: {}: {}".format(type(e).__name__, e) 

3072 ) 

3073 

3074 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None: 

3075 """Neutron delete floating ip by id. 

3076 Args: 

3077 k (str): Full item name in created_items 

3078 k_id (str): ID of floating ip in VIM 

3079 created_items (dict): All created items belongs to VM 

3080 """ 

3081 try: 

3082 self.neutron.delete_floatingip(k_id) 

3083 created_items[k] = None 

3084 

3085 except (neExceptions.ConnectionFailed, ConnectionError) as e: 

3086 self.logger.error( 

3087 "Error deleting floating ip: {}: {}".format(type(e).__name__, e) 

3088 ) 

3089 self._format_exception(e) 

3090 except Exception as e: 

3091 self.logger.error( 

3092 "Error deleting floating ip: {}: {}".format(type(e).__name__, e) 

3093 ) 

3094 

3095 @staticmethod 

3096 def _get_item_name_id(k: str) -> Tuple[str, str]: 

3097 k_item, _, k_id = k.partition(":") 

3098 return k_item, k_id 
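
# Example (illustrative): _get_item_name_id("volume:0a1b2c3d") returns
# ("volume", "0a1b2c3d"). Keys carrying a ":keep" suffix never reach this
# point during deletion, because delete_vminstance first drops them via
# _extract_items_wth_keep_flag_from_created_items.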

3099 

3100 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None: 

3101 """Delete VM ports attached to the networks before deleting virtual machine. 

3102 Args: 

3103 created_items (dict): All created items belongs to VM 

3104 """ 

3105 

3106 for k, v in created_items.items(): 

3107 if not v: # skip already deleted 

3108 continue 

3109 

3110 try: 

3111 k_item, k_id = self._get_item_name_id(k) 

3112 if k_item == "port": 

3113 self._delete_ports_by_id_wth_neutron(k_id) 

3114 

3115 except (neExceptions.ConnectionFailed, ConnectionError) as e: 

3116 self.logger.error( 

3117 "Error deleting port: {}: {}".format(type(e).__name__, e) 

3118 ) 

3119 self._format_exception(e) 

3120 except Exception as e: 

3121 self.logger.error( 

3122 "Error deleting port: {}: {}".format(type(e).__name__, e) 

3123 ) 

3124 

3125 def _delete_created_items( 

3126 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool 

3127 ) -> bool: 

3128 """Delete Volumes and floating ip if they exist in created_items.""" 

3129 for k, v in created_items.items(): 

3130 if not v: # skip already deleted 

3131 continue 

3132 

3133 try: 

3134 k_item, k_id = self._get_item_name_id(k) 

3135 if k_item == "volume": 

3136 unavailable_vol = self._delete_volumes_by_id_wth_cinder( 

3137 k, k_id, volumes_to_hold, created_items 

3138 ) 

3139 

3140 if unavailable_vol: 

3141 keep_waiting = True 

3142 

3143 elif k_item == "floating_ip": 

3144 self._delete_floating_ip_by_id(k, k_id, created_items) 

3145 

3146 except ( 

3147 cExceptions.ConnectionError, 

3148 neExceptions.ConnectionFailed, 

3149 ConnectionError, 

3150 AttributeError, 

3151 TypeError, 

3152 ) as e: 

3153 self.logger.error("Error deleting {}: {}".format(k, e)) 

3154 self._format_exception(e) 

3155 

3156 except Exception as e: 

3157 self.logger.error("Error deleting {}: {}".format(k, e)) 

3158 

3159 return keep_waiting 

3160 

3161 @staticmethod 

3162 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict: 

3163 """Remove the volumes which has key flag from created_items 

3164 

3165 Args: 

3166 created_items (dict): All created items belongs to VM 

3167 

3168 Returns: 

3169 created_items (dict): created_items without the keep-flagged persistent volumes

3170 """ 

3171 return { 

3172 key: value 

3173 for (key, value) in created_items.items() 

3174 if len(key.split(":")) == 2 

3175 } 

3176 

3177 @catch_any_exception 

3178 def delete_vminstance( 

3179 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None 

3180 ) -> None: 

3181 """Removes a VM instance from VIM. Returns the old identifier. 

3182 Args: 

3183 vm_id (str): Identifier of VM instance 

3184 created_items (dict): All created items belongs to VM 

3185 volumes_to_hold (list): Volumes_to_hold 

3186 """ 

3187 if created_items is None: 

3188 created_items = {} 

3189 if volumes_to_hold is None: 

3190 volumes_to_hold = [] 

3191 

3192 try: 

3193 created_items = self._extract_items_wth_keep_flag_from_created_items( 

3194 created_items 

3195 ) 

3196 

3197 self._reload_connection() 

3198 

3199 # Delete VM ports attached to the networks before the virtual machine 

3200 if created_items: 

3201 self._delete_vm_ports_attached_to_network(created_items) 

3202 

3203 if vm_id: 

3204 self.nova.servers.delete(vm_id) 

3205 

3206 # Although detached, volumes must be in "available" status before deleting.

3207 # We ensure that in this loop

3208 keep_waiting = True 

3209 elapsed_time = 0 

3210 

3211 while keep_waiting and elapsed_time < volume_timeout: 

3212 keep_waiting = False 

3213 

3214 # Delete volumes and floating IP. 

3215 keep_waiting = self._delete_created_items( 

3216 created_items, volumes_to_hold, keep_waiting 

3217 ) 

3218 

3219 if keep_waiting: 

3220 time.sleep(1) 

3221 elapsed_time += 1 

3222 except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e: 

3223 # If VM does not exist, it does not raise 

3224 self.logger.warning(f"Error deleting VM: {vm_id} is not found, {str(e)}") 

3225 

3226 def refresh_vms_status(self, vm_list): 

3227 """Get the status of the virtual machines and their interfaces/ports 

3228 Params: the list of VM identifiers 

3229 Returns a dictionary with: 

3230 vm_id: #VIM id of this Virtual Machine 

3231 status: #Mandatory. Text with one of: 

3232 # DELETED (not found at vim) 

3233 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 

3234 # OTHER (Vim reported other status not understood) 

3235 # ERROR (VIM indicates an ERROR status) 

3236 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), 

3237 # CREATING (in building process), ERROR

3238 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)

3239 # 

3240 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR 

3241 vim_info: #Text with plain information obtained from vim (yaml.safe_dump) 

3242 interfaces: 

3243 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump) 

3244 mac_address: #Text format XX:XX:XX:XX:XX:XX 

3245 vim_net_id: #network id where this interface is connected 

3246 vim_interface_id: #interface/port VIM id 

3247 ip_address: #null, or text with IPv4, IPv6 address 

3248 compute_node: #identification of compute node where PF,VF interface is allocated 

3249 pci: #PCI address of the NIC that hosts the PF,VF 

3250 vlan: #physical VLAN used for VF 

3251 """ 

3252 vm_dict = {} 

3253 self.logger.debug( 

3254 "refresh_vms status: Getting tenant VM instance information from VIM" 

3255 ) 

3256 for vm_id in vm_list: 

3257 vm = {} 

3258 

3259 try: 

3260 vm_vim = self.get_vminstance(vm_id) 

3261 

3262 if vm_vim["status"] in vmStatus2manoFormat: 

3263 vm["status"] = vmStatus2manoFormat[vm_vim["status"]] 

3264 else: 

3265 vm["status"] = "OTHER" 

3266 vm["error_msg"] = "VIM status reported " + vm_vim["status"] 

3267 

3268 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None) 

3269 vm_vim.pop("user_data", None) 

3270 vm["vim_info"] = self.serialize(vm_vim) 

3271 

3272 vm["interfaces"] = [] 

3273 if vm_vim.get("fault"): 

3274 vm["error_msg"] = str(vm_vim["fault"]) 

3275 

3276 # get interfaces 

3277 try: 

3278 self._reload_connection() 

3279 port_dict = self.neutron.list_ports(device_id=vm_id) 

3280 

3281 for port in port_dict["ports"]: 

3282 interface = {} 

3283 interface["vim_info"] = self.serialize(port) 

3284 interface["mac_address"] = port.get("mac_address") 

3285 interface["vim_net_id"] = port["network_id"] 

3286 interface["vim_interface_id"] = port["id"] 

3287 # check if OS-EXT-SRV-ATTR:host is there, 

3288 # in case of non-admin credentials, it will be missing 

3289 

3290 if vm_vim.get("OS-EXT-SRV-ATTR:host"): 

3291 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"] 

3292 

3293 interface["pci"] = None 

3294 

3295 # check if binding:profile is there, 

3296 # in case of non-admin credentials, it will be missing 

3297 if port.get("binding:profile"): 

3298 if port["binding:profile"].get("pci_slot"): 

3299 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting 

3300 # the slot to 0x00 

3301 # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so

3302 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic 

3303 pci = port["binding:profile"]["pci_slot"] 

3304 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2) 

3305 interface["pci"] = pci 

3306 

3307 interface["vlan"] = None 

3308 

3309 if port.get("binding:vif_details"): 

3310 interface["vlan"] = port["binding:vif_details"].get("vlan") 

3311 

3312 # Get vlan from network in case not present in port for those old openstacks and cases where 

3313 # it is needed vlan at PT 

3314 if not interface["vlan"]: 

3315 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id 

3316 network = self.neutron.show_network(port["network_id"]) 

3317 

3318 if ( 

3319 network["network"].get("provider:network_type") 

3320 == "vlan" 

3321 ): 

3322 # and port.get("binding:vnic_type") in ("direct", "direct-physical"): 

3323 interface["vlan"] = network["network"].get( 

3324 "provider:segmentation_id" 

3325 ) 

3326 

3327 ips = [] 

3328 # look for floating ip address 

3329 try: 

3330 floating_ip_dict = self.neutron.list_floatingips( 

3331 port_id=port["id"] 

3332 ) 

3333 

3334 if floating_ip_dict.get("floatingips"): 

3335 ips.append( 

3336 floating_ip_dict["floatingips"][0].get( 

3337 "floating_ip_address" 

3338 ) 

3339 ) 

3340 except Exception: 

3341 pass 

3342 

3343 for subnet in port["fixed_ips"]: 

3344 ips.append(subnet["ip_address"]) 

3345 

3346 interface["ip_address"] = ";".join(ips) 

3347 vm["interfaces"].append(interface) 

3348 except Exception as e: 

3349 self.logger.error( 

3350 "Error getting vm interface information {}: {}".format( 

3351 type(e).__name__, e 

3352 ), 

3353 exc_info=True, 

3354 ) 

3355 except vimconn.VimConnNotFoundException as e: 

3356 self.logger.error("Exception getting vm status: %s", str(e)) 

3357 vm["status"] = "DELETED" 

3358 vm["error_msg"] = str(e) 

3359 except vimconn.VimConnException as e: 

3360 self.logger.error("Exception getting vm status: %s", str(e)) 

3361 vm["status"] = "VIM_ERROR" 

3362 vm["error_msg"] = str(e) 

3363 

3364 vm_dict[vm_id] = vm 

3365 

3366 return vm_dict 

3367 
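A minimal usage sketch of the status contract above, assuming "conn" is an authenticated vimconnector instance and that this refresh method is exposed as refresh_vms_status (as its log message suggests); the UUID is hypothetical:

vm_ids = ["11111111-2222-3333-4444-555555555555"]  # hypothetical VIM id
statuses = conn.refresh_vms_status(vm_ids)
for vm_id, data in statuses.items():
    print(vm_id, data["status"], data.get("error_msg"))
    for iface in data.get("interfaces", []):
        print("  ", iface["mac_address"], iface["ip_address"])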

3368 @catch_any_exception 

3369 def action_vminstance(self, vm_id, action_dict, created_items={}): 

3370 """Send and action over a VM instance from VIM 

3371 Returns None or the console dict if the action was successfully sent to the VIM 

3372 """ 

3373 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict)) 

3374 self._reload_connection() 

3375 server = self.nova.servers.find(id=vm_id) 

3376 if "start" in action_dict: 

3377 if action_dict["start"] == "rebuild": 

3378 server.rebuild() 

3379 vm_state = self.__wait_for_vm(vm_id, "ACTIVE") 

3380 if not vm_state: 

3381 raise nvExceptions.BadRequest( 

3382 409, 

3383 message="Cannot 'REBUILD' vm_state is in ERROR", 

3384 ) 

3385 else: 

3386 if server.status == "PAUSED": 

3387 server.unpause() 

3388 elif server.status == "SUSPENDED": 

3389 server.resume() 

3390 elif server.status == "SHUTOFF": 

3391 server.start() 

3392 vm_state = self.__wait_for_vm(vm_id, "ACTIVE") 

3393 if not vm_state: 

3394 raise nvExceptions.BadRequest( 

3395 409, 

3396 message="Cannot 'START' vm_state is in ERROR", 

3397 ) 

3398 else: 

3399 self.logger.debug( 

3400 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state" 

3401 ) 

3402 raise vimconn.VimConnException( 

3403 "Cannot 'start' instance while it is in active state", 

3404 http_code=vimconn.HTTP_Bad_Request, 

3405 ) 

3406 elif "pause" in action_dict: 

3407 server.pause() 

3408 elif "resume" in action_dict: 

3409 server.resume() 

3410 elif "shutoff" in action_dict or "shutdown" in action_dict: 

3411 self.logger.debug("server status %s", server.status) 

3412 if server.status == "ACTIVE": 

3413 server.stop() 

3414 vm_state = self.__wait_for_vm(vm_id, "SHUTOFF") 

3415 if not vm_state: 

3416 raise nvExceptions.BadRequest( 

3417 409, 

3418 message="Cannot 'STOP' vm_state is in ERROR", 

3419 ) 

3420 else: 

3421 self.logger.debug("ERROR: VM is not in Active state") 

3422 raise vimconn.VimConnException( 

3423 "VM is not in active state, stop operation is not allowed", 

3424 http_code=vimconn.HTTP_Bad_Request, 

3425 ) 

3426 elif "forceOff" in action_dict: 

3427 server.stop() # TODO 

3428 elif "terminate" in action_dict: 

3429 server.delete() 

3430 elif "createImage" in action_dict: 

3431 server.create_image(action_dict["createImage"].get("name"))  # create_image() requires an image name (see schema below) 

3432 # "path":path_schema, 

3433 # "description":description_schema, 

3434 # "name":name_schema, 

3435 # "metadata":metadata_schema, 

3436 # "imageRef": id_schema, 

3437 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] }, 

3438 elif "rebuild" in action_dict: 

3439 server.rebuild(server.image["id"]) 

3440 elif "reboot" in action_dict: 

3441 server.reboot() # reboot_type="SOFT" 

3442 elif "console" in action_dict: 

3443 console_type = action_dict["console"] 

3444 

3445 if console_type is None or console_type == "novnc": 

3446 console_dict = server.get_vnc_console("novnc") 

3447 elif console_type == "xvpvnc": 

3448 console_dict = server.get_vnc_console(console_type) 

3449 elif console_type == "rdp-html5": 

3450 console_dict = server.get_rdp_console(console_type) 

3451 elif console_type == "spice-html5": 

3452 console_dict = server.get_spice_console(console_type) 

3453 else: 

3454 raise vimconn.VimConnException( 

3455 "console type '{}' not allowed".format(console_type), 

3456 http_code=vimconn.HTTP_Bad_Request, 

3457 ) 

3458 

3459 try: 

3460 console_url = console_dict["console"]["url"] 

3461 # parse console_url 

3462 protocol_index = console_url.find("//") 

3463 suffix_index = ( 

3464 console_url[protocol_index + 2 :].find("/") + protocol_index + 2 

3465 ) 

3466 port_index = ( 

3467 console_url[protocol_index + 2 : suffix_index].find(":") 

3468 + protocol_index 

3469 + 2 

3470 ) 

3471 

3472 if protocol_index < 0 or port_index < 0 or suffix_index < 0: 

3473 raise vimconn.VimConnException( 

3474 "Unexpected response from VIM " + str(console_dict) 

3475 ) 

3476 

3477 console_dict2 = { 

3478 "protocol": console_url[0:protocol_index], 

3479 "server": console_url[protocol_index + 2 : port_index], 

3480 "port": int(console_url[port_index + 1 : suffix_index]), 

3481 "suffix": console_url[suffix_index + 1 :], 

3482 } 

3483 

3484 return console_dict2 

3485 except Exception: 

3486 raise vimconn.VimConnException( 

3487 "Unexpected response from VIM " + str(console_dict) 

3488 ) 

3489 

3490 return None 

3491 
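Requesting a console is the one action above that returns data. A sketch of driving it and reassembling the URL that was split into protocol/server/port/suffix (conn and vm_id assumed to exist):

console = conn.action_vminstance(vm_id, {"console": "novnc"})
if console:
    # rebuild the URL from the parsed pieces returned by the method
    url = "{protocol}://{server}:{port}/{suffix}".format(**console)
    print(url)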

3492 # ###### VIO Specific Changes ######### 

3493 def _generate_vlanID(self): 

3494 """ 

3495 Method to get unused vlanID 

3496 Args: 

3497 None 

3498 Returns: 

3499 vlanID 

3500 """ 

3501 # Get used VLAN IDs 

3502 usedVlanIDs = [] 

3503 networks = self.get_network_list() 

3504 

3505 for net in networks: 

3506 if net.get("provider:segmentation_id"): 

3507 usedVlanIDs.append(net.get("provider:segmentation_id")) 

3508 

3509 used_vlanIDs = set(usedVlanIDs) 

3510 

3511 # find unused VLAN ID 

3512 for vlanID_range in self.config.get("dataplane_net_vlan_range"): 

3513 try: 

3514 start_vlanid, end_vlanid = map( 

3515 int, vlanID_range.replace(" ", "").split("-") 

3516 ) 

3517 

3518 for vlanID in range(start_vlanid, end_vlanid + 1): 

3519 if vlanID not in used_vlanIDs: 

3520 return vlanID 

3521 except Exception as exp: 

3522 raise vimconn.VimConnException( 

3523 "Exception {} occurred while generating VLAN ID.".format(exp) 

3524 ) 

3525 else: 

3526 raise vimconn.VimConnConflictException( 

3527 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format( 

3528 self.config.get("dataplane_net_vlan_range") 

3529 ) 

3530 ) 

3531 
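The allocation above is a plain range scan with a for/else fallback. A self-contained sketch of the same idea, detached from the connector:

def first_free_vlan(vlan_ranges, used_ids):
    """Return the first VLAN ID in vlan_ranges not present in used_ids."""
    for vlan_range in vlan_ranges:
        start, end = map(int, vlan_range.replace(" ", "").split("-"))
        for vlan_id in range(start, end + 1):
            if vlan_id not in used_ids:
                return vlan_id
    raise ValueError("all VLAN IDs in {} are in use".format(vlan_ranges))

assert first_free_vlan(["3000-3002"], {3000, 3001}) == 3002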

3532 def _generate_multisegment_vlanID(self): 

3533 """ 

3534 Method to get unused vlanID 

3535 Args: 

3536 None 

3537 Returns: 

3538 vlanID 

3539 """ 

3540 # Get used VLAN IDs 

3541 usedVlanIDs = [] 

3542 networks = self.get_network_list() 

3543 for net in networks: 

3544 if net.get("provider:network_type") == "vlan" and net.get( 

3545 "provider:segmentation_id" 

3546 ): 

3547 usedVlanIDs.append(net.get("provider:segmentation_id")) 

3548 elif net.get("segments"): 

3549 for segment in net.get("segments"): 

3550 if segment.get("provider:network_type") == "vlan" and segment.get( 

3551 "provider:segmentation_id" 

3552 ): 

3553 usedVlanIDs.append(segment.get("provider:segmentation_id")) 

3554 

3555 used_vlanIDs = set(usedVlanIDs) 

3556 

3557 # find unused VLAN ID 

3558 for vlanID_range in self.config.get("multisegment_vlan_range"): 

3559 try: 

3560 start_vlanid, end_vlanid = map( 

3561 int, vlanID_range.replace(" ", "").split("-") 

3562 ) 

3563 

3564 for vlanID in range(start_vlanid, end_vlanid + 1): 

3565 if vlanID not in used_vlanIDs: 

3566 return vlanID 

3567 except Exception as exp: 

3568 raise vimconn.VimConnException( 

3569 "Exception {} occurred while generating VLAN ID.".format(exp) 

3570 ) 

3571 else: 

3572 raise vimconn.VimConnConflictException( 

3573 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format( 

3574 self.config.get("multisegment_vlan_range") 

3575 ) 

3576 ) 

3577 

3578 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range): 

3579 """ 

3580 Method to validate user given vlanID ranges 

3581 Args: None 

3582 Returns: None 

3583 """ 

3584 for vlanID_range in input_vlan_range: 

3585 vlan_range = vlanID_range.replace(" ", "") 

3586 # validate format 

3587 vlanID_pattern = r"\d+-\d+$"  # require at least one digit on each side of the dash 

3588 match_obj = re.match(vlanID_pattern, vlan_range) 

3589 if not match_obj: 

3590 raise vimconn.VimConnConflictException( 

3591 "Invalid VLAN range for {}: {}.You must provide " 

3592 "'{}' in format [start_ID - end_ID].".format( 

3593 text_vlan_range, vlanID_range, text_vlan_range 

3594 ) 

3595 ) 

3596 

3597 start_vlanid, end_vlanid = map(int, vlan_range.split("-")) 

3598 if start_vlanid <= 0: 

3599 raise vimconn.VimConnConflictException( 

3600 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN " 

3601 "networks valid IDs are 1 to 4094 ".format( 

3602 text_vlan_range, vlanID_range 

3603 ) 

3604 ) 

3605 

3606 if end_vlanid > 4094: 

3607 raise vimconn.VimConnConflictException( 

3608 "Invalid VLAN range for {}: {}. End VLAN ID can not be " 

3609 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format( 

3610 text_vlan_range, vlanID_range 

3611 ) 

3612 ) 

3613 

3614 if start_vlanid > end_vlanid: 

3615 raise vimconn.VimConnConflictException( 

3616 "Invalid VLAN range for {}: {}. You must provide '{}'" 

3617 " in format start_ID - end_ID and start_ID < end_ID ".format( 

3618 text_vlan_range, vlanID_range, text_vlan_range 

3619 ) 

3620 ) 

3621 
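By way of example, ranges that pass the checks above, and ones that would each raise VimConnConflictException (values invented):

good = ["3000-3100", "3200 - 3300"]  # spaces are stripped before matching
bad = [
    "0-100",      # start ID is zero
    "4000-5000",  # end ID above 4094
    "300-200",    # start greater than end
]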

3622 def get_hosts_info(self): 

3623 """Get the information of deployed hosts 

3624 Returns the hosts content""" 

3625 if self.debug: 

3626 print("osconnector: Getting Host info from VIM") 

3627 

3628 try: 

3629 h_list = [] 

3630 self._reload_connection() 

3631 hypervisors = self.nova.hypervisors.list() 

3632 

3633 for hype in hypervisors: 

3634 h_list.append(hype.to_dict()) 

3635 

3636 return 1, {"hosts": h_list} 

3637 except nvExceptions.NotFound as e: 

3638 error_value = -vimconn.HTTP_Not_Found 

3639 error_text = str(e) if len(e.args) == 0 else str(e.args[0]) 

3640 except (ksExceptions.ClientException, nvExceptions.ClientException) as e: 

3641 error_value = -vimconn.HTTP_Bad_Request 

3642 error_text = ( 

3643 type(e).__name__ 

3644 + ": " 

3645 + (str(e) if len(e.args) == 0 else str(e.args[0])) 

3646 ) 

3647 

3648 # TODO insert exception vimconn.HTTP_Unauthorized 

3649 # if reaching here is because an exception 

3650 self.logger.debug("get_hosts_info " + error_text) 

3651 

3652 return error_value, error_text 

3653 

3654 def get_hosts(self, vim_tenant): 

3655 """Get the hosts and deployed instances 

3656 Returns the hosts content""" 

3657 r, hype_dict = self.get_hosts_info() 

3658 

3659 if r < 0: 

3660 return r, hype_dict 

3661 

3662 hypervisors = hype_dict["hosts"] 

3663 

3664 try: 

3665 servers = self.nova.servers.list() 

3666 for hype in hypervisors: 

3667 for server in servers: 

3668 if ( 

3669 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"] 

3670 == hype["hypervisor_hostname"] 

3671 ): 

3672 if "vm" in hype: 

3673 hype["vm"].append(server.id) 

3674 else: 

3675 hype["vm"] = [server.id] 

3676 

3677 return 1, hype_dict 

3678 except nvExceptions.NotFound as e: 

3679 error_value = -vimconn.HTTP_Not_Found 

3680 error_text = str(e) if len(e.args) == 0 else str(e.args[0]) 

3681 except (ksExceptions.ClientException, nvExceptions.ClientException) as e: 

3682 error_value = -vimconn.HTTP_Bad_Request 

3683 error_text = ( 

3684 type(e).__name__ 

3685 + ": " 

3686 + (str(e) if len(e.args) == 0 else str(e.args[0])) 

3687 ) 

3688 

3689 # TODO insert exception vimconn.HTTP_Unauthorized 

3690 # if reaching here is because an exception 

3691 self.logger.debug("get_hosts " + error_text) 

3692 

3693 return error_value, error_text 

3694 

3695 def new_classification(self, name, ctype, definition): 

3696 self.logger.debug( 

3697 "Adding a new (Traffic) Classification to VIM, named %s", name 

3698 ) 

3699 

3700 try: 

3701 new_class = None 

3702 self._reload_connection() 

3703 

3704 if ctype not in supportedClassificationTypes: 

3705 raise vimconn.VimConnNotSupportedException( 

3706 "OpenStack VIM connector does not support provided " 

3707 "Classification Type {}, supported ones are: {}".format( 

3708 ctype, supportedClassificationTypes 

3709 ) 

3710 ) 

3711 

3712 if not self._validate_classification(ctype, definition): 

3713 raise vimconn.VimConnException( 

3714 "Incorrect Classification definition for the type specified." 

3715 ) 

3716 

3717 classification_dict = definition 

3718 classification_dict["name"] = name 

3719 

3720 self.logger.info( 

3721 "Adding a new (Traffic) Classification to VIM, named {} and {}.".format( 

3722 name, classification_dict 

3723 ) 

3724 ) 

3725 new_class = self.neutron.create_sfc_flow_classifier( 

3726 {"flow_classifier": classification_dict} 

3727 ) 

3728 

3729 return new_class["flow_classifier"]["id"] 

3730 except ( 

3731 neExceptions.ConnectionFailed, 

3732 ksExceptions.ClientException, 

3733 neExceptions.NeutronException, 

3734 ConnectionError, 

3735 ) as e: 

3736 self.logger.error("Creation of Classification failed.") 

3737 self._format_exception(e) 

3738 
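A sketch of a legacy_flow_classifier definition as consumed above; the field names follow the networking-sfc flow-classifier API, the port UUID is hypothetical, and the dict is assumed to pass the connector's own validation:

definition = {
    "ethertype": "IPv4",
    "protocol": "tcp",
    "source_ip_prefix": "10.0.0.0/24",
    "destination_port_range_min": 80,
    "destination_port_range_max": 80,
    "logical_source_port": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",  # hypothetical Neutron port id
}
class_id = conn.new_classification("http-to-web", "legacy_flow_classifier", definition)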

3739 def get_classification(self, class_id): 

3740 self.logger.debug(" Getting Classification %s from VIM", class_id) 

3741 filter_dict = {"id": class_id} 

3742 class_list = self.get_classification_list(filter_dict) 

3743 

3744 if len(class_list) == 0: 

3745 raise vimconn.VimConnNotFoundException( 

3746 "Classification '{}' not found".format(class_id) 

3747 ) 

3748 elif len(class_list) > 1: 

3749 raise vimconn.VimConnConflictException( 

3750 "Found more than one Classification with this criteria" 

3751 ) 

3752 

3753 classification = class_list[0] 

3754 

3755 return classification 

3756 

3757 def get_classification_list(self, filter_dict={}): 

3758 self.logger.debug( 

3759 "Getting Classifications from VIM filter: '%s'", str(filter_dict) 

3760 ) 

3761 

3762 try: 

3763 filter_dict_os = filter_dict.copy() 

3764 self._reload_connection() 

3765 

3766 if self.api_version3 and "tenant_id" in filter_dict_os: 

3767 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id") 

3768 

3769 classification_dict = self.neutron.list_sfc_flow_classifiers( 

3770 **filter_dict_os 

3771 ) 

3772 classification_list = classification_dict["flow_classifiers"] 

3773 self.__classification_os2mano(classification_list) 

3774 

3775 return classification_list 

3776 except ( 

3777 neExceptions.ConnectionFailed, 

3778 ksExceptions.ClientException, 

3779 neExceptions.NeutronException, 

3780 ConnectionError, 

3781 ) as e: 

3782 self._format_exception(e) 

3783 

3784 def delete_classification(self, class_id): 

3785 self.logger.debug("Deleting Classification '%s' from VIM", class_id) 

3786 

3787 try: 

3788 self._reload_connection() 

3789 self.neutron.delete_sfc_flow_classifier(class_id) 

3790 

3791 return class_id 

3792 except ( 

3793 neExceptions.ConnectionFailed, 

3794 neExceptions.NeutronException, 

3795 ksExceptions.ClientException, 


3797 ConnectionError, 

3798 ) as e: 

3799 self._format_exception(e) 

3800 

3801 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True): 

3802 self.logger.debug( 

3803 "Adding a new Service Function Instance to VIM, named '%s'", name 

3804 ) 

3805 

3806 try: 

3807 new_sfi = None 

3808 self._reload_connection() 

3809 correlation = None 

3810 

3811 if sfc_encap: 

3812 correlation = "nsh" 

3813 

3814 if len(ingress_ports) != 1: 

3815 raise vimconn.VimConnNotSupportedException( 

3816 "OpenStack VIM connector can only have 1 ingress port per SFI" 

3817 ) 

3818 

3819 if len(egress_ports) != 1: 

3820 raise vimconn.VimConnNotSupportedException( 

3821 "OpenStack VIM connector can only have 1 egress port per SFI" 

3822 ) 

3823 

3824 sfi_dict = { 

3825 "name": name, 

3826 "ingress": ingress_ports[0], 

3827 "egress": egress_ports[0], 

3828 "service_function_parameters": {"correlation": correlation}, 

3829 } 

3830 self.logger.info("Adding a new SFI to VIM, {}.".format(sfi_dict)) 

3831 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict}) 

3832 

3833 return new_sfi["port_pair"]["id"] 

3834 except ( 

3835 neExceptions.ConnectionFailed, 

3836 ksExceptions.ClientException, 

3837 neExceptions.NeutronException, 

3838 ConnectionError, 

3839 ) as e: 

3840 if new_sfi: 

3841 try: 

3842 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"]) 

3843 except Exception: 

3844 self.logger.error( 

3845 "Creation of Service Function Instance failed, with " 

3846 "subsequent deletion failure as well." 

3847 ) 

3848 

3849 self._format_exception(e) 

3850 

3851 def get_sfi(self, sfi_id): 

3852 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id) 

3853 filter_dict = {"id": sfi_id} 

3854 sfi_list = self.get_sfi_list(filter_dict) 

3855 

3856 if len(sfi_list) == 0: 

3857 raise vimconn.VimConnNotFoundException( 

3858 "Service Function Instance '{}' not found".format(sfi_id) 

3859 ) 

3860 elif len(sfi_list) > 1: 

3861 raise vimconn.VimConnConflictException( 

3862 "Found more than one Service Function Instance with this criteria" 

3863 ) 

3864 

3865 sfi = sfi_list[0] 

3866 

3867 return sfi 

3868 

3869 def get_sfi_list(self, filter_dict={}): 

3870 self.logger.debug( 

3871 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict) 

3872 ) 

3873 

3874 try: 

3875 self._reload_connection() 

3876 filter_dict_os = filter_dict.copy() 

3877 

3878 if self.api_version3 and "tenant_id" in filter_dict_os: 

3879 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id") 

3880 

3881 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os) 

3882 sfi_list = sfi_dict["port_pairs"] 

3883 self.__sfi_os2mano(sfi_list) 

3884 

3885 return sfi_list 

3886 except ( 

3887 neExceptions.ConnectionFailed, 

3888 ksExceptions.ClientException, 

3889 neExceptions.NeutronException, 

3890 ConnectionError, 

3891 ) as e: 

3892 self._format_exception(e) 

3893 

3894 def delete_sfi(self, sfi_id): 

3895 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id) 

3896 

3897 try: 

3898 self._reload_connection() 

3899 self.neutron.delete_sfc_port_pair(sfi_id) 

3900 

3901 return sfi_id 

3902 except ( 

3903 neExceptions.ConnectionFailed, 

3904 neExceptions.NeutronException, 

3905 ksExceptions.ClientException, 


3907 ConnectionError, 

3908 ) as e: 

3909 self._format_exception(e) 

3910 

3911 def new_sf(self, name, sfis, sfc_encap=True): 

3912 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name) 

3913 

3914 new_sf = None 

3915 

3916 try: 

3917 self._reload_connection() 

3918 

3919 for instance in sfis: 

3920 sfi = self.get_sfi(instance) 

3921 

3922 if sfi.get("sfc_encap") != sfc_encap: 

3923 raise vimconn.VimConnNotSupportedException( 

3924 "OpenStack VIM connector requires all SFIs of the " 

3925 "same SF to share the same SFC Encapsulation" 

3926 ) 

3927 

3928 sf_dict = {"name": name, "port_pairs": sfis} 

3929 

3930 self.logger.info("Adding a new SF to VIM, {}.".format(sf_dict)) 

3931 new_sf = self.neutron.create_sfc_port_pair_group( 

3932 {"port_pair_group": sf_dict} 

3933 ) 

3934 

3935 return new_sf["port_pair_group"]["id"] 

3936 except ( 

3937 neExceptions.ConnectionFailed, 

3938 ksExceptions.ClientException, 

3939 neExceptions.NeutronException, 

3940 ConnectionError, 

3941 ) as e: 

3942 if new_sf: 

3943 try: 

3944 new_sf_id = new_sf.get("port_pair_group").get("id") 

3945 self.neutron.delete_sfc_port_pair_group(new_sf_id) 

3946 except Exception: 

3947 self.logger.error( 

3948 "Creation of Service Function failed, with " 

3949 "subsequent deletion failure as well." 

3950 ) 

3951 

3952 self._format_exception(e) 

3953 

3954 def get_sf(self, sf_id): 

3955 self.logger.debug("Getting Service Function %s from VIM", sf_id) 

3956 filter_dict = {"id": sf_id} 

3957 sf_list = self.get_sf_list(filter_dict) 

3958 

3959 if len(sf_list) == 0: 

3960 raise vimconn.VimConnNotFoundException( 

3961 "Service Function '{}' not found".format(sf_id) 

3962 ) 

3963 elif len(sf_list) > 1: 

3964 raise vimconn.VimConnConflictException( 

3965 "Found more than one Service Function with this criteria" 

3966 ) 

3967 

3968 sf = sf_list[0] 

3969 

3970 return sf 

3971 

3972 def get_sf_list(self, filter_dict={}): 

3973 self.logger.debug( 

3974 "Getting Service Function from VIM filter: '%s'", str(filter_dict) 

3975 ) 

3976 

3977 try: 

3978 self._reload_connection() 

3979 filter_dict_os = filter_dict.copy() 

3980 

3981 if self.api_version3 and "tenant_id" in filter_dict_os: 

3982 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id") 

3983 

3984 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os) 

3985 sf_list = sf_dict["port_pair_groups"] 

3986 self.__sf_os2mano(sf_list) 

3987 

3988 return sf_list 

3989 except ( 

3990 neExceptions.ConnectionFailed, 

3991 ksExceptions.ClientException, 

3992 neExceptions.NeutronException, 

3993 ConnectionError, 

3994 ) as e: 

3995 self._format_exception(e) 

3996 

3997 def delete_sf(self, sf_id): 

3998 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id) 

3999 

4000 try: 

4001 self._reload_connection() 

4002 self.neutron.delete_sfc_port_pair_group(sf_id) 

4003 

4004 return sf_id 

4005 except ( 

4006 neExceptions.ConnectionFailed, 

4007 neExceptions.NeutronException, 

4008 ksExceptions.ClientException, 


4010 ConnectionError, 

4011 ) as e: 

4012 self._format_exception(e) 

4013 

4014 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None): 

4015 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name) 

4016 

4017 new_sfp = None 

4018 

4019 try: 

4020 self._reload_connection() 

4021 # In networking-sfc the MPLS encapsulation is legacy 

4022 # should be used when no full SFC Encapsulation is intended 

4023 correlation = "mpls" 

4024 

4025 if sfc_encap: 

4026 correlation = "nsh" 

4027 

4028 sfp_dict = { 

4029 "name": name, 

4030 "flow_classifiers": classifications, 

4031 "port_pair_groups": sfs, 

4032 "chain_parameters": {"correlation": correlation}, 

4033 } 

4034 

4035 if spi: 

4036 sfp_dict["chain_id"] = spi 

4037 

4038 self.logger.info("Adding a new SFP to VIM, {}.".format(sfp_dict)) 

4039 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict}) 

4040 

4041 return new_sfp["port_chain"]["id"] 

4042 except ( 

4043 neExceptions.ConnectionFailed, 

4044 ksExceptions.ClientException, 

4045 neExceptions.NeutronException, 

4046 ConnectionError, 

4047 ) as e: 

4048 if new_sfp: 

4049 try: 

4050 new_sfp_id = new_sfp.get("port_chain").get("id") 

4051 self.neutron.delete_sfc_port_chain(new_sfp_id) 

4052 except Exception: 

4053 self.logger.error( 

4054 "Creation of Service Function Path failed, with " 

4055 "subsequent deletion failure as well." 

4056 ) 

4057 

4058 self._format_exception(e) 

4059 
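Putting the create methods together, a hypothetical end-to-end chain build (port UUIDs invented; class_id comes from the classification sketch earlier):

sfi_id = conn.new_sfi("vfw-1", ["INGRESS-PORT-UUID"], ["EGRESS-PORT-UUID"], sfc_encap=True)
sf_id = conn.new_sf("vfw", [sfi_id], sfc_encap=True)
sfp_id = conn.new_sfp("web-chain", [class_id], [sf_id], sfc_encap=True, spi=100)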

4060 def get_sfp(self, sfp_id): 

4061 self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id) 

4062 

4063 filter_dict = {"id": sfp_id} 

4064 sfp_list = self.get_sfp_list(filter_dict) 

4065 

4066 if len(sfp_list) == 0: 

4067 raise vimconn.VimConnNotFoundException( 

4068 "Service Function Path '{}' not found".format(sfp_id) 

4069 ) 

4070 elif len(sfp_list) > 1: 

4071 raise vimconn.VimConnConflictException( 

4072 "Found more than one Service Function Path with this criteria" 

4073 ) 

4074 

4075 sfp = sfp_list[0] 

4076 

4077 return sfp 

4078 

4079 def get_sfp_list(self, filter_dict={}): 

4080 self.logger.debug( 

4081 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict) 

4082 ) 

4083 

4084 try: 

4085 self._reload_connection() 

4086 filter_dict_os = filter_dict.copy() 

4087 

4088 if self.api_version3 and "tenant_id" in filter_dict_os: 

4089 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id") 

4090 

4091 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os) 

4092 sfp_list = sfp_dict["port_chains"] 

4093 self.__sfp_os2mano(sfp_list) 

4094 

4095 return sfp_list 

4096 except ( 

4097 neExceptions.ConnectionFailed, 

4098 ksExceptions.ClientException, 

4099 neExceptions.NeutronException, 

4100 ConnectionError, 

4101 ) as e: 

4102 self._format_exception(e) 

4103 

4104 def delete_sfp(self, sfp_id): 

4105 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id) 

4106 

4107 try: 

4108 self._reload_connection() 

4109 self.neutron.delete_sfc_port_chain(sfp_id) 

4110 

4111 return sfp_id 

4112 except ( 

4113 neExceptions.ConnectionFailed, 

4114 neExceptions.NeutronException, 

4115 ksExceptions.ClientException, 


4117 ConnectionError, 

4118 ) as e: 

4119 self._format_exception(e) 

4120 

4121 def refresh_sfps_status(self, sfp_list): 

4122 """Get the status of the service function path 

4123 Params: the list of sfp identifiers 

4124 Returns a dictionary with: 

4125 vm_id: #VIM id of this service function path 

4126 status: #Mandatory. Text with one of: 

4127 # DELETED (not found at vim) 

4128 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 

4129 # OTHER (Vim reported other status not understood) 

4130 # ERROR (VIM indicates an ERROR status) 

4131 # ACTIVE, 

4132 # CREATING (on building process) 

4133 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR 

4134 vim_info: #Text with plain information obtained from vim (yaml.safe_dump) 

4135 """ 

4136 sfp_dict = {} 

4137 self.logger.debug( 

4138 "refresh_sfps status: Getting tenant SFP information from VIM" 

4139 ) 

4140 

4141 for sfp_id in sfp_list: 

4142 sfp = {} 

4143 

4144 try: 

4145 sfp_vim = self.get_sfp(sfp_id) 

4146 

4147 if sfp_vim["spi"]: 

4148 sfp["status"] = vmStatus2manoFormat["ACTIVE"] 

4149 else: 

4150 sfp["status"] = "OTHER" 

4151 sfp["error_msg"] = "VIM status reported " + sfp["status"] 

4152 

4153 sfp["vim_info"] = self.serialize(sfp_vim) 

4154 

4155 if sfp_vim.get("fault"): 

4156 sfp["error_msg"] = str(sfp_vim["fault"]) 

4157 except vimconn.VimConnNotFoundException as e: 

4158 self.logger.error("Exception getting sfp status: %s", str(e)) 

4159 sfp["status"] = "DELETED" 

4160 sfp["error_msg"] = str(e) 

4161 except vimconn.VimConnException as e: 

4162 self.logger.error("Exception getting sfp status: %s", str(e)) 

4163 sfp["status"] = "VIM_ERROR" 

4164 sfp["error_msg"] = str(e) 

4165 

4166 sfp_dict[sfp_id] = sfp 

4167 

4168 return sfp_dict 

4169 

4170 def refresh_sfis_status(self, sfi_list): 

4171 """Get the status of the service function instances 

4172 Params: the list of sfi identifiers 

4173 Returns a dictionary with: 

4174 vm_id: #VIM id of this service function instance 

4175 status: #Mandatory. Text with one of: 

4176 # DELETED (not found at vim) 

4177 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 

4178 # OTHER (Vim reported other status not understood) 

4179 # ERROR (VIM indicates an ERROR status) 

4180 # ACTIVE, 

4181 # CREATING (on building process) 

4182 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR 

4183 vim_info: #Text with plain information obtained from vim (yaml.safe_dump) 

4184 """ 

4185 sfi_dict = {} 

4186 self.logger.debug( 

4187 "refresh_sfis status: Getting tenant sfi information from VIM" 

4188 ) 

4189 

4190 for sfi_id in sfi_list: 

4191 sfi = {} 

4192 

4193 try: 

4194 sfi_vim = self.get_sfi(sfi_id) 

4195 

4196 if sfi_vim: 

4197 sfi["status"] = vmStatus2manoFormat["ACTIVE"] 

4198 else: 

4199 sfi["status"] = "OTHER" 

4200 sfi["error_msg"] = "VIM status reported " + sfi["status"] 

4201 

4202 sfi["vim_info"] = self.serialize(sfi_vim) 

4203 

4204 if sfi_vim.get("fault"): 

4205 sfi["error_msg"] = str(sfi_vim["fault"]) 

4206 except vimconn.VimConnNotFoundException as e: 

4207 self.logger.error("Exception getting sfi status: %s", str(e)) 

4208 sfi["status"] = "DELETED" 

4209 sfi["error_msg"] = str(e) 

4210 except vimconn.VimConnException as e: 

4211 self.logger.error("Exception getting sfi status: %s", str(e)) 

4212 sfi["status"] = "VIM_ERROR" 

4213 sfi["error_msg"] = str(e) 

4214 

4215 sfi_dict[sfi_id] = sfi 

4216 

4217 return sfi_dict 

4218 

4219 def refresh_sfs_status(self, sf_list): 

4220 """Get the status of the service functions 

4221 Params: the list of sf identifiers 

4222 Returns a dictionary with: 

4223 vm_id: #VIM id of this service function 

4224 status: #Mandatory. Text with one of: 

4225 # DELETED (not found at vim) 

4226 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 

4227 # OTHER (Vim reported other status not understood) 

4228 # ERROR (VIM indicates an ERROR status) 

4229 # ACTIVE, 

4230 # CREATING (on building process) 

4231 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR 

4232 vim_info: #Text with plain information obtained from vim (yaml.safe_dump) 

4233 """ 

4234 sf_dict = {} 

4235 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM") 

4236 

4237 for sf_id in sf_list: 

4238 sf = {} 

4239 

4240 try: 

4241 sf_vim = self.get_sf(sf_id) 

4242 

4243 if sf_vim: 

4244 sf["status"] = vmStatus2manoFormat["ACTIVE"] 

4245 else: 

4246 sf["status"] = "OTHER" 

4247 sf["error_msg"] = "VIM status reported " + sf_vim["status"] 

4248 

4249 sf["vim_info"] = self.serialize(sf_vim) 

4250 

4251 if sf_vim.get("fault"): 

4252 sf["error_msg"] = str(sf_vim["fault"]) 

4253 except vimconn.VimConnNotFoundException as e: 

4254 self.logger.error("Exception getting sf status: %s", str(e)) 

4255 sf["status"] = "DELETED" 

4256 sf["error_msg"] = str(e) 

4257 except vimconn.VimConnException as e: 

4258 self.logger.error("Exception getting sf status: %s", str(e)) 

4259 sf["status"] = "VIM_ERROR" 

4260 sf["error_msg"] = str(e) 

4261 

4262 sf_dict[sf_id] = sf 

4263 

4264 return sf_dict 

4265 

4266 def refresh_classifications_status(self, classification_list): 

4267 """Get the status of the classifications 

4268 Params: the list of classification identifiers 

4269 Returns a dictionary with: 

4270 vm_id: #VIM id of this classifier 

4271 status: #Mandatory. Text with one of: 

4272 # DELETED (not found at vim) 

4273 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 

4274 # OTHER (Vim reported other status not understood) 

4275 # ERROR (VIM indicates an ERROR status) 

4276 # ACTIVE, 

4277 # CREATING (on building process) 

4278 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR 

4279 vim_info: #Text with plain information obtained from vim (yaml.safe_dump) 

4280 """ 

4281 classification_dict = {} 

4282 self.logger.debug( 

4283 "refresh_classifications status: Getting tenant classification information from VIM" 

4284 ) 

4285 

4286 for classification_id in classification_list: 

4287 classification = {} 

4288 

4289 try: 

4290 classification_vim = self.get_classification(classification_id) 

4291 

4292 if classification_vim: 

4293 classification["status"] = vmStatus2manoFormat["ACTIVE"] 

4294 else: 

4295 classification["status"] = "OTHER" 

4296 classification["error_msg"] = ( 

4297 "VIM status reported " + classification["status"] 

4298 ) 

4299 

4300 classification["vim_info"] = self.serialize(classification_vim) 

4301 

4302 if classification_vim.get("fault"): 

4303 classification["error_msg"] = str(classification_vim["fault"]) 

4304 except vimconn.VimConnNotFoundException as e: 

4305 self.logger.error("Exception getting classification status: %s", str(e)) 

4306 classification["status"] = "DELETED" 

4307 classification["error_msg"] = str(e) 

4308 except vimconn.VimConnException as e: 

4309 self.logger.error("Exception getting classification status: %s", str(e)) 

4310 classification["status"] = "VIM_ERROR" 

4311 classification["error_msg"] = str(e) 

4312 

4313 classification_dict[classification_id] = classification 

4314 

4315 return classification_dict 

4316 

4317 @catch_any_exception 

4318 def new_affinity_group(self, affinity_group_data): 

4319 """Adds a server group to VIM 

4320 affinity_group_data contains a dictionary with information, keys: 

4321 name: name in VIM for the server group 

4322 type: affinity or anti-affinity 

4323 scope: Only nfvi-node allowed 

4324 Returns the server group identifier""" 

4325 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data)) 

4326 name = affinity_group_data["name"] 

4327 policy = affinity_group_data["type"] 

4328 self._reload_connection() 

4329 new_server_group = self.nova.server_groups.create(name, policy) 

4330 return new_server_group.id 

4331 

4332 @catch_any_exception 

4333 def get_affinity_group(self, affinity_group_id): 

4334 """Obtain server group details from the VIM. Returns the server group detais as a dict""" 

4335 self.logger.debug("Getting flavor '%s'", affinity_group_id) 

4336 self._reload_connection() 

4337 server_group = self.nova.server_groups.find(id=affinity_group_id) 

4338 return server_group.to_dict() 

4339 

4340 @catch_any_exception 

4341 def delete_affinity_group(self, affinity_group_id): 

4342 """Deletes a server group from the VIM. Returns the old affinity_group_id""" 

4343 self.logger.debug("Getting server group '%s'", affinity_group_id) 

4344 self._reload_connection() 

4345 self.nova.server_groups.delete(affinity_group_id) 

4346 return affinity_group_id 

4347 

4348 @catch_any_exception 

4349 def get_vdu_state(self, vm_id, host_is_required=False) -> list: 

4350 """Getting the state of a VDU. 

4351 Args: 

4352 vm_id (str): ID of an instance 

4353 host_is_required (Boolean): with a non-admin VIM account, host info is absent from server_dict; 

4354 if this flag is True in that case, a KeyError is raised. 

4355 Returns: 

4356 vdu_data (list): VDU details including state, flavor, host_info, AZ 

4357 """ 

4358 self.logger.debug("Getting the status of VM") 

4359 self.logger.debug("VIM VM ID %s", vm_id) 

4360 self._reload_connection() 

4361 server_dict = self._find_nova_server(vm_id) 

4362 srv_attr = "OS-EXT-SRV-ATTR:host" 

4363 host_info = ( 

4364 server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr) 

4365 ) 

4366 vdu_data = [ 

4367 server_dict["status"], 

4368 server_dict["flavor"]["id"], 

4369 host_info, 

4370 server_dict["OS-EXT-AZ:availability_zone"], 

4371 ] 

4372 self.logger.debug("vdu_data %s", vdu_data) 

4373 return vdu_data 

4374 
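Usage sketch: the returned list unpacks positionally; host may be None with non-admin credentials unless host_is_required is set:

status, flavor_id, host, az = conn.get_vdu_state(vm_id)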

4375 def check_compute_availability(self, host, server_flavor_details): 

4376 self._reload_connection() 

4377 hypervisor_search = self.nova.hypervisors.search( 

4378 hypervisor_match=host, servers=True 

4379 ) 

4380 for hypervisor in hypervisor_search: 

4381 hypervisor_id = hypervisor.to_dict()["id"] 

4382 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id) 

4383 hypervisor_json = hypervisor_details.to_dict() 

4386 resources_available = [ 

4387 hypervisor_json["free_ram_mb"], 

4388 hypervisor_json["disk_available_least"], 

4389 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"], 

4390 ] 

4391 compute_available = all( 

4392 x > y for x, y in zip(resources_available, server_flavor_details) 

4393 ) 

4394 if compute_available: 

4395 return host 

4396 

4397 def check_availability_zone( 

4398 self, old_az, server_flavor_details, old_host, host=None 

4399 ): 

4400 self._reload_connection() 

4401 az_check = {"zone_check": False, "compute_availability": None} 

4402 aggregates_list = self.nova.aggregates.list() 

4403 for aggregate in aggregates_list: 

4404 aggregate_json = aggregate.to_dict() 

4407 if aggregate_json["availability_zone"] == old_az: 

4408 hosts_list = aggregate_json["hosts"] 

4409 if host is not None: 

4410 if host in hosts_list: 

4411 az_check["zone_check"] = True 

4412 available_compute_id = self.check_compute_availability( 

4413 host, server_flavor_details 

4414 ) 

4415 if available_compute_id is not None: 

4416 az_check["compute_availability"] = available_compute_id 

4417 else: 

4418 for check_host in hosts_list: 

4419 if check_host != old_host: 

4420 available_compute_id = self.check_compute_availability( 

4421 check_host, server_flavor_details 

4422 ) 

4423 if available_compute_id is not None: 

4424 az_check["zone_check"] = True 

4425 az_check["compute_availability"] = available_compute_id 

4426 break 

4427 else: 

4428 az_check["zone_check"] = True 

4429 return az_check 

4430 
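The returned dict separates the two questions; a sketch of reading it the way migrate_instance (below) does, with an invented host name and a [ram_mb, disk_gb, vcpus] triple:

az_check = conn.check_availability_zone(old_az, [4096, 40, 2], "compute-01")
if not az_check["zone_check"]:
    print("target is outside the original availability zone")
elif az_check["compute_availability"] is None:
    print("zone matches, but no host has enough RAM/disk/vCPUs")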

4431 @catch_any_exception 

4432 def migrate_instance(self, vm_id, compute_host=None): 

4433 """ 

4434 Migrate a vdu 

4435 param: 

4436 vm_id: ID of an instance 

4437 compute_host: Host to migrate the vdu to 

4438 """ 

4439 self._reload_connection() 

4440 vm_state = False 

4441 instance_state = self.get_vdu_state(vm_id, host_is_required=True) 

4442 server_flavor_id = instance_state[1] 

4443 server_hypervisor_name = instance_state[2] 

4444 server_availability_zone = instance_state[3] 

4445 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict() 

4446 server_flavor_details = [ 

4447 server_flavor["ram"], 

4448 server_flavor["disk"], 

4449 server_flavor["vcpus"], 

4450 ] 

4451 if compute_host == server_hypervisor_name: 

4452 raise vimconn.VimConnException( 

4453 "Unable to migrate instance '{}' to the same host '{}'".format( 

4454 vm_id, compute_host 

4455 ), 

4456 http_code=vimconn.HTTP_Bad_Request, 

4457 ) 

4458 az_status = self.check_availability_zone( 

4459 server_availability_zone, 

4460 server_flavor_details, 

4461 server_hypervisor_name, 

4462 compute_host, 

4463 ) 

4464 availability_zone_check = az_status["zone_check"] 

4465 available_compute_id = az_status.get("compute_availability") 

4466 

4467 if availability_zone_check is False: 

4468 raise vimconn.VimConnException( 

4469 "Unable to migrate instance '{}' to a different availability zone".format( 

4470 vm_id 

4471 ), 

4472 http_code=vimconn.HTTP_Bad_Request, 

4473 ) 

4474 if available_compute_id is not None: 

4475 # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25 

4476 self.nova.servers.live_migrate( 

4477 server=vm_id, 

4478 host=available_compute_id, 

4479 block_migration=True, 

4480 ) 

4481 state = "MIGRATING" 

4482 changed_compute_host = "" 

4483 if state == "MIGRATING": 

4484 vm_state = self.__wait_for_vm(vm_id, "ACTIVE") 

4485 changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[ 

4486 2 

4487 ] 

4488 if vm_state and changed_compute_host == available_compute_id: 

4489 self.logger.debug( 

4490 "Instance '{}' migrated to the new compute host '{}'".format( 

4491 vm_id, changed_compute_host 

4492 ) 

4493 ) 

4494 return state, available_compute_id 

4495 else: 

4496 raise vimconn.VimConnException( 

4497 "Migration Failed. Instance '{}' not moved to the new host {}".format( 

4498 vm_id, available_compute_id 

4499 ), 

4500 http_code=vimconn.HTTP_Bad_Request, 

4501 ) 

4502 else: 

4503 raise vimconn.VimConnException( 

4504 "Compute '{}' not available or does not have enough resources to migrate the instance".format( 

4505 available_compute_id 

4506 ), 

4507 http_code=vimconn.HTTP_Bad_Request, 

4508 ) 

4509 
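A usage sketch with a hypothetical target host; on success the method returns the transient state and the chosen compute:

state, target_host = conn.migrate_instance(vm_id, compute_host="compute-02")
# state == "MIGRATING"; target_host is the compute that passed the checks above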

4510 @catch_any_exception 

4511 def resize_instance(self, vm_id, new_flavor_id): 

4512 """ 

4513 For resizing the vm based on the given 

4514 flavor details 

4515 param: 

4516 vm_id : ID of an instance 

4517 new_flavor_id : Flavor id to be resized 

4518 Return the status of a resized instance 

4519 """ 

4520 self._reload_connection() 

4521 self.logger.debug("resize the flavor of an instance") 

4522 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id) 

4523 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"] 

4524 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"] 

4525 if instance_status in ("ACTIVE", "SHUTOFF"): 

4526 if old_flavor_disk > new_flavor_disk: 

4527 raise nvExceptions.BadRequest( 

4528 400, 

4529 message="Server disk resize failed. Resize to lower disk flavor is not allowed", 

4530 ) 

4531 else: 

4532 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id) 

4533 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE") 

4534 if vm_state: 

4535 instance_resized_status = self.confirm_resize( 

4536 vm_id, instance_status 

4537 ) 

4538 return instance_resized_status 

4539 else: 

4540 raise nvExceptions.BadRequest( 

4541 409, 

4542 message="Cannot 'resize' vm_state is in ERROR", 

4543 ) 

4544 

4545 else: 

4546 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state") 

4547 raise nvExceptions.BadRequest( 

4548 409, 

4549 message="Cannot 'resize' instance while it is in vm_state resized", 

4550 ) 

4551 

4552 def confirm_resize(self, vm_id, instance_state): 

4553 """ 

4554 Confirm the resize of an instance 

4555 param: 

4556 vm_id: ID of an instance 

4557 """ 

4558 self._reload_connection() 

4559 self.nova.servers.confirm_resize(server=vm_id) 

4560 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE": 

4561 self.__wait_for_vm(vm_id, instance_state) 

4562 instance_status = self.get_vdu_state(vm_id)[0] 

4563 return instance_status 

4564 
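Resize sketch (flavor id hypothetical): the method waits for VERIFY_RESIZE, confirms, and returns the final status, which should match the pre-resize state:

final_status = conn.resize_instance(vm_id, new_flavor_id="bigger-flavor-uuid")
print(final_status)  # e.g. "ACTIVE" or "SHUTOFF"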

4565 def get_monitoring_data(self): 

4566 try: 

4567 self.logger.debug("Getting servers and ports data from Openstack VIMs.") 

4568 self._reload_connection() 

4569 all_servers = self.nova.servers.list(detailed=True) 

4570 try: 

4571 for server in all_servers: 

4572 if server.flavor.get("original_name"): 

4573 server.flavor["id"] = self.nova.flavors.find( 

4574 name=server.flavor["original_name"] 

4575 ).id 

4576 except nClient.exceptions.NotFound as e: 

4577 self.logger.warning(str(e.message)) 

4578 all_ports = self.neutron.list_ports() 

4579 return all_servers, all_ports 

4580 except Exception as e: 

4581 raise vimconn.VimConnException( 

4582 f"Exception in monitoring while getting VMs and ports status: {str(e)}" 

4583 )
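And a sketch of consuming the monitoring snapshot: servers arrive as novaclient objects, ports as the plain dict Neutron returns:

servers, ports = conn.get_monitoring_data()
for server in servers:
    print(server.id, server.status, server.flavor.get("id"))
print(len(ports.get("ports", [])), "ports known to Neutron")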