Coverage for n2vc/k8s_helm_base_conn.py: 56%

819 statements  

« prev     ^ index     » next       coverage.py v7.6.12, created at 2025-05-07 06:04 +0000

1## 

2# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U. 

3# This file is part of OSM 

4# All Rights Reserved. 

5# 

6# Licensed under the Apache License, Version 2.0 (the "License"); 

7# you may not use this file except in compliance with the License. 

8# You may obtain a copy of the License at 

9# 

10# http://www.apache.org/licenses/LICENSE-2.0 

11# 

12# Unless required by applicable law or agreed to in writing, software 

13# distributed under the License is distributed on an "AS IS" BASIS, 

14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 

15# implied. 

16# See the License for the specific language governing permissions and 

17# limitations under the License. 

18# 

19# For those usages not covered by the Apache License, Version 2.0 please 

20# contact with: nfvlabs@tid.es 

21## 

22import abc 

23import asyncio 

24from typing import Union 

25from shlex import quote 

26import random 

27import time 

28import shlex 

29import shutil 

30import stat 

31import os 

32import yaml 

33from uuid import uuid4 

34from urllib.parse import urlparse 

35 

36from n2vc.config import EnvironConfig 

37from n2vc.exceptions import K8sException 

38from n2vc.k8s_conn import K8sConnector 

39from n2vc.kubectl import Kubectl 

40 

41 

42class K8sHelmBaseConnector(K8sConnector): 

43 

44 """ 

45 #################################################################################### 

46 ################################### P U B L I C #################################### 

47 #################################################################################### 

48 """ 

49 

50 service_account = "osm" 

51 

52 def __init__( 

53 self, 

54 fs: object, 

55 db: object, 

56 kubectl_command: str = "/usr/bin/kubectl", 

57 helm_command: str = "/usr/bin/helm", 

58 log: object = None, 

59 on_update_db=None, 

60 ): 

61 """ 

62 

63 :param fs: file system for kubernetes and helm configuration 

64 :param db: database object to write current operation status 

65 :param kubectl_command: path to kubectl executable 

66 :param helm_command: path to helm executable 

67 :param log: logger 

68 :param on_update_db: callback called when k8s connector updates database 

69 """ 

70 

71 # parent class 

72 K8sConnector.__init__(self, db=db, log=log, on_update_db=on_update_db) 

73 

74 self.log.info("Initializing K8S Helm connector") 

75 

76 self.config = EnvironConfig() 

77 # random numbers for release name generation 

78 random.seed(time.time()) 

79 

80 # the file system 

81 self.fs = fs 

82 

83 # exception if kubectl is not installed 

84 self.kubectl_command = kubectl_command 

85 self._check_file_exists(filename=kubectl_command, exception_if_not_exists=True) 

86 

87 # exception if helm is not installed 

88 self._helm_command = helm_command 

89 self._check_file_exists(filename=helm_command, exception_if_not_exists=True) 

90 

91 # exception if main post renderer executable is not present 

92 self.main_post_renderer_path = EnvironConfig(prefixes=["OSMLCM_"]).get( 

93 "mainpostrendererpath" 

94 ) 

95 if self.main_post_renderer_path: 

96 self._check_file_exists( 

97 filename=self.main_post_renderer_path, exception_if_not_exists=True 

98 ) 

99 

100 # exception if podLabels post renderer executable is not present 

101 self.podLabels_post_renderer_path = EnvironConfig(prefixes=["OSMLCM_"]).get( 

102 "podlabelspostrendererpath" 

103 ) 

104 if self.podLabels_post_renderer_path: 

105 self._check_file_exists( 

106 filename=self.podLabels_post_renderer_path, exception_if_not_exists=True 

107 ) 

108 

109 # exception if nodeSelector post renderer executable is not present 

110 self.nodeSelector_post_renderer_path = EnvironConfig(prefixes=["OSMLCM_"]).get( 

111 "nodeselectorpostrendererpath" 

112 ) 

113 if self.nodeSelector_post_renderer_path: 

114 self._check_file_exists( 

115 filename=self.nodeSelector_post_renderer_path, 

116 exception_if_not_exists=True, 

117 ) 

118 

119 # obtain stable repo url from config or apply default 

120 self._stable_repo_url = self.config.get("stablerepourl") 

121 if self._stable_repo_url == "None": 

122 self._stable_repo_url = None 

123 

124 # Lock to avoid concurrent execution of helm commands 

125 self.cmd_lock = asyncio.Lock() 

126 

127 def _get_namespace(self, cluster_uuid: str) -> str: 

128 """ 

129 Obtains the namespace used by the cluster with the uuid passed by argument 

130 

131 param: cluster_uuid: cluster's uuid 

132 """ 

133 

134 # first, obtain the cluster corresponding to the uuid passed by argument 

135 k8scluster = self.db.get_one( 

136 "k8sclusters", q_filter={"_id": cluster_uuid}, fail_on_empty=False 

137 ) 

138 return k8scluster.get("namespace") 

139 

140 async def init_env( 

141 self, 

142 k8s_creds: str, 

143 namespace: str = "kube-system", 

144 reuse_cluster_uuid=None, 

145 **kwargs, 

146 ) -> tuple[str, bool]: 

147 """ 

148 It prepares a given K8s cluster environment to run Charts 

149 

150 :param k8s_creds: credentials to access a given K8s cluster, i.e. a valid 

151 '.kube/config' 

152 :param namespace: optional namespace to be used for helm. By default, 

153 'kube-system' will be used 

154 :param reuse_cluster_uuid: existing cluster uuid for reuse 

155 :param kwargs: Additional parameters (None yet) 

156 :return: uuid of the K8s cluster and True if connector has installed some 

157 software in the cluster 

158 (on error, an exception will be raised) 

159 """ 

160 

161 if reuse_cluster_uuid: 

162 cluster_id = reuse_cluster_uuid 

163 else: 

164 cluster_id = str(uuid4()) 

165 

166 self.log.debug( 

167 "Initializing K8S Cluster {}. namespace: {}".format(cluster_id, namespace) 

168 ) 

169 

170 paths, env = self._init_paths_env( 

171 cluster_name=cluster_id, create_if_not_exist=True 

172 ) 

173 mode = stat.S_IRUSR | stat.S_IWUSR 

174 with open(paths["kube_config"], "w", mode) as f: 

175 f.write(k8s_creds) 

176 os.chmod(paths["kube_config"], 0o600) 

177 

178 # Code with initialization specific of helm version 

179 n2vc_installed_sw = await self._cluster_init(cluster_id, namespace, paths, env) 

180 

181 # sync fs with local data 

182 self.fs.reverse_sync(from_path=cluster_id) 

183 

184 self.log.info("Cluster {} initialized".format(cluster_id)) 

185 

186 return cluster_id, n2vc_installed_sw 

187 

    async def repo_add(
        self,
        cluster_uuid: str,
        name: str,
        url: str,
        repo_type: str = "chart",
        cert: str = None,
        user: str = None,
        password: str = None,
        oci: bool = False,
    ):
        """
        Add a helm chart repository to a cluster, or log in to an OCI registry.

        :param cluster_uuid: UUID of the target K8s cluster
        :param name: name under which the repository is registered
        :param repo_type: repository type, used only in log messages
        :param url: repository URL (for OCI, a registry reference, may be oci://...)
        :param cert: optional CA certificate content used to validate the repo TLS
        :param user: optional username (repo auth or OCI registry login)
        :param password: optional password (repo auth or OCI registry login)
        :param oci: when True, performs "helm registry login" instead of
            "helm repo add"; if no credentials are given, nothing is done
        """
        self.log.debug(
            "Cluster {}, adding {} repository {}. URL: {}".format(
                cluster_uuid, repo_type, name, url
            )
        )

        # init_env
        paths, env = self._init_paths_env(
            cluster_name=cluster_uuid, create_if_not_exist=True
        )

        # sync local dir
        self.fs.sync(from_path=cluster_uuid)

        if oci:
            if user and password:
                # "registry login" expects host[:port], so strip the oci:// scheme
                host_port = urlparse(url).netloc if url.startswith("oci://") else url
                # helm registry login url
                command = "env KUBECONFIG={} {} registry login {}".format(
                    paths["kube_config"], self._helm_command, quote(host_port)
                )
            else:
                # anonymous OCI access requires no login at all
                self.log.debug(
                    "OCI registry login is not needed for repo: {}".format(name)
                )
                return
        else:
            # helm repo add name url
            command = "env KUBECONFIG={} {} repo add {} {}".format(
                paths["kube_config"], self._helm_command, quote(name), quote(url)
            )

        # the flags below apply to both "registry login" and "repo add"
        if cert:
            # persist the CA cert under the cluster's helmcerts dir so helm can read it
            temp_cert_file = os.path.join(
                self.fs.path, "{}/helmcerts/".format(cluster_uuid), "temp.crt"
            )
            os.makedirs(os.path.dirname(temp_cert_file), exist_ok=True)
            with open(temp_cert_file, "w") as the_cert:
                the_cert.write(cert)
            command += " --ca-file {}".format(quote(temp_cert_file))

        if user:
            command += " --username={}".format(quote(user))

        if password:
            command += " --password={}".format(quote(password))

        self.log.debug("adding repo: {}".format(command))
        await self._local_async_exec(
            command=command, raise_exception_on_error=True, env=env
        )

        if not oci:
            # refresh the repo index right after adding it (best-effort:
            # errors here are not raised)
            # helm repo update
            command = "env KUBECONFIG={} {} repo update {}".format(
                paths["kube_config"], self._helm_command, quote(name)
            )
            self.log.debug("updating repo: {}".format(command))
            await self._local_async_exec(
                command=command, raise_exception_on_error=False, env=env
            )

        # sync fs
        self.fs.reverse_sync(from_path=cluster_uuid)

263 

264 async def repo_update(self, cluster_uuid: str, name: str, repo_type: str = "chart"): 

265 self.log.debug( 

266 "Cluster {}, updating {} repository {}".format( 

267 cluster_uuid, repo_type, name 

268 ) 

269 ) 

270 

271 # init_env 

272 paths, env = self._init_paths_env( 

273 cluster_name=cluster_uuid, create_if_not_exist=True 

274 ) 

275 

276 # sync local dir 

277 self.fs.sync(from_path=cluster_uuid) 

278 

279 # helm repo update 

280 command = "{} repo update {}".format(self._helm_command, quote(name)) 

281 self.log.debug("updating repo: {}".format(command)) 

282 await self._local_async_exec( 

283 command=command, raise_exception_on_error=False, env=env 

284 ) 

285 

286 # sync fs 

287 self.fs.reverse_sync(from_path=cluster_uuid) 

288 

289 async def repo_list(self, cluster_uuid: str) -> list: 

290 """ 

291 Get the list of registered repositories 

292 

293 :return: list of registered repositories: [ (name, url) .... ] 

294 """ 

295 

296 self.log.debug("list repositories for cluster {}".format(cluster_uuid)) 

297 

298 # config filename 

299 paths, env = self._init_paths_env( 

300 cluster_name=cluster_uuid, create_if_not_exist=True 

301 ) 

302 

303 # sync local dir 

304 self.fs.sync(from_path=cluster_uuid) 

305 

306 command = "env KUBECONFIG={} {} repo list --output yaml".format( 

307 paths["kube_config"], self._helm_command 

308 ) 

309 

310 # Set exception to false because if there are no repos just want an empty list 

311 output, _rc = await self._local_async_exec( 

312 command=command, raise_exception_on_error=False, env=env 

313 ) 

314 

315 # sync fs 

316 self.fs.reverse_sync(from_path=cluster_uuid) 

317 

318 if _rc == 0: 

319 if output and len(output) > 0: 

320 repos = yaml.load(output, Loader=yaml.SafeLoader) 

321 # unify format between helm2 and helm3 setting all keys lowercase 

322 return self._lower_keys_list(repos) 

323 else: 

324 return [] 

325 else: 

326 return [] 

327 

328 async def repo_remove(self, cluster_uuid: str, name: str): 

329 self.log.debug( 

330 "remove {} repositories for cluster {}".format(name, cluster_uuid) 

331 ) 

332 

333 # init env, paths 

334 paths, env = self._init_paths_env( 

335 cluster_name=cluster_uuid, create_if_not_exist=True 

336 ) 

337 

338 # sync local dir 

339 self.fs.sync(from_path=cluster_uuid) 

340 

341 command = "env KUBECONFIG={} {} repo remove {}".format( 

342 paths["kube_config"], self._helm_command, quote(name) 

343 ) 

344 await self._local_async_exec( 

345 command=command, raise_exception_on_error=True, env=env 

346 ) 

347 

348 # sync fs 

349 self.fs.reverse_sync(from_path=cluster_uuid) 

350 

    async def reset(
        self,
        cluster_uuid: str,
        force: bool = False,
        uninstall_sw: bool = False,
        **kwargs,
    ) -> bool:
        """Reset a cluster

        Resets the Kubernetes cluster by removing the helm deployment that represents it.

        :param cluster_uuid: The UUID of the cluster to reset
        :param force: if True, deployed releases are uninstalled one by one
            before removing the cluster software; if False and releases exist,
            the helm environment is left in place
        :param uninstall_sw: if True, also uninstall the software this
            connector installed in the cluster (may be downgraded to False
            when releases exist and force is not set)
        :param kwargs: Additional parameters (None yet)
        :return: Returns True if successful or raises an exception.
        """
        namespace = self._get_namespace(cluster_uuid=cluster_uuid)
        self.log.debug(
            "Resetting K8s environment. cluster uuid: {} uninstall={}".format(
                cluster_uuid, uninstall_sw
            )
        )

        # sync local dir
        self.fs.sync(from_path=cluster_uuid)

        # uninstall releases if needed.
        if uninstall_sw:
            releases = await self.instances_list(cluster_uuid=cluster_uuid)
            if len(releases) > 0:
                if force:
                    for r in releases:
                        try:
                            kdu_instance = r.get("name")
                            chart = r.get("chart")
                            self.log.debug(
                                "Uninstalling {} -> {}".format(chart, kdu_instance)
                            )
                            await self.uninstall(
                                cluster_uuid=cluster_uuid, kdu_instance=kdu_instance
                            )
                        except Exception as e:
                            # will not raise exception as it was found
                            # that in some cases of previously installed helm releases it
                            # raised an error
                            self.log.warn(
                                "Error uninstalling release {}: {}".format(
                                    kdu_instance, e
                                )
                            )
                else:
                    msg = (
                        "Cluster uuid: {} has releases and not force. Leaving K8s helm environment"
                    ).format(cluster_uuid)
                    self.log.warn(msg)
                    # downgrade: cluster directory is still removed below, but
                    # the cluster-side software is kept
                    uninstall_sw = (
                        False  # Allow to remove k8s cluster without removing Tiller
                    )

        if uninstall_sw:
            await self._uninstall_sw(cluster_id=cluster_uuid, namespace=namespace)

        # delete cluster directory
        self.log.debug("Removing directory {}".format(cluster_uuid))
        self.fs.file_delete(cluster_uuid, ignore_non_exist=True)
        # Remove also the local directory if it still exists
        direct = self.fs.path + "/" + cluster_uuid
        shutil.rmtree(direct, ignore_errors=True)

        return True

422 

423 def _is_helm_chart_a_file(self, chart_name: str): 

424 return chart_name.count("/") > 1 

425 

426 @staticmethod 

427 def _is_helm_chart_a_url(chart_name: str): 

428 result = urlparse(chart_name) 

429 return all([result.scheme, result.netloc]) 

430 

    async def _install_impl(
        self,
        cluster_id: str,
        kdu_model: str,
        paths: dict,
        env: dict,
        kdu_instance: str,
        atomic: bool = True,
        timeout: float = 300,
        params: dict = None,
        db_dict: dict = None,
        labels: dict = None,
        kdu_name: str = None,
        namespace: str = None,
    ):
        """Run the actual helm install for a KDU instance.

        Builds the version-specific install command, executes it (optionally
        with concurrent DB status updates when atomic), writes the final
        status, and raises K8sException if helm returned a non-zero code.

        NOTE(review): the `paths` and `env` parameters are immediately
        overwritten by _init_paths_env below, so the values passed in by the
        caller are never used — confirm whether they can be dropped.
        `kdu_name` is likewise unused here.
        """
        # init env, paths (shadows the paths/env arguments)
        paths, env = self._init_paths_env(
            cluster_name=cluster_id, create_if_not_exist=True
        )

        # serialize params to a temporary values file; file_to_delete is its
        # path (removed after the command ran), or falsy if no params given
        params_str, file_to_delete = self._params_to_file_option(
            cluster_id=cluster_id, params=params
        )

        # resolve chart reference and extract the version, if any
        kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_id)

        command = self._get_install_command(
            kdu_model,
            kdu_instance,
            namespace,
            labels,
            params_str,
            version,
            atomic,
            timeout,
            paths["kube_config"],
        )

        self.log.debug("installing: {}".format(command))

        if atomic:
            # exec helm in a task
            exec_task = asyncio.ensure_future(
                coro_or_future=self._local_async_exec(
                    command=command, raise_exception_on_error=False, env=env
                )
            )

            # write status in another task, concurrently with the install
            status_task = asyncio.ensure_future(
                coro_or_future=self._store_status(
                    cluster_id=cluster_id,
                    kdu_instance=kdu_instance,
                    namespace=namespace,
                    db_dict=db_dict,
                    operation="install",
                )
            )

            # wait for execution task only; the status task loops until cancelled
            await asyncio.wait([exec_task])

            # cancel status task
            status_task.cancel()

            output, rc = exec_task.result()

        else:
            # non-atomic: no intermediate status updates are written
            output, rc = await self._local_async_exec(
                command=command, raise_exception_on_error=False, env=env
            )

        # remove temporal values yaml file
        if file_to_delete:
            os.remove(file_to_delete)

        # write final status
        await self._store_status(
            cluster_id=cluster_id,
            kdu_instance=kdu_instance,
            namespace=namespace,
            db_dict=db_dict,
            operation="install",
        )

        if rc != 0:
            msg = "Error executing command: {}\nOutput: {}".format(command, output)
            self.log.error(msg)
            raise K8sException(msg)

521 

    async def upgrade(
        self,
        cluster_uuid: str,
        kdu_instance: str,
        kdu_model: str = None,
        atomic: bool = True,
        timeout: float = 300,
        params: dict = None,
        db_dict: dict = None,
        namespace: str = None,
        targetHostK8sLabels: dict = None,
        reset_values: bool = False,
        reuse_values: bool = True,
        reset_then_reuse_values: bool = False,
        force: bool = False,
    ):
        """Upgrade a deployed KDU instance (helm upgrade).

        :param cluster_uuid: UUID of the K8s cluster
        :param kdu_instance: unique name of the release to upgrade
        :param kdu_model: chart reference (may embed a version)
        :param atomic: if True, roll back automatically on failure and write
            intermediate status to the DB while the upgrade runs
        :param timeout: timeout in seconds for the helm operation
        :param params: values to apply on upgrade
        :param db_dict: DB location for status updates
        :param namespace: release namespace; looked up from the deployed
            instance when not given
        :param targetHostK8sLabels: node labels forwarded to the upgrade
            command builder
        :param reset_values: forwarded to the helm upgrade command
        :param reuse_values: forwarded to the helm upgrade command
        :param reset_then_reuse_values: forwarded to the helm upgrade command
        :param force: forwarded to the helm upgrade command
        :return: new revision number of the release, or 0 if it cannot be read
        :raises K8sException: if the instance is not found or helm fails
        """
        self.log.debug("upgrading {} in cluster {}".format(kdu_model, cluster_uuid))

        # sync local dir
        self.fs.sync(from_path=cluster_uuid)

        # look for instance to obtain namespace

        # set namespace
        if not namespace:
            instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
            if not instance_info:
                raise K8sException("kdu_instance {} not found".format(kdu_instance))
            namespace = instance_info["namespace"]

        # init env, paths
        paths, env = self._init_paths_env(
            cluster_name=cluster_uuid, create_if_not_exist=True
        )

        # sync local dir (again: get_instance_info above may have touched it)
        self.fs.sync(from_path=cluster_uuid)

        # serialize params to a temporary values file
        params_str, file_to_delete = self._params_to_file_option(
            cluster_id=cluster_uuid, params=params
        )

        kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_uuid)

        # only re-apply OSM labels if the deployed release already carries them
        labels_dict = None
        if db_dict and await self._contains_labels(
            kdu_instance, namespace, paths["kube_config"], env
        ):
            labels_dict = await self._labels_dict(db_dict, kdu_instance)

        command = self._get_upgrade_command(
            kdu_model,
            kdu_instance,
            namespace,
            params_str,
            labels_dict,
            version,
            atomic,
            timeout,
            paths["kube_config"],
            targetHostK8sLabels,
            reset_values,
            reuse_values,
            reset_then_reuse_values,
            force,
        )

        self.log.debug("upgrading: {}".format(command))

        if atomic:
            # exec helm in a task
            exec_task = asyncio.ensure_future(
                coro_or_future=self._local_async_exec(
                    command=command, raise_exception_on_error=False, env=env
                )
            )
            # write status in another task, concurrently with the upgrade
            status_task = asyncio.ensure_future(
                coro_or_future=self._store_status(
                    cluster_id=cluster_uuid,
                    kdu_instance=kdu_instance,
                    namespace=namespace,
                    db_dict=db_dict,
                    operation="upgrade",
                )
            )

            # wait for execution task only
            await asyncio.wait([exec_task])

            # cancel status task
            status_task.cancel()
            output, rc = exec_task.result()

        else:
            output, rc = await self._local_async_exec(
                command=command, raise_exception_on_error=False, env=env
            )

        # remove temporal values yaml file
        if file_to_delete:
            os.remove(file_to_delete)

        # write final status
        await self._store_status(
            cluster_id=cluster_uuid,
            kdu_instance=kdu_instance,
            namespace=namespace,
            db_dict=db_dict,
            operation="upgrade",
        )

        if rc != 0:
            msg = "Error executing command: {}\nOutput: {}".format(command, output)
            self.log.error(msg)
            raise K8sException(msg)

        # sync fs
        self.fs.reverse_sync(from_path=cluster_uuid)

        # return new revision number
        instance = await self.get_instance_info(
            cluster_uuid=cluster_uuid, kdu_instance=kdu_instance
        )
        if instance:
            revision = int(instance.get("revision"))
            self.log.debug("New revision: {}".format(revision))
            return revision
        else:
            return 0

653 

    async def scale(
        self,
        kdu_instance: str,
        scale: int,
        resource_name: str,
        total_timeout: float = 1800,
        cluster_uuid: str = None,
        kdu_model: str = None,
        atomic: bool = True,
        db_dict: dict = None,
        **kwargs,
    ):
        """Scale a resource in a Helm Chart.

        Args:
            kdu_instance: KDU instance name
            scale: Scale to which to set the resource
            resource_name: Resource name
            total_timeout: The time, in seconds, to wait
            cluster_uuid: The UUID of the cluster
            kdu_model: The chart reference
            atomic: if set, upgrade process rolls back changes made in case of failed upgrade.
                The --wait flag will be set automatically if --atomic is used
            db_dict: Dictionary for any additional data
            kwargs: Additional parameters

        Returns:
            True if successful, False otherwise

        Raises:
            K8sException: if the instance is not found or helm fails
        """

        debug_mgs = "scaling {} in cluster {}".format(kdu_model, cluster_uuid)
        if resource_name:
            debug_mgs = "scaling resource {} in model {} (cluster {})".format(
                resource_name, kdu_model, cluster_uuid
            )

        self.log.debug(debug_mgs)

        # look for instance to obtain namespace
        # get_instance_info function calls the sync command
        instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
        if not instance_info:
            raise K8sException("kdu_instance {} not found".format(kdu_instance))

        # init env, paths
        paths, env = self._init_paths_env(
            cluster_name=cluster_uuid, create_if_not_exist=True
        )

        # resolve chart reference and its version
        kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_uuid)

        repo_url = await self._find_repo(kdu_model, cluster_uuid)

        # replica_str is the chart's values key for the replica count of this
        # resource (the count itself is discarded here)
        _, replica_str = await self._get_replica_count_url(
            kdu_model, repo_url, resource_name
        )

        # only re-apply OSM labels if the deployed release already carries them
        labels_dict = None
        if db_dict and await self._contains_labels(
            kdu_instance, instance_info["namespace"], paths["kube_config"], env
        ):
            labels_dict = await self._labels_dict(db_dict, kdu_instance)

        command = self._get_upgrade_scale_command(
            kdu_model,
            kdu_instance,
            instance_info["namespace"],
            scale,
            labels_dict,
            version,
            atomic,
            replica_str,
            total_timeout,
            resource_name,
            paths["kube_config"],
        )

        self.log.debug("scaling: {}".format(command))

        if atomic:
            # exec helm in a task
            exec_task = asyncio.ensure_future(
                coro_or_future=self._local_async_exec(
                    command=command, raise_exception_on_error=False, env=env
                )
            )
            # write status in another task, concurrently with the scale
            status_task = asyncio.ensure_future(
                coro_or_future=self._store_status(
                    cluster_id=cluster_uuid,
                    kdu_instance=kdu_instance,
                    namespace=instance_info["namespace"],
                    db_dict=db_dict,
                    operation="scale",
                )
            )

            # wait for execution task only
            await asyncio.wait([exec_task])

            # cancel status task
            status_task.cancel()
            output, rc = exec_task.result()

        else:
            output, rc = await self._local_async_exec(
                command=command, raise_exception_on_error=False, env=env
            )

        # write final status
        await self._store_status(
            cluster_id=cluster_uuid,
            kdu_instance=kdu_instance,
            namespace=instance_info["namespace"],
            db_dict=db_dict,
            operation="scale",
        )

        if rc != 0:
            msg = "Error executing command: {}\nOutput: {}".format(command, output)
            self.log.error(msg)
            raise K8sException(msg)

        # sync fs
        self.fs.reverse_sync(from_path=cluster_uuid)

        return True

782 

783 async def get_scale_count( 

784 self, 

785 resource_name: str, 

786 kdu_instance: str, 

787 cluster_uuid: str, 

788 kdu_model: str, 

789 **kwargs, 

790 ) -> int: 

791 """Get a resource scale count. 

792 

793 Args: 

794 cluster_uuid: The UUID of the cluster 

795 resource_name: Resource name 

796 kdu_instance: KDU instance name 

797 kdu_model: The name or path of an Helm Chart 

798 kwargs: Additional parameters 

799 

800 Returns: 

801 Resource instance count 

802 """ 

803 

804 self.log.debug( 

805 "getting scale count for {} in cluster {}".format(kdu_model, cluster_uuid) 

806 ) 

807 

808 # look for instance to obtain namespace 

809 instance_info = await self.get_instance_info(cluster_uuid, kdu_instance) 

810 if not instance_info: 

811 raise K8sException("kdu_instance {} not found".format(kdu_instance)) 

812 

813 # init env, paths 

814 paths, _ = self._init_paths_env( 

815 cluster_name=cluster_uuid, create_if_not_exist=True 

816 ) 

817 

818 replicas = await self._get_replica_count_instance( 

819 kdu_instance=kdu_instance, 

820 namespace=instance_info["namespace"], 

821 kubeconfig=paths["kube_config"], 

822 resource_name=resource_name, 

823 ) 

824 

825 self.log.debug( 

826 f"Number of replicas of the KDU instance {kdu_instance} and resource {resource_name} obtained: {replicas}" 

827 ) 

828 

829 # Get default value if scale count is not found from provided values 

830 # Important note: this piece of code shall only be executed in the first scaling operation, 

831 # since it is expected that the _get_replica_count_instance is able to obtain the number of 

832 # replicas when a scale operation was already conducted previously for this KDU/resource! 

833 if replicas is None: 

834 repo_url = await self._find_repo( 

835 kdu_model=kdu_model, cluster_uuid=cluster_uuid 

836 ) 

837 replicas, _ = await self._get_replica_count_url( 

838 kdu_model=kdu_model, repo_url=repo_url, resource_name=resource_name 

839 ) 

840 

841 self.log.debug( 

842 f"Number of replicas of the Helm Chart package for KDU instance {kdu_instance} and resource " 

843 f"{resource_name} obtained: {replicas}" 

844 ) 

845 

846 if replicas is None: 

847 msg = "Replica count not found. Cannot be scaled" 

848 self.log.error(msg) 

849 raise K8sException(msg) 

850 

851 return int(replicas) 

852 

    async def rollback(
        self, cluster_uuid: str, kdu_instance: str, revision=0, db_dict: dict = None
    ):
        """Roll back a KDU instance to a previous revision (helm rollback).

        :param cluster_uuid: UUID of the K8s cluster
        :param kdu_instance: unique name of the release to roll back
        :param revision: target revision (0 lets helm pick the previous one)
        :param db_dict: DB location for status updates written while the
            rollback runs
        :return: new revision number of the release, or 0 if it cannot be read
        :raises K8sException: if the instance is not found or helm fails
        """
        self.log.debug(
            "rollback kdu_instance {} to revision {} from cluster {}".format(
                kdu_instance, revision, cluster_uuid
            )
        )

        # sync local dir
        self.fs.sync(from_path=cluster_uuid)

        # look for instance to obtain namespace
        instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
        if not instance_info:
            raise K8sException("kdu_instance {} not found".format(kdu_instance))

        # init env, paths
        paths, env = self._init_paths_env(
            cluster_name=cluster_uuid, create_if_not_exist=True
        )

        # sync local dir (again: get_instance_info above may have touched it)
        self.fs.sync(from_path=cluster_uuid)

        command = self._get_rollback_command(
            kdu_instance, instance_info["namespace"], revision, paths["kube_config"]
        )

        self.log.debug("rolling_back: {}".format(command))

        # exec helm in a task
        exec_task = asyncio.ensure_future(
            coro_or_future=self._local_async_exec(
                command=command, raise_exception_on_error=False, env=env
            )
        )
        # write status in another task, concurrently with the rollback
        status_task = asyncio.ensure_future(
            coro_or_future=self._store_status(
                cluster_id=cluster_uuid,
                kdu_instance=kdu_instance,
                namespace=instance_info["namespace"],
                db_dict=db_dict,
                operation="rollback",
            )
        )

        # wait for execution task only; the status task loops until cancelled
        await asyncio.wait([exec_task])

        # cancel status task
        status_task.cancel()

        output, rc = exec_task.result()

        # write final status
        await self._store_status(
            cluster_id=cluster_uuid,
            kdu_instance=kdu_instance,
            namespace=instance_info["namespace"],
            db_dict=db_dict,
            operation="rollback",
        )

        if rc != 0:
            msg = "Error executing command: {}\nOutput: {}".format(command, output)
            self.log.error(msg)
            raise K8sException(msg)

        # sync fs
        self.fs.reverse_sync(from_path=cluster_uuid)

        # return new revision number
        instance = await self.get_instance_info(
            cluster_uuid=cluster_uuid, kdu_instance=kdu_instance
        )
        if instance:
            revision = int(instance.get("revision"))
            self.log.debug("New revision: {}".format(revision))
            return revision
        else:
            return 0

936 

    async def uninstall(self, cluster_uuid: str, kdu_instance: str, **kwargs):
        """
        Removes an existing KDU instance. It would implicitly use the `delete` or 'uninstall' call
        (this call should happen after all _terminate-config-primitive_ of the VNF
        are invoked).

        :param cluster_uuid: UUID of a K8s cluster known by OSM, or namespace:cluster_id
        :param kdu_instance: unique name for the KDU instance to be deleted
        :param kwargs: Additional parameters (None yet)
        :return: True if successful

        NOTE(review): despite the docstring, the final return value is
        _output_to_table(output), not a bare True (True is only returned on
        the instance-not-found early exit) — confirm callers only truth-test
        the result before changing either side.
        """

        self.log.debug(
            "uninstall kdu_instance {} from cluster {}".format(
                kdu_instance, cluster_uuid
            )
        )

        # sync local dir
        self.fs.sync(from_path=cluster_uuid)

        # look for instance to obtain namespace; a missing instance is treated
        # as already uninstalled rather than an error
        instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
        if not instance_info:
            self.log.warning(("kdu_instance {} not found".format(kdu_instance)))
            return True
        # init env, paths
        paths, env = self._init_paths_env(
            cluster_name=cluster_uuid, create_if_not_exist=True
        )

        # sync local dir (again: get_instance_info above may have touched it)
        self.fs.sync(from_path=cluster_uuid)

        command = self._get_uninstall_command(
            kdu_instance, instance_info["namespace"], paths["kube_config"]
        )
        # helm errors are raised to the caller here
        output, _rc = await self._local_async_exec(
            command=command, raise_exception_on_error=True, env=env
        )

        # sync fs
        self.fs.reverse_sync(from_path=cluster_uuid)

        return self._output_to_table(output)

982 

983 async def instances_list(self, cluster_uuid: str) -> list: 

984 """ 

985 returns a list of deployed releases in a cluster 

986 

987 :param cluster_uuid: the 'cluster' or 'namespace:cluster' 

988 :return: 

989 """ 

990 

991 self.log.debug("list releases for cluster {}".format(cluster_uuid)) 

992 

993 # sync local dir 

994 self.fs.sync(from_path=cluster_uuid) 

995 

996 # execute internal command 

997 result = await self._instances_list(cluster_uuid) 

998 

999 # sync fs 

1000 self.fs.reverse_sync(from_path=cluster_uuid) 

1001 

1002 return result 

1003 

1004 async def get_instance_info(self, cluster_uuid: str, kdu_instance: str): 

1005 instances = await self.instances_list(cluster_uuid=cluster_uuid) 

1006 for instance in instances: 

1007 if instance.get("name") == kdu_instance: 

1008 return instance 

1009 self.log.debug("Instance {} not found".format(kdu_instance)) 

1010 return None 

1011 

    async def upgrade_charm(
        self,
        ee_id: str = None,
        path: str = None,
        charm_id: str = None,
        charm_type: str = None,
        timeout: float = None,
    ) -> str:
        """This method upgrade charms in VNFs

        Not applicable to helm-deployed KDUs: it always raises.

        Args:
            ee_id: Execution environment id
            path: Local path to the charm
            charm_id: charm-id
            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
            timeout: (Float) Timeout for the ns update operation

        Returns:
            The output of the update operation if status equals to "completed"

        Raises:
            K8sException: always, for helm-based KDUs
        """
        raise K8sException("KDUs deployed with Helm do not support charm upgrade")

1033 

async def exec_primitive(
    self,
    cluster_uuid: str = None,
    kdu_instance: str = None,
    primitive_name: str = None,
    timeout: float = 300,
    params: dict = None,
    db_dict: dict = None,
    **kwargs,
) -> str:
    """Exec primitive (Juju action)

    Helm-based KDUs only support rollback, upgrade and status; any other
    action is rejected.

    :param cluster_uuid: The UUID of the cluster or namespace:cluster
    :param kdu_instance: The unique name of the KDU instance
    :param primitive_name: Name of action that will be executed
    :param timeout: Timeout for action execution
    :param params: Dictionary of all the parameters needed for the action
    :db_dict: Dictionary for any additional data
    :param kwargs: Additional parameters (None yet)

    :raises K8sException: always, see above
    """
    raise K8sException(
        "KDUs deployed with Helm don't support actions "
        "different from rollback, upgrade and status"
    )

1060 

async def get_services(
    self, cluster_uuid: str, kdu_instance: str, namespace: str
) -> list:
    """
    Returns a list of services defined for the specified kdu instance.

    :param cluster_uuid: UUID of a K8s cluster known by OSM
    :param kdu_instance: unique name for the KDU instance
    :param namespace: K8s namespace used by the KDU instance
    :return: If successful, it will return a list of services, Each service
    can have the following data:
    - `name` of the service
    - `type` type of service in the k8 cluster
    - `ports` List of ports offered by the service, for each port includes at least
    name, port, protocol
    - `cluster_ip` Internal ip to be used inside k8s cluster
    - `external_ip` List of external ips (in case they are available)
    """
    self.log.debug(
        "get_services: cluster_uuid: {}, kdu_instance: {}".format(
            cluster_uuid, kdu_instance
        )
    )

    # prepare the cluster paths / helm environment
    paths, env = self._init_paths_env(
        cluster_name=cluster_uuid, create_if_not_exist=True
    )

    # refresh the local cluster directory
    self.fs.sync(from_path=cluster_uuid)

    # obtain the names of the services belonging to the release, then
    # resolve each one to its full description
    service_names = await self._get_services(
        cluster_uuid, kdu_instance, namespace, paths["kube_config"]
    )
    service_list = [
        await self._get_service(cluster_uuid, name, namespace)
        for name in service_names
    ]

    # push local changes back to the shared storage
    self.fs.reverse_sync(from_path=cluster_uuid)

    return service_list

1108 

async def get_service(
    self, cluster_uuid: str, service_name: str, namespace: str
) -> object:
    """Return the description of a single K8s service (see _get_service)."""
    self.log.debug(
        "get service, service_name: {}, namespace: {}, cluster_uuid: {}".format(
            service_name, namespace, cluster_uuid
        )
    )

    # refresh the local cluster directory
    self.fs.sync(from_path=cluster_uuid)

    service_data = await self._get_service(cluster_uuid, service_name, namespace)

    # push local changes back to the shared storage
    self.fs.reverse_sync(from_path=cluster_uuid)

    return service_data

1127 

async def status_kdu(
    self, cluster_uuid: str, kdu_instance: str, yaml_format: str = False, **kwargs
) -> Union[str, dict]:
    """
    This call would retrieve tha current state of a given KDU instance. It would be
    would allow to retrieve the _composition_ (i.e. K8s objects) and _specific
    values_ of the configuration parameters applied to a given instance. This call
    would be based on the `status` call.

    :param cluster_uuid: UUID of a K8s cluster known by OSM
    :param kdu_instance: unique name for the KDU instance
    :param kwargs: Additional parameters (None yet)
    :param yaml_format: if the return shall be returned as an YAML string or as a
        dictionary
    :return: If successful, it will return the following vector of arguments:
    - K8s `namespace` in the cluster where the KDU lives
    - `state` of the KDU instance. It can be:
        - UNKNOWN
        - DEPLOYED
        - DELETED
        - SUPERSEDED
        - FAILED or
        - DELETING
    - List of `resources` (objects) that this release consists of, sorted by kind,
        and the status of those resources
    - Last `deployment_time`.
    :raises K8sException: when the instance is not deployed in the cluster
    """
    self.log.debug(
        "status_kdu: cluster_uuid: {}, kdu_instance: {}".format(
            cluster_uuid, kdu_instance
        )
    )

    # refresh the local cluster directory
    self.fs.sync(from_path=cluster_uuid)

    # locate the release among the deployed instances to learn its namespace
    instances = await self._instances_list(cluster_id=cluster_uuid)
    instance = next(
        (entry for entry in instances if entry.get("name") == kdu_instance), None
    )
    if instance is None:
        raise K8sException(
            "Instance name: {} not found in cluster: {}".format(
                kdu_instance, cluster_uuid
            )
        )

    status = await self._status_kdu(
        cluster_id=cluster_uuid,
        kdu_instance=kdu_instance,
        namespace=instance["namespace"],
        yaml_format=yaml_format,
        show_error_log=True,
    )

    # push local changes back to the shared storage
    self.fs.reverse_sync(from_path=cluster_uuid)

    return status

1190 

async def get_values_kdu(
    self, kdu_instance: str, namespace: str, kubeconfig: str
) -> str:
    """Return the values applied to a deployed KDU instance (`helm get values`)."""
    self.log.debug("get kdu_instance values {}".format(kdu_instance))

    values = await self._exec_get_command(
        get_command="values",
        kdu_instance=kdu_instance,
        namespace=namespace,
        kubeconfig=kubeconfig,
    )
    return values

1202 

async def values_kdu(self, kdu_model: str, repo_url: str = None) -> str:
    """Method to obtain the Helm Chart package's values

    Args:
        kdu_model: The name or path of an Helm Chart
        repo_url: Helm Chart repository url

    Returns:
        str: the values of the Helm Chart package
    """
    self.log.debug(
        "inspect kdu_model values {} from (optional) repo: {}".format(
            kdu_model, repo_url
        )
    )

    # `helm show values` against the chart (optionally from a repo)
    return await self._exec_inspect_command(
        inspect_command="values", kdu_model=kdu_model, repo_url=repo_url
    )

1223 

async def help_kdu(self, kdu_model: str, repo_url: str = None) -> str:
    """Return the chart's README (`helm show readme`).

    Args:
        kdu_model: The name or path of an Helm Chart
        repo_url: Helm Chart repository url

    Returns:
        str: the chart README contents
    """
    self.log.debug(
        "inspect kdu_model {} readme.md from repo: {}".format(kdu_model, repo_url)
    )

    return await self._exec_inspect_command(
        inspect_command="readme", kdu_model=kdu_model, repo_url=repo_url
    )

1232 

async def synchronize_repos(self, cluster_uuid: str):
    """Align the helm repos configured in the cluster with those in the DB.

    Adds repos present in the database but missing (or registered with a
    different url) in the cluster, and removes cluster repos no longer in the
    database (the default 'stable' repo is never removed).

    :param cluster_uuid: the 'cluster' or 'namespace:cluster' id
    :return: tuple (deleted_repo_list, added_repo_dict)
    :raises K8sException: when adding a repo fails
    :raises Exception: on any other synchronization error

    Fixes vs previous version:
    - `self.warning(...)` (nonexistent attribute) -> `self.log.warning(...)`
    - `repo_id` is initialized before the try block so the error message in
      the except handler can never hit an unbound local
    - "delete and and again" typo in the debug message
    """
    self.log.debug("synchronize repos for cluster helm-id: {}".format(cluster_uuid))
    try:
        db_repo_ids = self._get_helm_chart_repos_ids(cluster_uuid)
        db_repo_dict = self._get_db_repos_dict(db_repo_ids)

        local_repo_list = await self.repo_list(cluster_uuid)
        local_repo_dict = {repo["name"]: repo["url"] for repo in local_repo_list}

        deleted_repo_list = []
        added_repo_dict = {}

        # iterate over the list of repos in the database that should be
        # added if not present
        for repo_name, db_repo in db_repo_dict.items():
            repo_id = db_repo.get("_id")
            try:
                # check if it is already present
                curr_repo_url = local_repo_dict.get(db_repo["name"])
                if curr_repo_url != db_repo["url"]:
                    if curr_repo_url:
                        # same name but different url: replace it
                        self.log.debug(
                            "repo {} url changed, delete and add again".format(
                                db_repo["url"]
                            )
                        )
                        await self.repo_remove(cluster_uuid, db_repo["name"])
                        deleted_repo_list.append(repo_id)

                    # add repo
                    self.log.debug("add repo {}".format(db_repo["name"]))
                    await self.repo_add(
                        cluster_uuid,
                        db_repo["name"],
                        db_repo["url"],
                        cert=db_repo.get("ca_cert"),
                        user=db_repo.get("user"),
                        password=db_repo.get("password"),
                        oci=db_repo.get("oci", False),
                    )
                    added_repo_dict[repo_id] = db_repo["name"]
            except Exception as e:
                raise K8sException(
                    "Error adding repo id: {}, err_msg: {} ".format(
                        repo_id, repr(e)
                    )
                )

        # Delete repos that are present but not in nbi_list
        for repo_name in local_repo_dict:
            if not db_repo_dict.get(repo_name) and repo_name != "stable":
                self.log.debug("delete repo {}".format(repo_name))
                try:
                    await self.repo_remove(cluster_uuid, repo_name)
                    deleted_repo_list.append(repo_name)
                except Exception as e:
                    # best-effort removal: log and continue with the rest
                    self.log.warning(
                        "Error deleting repo, name: {}, err_msg: {}".format(
                            repo_name, str(e)
                        )
                    )

        return deleted_repo_list, added_repo_dict

    except K8sException:
        raise
    except Exception as e:
        # Do not raise errors synchronizing repos
        self.log.error("Error synchronizing repos: {}".format(e))
        raise Exception("Error synchronizing repos: {}".format(e))

1303 

1304 def _get_db_repos_dict(self, repo_ids: list): 

1305 db_repos_dict = {} 

1306 for repo_id in repo_ids: 

1307 db_repo = self.db.get_one("k8srepos", {"_id": repo_id}) 

1308 db_repos_dict[db_repo["name"]] = db_repo 

1309 return db_repos_dict 

1310 

1311 """ 

1312 #################################################################################### 

1313 ################################### TO BE IMPLEMENTED SUBCLASSES ################### 

1314 #################################################################################### 

1315 """ 

1316 

@abc.abstractmethod
def _init_paths_env(self, cluster_name: str, create_if_not_exist: bool = True):
    """
    Creates and returns base cluster and kube dirs and returns them.
    Also creates helm3 dirs according to the new directory specification; those
    paths are not returned but assigned to helm environment variables.

    :param cluster_name: cluster_name
    :param create_if_not_exist: create the directories when they are missing
    :return: Dictionary with config_paths and dictionary with helm environment variables
    """

1327 

@abc.abstractmethod
async def _cluster_init(self, cluster_id, namespace, paths, env):
    """
    Implements the helm version dependent cluster initialization.

    :param cluster_id: cluster identifier
    :param namespace: K8s namespace to initialize
    :param paths: config paths (see _init_paths_env)
    :param env: helm environment variables (see _init_paths_env)
    """

1333 

@abc.abstractmethod
async def _instances_list(self, cluster_id):
    """
    Implements the helm version dependent helm instances list.

    :param cluster_id: the cluster to query
    :return: list of deployed releases; callers rely on each entry providing
        at least 'name' and 'namespace' keys (see status_kdu)
    """

1339 

@abc.abstractmethod
async def _get_services(self, cluster_id, kdu_instance, namespace, kubeconfig):
    """
    Implements the helm version dependent method to obtain services from a
    helm instance.

    :return: list of service names belonging to the release (see get_services)
    """

1345 

@abc.abstractmethod
async def _status_kdu(
    self,
    cluster_id: str,
    kdu_instance: str,
    namespace: str = None,
    yaml_format: bool = False,
    show_error_log: bool = False,
) -> Union[str, dict]:
    """
    Implements the helm version dependent method to obtain the status of a
    helm instance.

    :param yaml_format: return a YAML string instead of a dictionary
    :param show_error_log: whether failures should be logged
    """

1358 

@abc.abstractmethod
def _get_install_command(
    self,
    kdu_model,
    kdu_instance,
    namespace,
    labels,
    params_str,
    version,
    atomic,
    timeout,
    kubeconfig,
) -> str:
    """
    Obtain command to be executed to install the indicated instance
    (the previous docstring incorrectly said "delete").
    """

1375 

@abc.abstractmethod
def _get_upgrade_scale_command(
    self,
    kdu_model,
    kdu_instance,
    namespace,
    count,
    labels,
    version,
    atomic,
    replicas,
    timeout,
    resource_name,
    kubeconfig,
) -> str:
    """Generates the command to scale a Helm Chart release

    Args:
        kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
        kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
        namespace (str): Namespace where this KDU instance is deployed
        count (int): Scale count
        labels (dict): Labels to apply to the release
        version (str): Constraint with specific version of the Chart to use
        atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
            The --wait flag will be set automatically if --atomic is used
        replicas (str): The key under resource_name key where the scale count is stored
        timeout (float): The time, in seconds, to wait
        resource_name (str): The KDU's resource to scale
        kubeconfig (str): Kubeconfig file path

    Returns:
        str: command to scale a Helm Chart release
    """

1409 

@abc.abstractmethod
def _get_upgrade_command(
    self,
    kdu_model,
    kdu_instance,
    namespace,
    params_str,
    labels,
    version,
    atomic,
    timeout,
    kubeconfig,
    targetHostK8sLabels,
    reset_values,
    reuse_values,
    reset_then_reuse_values,
    force,
) -> str:
    """Generates the command to upgrade a Helm Chart release

    Args:
        kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
        kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
        namespace (str): Namespace where this KDU instance is deployed
        params_str (str): Params used to upgrade the Helm Chart release
        labels (dict): Labels to apply to the release
        version (str): Constraint with specific version of the Chart to use
        atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
            The --wait flag will be set automatically if --atomic is used
        timeout (float): The time, in seconds, to wait
        kubeconfig (str): Kubeconfig file path
        targetHostK8sLabels: Node labels used to constrain where the release runs
        reset_values(bool): If set, helm resets values instead of reusing previous values.
        reuse_values(bool): If set, helm reuses previous values.
        reset_then_reuse_values(bool): If set, helm resets values, then apply the last release's values
        force (bool): If set, helm forces resource updates through a replacement strategy. This may recreate pods.
    Returns:
        str: command to upgrade a Helm Chart release
    """

1447 

@abc.abstractmethod
def _get_rollback_command(
    self, kdu_instance, namespace, revision, kubeconfig
) -> str:
    """
    Obtain command to be executed to rollback the indicated instance
    to the given revision.
    """

1455 

@abc.abstractmethod
def _get_uninstall_command(
    self, kdu_instance: str, namespace: str, kubeconfig: str
) -> str:
    """
    Obtain command to be executed to delete the indicated instance
    """

1463 

@abc.abstractmethod
def _get_inspect_command(
    self, show_command: str, kdu_model: str, repo_str: str, version: str
):
    """Generates the command to obtain the information about an Helm Chart package
    (´helm show ...´ command)

    Args:
        show_command: the second part of the command (`helm show <show_command>`)
        kdu_model: The name or path of an Helm Chart
        repo_str: Helm Chart repository command-line fragment (e.g. " --repo <url>")
        version: constraint with specific version of the Chart to use

    Returns:
        str: the generated Helm Chart command
    """

1480 

@abc.abstractmethod
def _get_get_command(
    self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
):
    """Obtain command to be executed to get information about the kdu instance
    (`helm get <get_command>`)."""

1486 

@abc.abstractmethod
async def _uninstall_sw(self, cluster_id: str, namespace: str):
    """
    Method call to uninstall cluster software for helm. This method is dependent
    of helm version
    For Helm v2 it will be called when Tiller must be uninstalled
    For Helm v3 it does nothing and does not need to be called
    """

1495 

@abc.abstractmethod
def _get_helm_chart_repos_ids(self, cluster_uuid) -> list:
    """
    Obtains the cluster repos identifiers.

    :return: list of repo ids used by synchronize_repos to look up DB records
    """

1501 

1502 """ 

1503 #################################################################################### 

1504 ################################### P R I V A T E ################################## 

1505 #################################################################################### 

1506 """ 

1507 

1508 @staticmethod 

1509 def _check_file_exists(filename: str, exception_if_not_exists: bool = False): 

1510 if os.path.exists(filename): 

1511 return True 

1512 else: 

1513 msg = "File {} does not exist".format(filename) 

1514 if exception_if_not_exists: 

1515 raise K8sException(msg) 

1516 

1517 @staticmethod 

1518 def _remove_multiple_spaces(strobj): 

1519 strobj = strobj.strip() 

1520 while " " in strobj: 

1521 strobj = strobj.replace(" ", " ") 

1522 return strobj 

1523 

1524 @staticmethod 

1525 def _output_to_lines(output: str) -> list: 

1526 output_lines = list() 

1527 lines = output.splitlines(keepends=False) 

1528 for line in lines: 

1529 line = line.strip() 

1530 if len(line) > 0: 

1531 output_lines.append(line) 

1532 return output_lines 

1533 

1534 @staticmethod 

1535 def _output_to_table(output: str) -> list: 

1536 output_table = list() 

1537 lines = output.splitlines(keepends=False) 

1538 for line in lines: 

1539 line = line.replace("\t", " ") 

1540 line_list = list() 

1541 output_table.append(line_list) 

1542 cells = line.split(sep=" ") 

1543 for cell in cells: 

1544 cell = cell.strip() 

1545 if len(cell) > 0: 

1546 line_list.append(cell) 

1547 return output_table 

1548 

1549 @staticmethod 

1550 def _parse_services(output: str) -> list: 

1551 lines = output.splitlines(keepends=False) 

1552 services = [] 

1553 for line in lines: 

1554 line = line.replace("\t", " ") 

1555 cells = line.split(sep=" ") 

1556 if len(cells) > 0 and cells[0].startswith("service/"): 

1557 elems = cells[0].split(sep="/") 

1558 if len(elems) > 1: 

1559 services.append(elems[1]) 

1560 return services 

1561 

1562 @staticmethod 

1563 def _get_deep(dictionary: dict, members: tuple): 

1564 target = dictionary 

1565 value = None 

1566 try: 

1567 for m in members: 

1568 value = target.get(m) 

1569 if not value: 

1570 return None 

1571 else: 

1572 target = value 

1573 except Exception: 

1574 pass 

1575 return value 

1576 

1577 # find key:value in several lines 

1578 @staticmethod 

1579 def _find_in_lines(p_lines: list, p_key: str) -> str: 

1580 for line in p_lines: 

1581 try: 

1582 if line.startswith(p_key + ":"): 

1583 parts = line.split(":") 

1584 the_value = parts[1].strip() 

1585 return the_value 

1586 except Exception: 

1587 # ignore it 

1588 pass 

1589 return None 

1590 

1591 @staticmethod 

1592 def _lower_keys_list(input_list: list): 

1593 """ 

1594 Transform the keys in a list of dictionaries to lower case and returns a new list 

1595 of dictionaries 

1596 """ 

1597 new_list = [] 

1598 if input_list: 

1599 for dictionary in input_list: 

1600 new_dict = dict((k.lower(), v) for k, v in dictionary.items()) 

1601 new_list.append(new_dict) 

1602 return new_list 

1603 

async def _local_async_exec(
    self,
    command: str,
    raise_exception_on_error: bool = False,
    show_error_log: bool = True,
    encode_utf8: bool = False,
    env: dict = None,
) -> tuple[str, int]:
    """Run a local command asynchronously and return (output, return_code).

    The command is split with shlex (no shell) and executed while holding
    self.cmd_lock, so at most one local command runs at a time.

    :param command: command line to execute
    :param raise_exception_on_error: raise K8sException on non-zero exit or
        unexpected failure instead of returning an error code
    :param show_error_log: log the output at debug level on failure
    :param encode_utf8: post-process the output through an utf-8 encode step
    :param env: extra environment variables overlaid on a copy of os.environ
    :return: (output, return_code); ("", -1) on unexpected exceptions when
        raise_exception_on_error is False
    """
    command = K8sHelmBaseConnector._remove_multiple_spaces(command)
    self.log.debug(
        "Executing async local command: {}, env: {}".format(command, env)
    )

    # split command
    command = shlex.split(command)

    environ = os.environ.copy()
    if env:
        environ.update(env)

    try:
        async with self.cmd_lock:
            process = await asyncio.create_subprocess_exec(
                *command,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                env=environ,
            )

            # wait for command terminate
            stdout, stderr = await process.communicate()

            return_code = process.returncode

            # NOTE: when both streams carry data, stderr replaces the
            # stdout text in the returned output
            output = ""
            if stdout:
                output = stdout.decode("utf-8").strip()
                # output = stdout.decode()
            if stderr:
                output = stderr.decode("utf-8").strip()
                # output = stderr.decode()

            if return_code != 0 and show_error_log:
                self.log.debug(
                    "Return code (FAIL): {}\nOutput:\n{}".format(return_code, output)
                )
            else:
                self.log.debug("Return code: {}".format(return_code))

            if raise_exception_on_error and return_code != 0:
                raise K8sException(output)

            if encode_utf8:
                output = output.encode("utf-8").strip()
                output = str(output).replace("\\n", "\n")

            return output, return_code

    except asyncio.CancelledError:
        # first, kill the process if it is still running
        # NOTE(review): 'process' is unbound if cancellation lands before
        # create_subprocess_exec completes (e.g. while awaiting cmd_lock)
        # -- confirm whether that path can occur
        if process.returncode is None:
            process.kill()
        raise
    except K8sException:
        raise
    except Exception as e:
        msg = "Exception executing command: {} -> {}".format(command, e)
        self.log.error(msg)
        if raise_exception_on_error:
            raise K8sException(e) from e
        else:
            return "", -1

1676 

async def _local_async_exec_pipe(
    self,
    command1: str,
    command2: str,
    raise_exception_on_error: bool = True,
    show_error_log: bool = True,
    encode_utf8: bool = False,
    env: dict = None,
):
    """Run 'command1 | command2' locally and return (output, return_code).

    Both commands are split with shlex (no shell); the pipe is built
    manually with os.pipe() and both subprocesses run while holding
    self.cmd_lock. Output and return code come from command2 only.

    :param command1: producer command line
    :param command2: consumer command line (its stdout is captured)
    :param raise_exception_on_error: raise K8sException on non-zero exit or
        unexpected failure instead of returning an error code
    :param show_error_log: log the output at debug level on failure
    :param encode_utf8: post-process the output through an utf-8 encode step
    :param env: extra environment variables overlaid on a copy of os.environ
    :return: (output, return_code); ("", -1) on unexpected exceptions when
        raise_exception_on_error is False
    """
    command1 = K8sHelmBaseConnector._remove_multiple_spaces(command1)
    command2 = K8sHelmBaseConnector._remove_multiple_spaces(command2)
    command = "{} | {}".format(command1, command2)
    self.log.debug(
        "Executing async local command: {}, env: {}".format(command, env)
    )

    # split command
    command1 = shlex.split(command1)
    command2 = shlex.split(command2)

    environ = os.environ.copy()
    if env:
        environ.update(env)

    process_1 = None
    try:
        async with self.cmd_lock:
            # parent closes its copies of the pipe ends as soon as each
            # child has inherited them, so EOF propagates correctly
            read, write = os.pipe()
            process_1 = await asyncio.create_subprocess_exec(
                *command1, stdout=write, env=environ
            )
            os.close(write)
            process_2 = await asyncio.create_subprocess_exec(
                *command2, stdin=read, stdout=asyncio.subprocess.PIPE, env=environ
            )
            os.close(read)
            stdout, stderr = await process_2.communicate()

            return_code = process_2.returncode

            # NOTE: when both streams carry data, stderr replaces the
            # stdout text in the returned output
            output = ""
            if stdout:
                output = stdout.decode("utf-8").strip()
                # output = stdout.decode()
            if stderr:
                output = stderr.decode("utf-8").strip()
                # output = stderr.decode()

            if return_code != 0 and show_error_log:
                self.log.debug(
                    "Return code (FAIL): {}\nOutput:\n{}".format(return_code, output)
                )
            else:
                self.log.debug("Return code: {}".format(return_code))

            if raise_exception_on_error and return_code != 0:
                raise K8sException(output)

            if encode_utf8:
                output = output.encode("utf-8").strip()
                output = str(output).replace("\\n", "\n")

            return output, return_code
    except asyncio.CancelledError:
        # first, kill the processes if they are still running
        # NOTE(review): 'process_2' is unbound (and process_1 may be None)
        # if cancellation lands before both subprocesses are created --
        # confirm whether that path can occur
        for process in (process_1, process_2):
            if process.returncode is None:
                process.kill()
        raise
    except K8sException:
        raise
    except Exception as e:
        msg = "Exception executing command: {} -> {}".format(command, e)
        self.log.error(msg)
        if raise_exception_on_error:
            raise K8sException(e) from e
        else:
            return "", -1

1755 

async def _get_service(self, cluster_id, service_name, namespace):
    """
    Obtains the data of the specified service in the k8cluster.

    :param cluster_id: id of a K8s cluster known by OSM
    :param service_name: name of the K8s service in the specified namespace
    :param namespace: K8s namespace used by the KDU instance
    :return: If successful, it will return a service with the following data:
    - `name` of the service
    - `type` type of service in the k8 cluster
    - `ports` List of ports offered by the service, for each port includes at least
    name, port, protocol
    - `cluster_ip` Internal ip to be used inside k8s cluster
    - `external_ip` List of external ips (in case they are available)
    """
    # init config, env
    paths, env = self._init_paths_env(
        cluster_name=cluster_id, create_if_not_exist=True
    )

    # query the service definition as YAML through kubectl
    command = "{} --kubeconfig={} --namespace={} get service {} -o=yaml".format(
        self.kubectl_command,
        paths["kube_config"],
        quote(namespace),
        quote(service_name),
    )
    output, _rc = await self._local_async_exec(
        command=command, raise_exception_on_error=True, env=env
    )

    data = yaml.load(output, Loader=yaml.SafeLoader)

    service = {
        "name": service_name,
        "type": self._get_deep(data, ("spec", "type")),
        "ports": self._get_deep(data, ("spec", "ports")),
        "cluster_ip": self._get_deep(data, ("spec", "clusterIP")),
    }
    if service["type"] == "LoadBalancer":
        # expose the ingress addresses assigned by the load balancer
        ingress = self._get_deep(data, ("status", "loadBalancer", "ingress"))
        service["external_ip"] = [entry["ip"] for entry in ingress]

    return service

1802 

async def _exec_get_command(
    self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
):
    """Run `helm get <get_command>` for the instance and return its output."""
    command = self._get_get_command(get_command, kdu_instance, namespace, kubeconfig)
    output, _ = await self._local_async_exec(command=command)
    return output

1815 

async def _exec_inspect_command(
    self, inspect_command: str, kdu_model: str, repo_url: str = None
):
    """Obtains information about an Helm Chart package (´helm show´ command)

    Args:
        inspect_command: the Helm sub command (`helm show <inspect_command> ...`)
        kdu_model: The name or path of an Helm Chart
        repo_url: Helm Chart repository url

    Returns:
        str: the requested info about the Helm Chart package
    """
    repo_str = " --repo {}".format(quote(repo_url)) if repo_url else ""

    # Obtain the Chart's name and store it in the var kdu_model
    kdu_model, _ = self._split_repo(kdu_model=kdu_model)

    # separate an optional version suffix from the chart reference
    kdu_model, version = self._split_version(kdu_model)
    version_str = "--version {}".format(quote(version)) if version else ""

    full_command = self._get_inspect_command(
        show_command=inspect_command,
        kdu_model=quote(kdu_model),
        repo_str=repo_str,
        version=version_str,
    )

    output, _ = await self._local_async_exec(command=full_command)
    return output

1853 

async def _get_replica_count_url(
    self,
    kdu_model: str,
    repo_url: str = None,
    resource_name: str = None,
) -> tuple[int, str]:
    """Get the replica count value in the Helm Chart Values.

    Args:
        kdu_model: The name or path of an Helm Chart
        repo_url: Helm Chart repository url
        resource_name: Resource name

    Returns:
        A tuple with:
        - The number of replicas of the specific instance; if not found, returns None; and
        - The string corresponding to the replica count key in the Helm values

    Raises:
        K8sException: when the chart values cannot be obtained, the resource
            is missing, no replica key exists, or both 'replicaCount' and
            'replicas' are defined at the same time.

    Fix note: the error messages below previously lacked separating spaces
    ("resource{}", "valuesin model") and read "are exists at the same time".
    """

    kdu_values = yaml.load(
        await self.values_kdu(kdu_model=kdu_model, repo_url=repo_url),
        Loader=yaml.SafeLoader,
    )

    self.log.debug(f"Obtained the Helm package values for the KDU: {kdu_values}")

    if not kdu_values:
        raise K8sException(
            "kdu_values not found for kdu_model {}".format(kdu_model)
        )

    if resource_name:
        # narrow the values to the sub-dictionary of the requested resource
        kdu_values = kdu_values.get(resource_name, None)

        if not kdu_values:
            msg = "resource {} not found in the values in model {}".format(
                resource_name, kdu_model
            )
            self.log.error(msg)
            raise K8sException(msg)

    duplicate_check = False

    replica_str = ""
    replicas = None

    if kdu_values.get("replicaCount") is not None:
        replicas = kdu_values["replicaCount"]
        replica_str = "replicaCount"
    elif kdu_values.get("replicas") is not None:
        duplicate_check = True
        replicas = kdu_values["replicas"]
        replica_str = "replicas"
    else:
        if resource_name:
            msg = (
                "replicaCount or replicas not found in the resource "
                "{} values in model {}. Cannot be scaled".format(
                    resource_name, kdu_model
                )
            )
        else:
            msg = (
                "replicaCount or replicas not found in the values "
                "in model {}. Cannot be scaled".format(kdu_model)
            )
        self.log.error(msg)
        raise K8sException(msg)

    # Control if replicas and replicaCount exist at the same time
    msg = "replicaCount and replicas exist at the same time"
    if duplicate_check:
        if "replicaCount" in kdu_values:
            self.log.error(msg)
            raise K8sException(msg)
    else:
        if "replicas" in kdu_values:
            self.log.error(msg)
            raise K8sException(msg)

    return replicas, replica_str

1935 

async def _get_replica_count_instance(
    self,
    kdu_instance: str,
    namespace: str,
    kubeconfig: str,
    resource_name: str = None,
) -> int:
    """Get the replica count value in the instance.

    Args:
        kdu_instance: The name of the KDU instance
        namespace: KDU instance namespace
        kubeconfig:
        resource_name: Resource name

    Returns:
        The number of replicas of the specific instance; if not found, returns None
    """
    raw_values = await self.get_values_kdu(kdu_instance, namespace, kubeconfig)
    kdu_values = yaml.load(raw_values, Loader=yaml.SafeLoader)

    self.log.debug(f"Obtained the Helm values for the KDU instance: {kdu_values}")

    if not kdu_values:
        return None

    # when a resource name is given and its sub-dict is non-empty, look
    # there; otherwise fall back to the top-level values
    source = kdu_values.get(resource_name, None) if resource_name else None

    for key in ("replicaCount", "replicas"):
        count = source.get(key) if source else kdu_values.get(key)
        if count is not None:
            return count
    return None

1979 

1980 async def _labels_dict(self, db_dict, kdu_instance): 

1981 # get the network service registry 

1982 ns_id = db_dict["filter"]["_id"] 

1983 try: 

1984 db_nsr = self.db.get_one("nsrs", {"_id": ns_id}) 

1985 except Exception as e: 

1986 print("nsr {} not found: {}".format(ns_id, e)) 

1987 nsd_id = db_nsr["nsd"]["_id"] 

1988 

1989 # get the kdu registry 

1990 for index, kdu in enumerate(db_nsr["_admin"]["deployed"]["K8s"]): 

1991 if kdu["kdu-instance"] == kdu_instance: 

1992 db_kdur = kdu 

1993 break 

1994 else: 

1995 # No kdur found, could be the case of an EE chart 

1996 return {} 

1997 

1998 kdu_name = db_kdur["kdu-name"] 

1999 member_vnf_index = db_kdur["member-vnf-index"] 

2000 # get the vnf registry 

2001 try: 

2002 db_vnfr = self.db.get_one( 

2003 "vnfrs", 

2004 {"nsr-id-ref": ns_id, "member-vnf-index-ref": member_vnf_index}, 

2005 ) 

2006 except Exception as e: 

2007 print("vnfr {} not found: {}".format(member_vnf_index, e)) 

2008 

2009 vnf_id = db_vnfr["_id"] 

2010 vnfd_id = db_vnfr["vnfd-id"] 

2011 

2012 return { 

2013 "managed-by": "osm.etsi.org", 

2014 "osm.etsi.org/ns-id": ns_id, 

2015 "osm.etsi.org/nsd-id": nsd_id, 

2016 "osm.etsi.org/vnf-id": vnf_id, 

2017 "osm.etsi.org/vnfd-id": vnfd_id, 

2018 "osm.etsi.org/kdu-id": kdu_instance, 

2019 "osm.etsi.org/kdu-name": kdu_name, 

2020 } 

2021 

2022 async def _contains_labels(self, kdu_instance, namespace, kube_config, env): 

2023 command = "env KUBECONFIG={} {} get manifest {} --namespace={}".format( 

2024 kube_config, 

2025 self._helm_command, 

2026 quote(kdu_instance), 

2027 quote(namespace), 

2028 ) 

2029 output, rc = await self._local_async_exec( 

2030 command=command, raise_exception_on_error=False, env=env 

2031 ) 

2032 manifests = yaml.safe_load_all(output) 

2033 for manifest in manifests: 

2034 # Check if the manifest has metadata and labels 

2035 if ( 

2036 manifest is not None 

2037 and "metadata" in manifest 

2038 and "labels" in manifest["metadata"] 

2039 ): 

2040 labels = { 

2041 "managed-by", 

2042 "osm.etsi.org/kdu-id", 

2043 "osm.etsi.org/kdu-name", 

2044 "osm.etsi.org/ns-id", 

2045 "osm.etsi.org/nsd-id", 

2046 "osm.etsi.org/vnf-id", 

2047 "osm.etsi.org/vnfd-id", 

2048 } 

2049 if labels.issubset(manifest["metadata"]["labels"].keys()): 

2050 return True 

2051 return False 

2052 

2053 async def _store_status( 

2054 self, 

2055 cluster_id: str, 

2056 operation: str, 

2057 kdu_instance: str, 

2058 namespace: str = None, 

2059 db_dict: dict = None, 

2060 ) -> None: 

2061 """ 

2062 Obtains the status of the KDU instance based on Helm Charts, and stores it in the database. 

2063 

2064 :param cluster_id (str): the cluster where the KDU instance is deployed 

2065 :param operation (str): The operation related to the status to be updated (for instance, "install" or "upgrade") 

2066 :param kdu_instance (str): The KDU instance in relation to which the status is obtained 

2067 :param namespace (str): The Kubernetes namespace where the KDU instance was deployed. Defaults to None 

2068 :param db_dict (dict): A dictionary with the database necessary information. It shall contain the 

2069 values for the keys: 

2070 - "collection": The Mongo DB collection to write to 

2071 - "filter": The query filter to use in the update process 

2072 - "path": The dot separated keys which targets the object to be updated 

2073 Defaults to None. 

2074 """ 

2075 

2076 try: 

2077 detailed_status = await self._status_kdu( 

2078 cluster_id=cluster_id, 

2079 kdu_instance=kdu_instance, 

2080 yaml_format=False, 

2081 namespace=namespace, 

2082 ) 

2083 

2084 status = detailed_status.get("info").get("description") 

2085 self.log.debug(f"Status for KDU {kdu_instance} obtained: {status}.") 

2086 

2087 # write status to db 

2088 result = await self.write_app_status_to_db( 

2089 db_dict=db_dict, 

2090 status=str(status), 

2091 detailed_status=str(detailed_status), 

2092 operation=operation, 

2093 ) 

2094 

2095 if not result: 

2096 self.log.info("Error writing in database. Task exiting...") 

2097 

2098 except asyncio.CancelledError as e: 

2099 self.log.warning( 

2100 f"Exception in method {self._store_status.__name__} (task cancelled): {e}" 

2101 ) 

2102 except Exception as e: 

2103 self.log.warning(f"Exception in method {self._store_status.__name__}: {e}") 

2104 

2105 # params for use in -f file 

2106 # returns values file option and filename (in order to delete it at the end) 

2107 def _params_to_file_option(self, cluster_id: str, params: dict) -> tuple[str, str]: 

2108 if params and len(params) > 0: 

2109 self._init_paths_env(cluster_name=cluster_id, create_if_not_exist=True) 

2110 

2111 def get_random_number(): 

2112 r = random.SystemRandom().randint(1, 99999999) 

2113 s = str(r) 

2114 while len(s) < 10: 

2115 s = "0" + s 

2116 return s 

2117 

2118 params2 = dict() 

2119 for key in params: 

2120 value = params.get(key) 

2121 if "!!yaml" in str(value): 

2122 value = yaml.safe_load(value[7:]) 

2123 params2[key] = value 

2124 

2125 values_file = get_random_number() + ".yaml" 

2126 with open(values_file, "w") as stream: 

2127 yaml.dump(params2, stream, indent=4, default_flow_style=False) 

2128 

2129 return "-f {}".format(values_file), values_file 

2130 

2131 return "", None 

2132 

2133 # params for use in --set option 

2134 @staticmethod 

2135 def _params_to_set_option(params: dict) -> str: 

2136 pairs = [ 

2137 f"{quote(str(key))}={quote(str(value))}" 

2138 for key, value in params.items() 

2139 if value is not None 

2140 ] 

2141 if not pairs: 

2142 return "" 

2143 return "--set " + ",".join(pairs) 

2144 

2145 @staticmethod 

2146 def generate_kdu_instance_name(**kwargs): 

2147 chart_name = kwargs["kdu_model"] 

2148 # check embeded chart (file or dir) 

2149 if chart_name.startswith("/"): 

2150 # extract file or directory name 

2151 chart_name = chart_name[chart_name.rfind("/") + 1 :] 

2152 # check URL 

2153 elif "://" in chart_name: 

2154 # extract last portion of URL 

2155 chart_name = chart_name[chart_name.rfind("/") + 1 :] 

2156 

2157 name = "" 

2158 for c in chart_name: 

2159 if c.isalpha() or c.isnumeric(): 

2160 name += c 

2161 else: 

2162 name += "-" 

2163 if len(name) > 35: 

2164 name = name[0:35] 

2165 

2166 # if does not start with alpha character, prefix 'a' 

2167 if not name[0].isalpha(): 

2168 name = "a" + name 

2169 

2170 name += "-" 

2171 

2172 def get_random_number(): 

2173 r = random.SystemRandom().randint(1, 99999999) 

2174 s = str(r) 

2175 s = s.rjust(10, "0") 

2176 return s 

2177 

2178 name = name + get_random_number() 

2179 return name.lower() 

2180 

2181 def _split_version(self, kdu_model: str) -> tuple[str, str]: 

2182 version = None 

2183 if ( 

2184 not ( 

2185 self._is_helm_chart_a_file(kdu_model) 

2186 or self._is_helm_chart_a_url(kdu_model) 

2187 ) 

2188 and ":" in kdu_model 

2189 ): 

2190 parts = kdu_model.split(sep=":") 

2191 if len(parts) == 2: 

2192 version = str(parts[1]) 

2193 kdu_model = parts[0] 

2194 return kdu_model, version 

2195 

2196 def _split_repo(self, kdu_model: str) -> tuple[str, str]: 

2197 """Obtain the Helm Chart's repository and Chart's names from the KDU model 

2198 

2199 Args: 

2200 kdu_model (str): Associated KDU model 

2201 

2202 Returns: 

2203 (str, str): Tuple with the Chart name in index 0, and the repo name 

2204 in index 2; if there was a problem finding them, return None 

2205 for both 

2206 """ 

2207 

2208 chart_name = None 

2209 repo_name = None 

2210 

2211 idx = kdu_model.find("/") 

2212 if not self._is_helm_chart_a_url(kdu_model) and idx >= 0: 

2213 chart_name = kdu_model[idx + 1 :] 

2214 repo_name = kdu_model[:idx] 

2215 

2216 return chart_name, repo_name 

2217 

2218 async def _find_repo(self, kdu_model: str, cluster_uuid: str) -> str: 

2219 """Obtain the Helm repository for an Helm Chart 

2220 

2221 Args: 

2222 kdu_model (str): the KDU model associated with the Helm Chart instantiation 

2223 cluster_uuid (str): The cluster UUID associated with the Helm Chart instantiation 

2224 

2225 Returns: 

2226 str: the repository URL; if Helm Chart is a local one, the function returns None 

2227 """ 

2228 

2229 _, repo_name = self._split_repo(kdu_model=kdu_model) 

2230 

2231 repo_url = None 

2232 if repo_name: 

2233 # Find repository link 

2234 local_repo_list = await self.repo_list(cluster_uuid) 

2235 for repo in local_repo_list: 

2236 if repo["name"] == repo_name: 

2237 repo_url = repo["url"] 

2238 break # it is not necessary to continue the loop if the repo link was found... 

2239 

2240 return repo_url 

2241 

2242 def _repo_to_oci_url(self, repo): 

2243 db_repo = self.db.get_one("k8srepos", {"name": repo}, fail_on_empty=False) 

2244 if db_repo and "oci" in db_repo: 

2245 return db_repo.get("url") 

2246 

2247 async def _prepare_helm_chart(self, kdu_model, cluster_id): 

2248 # e.g.: "stable/openldap", "1.0" 

2249 kdu_model, version = self._split_version(kdu_model) 

2250 # e.g.: "openldap, stable" 

2251 chart_name, repo = self._split_repo(kdu_model) 

2252 if repo and chart_name: # repo/chart case 

2253 oci_url = self._repo_to_oci_url(repo) 

2254 if oci_url: # oci does not require helm repo update 

2255 kdu_model = f"{oci_url.rstrip('/')}/{chart_name.lstrip('/')}" # urljoin doesn't work for oci schema 

2256 else: 

2257 await self.repo_update(cluster_id, repo) 

2258 return kdu_model, version 

2259 

2260 async def create_certificate( 

2261 self, cluster_uuid, namespace, dns_prefix, name, secret_name, usage 

2262 ): 

2263 paths, env = self._init_paths_env( 

2264 cluster_name=cluster_uuid, create_if_not_exist=True 

2265 ) 

2266 kubectl = Kubectl(config_file=paths["kube_config"]) 

2267 await kubectl.create_certificate( 

2268 namespace=namespace, 

2269 name=name, 

2270 dns_prefix=dns_prefix, 

2271 secret_name=secret_name, 

2272 usages=[usage], 

2273 issuer_name="ca-issuer", 

2274 ) 

2275 

2276 async def delete_certificate(self, cluster_uuid, namespace, certificate_name): 

2277 paths, env = self._init_paths_env( 

2278 cluster_name=cluster_uuid, create_if_not_exist=True 

2279 ) 

2280 kubectl = Kubectl(config_file=paths["kube_config"]) 

2281 await kubectl.delete_certificate(namespace, certificate_name) 

2282 

2283 async def create_namespace( 

2284 self, 

2285 namespace, 

2286 cluster_uuid, 

2287 labels, 

2288 ): 

2289 """ 

2290 Create a namespace in a specific cluster 

2291 

2292 :param namespace: Namespace to be created 

2293 :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig 

2294 :param labels: Dictionary with labels for the new namespace 

2295 :returns: None 

2296 """ 

2297 paths, env = self._init_paths_env( 

2298 cluster_name=cluster_uuid, create_if_not_exist=True 

2299 ) 

2300 kubectl = Kubectl(config_file=paths["kube_config"]) 

2301 await kubectl.create_namespace( 

2302 name=namespace, 

2303 labels=labels, 

2304 ) 

2305 

2306 async def delete_namespace( 

2307 self, 

2308 namespace, 

2309 cluster_uuid, 

2310 ): 

2311 """ 

2312 Delete a namespace in a specific cluster 

2313 

2314 :param namespace: namespace to be deleted 

2315 :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig 

2316 :returns: None 

2317 """ 

2318 paths, env = self._init_paths_env( 

2319 cluster_name=cluster_uuid, create_if_not_exist=True 

2320 ) 

2321 kubectl = Kubectl(config_file=paths["kube_config"]) 

2322 await kubectl.delete_namespace( 

2323 name=namespace, 

2324 ) 

2325 

2326 async def copy_secret_data( 

2327 self, 

2328 src_secret: str, 

2329 dst_secret: str, 

2330 cluster_uuid: str, 

2331 data_key: str, 

2332 src_namespace: str = "osm", 

2333 dst_namespace: str = "osm", 

2334 ): 

2335 """ 

2336 Copy a single key and value from an existing secret to a new one 

2337 

2338 :param src_secret: name of the existing secret 

2339 :param dst_secret: name of the new secret 

2340 :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig 

2341 :param data_key: key of the existing secret to be copied 

2342 :param src_namespace: Namespace of the existing secret 

2343 :param dst_namespace: Namespace of the new secret 

2344 :returns: None 

2345 """ 

2346 paths, env = self._init_paths_env( 

2347 cluster_name=cluster_uuid, create_if_not_exist=True 

2348 ) 

2349 kubectl = Kubectl(config_file=paths["kube_config"]) 

2350 secret_data = await kubectl.get_secret_content( 

2351 name=src_secret, 

2352 namespace=src_namespace, 

2353 ) 

2354 # Only the corresponding data_key value needs to be copy 

2355 data = {data_key: secret_data.get(data_key)} 

2356 await kubectl.create_secret( 

2357 name=dst_secret, 

2358 data=data, 

2359 namespace=dst_namespace, 

2360 secret_type="Opaque", 

2361 ) 

2362 

2363 async def setup_default_rbac( 

2364 self, 

2365 name, 

2366 namespace, 

2367 cluster_uuid, 

2368 api_groups, 

2369 resources, 

2370 verbs, 

2371 service_account, 

2372 ): 

2373 """ 

2374 Create a basic RBAC for a new namespace. 

2375 

2376 :param name: name of both Role and Role Binding 

2377 :param namespace: K8s namespace 

2378 :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig 

2379 :param api_groups: Api groups to be allowed in Policy Rule 

2380 :param resources: Resources to be allowed in Policy Rule 

2381 :param verbs: Verbs to be allowed in Policy Rule 

2382 :param service_account: Service Account name used to bind the Role 

2383 :returns: None 

2384 """ 

2385 paths, env = self._init_paths_env( 

2386 cluster_name=cluster_uuid, create_if_not_exist=True 

2387 ) 

2388 kubectl = Kubectl(config_file=paths["kube_config"]) 

2389 await kubectl.create_role( 

2390 name=name, 

2391 labels={}, 

2392 namespace=namespace, 

2393 api_groups=api_groups, 

2394 resources=resources, 

2395 verbs=verbs, 

2396 ) 

2397 await kubectl.create_role_binding( 

2398 name=name, 

2399 labels={}, 

2400 namespace=namespace, 

2401 role_name=name, 

2402 sa_name=service_account, 

2403 )