Coverage for n2vc/k8s_helm3_conn.py: 73%

230 statements  

« prev     ^ index     » next       coverage.py v7.6.12, created at 2025-05-07 06:04 +0000

1## 

2# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U. 

3# This file is part of OSM 

4# All Rights Reserved. 

5# 

6# Licensed under the Apache License, Version 2.0 (the "License"); 

7# you may not use this file except in compliance with the License. 

8# You may obtain a copy of the License at 

9# 

10# http://www.apache.org/licenses/LICENSE-2.0 

11# 

12# Unless required by applicable law or agreed to in writing, software 

13# distributed under the License is distributed on an "AS IS" BASIS, 

14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 

15# implied. 

16# See the License for the specific language governing permissions and 

17# limitations under the License. 

18# 

19# For those usages not covered by the Apache License, Version 2.0 please 

20# contact with: nfvlabs@tid.es 

21## 

22from typing import Union 

23from shlex import quote 

24import os 

25import yaml 

26 

27from n2vc.k8s_helm_base_conn import K8sHelmBaseConnector 

28from n2vc.exceptions import K8sException 

29 

30 

31class K8sHelm3Connector(K8sHelmBaseConnector): 

32 

33 """ 

34 #################################################################################### 

35 ################################### P U B L I C #################################### 

36 #################################################################################### 

37 """ 

38 

39 def __init__( 

40 self, 

41 fs: object, 

42 db: object, 

43 kubectl_command: str = "/usr/bin/kubectl", 

44 helm_command: str = "/usr/bin/helm3", 

45 log: object = None, 

46 on_update_db=None, 

47 ): 

48 """ 

49 Initializes helm connector for helm v3 

50 

51 :param fs: file system for kubernetes and helm configuration 

52 :param db: database object to write current operation status 

53 :param kubectl_command: path to kubectl executable 

54 :param helm_command: path to helm executable 

55 :param log: logger 

56 :param on_update_db: callback called when k8s connector updates database 

57 """ 

58 

59 # parent class 

60 K8sHelmBaseConnector.__init__( 

61 self, 

62 db=db, 

63 log=log, 

64 fs=fs, 

65 kubectl_command=kubectl_command, 

66 helm_command=helm_command, 

67 on_update_db=on_update_db, 

68 ) 

69 

70 self.log.info("K8S Helm3 connector initialized") 

71 

72 async def install( 

73 self, 

74 cluster_uuid: str, 

75 kdu_model: str, 

76 kdu_instance: str, 

77 atomic: bool = True, 

78 timeout: float = 300, 

79 params: dict = None, 

80 db_dict: dict = None, 

81 kdu_name: str = None, 

82 namespace: str = None, 

83 **kwargs, 

84 ): 

85 """Install a helm chart 

86 

87 :param cluster_uuid str: The UUID of the cluster to install to 

88 :param kdu_model str: chart/reference (string), which can be either 

89 of these options: 

90 - a name of chart available via the repos known by OSM 

91 (e.g. stable/openldap, stable/openldap:1.2.4) 

92 - a path to a packaged chart (e.g. mychart.tgz) 

93 - a path to an unpacked chart directory or a URL (e.g. mychart) 

94 :param kdu_instance: Kdu instance name 

95 :param atomic bool: If set, waits until the model is active and resets 

96 the cluster on failure. 

97 :param timeout int: The time, in seconds, to wait for the install 

98 to finish 

99 :param params dict: Key-value pairs of instantiation parameters 

100 :param kdu_name: Name of the KDU instance to be installed 

101 :param namespace: K8s namespace to use for the KDU instance 

102 

103 :param kwargs: Additional parameters (None yet) 

104 

105 :return: True if successful 

106 """ 

107 

108 self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid)) 

109 

110 labels_dict = None 

111 if db_dict: 

112 labels_dict = await self._labels_dict(db_dict, kdu_instance) 

113 

114 # sync local dir 

115 self.fs.sync(from_path=cluster_uuid) 

116 

117 # init env, paths 

118 paths, env = self._init_paths_env( 

119 cluster_name=cluster_uuid, create_if_not_exist=True 

120 ) 

121 

122 # for helm3 if namespace does not exist must create it 

123 if namespace and namespace != "kube-system": 

124 if not await self._namespace_exists(cluster_uuid, namespace): 

125 try: 

126 # TODO: refactor to use kubernetes API client 

127 await self._create_namespace(cluster_uuid, namespace) 

128 except Exception as e: 

129 if not await self._namespace_exists(cluster_uuid, namespace): 

130 err_msg = ( 

131 "namespace {} does not exist in cluster_id {} " 

132 "error message: ".format(namespace, e) 

133 ) 

134 self.log.error(err_msg) 

135 raise K8sException(err_msg) 

136 

137 await self._install_impl( 

138 cluster_uuid, 

139 kdu_model, 

140 paths, 

141 env, 

142 kdu_instance, 

143 atomic=atomic, 

144 timeout=timeout, 

145 params=params, 

146 db_dict=db_dict, 

147 labels=labels_dict, 

148 kdu_name=kdu_name, 

149 namespace=namespace, 

150 ) 

151 

152 # sync fs 

153 self.fs.reverse_sync(from_path=cluster_uuid) 

154 

155 self.log.debug("Returning kdu_instance {}".format(kdu_instance)) 

156 return True 

157 

    async def migrate(self, nsr_id, target):
        """Migrate a KDU to a new target host by re-labelling its node selector.

        Looks up the NS record, finds the deployed KDU whose helm release name
        matches ``target["vdu"]["vduId"]``, and triggers a forced, atomic helm
        upgrade that applies ``target["targetHostK8sLabels"]`` (node selector
        labels) via the post-renderer mechanism.

        :param nsr_id: id of the NS record ("nsrs" collection) that owns the KDU
        :param target: dict with at least "vdu.vduId" (helm release name) and
            "targetHostK8sLabels" — NOTE(review): schema inferred from usage
            here; confirm against the caller
        :return: True when the KDU was found and upgraded; implicitly None
            when no matching KDU exists (only a debug message is emitted)
        :raises K8sException: when the NS has no deployed KDUs at all
        """
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

        # check if it has k8s deployed kdus
        if len(db_nsr["_admin"]["deployed"]["K8s"]) < 1:
            err_msg = "INFO: No deployed KDUs"
            self.log.error(err_msg)
            raise K8sException(err_msg)

        # the release name to migrate is carried in the vdu id of the target
        kdu_id = target["vdu"]["vduId"]
        for index, kdu in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
            if kdu["kdu-instance"] == kdu_id:
                namespace = kdu["namespace"]
                cluster_uuid = kdu["k8scluster-uuid"]
                kdu_model = kdu["kdu-model"]
                # db reference used by upgrade() to report progress on this KDU
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                # force=True replaces resources (may recreate pods), which is
                # what actually moves the workload to the new host labels
                await self.upgrade(
                    cluster_uuid,
                    kdu_instance=kdu_id,
                    kdu_model=kdu_model,
                    namespace=namespace,
                    targetHostK8sLabels=target["targetHostK8sLabels"],
                    atomic=True,
                    db_dict=db_dict,
                    force=True,
                )

                return True

        # NOTE(review): an error condition logged at debug level — consider
        # raising or logging at error level instead
        self.log.debug("ERROR: Unable to retrieve kdu from the database")

193 

194 async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str: 

195 self.log.debug( 

196 "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url) 

197 ) 

198 

199 return await self._exec_inspect_command( 

200 inspect_command="all", kdu_model=kdu_model, repo_url=repo_url 

201 ) 

202 

203 """ 

204 #################################################################################### 

205 ################################### P R I V A T E ################################## 

206 #################################################################################### 

207 """ 

208 

209 def _init_paths_env(self, cluster_name: str, create_if_not_exist: bool = True): 

210 """ 

211 Creates and returns base cluster and kube dirs and returns them. 

212 Also created helm3 dirs according to new directory specification, paths are 

213 returned and also environment variables that must be provided to execute commands 

214 

215 Helm 3 directory specification uses XDG categories for variable support: 

216 - Cache: $XDG_CACHE_HOME, for example, ${HOME}/.cache/helm/ 

217 - Configuration: $XDG_CONFIG_HOME, for example, ${HOME}/.config/helm/ 

218 - Data: $XDG_DATA_HOME, for example ${HOME}/.local/share/helm 

219 

220 The variables assigned for this paths are: 

221 (In the documentation the variables names are $HELM_PATH_CACHE, $HELM_PATH_CONFIG, 

222 $HELM_PATH_DATA but looking and helm env the variable names are different) 

223 - Cache: $HELM_CACHE_HOME 

224 - Config: $HELM_CONFIG_HOME 

225 - Data: $HELM_DATA_HOME 

226 - helm kubeconfig: $KUBECONFIG 

227 

228 :param cluster_name: cluster_name 

229 :return: Dictionary with config_paths and dictionary with helm environment variables 

230 """ 

231 

232 base = self.fs.path 

233 if base.endswith("/") or base.endswith("\\"): 

234 base = base[:-1] 

235 

236 # base dir for cluster 

237 cluster_dir = base + "/" + cluster_name 

238 

239 # kube dir 

240 kube_dir = cluster_dir + "/" + ".kube" 

241 if create_if_not_exist and not os.path.exists(kube_dir): 

242 self.log.debug("Creating dir {}".format(kube_dir)) 

243 os.makedirs(kube_dir) 

244 

245 helm_path_cache = cluster_dir + "/.cache/helm" 

246 if create_if_not_exist and not os.path.exists(helm_path_cache): 

247 self.log.debug("Creating dir {}".format(helm_path_cache)) 

248 os.makedirs(helm_path_cache) 

249 

250 helm_path_config = cluster_dir + "/.config/helm" 

251 if create_if_not_exist and not os.path.exists(helm_path_config): 

252 self.log.debug("Creating dir {}".format(helm_path_config)) 

253 os.makedirs(helm_path_config) 

254 

255 helm_path_data = cluster_dir + "/.local/share/helm" 

256 if create_if_not_exist and not os.path.exists(helm_path_data): 

257 self.log.debug("Creating dir {}".format(helm_path_data)) 

258 os.makedirs(helm_path_data) 

259 

260 config_filename = kube_dir + "/config" 

261 

262 # 2 - Prepare dictionary with paths 

263 paths = { 

264 "kube_dir": kube_dir, 

265 "kube_config": config_filename, 

266 "cluster_dir": cluster_dir, 

267 } 

268 

269 # 3 - Prepare environment variables 

270 env = { 

271 "HELM_CACHE_HOME": helm_path_cache, 

272 "HELM_CONFIG_HOME": helm_path_config, 

273 "HELM_DATA_HOME": helm_path_data, 

274 "KUBECONFIG": config_filename, 

275 } 

276 

277 for file_name, file in paths.items(): 

278 if "dir" in file_name and not os.path.exists(file): 

279 err_msg = "{} dir does not exist".format(file) 

280 self.log.error(err_msg) 

281 raise K8sException(err_msg) 

282 

283 return paths, env 

284 

285 async def _namespace_exists(self, cluster_id, namespace) -> bool: 

286 self.log.debug( 

287 "checking if namespace {} exists cluster_id {}".format( 

288 namespace, cluster_id 

289 ) 

290 ) 

291 namespaces = await self._get_namespaces(cluster_id) 

292 return namespace in namespaces if namespaces else False 

293 

294 async def _get_namespaces(self, cluster_id: str): 

295 self.log.debug("get namespaces cluster_id {}".format(cluster_id)) 

296 

297 # init config, env 

298 paths, env = self._init_paths_env( 

299 cluster_name=cluster_id, create_if_not_exist=True 

300 ) 

301 

302 command = "{} --kubeconfig={} get namespaces -o=yaml".format( 

303 self.kubectl_command, quote(paths["kube_config"]) 

304 ) 

305 output, _rc = await self._local_async_exec( 

306 command=command, raise_exception_on_error=True, env=env 

307 ) 

308 

309 data = yaml.load(output, Loader=yaml.SafeLoader) 

310 namespaces = [item["metadata"]["name"] for item in data["items"]] 

311 self.log.debug(f"namespaces {namespaces}") 

312 

313 return namespaces 

314 

315 async def _create_namespace(self, cluster_id: str, namespace: str): 

316 self.log.debug(f"create namespace: {cluster_id} for cluster_id: {namespace}") 

317 

318 # init config, env 

319 paths, env = self._init_paths_env( 

320 cluster_name=cluster_id, create_if_not_exist=True 

321 ) 

322 

323 command = "{} --kubeconfig={} create namespace {}".format( 

324 self.kubectl_command, quote(paths["kube_config"]), quote(namespace) 

325 ) 

326 _, _rc = await self._local_async_exec( 

327 command=command, raise_exception_on_error=True, env=env 

328 ) 

329 self.log.debug(f"namespace {namespace} created") 

330 

331 return _rc 

332 

333 async def _get_services( 

334 self, cluster_id: str, kdu_instance: str, namespace: str, kubeconfig: str 

335 ): 

336 # init config, env 

337 paths, env = self._init_paths_env( 

338 cluster_name=cluster_id, create_if_not_exist=True 

339 ) 

340 

341 command1 = "env KUBECONFIG={} {} get manifest {} --namespace={}".format( 

342 kubeconfig, self._helm_command, quote(kdu_instance), quote(namespace) 

343 ) 

344 command2 = "{} get --namespace={} -f -".format( 

345 self.kubectl_command, quote(namespace) 

346 ) 

347 output, _rc = await self._local_async_exec_pipe( 

348 command1, command2, env=env, raise_exception_on_error=True 

349 ) 

350 services = self._parse_services(output) 

351 

352 return services 

353 

354 async def _cluster_init(self, cluster_id, namespace, paths, env): 

355 """ 

356 Implements the helm version dependent cluster initialization: 

357 For helm3 it creates the namespace if it is not created 

358 """ 

359 if namespace != "kube-system": 

360 namespaces = await self._get_namespaces(cluster_id) 

361 if namespace not in namespaces: 

362 # TODO: refactor to use kubernetes API client 

363 await self._create_namespace(cluster_id, namespace) 

364 

365 repo_list = await self.repo_list(cluster_id) 

366 stable_repo = [repo for repo in repo_list if repo["name"] == "stable"] 

367 if not stable_repo and self._stable_repo_url: 

368 await self.repo_add(cluster_id, "stable", self._stable_repo_url) 

369 

370 # Returns False as no software needs to be uninstalled 

371 return False 

372 

373 async def _uninstall_sw(self, cluster_id: str, namespace: str): 

374 # nothing to do to uninstall sw 

375 pass 

376 

377 async def _instances_list(self, cluster_id: str): 

378 # init paths, env 

379 paths, env = self._init_paths_env( 

380 cluster_name=cluster_id, create_if_not_exist=True 

381 ) 

382 

383 command = "{} list --all-namespaces --output yaml".format(self._helm_command) 

384 output, _rc = await self._local_async_exec( 

385 command=command, raise_exception_on_error=True, env=env 

386 ) 

387 

388 if output and len(output) > 0: 

389 self.log.debug("instances list output: {}".format(output)) 

390 return yaml.load(output, Loader=yaml.SafeLoader) 

391 else: 

392 return [] 

393 

394 def _get_inspect_command( 

395 self, show_command: str, kdu_model: str, repo_str: str, version: str 

396 ): 

397 """Generates the command to obtain the information about an Helm Chart package 

398 (´helm show ...´ command) 

399 

400 Args: 

401 show_command: the second part of the command (`helm show <show_command>`) 

402 kdu_model: The name or path of a Helm Chart 

403 repo_str: Helm Chart repository url 

404 version: constraint with specific version of the Chart to use 

405 

406 Returns: 

407 str: the generated Helm Chart command 

408 """ 

409 

410 inspect_command = "{} show {} {}{} {}".format( 

411 self._helm_command, show_command, quote(kdu_model), repo_str, version 

412 ) 

413 return inspect_command 

414 

415 def _get_get_command( 

416 self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str 

417 ): 

418 get_command = ( 

419 "env KUBECONFIG={} {} get {} {} --namespace={} --output yaml".format( 

420 kubeconfig, 

421 self._helm_command, 

422 get_command, 

423 quote(kdu_instance), 

424 quote(namespace), 

425 ) 

426 ) 

427 return get_command 

428 

429 async def _status_kdu( 

430 self, 

431 cluster_id: str, 

432 kdu_instance: str, 

433 namespace: str = None, 

434 yaml_format: bool = False, 

435 show_error_log: bool = False, 

436 ) -> Union[str, dict]: 

437 self.log.debug( 

438 "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace) 

439 ) 

440 

441 if not namespace: 

442 namespace = "kube-system" 

443 

444 # init config, env 

445 paths, env = self._init_paths_env( 

446 cluster_name=cluster_id, create_if_not_exist=True 

447 ) 

448 command = "env KUBECONFIG={} {} status {} --namespace={} --output yaml".format( 

449 paths["kube_config"], 

450 self._helm_command, 

451 quote(kdu_instance), 

452 quote(namespace), 

453 ) 

454 

455 output, rc = await self._local_async_exec( 

456 command=command, 

457 raise_exception_on_error=True, 

458 show_error_log=show_error_log, 

459 env=env, 

460 ) 

461 

462 if yaml_format: 

463 return str(output) 

464 

465 if rc != 0: 

466 return None 

467 

468 data = yaml.load(output, Loader=yaml.SafeLoader) 

469 

470 # remove field 'notes' and manifest 

471 try: 

472 del data.get("info")["notes"] 

473 except KeyError: 

474 pass 

475 

476 # parse the manifest to a list of dictionaries 

477 if "manifest" in data: 

478 manifest_str = data.get("manifest") 

479 manifest_docs = yaml.load_all(manifest_str, Loader=yaml.SafeLoader) 

480 

481 data["manifest"] = [] 

482 for doc in manifest_docs: 

483 data["manifest"].append(doc) 

484 

485 return data 

486 

487 def _get_install_command( 

488 self, 

489 kdu_model: str, 

490 kdu_instance: str, 

491 namespace: str, 

492 labels: dict, 

493 params_str: str, 

494 version: str, 

495 atomic: bool, 

496 timeout: float, 

497 kubeconfig: str, 

498 ) -> str: 

499 timeout_str = "" 

500 if timeout: 

501 timeout_str = "--timeout {}s".format(timeout) 

502 

503 # atomic 

504 atomic_str = "" 

505 if atomic: 

506 atomic_str = "--atomic" 

507 # namespace 

508 namespace_str = "" 

509 if namespace: 

510 namespace_str = "--namespace {}".format(quote(namespace)) 

511 

512 # version 

513 version_str = "" 

514 if version: 

515 version_str = "--version {}".format(version) 

516 

517 # labels 

518 post_renderer_args = [] 

519 post_renderer_str = post_renderer_args_str = "" 

520 if labels and self.podLabels_post_renderer_path: 

521 post_renderer_args.append( 

522 "{}={}".format( 

523 self.podLabels_post_renderer_path, 

524 " ".join( 

525 ["{}:{}".format(key, value) for key, value in labels.items()] 

526 ), 

527 ) 

528 ) 

529 

530 if len(post_renderer_args) > 0 and self.main_post_renderer_path: 

531 post_renderer_str = "--post-renderer {}".format( 

532 self.main_post_renderer_path, 

533 ) 

534 post_renderer_args_str += ( 

535 "--post-renderer-args '" + ",".join(post_renderer_args) + "'" 

536 ) 

537 

538 command = ( 

539 "env KUBECONFIG={kubeconfig} {helm} install {name} {atomic} --output yaml " 

540 "{params} {timeout} {ns} {post_renderer} {post_renderer_args} {model} {ver}".format( 

541 kubeconfig=kubeconfig, 

542 helm=self._helm_command, 

543 name=quote(kdu_instance), 

544 atomic=atomic_str, 

545 params=params_str, 

546 timeout=timeout_str, 

547 ns=namespace_str, 

548 post_renderer=post_renderer_str, 

549 post_renderer_args=post_renderer_args_str, 

550 model=quote(kdu_model), 

551 ver=version_str, 

552 ) 

553 ) 

554 return command 

555 

556 def _get_upgrade_scale_command( 

557 self, 

558 kdu_model: str, 

559 kdu_instance: str, 

560 namespace: str, 

561 scale: int, 

562 labels: dict, 

563 version: str, 

564 atomic: bool, 

565 replica_str: str, 

566 timeout: float, 

567 resource_name: str, 

568 kubeconfig: str, 

569 ) -> str: 

570 """Generates the command to scale a Helm Chart release 

571 

572 Args: 

573 kdu_model (str): Kdu model name, corresponding to the Helm local location or repository 

574 kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question 

575 namespace (str): Namespace where this KDU instance is deployed 

576 scale (int): Scale count 

577 version (str): Constraint with specific version of the Chart to use 

578 atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade. 

579 The --wait flag will be set automatically if --atomic is used 

580 replica_str (str): The key under resource_name key where the scale count is stored 

581 timeout (float): The time, in seconds, to wait 

582 resource_name (str): The KDU's resource to scale 

583 kubeconfig (str): Kubeconfig file path 

584 

585 Returns: 

586 str: command to scale a Helm Chart release 

587 """ 

588 

589 # scale 

590 if resource_name: 

591 scale_dict = {"{}.{}".format(resource_name, replica_str): scale} 

592 else: 

593 scale_dict = {replica_str: scale} 

594 

595 scale_str = self._params_to_set_option(scale_dict) 

596 

597 return self._get_upgrade_command( 

598 kdu_model=kdu_model, 

599 kdu_instance=kdu_instance, 

600 namespace=namespace, 

601 params_str=scale_str, 

602 labels=labels, 

603 version=version, 

604 atomic=atomic, 

605 timeout=timeout, 

606 kubeconfig=kubeconfig, 

607 ) 

608 

    def _get_upgrade_command(
        self,
        kdu_model: str,
        kdu_instance: str,
        namespace: str,
        params_str: str,
        labels: dict,
        version: str,
        atomic: bool,
        timeout: float,
        kubeconfig: str,
        targetHostK8sLabels: dict = None,
        reset_values: bool = False,
        reuse_values: bool = True,
        reset_then_reuse_values: bool = False,
        force: bool = False,
    ) -> str:
        """Generates the command to upgrade a Helm Chart release

        Args:
            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
            namespace (str): Namespace where this KDU instance is deployed
            params_str (str): Params used to upgrade the Helm Chart release
            labels (dict): Pod labels injected via the post-renderer scripts
            version (str): Constraint with specific version of the Chart to use
            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
                The --wait flag will be set automatically if --atomic is used
            timeout (float): The time, in seconds, to wait
            kubeconfig (str): Kubeconfig file path
            targetHostK8sLabels (dict): Node selector labels used for migration,
                injected via the post-renderer scripts
            reset_values(bool): If set, helm resets values instead of reusing previous values.
            reuse_values(bool): If set, helm reuses previous values.
            reset_then_reuse_values(bool): If set, helm resets values, then apply the last release's values
            force (bool): If set, helm forces resource updates through a replacement strategy. This may recreate pods.
        Returns:
            str: command to upgrade a Helm Chart release
        """

        timeout_str = ""
        if timeout:
            timeout_str = "--timeout {}s".format(timeout)

        # atomic
        atomic_str = ""
        if atomic:
            atomic_str = "--atomic"

        # force
        # NOTE: the trailing space here is load-bearing — the command template
        # below concatenates "{force}" directly with "--output yaml"
        force_str = ""
        if force:
            force_str = "--force "

        # version
        version_str = ""
        if version:
            version_str = "--version {}".format(quote(version))

        # namespace
        namespace_str = ""
        if namespace:
            namespace_str = "--namespace {}".format(quote(namespace))

        # reset, reuse or reset_then_reuse values; precedence is reset > reuse
        # > reset-then-reuse, defaulting to --reuse-values when none is set
        on_values_str = "--reuse-values"
        if reset_values:
            on_values_str = "--reset-values"
        elif reuse_values:
            on_values_str = "--reuse-values"
        elif reset_then_reuse_values:
            on_values_str = "--reset-then-reuse-values"

        # labels: injected into pods through the podLabels post-renderer script
        post_renderer_args = []
        post_renderer_str = post_renderer_args_str = ""
        if labels and self.podLabels_post_renderer_path:
            post_renderer_args.append(
                "{}={}".format(
                    self.podLabels_post_renderer_path,
                    " ".join(
                        ["{}:{}".format(key, value) for key, value in labels.items()]
                    ),
                )
            )

        # migration: node selector labels through the nodeSelector post-renderer
        if targetHostK8sLabels and self.nodeSelector_post_renderer_path:
            post_renderer_args.append(
                "{}={}".format(
                    self.nodeSelector_post_renderer_path,
                    " ".join(
                        [
                            "{}:{}".format(key, value)
                            for key, value in targetHostK8sLabels.items()
                        ]
                    ),
                )
            )

        # the main post-renderer dispatches to the individual scripts above
        if len(post_renderer_args) > 0 and self.main_post_renderer_path:
            post_renderer_str = "--post-renderer {}".format(
                self.main_post_renderer_path,
            )
            post_renderer_args_str += (
                "--post-renderer-args '" + ",".join(post_renderer_args) + "'"
            )

        # empty optional flags leave repeated spaces in the command; helm
        # tolerates them
        command = (
            "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} {force}"
            "--output yaml {params} {timeout} {post_renderer} {post_renderer_args} {on_values} {ver}"
        ).format(
            kubeconfig=kubeconfig,
            helm=self._helm_command,
            name=quote(kdu_instance),
            namespace=namespace_str,
            atomic=atomic_str,
            force=force_str,
            params=params_str,
            timeout=timeout_str,
            post_renderer=post_renderer_str,
            post_renderer_args=post_renderer_args_str,
            model=quote(kdu_model),
            on_values=on_values_str,
            ver=version_str,
        )
        return command

733 

734 def _get_rollback_command( 

735 self, kdu_instance: str, namespace: str, revision: float, kubeconfig: str 

736 ) -> str: 

737 return "env KUBECONFIG={} {} rollback {} {} --namespace={} --wait".format( 

738 kubeconfig, 

739 self._helm_command, 

740 quote(kdu_instance), 

741 revision, 

742 quote(namespace), 

743 ) 

744 

745 def _get_uninstall_command( 

746 self, kdu_instance: str, namespace: str, kubeconfig: str 

747 ) -> str: 

748 return "env KUBECONFIG={} {} uninstall {} --namespace={}".format( 

749 kubeconfig, self._helm_command, quote(kdu_instance), quote(namespace) 

750 ) 

751 

752 def _get_helm_chart_repos_ids(self, cluster_uuid) -> list: 

753 repo_ids = [] 

754 cluster_filter = {"_admin.helm-chart-v3.id": cluster_uuid} 

755 cluster = self.db.get_one("k8sclusters", cluster_filter) 

756 if cluster: 

757 repo_ids = cluster.get("_admin").get("helm_chart_repos") or [] 

758 return repo_ids 

759 else: 

760 raise K8sException( 

761 "k8cluster with helm-id : {} not found".format(cluster_uuid) 

762 )