Fix bug 2088 by quoting inputs for commands
[osm/N2VC.git] / n2vc / k8s_helm_conn.py
1 ##
2 # Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
3 # This file is part of OSM
4 # All Rights Reserved.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
15 # implied.
16 # See the License for the specific language governing permissions and
17 # limitations under the License.
18 #
19 # For those usages not covered by the Apache License, Version 2.0 please
20 # contact with: nfvlabs@tid.es
21 ##
22 import asyncio
23 from typing import Union
24 from shlex import quote
25 import os
26 import yaml
27
28 from n2vc.k8s_helm_base_conn import K8sHelmBaseConnector
29 from n2vc.exceptions import K8sException
30
31
32 class K8sHelmConnector(K8sHelmBaseConnector):
33
34 """
35 ####################################################################################
36 ################################### P U B L I C ####################################
37 ####################################################################################
38 """
39
40 def __init__(
41 self,
42 fs: object,
43 db: object,
44 kubectl_command: str = "/usr/bin/kubectl",
45 helm_command: str = "/usr/bin/helm",
46 log: object = None,
47 on_update_db=None,
48 ):
49 """
50 Initializes helm connector for helm v2
51
52 :param fs: file system for kubernetes and helm configuration
53 :param db: database object to write current operation status
54 :param kubectl_command: path to kubectl executable
55 :param helm_command: path to helm executable
56 :param log: logger
57 :param on_update_db: callback called when k8s connector updates database
58 """
59
60 # parent class
61 K8sHelmBaseConnector.__init__(
62 self,
63 db=db,
64 log=log,
65 fs=fs,
66 kubectl_command=kubectl_command,
67 helm_command=helm_command,
68 on_update_db=on_update_db,
69 )
70
71 self.log.info("Initializing K8S Helm2 connector")
72
73 # initialize helm client-only
74 self.log.debug("Initializing helm client-only...")
75 command = "{} init --client-only {} ".format(
76 self._helm_command,
77 "--stable-repo-url {}".format(quote(self._stable_repo_url))
78 if self._stable_repo_url
79 else "--skip-repos",
80 )
81 try:
82 asyncio.ensure_future(
83 self._local_async_exec(command=command, raise_exception_on_error=False)
84 )
85 # loop = asyncio.get_event_loop()
86 # loop.run_until_complete(self._local_async_exec(command=command,
87 # raise_exception_on_error=False))
88 except Exception as e:
89 self.warning(
90 msg="helm init failed (it was already initialized): {}".format(e)
91 )
92
93 self.log.info("K8S Helm2 connector initialized")
94
    async def install(
        self,
        cluster_uuid: str,
        kdu_model: str,
        kdu_instance: str,
        atomic: bool = True,
        timeout: float = 300,
        params: dict = None,
        db_dict: dict = None,
        kdu_name: str = None,
        namespace: str = None,
        **kwargs,
    ):
        """
        Deploys of a new KDU instance. It would implicitly rely on the `install` call
        to deploy the Chart/Bundle properly parametrized (in practice, this call would
        happen before any _initial-config-primitive_ of the VNF is called).

        :param cluster_uuid: UUID of a K8s cluster known by OSM
        :param kdu_model: chart/reference (string), which can be either
            of these options:
            - a name of chart available via the repos known by OSM
              (e.g. stable/openldap, stable/openldap:1.2.4)
            - a path to a packaged chart (e.g. mychart.tgz)
            - a path to an unpacked chart directory or a URL (e.g. mychart)
        :param kdu_instance: Kdu instance name
        :param atomic: If set, installation process purges chart/bundle on fail, also
            will wait until all the K8s objects are active
        :param timeout: Time in seconds to wait for the install of the chart/bundle
            (defaults to Helm default timeout: 300s)
        :param params: dictionary of key-value pairs for instantiation parameters
            (overriding default values)
        :param dict db_dict: where to write into database when the status changes.
            It contains a dict with {collection: <str>, filter: {},
            path: <str>},
            e.g. {collection: "nsrs", filter:
            {_id: <nsd-id>, path: "_admin.deployed.K8S.3"}
        :param kdu_name: Name of the KDU instance to be installed
        :param namespace: K8s namespace to use for the KDU instance
        :param kwargs: Additional parameters (None yet)
        :return: True if successful
        """
        self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid))

        # sync local dir: bring the cluster configuration from shared storage
        self.fs.sync(from_path=cluster_uuid)

        # init env, paths (creates the cluster kube/helm dirs if missing)
        paths, env = self._init_paths_env(
            cluster_name=cluster_uuid, create_if_not_exist=True
        )

        # the actual install logic lives in the base connector
        await self._install_impl(
            cluster_uuid,
            kdu_model,
            paths,
            env,
            kdu_instance,
            atomic=atomic,
            timeout=timeout,
            params=params,
            db_dict=db_dict,
            kdu_name=kdu_name,
            namespace=namespace,
        )

        # sync fs: push local changes back to shared storage
        self.fs.reverse_sync(from_path=cluster_uuid)

        self.log.debug("Returning kdu_instance {}".format(kdu_instance))
        return True
166
167 async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
168 self.log.debug(
169 "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url)
170 )
171
172 return await self._exec_inspect_command(
173 inspect_command="", kdu_model=kdu_model, repo_url=repo_url
174 )
175
176 """
177 ####################################################################################
178 ################################### P R I V A T E ##################################
179 ####################################################################################
180 """
181
182 def _init_paths_env(self, cluster_name: str, create_if_not_exist: bool = True):
183 """
184 Creates and returns base cluster and kube dirs and returns them.
185 Also created helm3 dirs according to new directory specification, paths are
186 returned and also environment variables that must be provided to execute commands
187
188 Helm 2 directory specification uses helm_home dir:
189
190 The variables assigned for this paths are:
191 - Helm hone: $HELM_HOME
192 - helm kubeconfig: $KUBECONFIG
193
194 :param cluster_name: cluster_name
195 :return: Dictionary with config_paths and dictionary with helm environment variables
196 """
197 base = self.fs.path
198 if base.endswith("/") or base.endswith("\\"):
199 base = base[:-1]
200
201 # base dir for cluster
202 cluster_dir = base + "/" + cluster_name
203
204 # kube dir
205 kube_dir = cluster_dir + "/" + ".kube"
206 if create_if_not_exist and not os.path.exists(kube_dir):
207 self.log.debug("Creating dir {}".format(kube_dir))
208 os.makedirs(kube_dir)
209
210 # helm home dir
211 helm_dir = cluster_dir + "/" + ".helm"
212 if create_if_not_exist and not os.path.exists(helm_dir):
213 self.log.debug("Creating dir {}".format(helm_dir))
214 os.makedirs(helm_dir)
215
216 config_filename = kube_dir + "/config"
217
218 # 2 - Prepare dictionary with paths
219 paths = {
220 "kube_dir": kube_dir,
221 "kube_config": config_filename,
222 "cluster_dir": cluster_dir,
223 "helm_dir": helm_dir,
224 }
225
226 for file_name, file in paths.items():
227 if "dir" in file_name and not os.path.exists(file):
228 err_msg = "{} dir does not exist".format(file)
229 self.log.error(err_msg)
230 raise K8sException(err_msg)
231
232 # 3 - Prepare environment variables
233 env = {"HELM_HOME": helm_dir, "KUBECONFIG": config_filename}
234
235 return paths, env
236
237 async def _get_services(self, cluster_id, kdu_instance, namespace, kubeconfig):
238 # init config, env
239 paths, env = self._init_paths_env(
240 cluster_name=cluster_id, create_if_not_exist=True
241 )
242
243 command1 = "env KUBECONFIG={} {} get manifest {} ".format(
244 kubeconfig, self._helm_command, quote(kdu_instance)
245 )
246 command2 = "{} get --namespace={} -f -".format(
247 self.kubectl_command, quote(namespace)
248 )
249 output, _rc = await self._local_async_exec_pipe(
250 command1, command2, env=env, raise_exception_on_error=True
251 )
252 services = self._parse_services(output)
253
254 return services
255
256 async def _cluster_init(
257 self, cluster_id: str, namespace: str, paths: dict, env: dict
258 ):
259 """
260 Implements the helm version dependent cluster initialization:
261 For helm2 it initialized tiller environment if needed
262 """
263
264 # check if tiller pod is up in cluster
265 command = "{} --kubeconfig={} --namespace={} get deployments".format(
266 self.kubectl_command, paths["kube_config"], quote(namespace)
267 )
268 output, _rc = await self._local_async_exec(
269 command=command, raise_exception_on_error=True, env=env
270 )
271
272 output_table = self._output_to_table(output=output)
273
274 # find 'tiller' pod in all pods
275 already_initialized = False
276 try:
277 for row in output_table:
278 if row[0].startswith("tiller-deploy"):
279 already_initialized = True
280 break
281 except Exception:
282 pass
283
284 # helm init
285 n2vc_installed_sw = False
286 if not already_initialized:
287 self.log.info(
288 "Initializing helm in client and server: {}".format(cluster_id)
289 )
290 command = "{} --kubeconfig={} --namespace kube-system create serviceaccount {}".format(
291 self.kubectl_command, paths["kube_config"], quote(self.service_account)
292 )
293 _, _rc = await self._local_async_exec(
294 command=command, raise_exception_on_error=False, env=env
295 )
296
297 command = (
298 "{} --kubeconfig={} create clusterrolebinding osm-tiller-cluster-rule "
299 "--clusterrole=cluster-admin --serviceaccount=kube-system:{}"
300 ).format(
301 self.kubectl_command, paths["kube_config"], quote(self.service_account)
302 )
303 _, _rc = await self._local_async_exec(
304 command=command, raise_exception_on_error=False, env=env
305 )
306
307 command = (
308 "{} init --kubeconfig={} --tiller-namespace={} --home={} --service-account {} "
309 " {}"
310 ).format(
311 self._helm_command,
312 paths["kube_config"],
313 quote(namespace),
314 quote(paths["helm_dir"]),
315 quote(self.service_account),
316 "--stable-repo-url {}".format(quote(self._stable_repo_url))
317 if self._stable_repo_url
318 else "--skip-repos",
319 )
320 _, _rc = await self._local_async_exec(
321 command=command, raise_exception_on_error=True, env=env
322 )
323 n2vc_installed_sw = True
324 else:
325 # check client helm installation
326 check_file = paths["helm_dir"] + "/repository/repositories.yaml"
327 if not self._check_file_exists(
328 filename=check_file, exception_if_not_exists=False
329 ):
330 self.log.info("Initializing helm in client: {}".format(cluster_id))
331 command = (
332 "{} init --kubeconfig={} --tiller-namespace={} "
333 "--home={} --client-only {} "
334 ).format(
335 self._helm_command,
336 paths["kube_config"],
337 quote(namespace),
338 quote(paths["helm_dir"]),
339 "--stable-repo-url {}".format(quote(self._stable_repo_url))
340 if self._stable_repo_url
341 else "--skip-repos",
342 )
343 output, _rc = await self._local_async_exec(
344 command=command, raise_exception_on_error=True, env=env
345 )
346 else:
347 self.log.info("Helm client already initialized")
348
349 repo_list = await self.repo_list(cluster_id)
350 for repo in repo_list:
351 if repo["name"] == "stable" and repo["url"] != self._stable_repo_url:
352 self.log.debug("Add new stable repo url: {}")
353 await self.repo_remove(cluster_id, "stable")
354 if self._stable_repo_url:
355 await self.repo_add(cluster_id, "stable", self._stable_repo_url)
356 break
357
358 return n2vc_installed_sw
359
    async def _uninstall_sw(self, cluster_id: str, namespace: str):
        """
        Uninstalls tiller (the helm2 server side) from the cluster, then removes
        the clusterrolebinding and service account created at init time.

        :param cluster_id: cluster id/name (used for local dirs and logs)
        :param namespace: namespace where tiller runs; when empty, it is
            discovered by scanning deployments in all namespaces
        """
        # uninstall Tiller if necessary

        self.log.debug("Uninstalling tiller from cluster {}".format(cluster_id))

        # init paths, env
        paths, env = self._init_paths_env(
            cluster_name=cluster_id, create_if_not_exist=True
        )

        if not namespace:
            # find namespace for tiller pod
            command = "{} --kubeconfig={} get deployments --all-namespaces".format(
                self.kubectl_command, quote(paths["kube_config"])
            )
            output, _rc = await self._local_async_exec(
                command=command, raise_exception_on_error=False, env=env
            )
            output_table = self._output_to_table(output=output)
            namespace = None
            for r in output_table:
                try:
                    # column 1 holds the deployment name, column 0 its namespace
                    if "tiller-deploy" in r[1]:
                        namespace = r[0]
                        break
                except Exception:
                    pass
            else:
                # for/else: runs only when the loop did NOT break (no tiller found)
                msg = "Tiller deployment not found in cluster {}".format(cluster_id)
                self.log.error(msg)

            self.log.debug("namespace for tiller: {}".format(namespace))

        if namespace:
            # uninstall tiller from cluster
            self.log.debug("Uninstalling tiller from cluster {}".format(cluster_id))
            command = "{} --kubeconfig={} --home={} reset".format(
                self._helm_command,
                quote(paths["kube_config"]),
                quote(paths["helm_dir"]),
            )
            self.log.debug("resetting: {}".format(command))
            output, _rc = await self._local_async_exec(
                command=command, raise_exception_on_error=True, env=env
            )
            # Delete clusterrolebinding and serviceaccount.
            # Ignore if errors for backward compatibility
            command = (
                "{} --kubeconfig={} delete clusterrolebinding.rbac.authorization.k8s."
                "io/osm-tiller-cluster-rule"
            ).format(self.kubectl_command, quote(paths["kube_config"]))
            output, _rc = await self._local_async_exec(
                command=command, raise_exception_on_error=False, env=env
            )
            command = (
                "{} --kubeconfig={} --namespace {} delete serviceaccount/{}".format(
                    self.kubectl_command,
                    quote(paths["kube_config"]),
                    quote(namespace),
                    quote(self.service_account),
                )
            )
            output, _rc = await self._local_async_exec(
                command=command, raise_exception_on_error=False, env=env
            )

        else:
            self.log.debug("namespace not found")
428
429 async def _instances_list(self, cluster_id):
430 # init paths, env
431 paths, env = self._init_paths_env(
432 cluster_name=cluster_id, create_if_not_exist=True
433 )
434
435 command = "{} list --output yaml".format(self._helm_command)
436
437 output, _rc = await self._local_async_exec(
438 command=command, raise_exception_on_error=True, env=env
439 )
440
441 if output and len(output) > 0:
442 # parse yaml and update keys to lower case to unify with helm3
443 instances = yaml.load(output, Loader=yaml.SafeLoader).get("Releases")
444 new_instances = []
445 for instance in instances:
446 new_instance = dict((k.lower(), v) for k, v in instance.items())
447 new_instances.append(new_instance)
448 return new_instances
449 else:
450 return []
451
452 def _get_inspect_command(
453 self, show_command: str, kdu_model: str, repo_str: str, version: str
454 ):
455 inspect_command = "{} inspect {} {}{} {}".format(
456 self._helm_command, show_command, quote(kdu_model), repo_str, version
457 )
458 return inspect_command
459
460 def _get_get_command(
461 self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
462 ):
463 get_command = "env KUBECONFIG={} {} get {} {} --output yaml".format(
464 kubeconfig, self._helm_command, get_command, quote(kdu_instance)
465 )
466 return get_command
467
    async def _status_kdu(
        self,
        cluster_id: str,
        kdu_instance: str,
        namespace: str = None,
        yaml_format: bool = False,
        show_error_log: bool = False,
    ) -> Union[str, dict]:
        """
        Returns the status of a release via `helm status`.

        :param cluster_id: cluster id/name (used to locate local config dirs)
        :param kdu_instance: helm release name
        :param namespace: used only for logging here
        :param yaml_format: when True, return the raw yaml output as a string
        :param show_error_log: forward command errors to the error log
        :return: raw yaml string when yaml_format is True, otherwise the parsed
            dict (with notes removed, manifest expanded and resources tabled)
        """
        self.log.debug(
            "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace)
        )

        # init config, env
        paths, env = self._init_paths_env(
            cluster_name=cluster_id, create_if_not_exist=True
        )
        command = ("env KUBECONFIG={} {} status {} --output yaml").format(
            paths["kube_config"], self._helm_command, quote(kdu_instance)
        )
        output, rc = await self._local_async_exec(
            command=command,
            raise_exception_on_error=True,
            show_error_log=show_error_log,
            env=env,
        )

        # raw yaml requested: skip all post-processing (and the rc check below)
        if yaml_format:
            return str(output)

        # NOTE(review): with raise_exception_on_error=True a non-zero rc
        # presumably raises before reaching here — confirm in the base class
        if rc != 0:
            return None

        data = yaml.load(output, Loader=yaml.SafeLoader)

        # remove field 'notes'
        try:
            del data.get("info").get("status")["notes"]
        except KeyError:
            pass

        # parse the manifest to a list of dictionaries
        if "manifest" in data:
            manifest_str = data.get("manifest")
            manifest_docs = yaml.load_all(manifest_str, Loader=yaml.SafeLoader)

            data["manifest"] = []
            for doc in manifest_docs:
                data["manifest"].append(doc)

        # parse field 'resources' (plain text) into a table; best effort only
        try:
            resources = str(data.get("info").get("status").get("resources"))
            resource_table = self._output_to_table(resources)
            data.get("info").get("status")["resources"] = resource_table
        except Exception:
            pass

        # set description to lowercase (unify with helm3)
        try:
            data.get("info")["description"] = data.get("info").pop("Description")
        except KeyError:
            pass

        return data
532
533 def _get_helm_chart_repos_ids(self, cluster_uuid) -> list:
534 repo_ids = []
535 cluster_filter = {"_admin.helm-chart.id": cluster_uuid}
536 cluster = self.db.get_one("k8sclusters", cluster_filter)
537 if cluster:
538 repo_ids = cluster.get("_admin").get("helm_chart_repos") or []
539 return repo_ids
540 else:
541 raise K8sException(
542 "k8cluster with helm-id : {} not found".format(cluster_uuid)
543 )
544
    async def _is_install_completed(self, cluster_id: str, kdu_instance: str) -> bool:
        """
        Checks whether every resource of a release reports full readiness by
        scanning the plain-text resources table of `helm status`; returns False
        as soon as any READY column shows current < total.
        """
        # init config, env
        paths, env = self._init_paths_env(
            cluster_name=cluster_id, create_if_not_exist=True
        )

        status = await self._status_kdu(
            cluster_id=cluster_id, kdu_instance=kdu_instance, yaml_format=False
        )

        # extract info.status.resources-> str
        # format:
        # ==> v1/Deployment
        # NAME READY UP-TO-DATE AVAILABLE AGE
        # halting-horse-mongodb 0/1 1 0 0s
        # halting-petit-mongodb 1/1 1 0 0s
        # blank line
        resources = K8sHelmBaseConnector._get_deep(
            status, ("info", "status", "resources")
        )

        # convert to table
        resources = K8sHelmBaseConnector._output_to_table(resources)

        num_lines = len(resources)
        index = 0
        ready = True
        # walk the table: each "==>" header may be followed by a READY section
        # whose rows carry "current/total" counts in column 1
        while index < num_lines:
            try:
                line1 = resources[index]
                index += 1
                # find '==>' in column 0
                if line1[0] == "==>":
                    line2 = resources[index]
                    index += 1
                    # find READY in column 1
                    if line2[1] == "READY":
                        # read next lines
                        line3 = resources[index]
                        index += 1
                        while len(line3) > 1 and index < num_lines:
                            ready_value = line3[1]
                            parts = ready_value.split(sep="/")
                            current = int(parts[0])
                            total = int(parts[1])
                            if current < total:
                                self.log.debug("NOT READY:\n {}".format(line3))
                                ready = False
                            line3 = resources[index]
                            index += 1

            except Exception:
                # malformed rows are skipped; index has already advanced
                pass

        return ready
600
601 def _get_install_command(
602 self,
603 kdu_model,
604 kdu_instance,
605 namespace,
606 params_str,
607 version,
608 atomic,
609 timeout,
610 kubeconfig,
611 ) -> str:
612 timeout_str = ""
613 if timeout:
614 timeout_str = "--timeout {}".format(timeout)
615
616 # atomic
617 atomic_str = ""
618 if atomic:
619 atomic_str = "--atomic"
620 # namespace
621 namespace_str = ""
622 if namespace:
623 namespace_str = "--namespace {}".format(quote(namespace))
624
625 # version
626 version_str = ""
627 if version:
628 version_str = "--version {}".format(version)
629
630 command = (
631 "env KUBECONFIG={kubeconfig} {helm} install {atomic} --output yaml "
632 "{params} {timeout} --name={name} {ns} {model} {ver}".format(
633 kubeconfig=kubeconfig,
634 helm=self._helm_command,
635 atomic=atomic_str,
636 params=params_str,
637 timeout=timeout_str,
638 name=quote(kdu_instance),
639 ns=namespace_str,
640 model=quote(kdu_model),
641 ver=version_str,
642 )
643 )
644 return command
645
646 def _get_upgrade_scale_command(
647 self,
648 kdu_model: str,
649 kdu_instance: str,
650 namespace: str,
651 scale: int,
652 version: str,
653 atomic: bool,
654 replica_str: str,
655 timeout: float,
656 resource_name: str,
657 kubeconfig: str,
658 ) -> str:
659 """Generates the command to scale a Helm Chart release
660
661 Args:
662 kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
663 kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
664 namespace (str): Namespace where this KDU instance is deployed
665 scale (int): Scale count
666 version (str): Constraint with specific version of the Chart to use
667 atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
668 The --wait flag will be set automatically if --atomic is used
669 replica_str (str): The key under resource_name key where the scale count is stored
670 timeout (float): The time, in seconds, to wait
671 resource_name (str): The KDU's resource to scale
672 kubeconfig (str): Kubeconfig file path
673
674 Returns:
675 str: command to scale a Helm Chart release
676 """
677
678 # scale
679 if resource_name:
680 scale_dict = {"{}.{}".format(resource_name, replica_str): scale}
681 else:
682 scale_dict = {replica_str: scale}
683
684 scale_str = self._params_to_set_option(scale_dict)
685
686 return self._get_upgrade_command(
687 kdu_model=kdu_model,
688 kdu_instance=kdu_instance,
689 namespace=namespace,
690 params_str=scale_str,
691 version=version,
692 atomic=atomic,
693 timeout=timeout,
694 kubeconfig=kubeconfig,
695 )
696
697 def _get_upgrade_command(
698 self,
699 kdu_model,
700 kdu_instance,
701 namespace,
702 params_str,
703 version,
704 atomic,
705 timeout,
706 kubeconfig,
707 ) -> str:
708 """Generates the command to upgrade a Helm Chart release
709
710 Args:
711 kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
712 kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
713 namespace (str): Namespace where this KDU instance is deployed
714 params_str (str): Params used to upgrade the Helm Chart release
715 version (str): Constraint with specific version of the Chart to use
716 atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
717 The --wait flag will be set automatically if --atomic is used
718 timeout (float): The time, in seconds, to wait
719 kubeconfig (str): Kubeconfig file path
720
721 Returns:
722 str: command to upgrade a Helm Chart release
723 """
724
725 timeout_str = ""
726 if timeout:
727 timeout_str = "--timeout {}".format(timeout)
728
729 # atomic
730 atomic_str = ""
731 if atomic:
732 atomic_str = "--atomic"
733
734 # version
735 version_str = ""
736 if version:
737 version_str = "--version {}".format(quote(version))
738
739 command = (
740 "env KUBECONFIG={kubeconfig} {helm} upgrade {atomic} --output yaml {params} {timeout} "
741 "--reuse-values {name} {model} {ver}"
742 ).format(
743 kubeconfig=kubeconfig,
744 helm=self._helm_command,
745 atomic=atomic_str,
746 params=params_str,
747 timeout=timeout_str,
748 name=quote(kdu_instance),
749 model=quote(kdu_model),
750 ver=version_str,
751 )
752 return command
753
754 def _get_rollback_command(
755 self, kdu_instance, namespace, revision, kubeconfig
756 ) -> str:
757 return "env KUBECONFIG={} {} rollback {} {} --wait".format(
758 kubeconfig, self._helm_command, quote(kdu_instance), revision
759 )
760
761 def _get_uninstall_command(
762 self, kdu_instance: str, namespace: str, kubeconfig: str
763 ) -> str:
764 return "env KUBECONFIG={} {} delete --purge {}".format(
765 kubeconfig, self._helm_command, quote(kdu_instance)
766 )