return Kubectl(config_file=kubeconfig_path)
+class NodeGroupLcm(GitOpsLcm):
+ db_collection = "nodegroups"
+
+ def __init__(self, msg, lcm_tasks, config):
+ """
+ Init, Connect to database, filesystem storage, and messaging
+ :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
+ :return: None
+ """
+ super().__init__(msg, lcm_tasks, config)
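+        # Map each workflow to the coroutine that validates its resources;
+        # the inherited check_resource_and_update_db() looks up the
+        # "check_resource_function" entry for the workflow being run.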
+ self._workflows = {
+ "add_nodegroup": {
+ "check_resource_function": self.check_add_nodegroup,
+ },
+ "scale_nodegroup": {
+ "check_resource_function": self.check_scale_nodegroup,
+ },
+ "delete_nodegroup": {
+ "check_resource_function": self.check_delete_nodegroup,
+ },
+ }
+
+ async def create(self, params, order_id):
+ self.logger.info("Add NodeGroup Enter")
+
+        # Get the nodegroup and operation ids
+        nodegroup_id = params["nodegroup_id"]
+        op_id = params["operation_id"]
+
+        # Initialize the operation states
+        self.initialize_operation(nodegroup_id, op_id)
+
+        # Get the nodegroup and its parent cluster from the DB
+        db_nodegroup = self.db.get_one(self.db_collection, {"_id": nodegroup_id})
+        db_cluster = self.db.get_one("clusters", {"_id": db_nodegroup["cluster_id"]})
+
+        # Get the operation params
+        op_params = self.get_operation_params(db_nodegroup, op_id)
+        self.logger.info(f"Operation params: {op_params}")
+
+ db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
+
+        # Collect the nodegroup, cluster and VIM content used by the workflows
+ workflow_content = {
+ "nodegroup": db_nodegroup,
+ "cluster": db_cluster,
+ "vim_account": db_vim,
+ }
+ self.logger.info(f"Workflow content: {workflow_content}")
+
+ workflow_res, workflow_name = await self.odu.launch_workflow(
+ "add_nodegroup", op_id, op_params, workflow_content
+ )
+ self.logger.info("workflow_name is: {}".format(workflow_name))
+
+ workflow_status = await self.check_workflow_and_update_db(
+ op_id, workflow_name, db_nodegroup
+ )
+
+        # Clean up items used in the workflow, regardless of whether it succeeded
+        clean_status, clean_msg = await self.odu.clean_items_workflow(
+            "add_nodegroup", op_id, op_params, workflow_content
+        )
+        self.logger.info(
+            f"clean_status is: {clean_status} and clean_msg is: {clean_msg}"
+        )
+        resource_status = False
+        if workflow_status:
+            resource_status, content = await self.check_resource_and_update_db(
+                "add_nodegroup", op_id, op_params, db_nodegroup
+            )
+        self.db.set_one(
+            self.db_collection, {"_id": db_nodegroup["_id"]}, db_nodegroup
+        )
+        self.logger.info(f"Add NodeGroup Exit with resource status: {resource_status}")
+        return
+
+ async def check_add_nodegroup(self, op_id, op_params, content):
+ self.logger.info(f"check_add_nodegroup Operation {op_id}. Params: {op_params}.")
+ self.logger.info(f"Content: {content}")
+ db_nodegroup = content
+ nodegroup_name = db_nodegroup["git_name"].lower()
+ nodegroup_kustomization_name = nodegroup_name
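+        # Three readiness gates, checked in order: the Kustomization that
+        # renders the nodegroup must become Ready, then the cloud nodepool
+        # must report Synced and finally Ready (Crossplane-style status
+        # conditions, matched with a JSONPath filter).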
+ checkings_list = [
+ {
+ "item": "kustomization",
+ "name": nodegroup_kustomization_name,
+ "namespace": "managed-resources",
+ "condition": {
+ "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
+ "value": "True",
+ },
+ "timeout": self._checkloop_kustomization_timeout,
+ "enable": True,
+ "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
+ },
+ {
+ "item": "nodepool_aws",
+ "name": nodegroup_name,
+ "namespace": "",
+ "condition": {
+ "jsonpath_filter": "status.conditions[?(@.type=='Synced')].status",
+ "value": "True",
+ },
+ "timeout": self._checkloop_resource_timeout,
+ "enable": True,
+ "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.NODEGROUP",
+ },
+ {
+ "item": "nodepool_aws",
+ "name": nodegroup_name,
+ "namespace": "",
+ "condition": {
+ "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
+ "value": "True",
+ },
+ "timeout": self._checkloop_resource_timeout,
+ "enable": True,
+ "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEGROUP",
+ },
+ ]
+ self.logger.info(f"Checking list: {checkings_list}")
+ result, message = await self.common_check_list(
+ op_id, checkings_list, "nodegroups", db_nodegroup
+ )
+ if not result:
+ return False, message
+ return True, "OK"
+
+ async def scale(self, params, order_id):
+ self.logger.info("Scale nodegroup Enter")
+
+ op_id = params["operation_id"]
+ nodegroup_id = params["nodegroup_id"]
+
+        # Initialize the operation states
+ self.initialize_operation(nodegroup_id, op_id)
+
+ db_nodegroup = self.db.get_one(self.db_collection, {"_id": nodegroup_id})
+ db_cluster = self.db.get_one("clusters", {"_id": db_nodegroup["cluster_id"]})
+ op_params = self.get_operation_params(db_nodegroup, op_id)
+ db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
+
+ workflow_content = {
+ "nodegroup": db_nodegroup,
+ "cluster": db_cluster,
+ "vim_account": db_vim,
+ }
+ self.logger.info(f"Workflow content: {workflow_content}")
+
+ workflow_res, workflow_name = await self.odu.launch_workflow(
+ "scale_nodegroup", op_id, op_params, workflow_content
+ )
+ self.logger.info("workflow_name is: {}".format(workflow_name))
+
+ workflow_status = await self.check_workflow_and_update_db(
+ op_id, workflow_name, db_nodegroup
+ )
+
+        # Clean up items used in the workflow, regardless of whether it succeeded
+        clean_status, clean_msg = await self.odu.clean_items_workflow(
+            "scale_nodegroup", op_id, op_params, workflow_content
+        )
+        self.logger.info(
+            f"clean_status is: {clean_status} and clean_msg is: {clean_msg}"
+        )
+
+        resource_status = False
+        if workflow_status:
+            resource_status, content = await self.check_resource_and_update_db(
+                "scale_nodegroup", op_id, op_params, db_nodegroup
+            )
+
+ if resource_status:
+ db_nodegroup["state"] = "READY"
+ self.db.set_one(
+ self.db_collection, {"_id": db_nodegroup["_id"]}, db_nodegroup
+ )
+ self.logger.info(
+ f"Nodegroup Scale Exit with resource status: {resource_status}"
+ )
+ return
+
+ async def check_scale_nodegroup(self, op_id, op_params, content):
+ self.logger.info(
+ f"check_scale_nodegroup Operation {op_id}. Params: {op_params}."
+ )
+ self.logger.debug(f"Content: {content}")
+ db_nodegroup = content
+ nodegroup_name = db_nodegroup["git_name"].lower()
+ nodegroup_kustomization_name = nodegroup_name
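+        # Scaling is confirmed when the Kustomization is Ready and the
+        # provider-reported desiredSize equals the requested node_count
+        # (both compared as strings).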
+ checkings_list = [
+ {
+ "item": "kustomization",
+ "name": nodegroup_kustomization_name,
+ "namespace": "managed-resources",
+ "condition": {
+ "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
+ "value": "True",
+ },
+ "timeout": self._checkloop_kustomization_timeout,
+ "enable": True,
+ "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
+ },
+ {
+ "item": "nodepool_aws",
+ "name": nodegroup_name,
+ "namespace": "",
+ "condition": {
+ "jsonpath_filter": "status.atProvider.scalingConfig[0].desiredSize",
+ "value": f"{op_params['node_count']}",
+ },
+ "timeout": self._checkloop_resource_timeout,
+ "enable": True,
+ "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.NODEGROUP",
+ },
+ ]
+ self.logger.info(f"Checking list: {checkings_list}")
+ return await self.common_check_list(
+ op_id, checkings_list, "nodegroups", db_nodegroup
+ )
+
+ async def delete(self, params, order_id):
+ self.logger.info("Delete nodegroup Enter")
+
+ op_id = params["operation_id"]
+ nodegroup_id = params["nodegroup_id"]
+
+        # Initialize the operation states
+ self.initialize_operation(nodegroup_id, op_id)
+
+ db_nodegroup = self.db.get_one(self.db_collection, {"_id": nodegroup_id})
+ db_cluster = self.db.get_one("clusters", {"_id": db_nodegroup["cluster_id"]})
+ op_params = self.get_operation_params(db_nodegroup, op_id)
+
+ workflow_content = {"nodegroup": db_nodegroup, "cluster": db_cluster}
+
+ workflow_res, workflow_name = await self.odu.launch_workflow(
+ "delete_nodegroup", op_id, op_params, workflow_content
+ )
+ self.logger.info("workflow_name is: {}".format(workflow_name))
+
+ workflow_status = await self.check_workflow_and_update_db(
+ op_id, workflow_name, db_nodegroup
+ )
+
+        # Clean up items used in the workflow, regardless of whether it succeeded
+        clean_status, clean_msg = await self.odu.clean_items_workflow(
+            "delete_nodegroup", op_id, op_params, workflow_content
+        )
+        self.logger.info(
+            f"clean_status is: {clean_status} and clean_msg is: {clean_msg}"
+        )
+
+        resource_status = False
+        if workflow_status:
+            resource_status, content = await self.check_resource_and_update_db(
+                "delete_nodegroup", op_id, op_params, db_nodegroup
+            )
+
+ if resource_status:
+            node_count = db_cluster.get("node_count", 1)
+            new_node_count = node_count - 1
+ self.logger.info(f"New Node count: {new_node_count}")
+ db_cluster["node_count"] = new_node_count
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
+ db_nodegroup["state"] = "DELETED"
+ self.db.set_one(
+ self.db_collection, {"_id": db_nodegroup["_id"]}, db_nodegroup
+ )
+ self.db.del_one(self.db_collection, {"_id": db_nodegroup["_id"]})
+ self.logger.info(
+ f"Nodegroup Delete Exit with resource status: {resource_status}"
+ )
+ return
+
+ async def check_delete_nodegroup(self, op_id, op_params, content):
+ self.logger.info(
+ f"check_delete_nodegroup Operation {op_id}. Params: {op_params}."
+ )
+ db_nodegroup = content
+ nodegroup_name = db_nodegroup["git_name"].lower()
+ nodegroup_kustomization_name = nodegroup_name
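+        # Deletion is confirmed once both the Kustomization and the cloud
+        # nodepool object are gone ("deleted": True waits for their absence).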
+ checkings_list = [
+ {
+ "item": "kustomization",
+ "name": nodegroup_kustomization_name,
+ "namespace": "managed-resources",
+ "deleted": True,
+ "timeout": self._checkloop_kustomization_timeout,
+ "enable": True,
+ "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
+ },
+ {
+ "item": "nodepool_aws",
+ "name": nodegroup_name,
+ "namespace": "",
+ "deleted": True,
+ "timeout": self._checkloop_resource_timeout,
+ "enable": True,
+ "resourceState": "IN_PROGRESS.RESOURCE_DELETED.NODEGROUP",
+ },
+ ]
+ self.logger.info(f"Checking list: {checkings_list}")
+ return await self.common_check_list(
+ op_id, checkings_list, "nodegroups", db_nodegroup
+ )
+
+
class ClusterLcm(GitOpsLcm):
db_collection = "clusters"
task = asyncio.ensure_future(self.ksu.move(params, order_id))
self.lcm_tasks.register("ksu", ksu_id, op_id, "ksu_move", task)
return
+ elif topic == "nodegroup":
+ nodegroup_id = params["nodegroup_id"]
+ op_id = params["operation_id"]
+ if command == "add_nodegroup":
+ task = asyncio.ensure_future(self.nodegroup.create(params, order_id))
+ self.lcm_tasks.register(
+ "nodegroup", nodegroup_id, op_id, "add_node", task
+ )
+ return
+ elif command == "scale_nodegroup":
+ task = asyncio.ensure_future(self.nodegroup.scale(params, order_id))
+ self.lcm_tasks.register(
+ "nodegroup", nodegroup_id, op_id, "scale_node", task
+ )
+ return
+ elif command == "delete_nodegroup":
+ task = asyncio.ensure_future(self.nodegroup.delete(params, order_id))
+ self.lcm_tasks.register(
+ "nodegroup", nodegroup_id, op_id, "delete_node", task
+ )
+ return
self.logger.critical("unknown topic {} and command '{}'".format(topic, command))
"k8s_infra_config",
"oka",
"ksu",
+ "nodegroup",
)
self.logger.debug(
"Consecutive errors: {} first start: {}".format(
)
self.oka = k8s.OkaLcm(self.msg, self.lcm_tasks, self.main_config.to_dict())
self.ksu = k8s.KsuLcm(self.msg, self.lcm_tasks, self.main_config.to_dict())
+ self.nodegroup = k8s.NodeGroupLcm(
+ self.msg, self.lcm_tasks, self.main_config.to_dict()
+ )
self.logger.info(
"Msg: {} lcm tasks: {} main config: {}".format(
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import yaml
+
+
+def gather_age_key(cluster):
+    """Return the (public, private) age key pair stored in the cluster record."""
+    pubkey = cluster.get("age_pubkey")
+    privkey = cluster.get("age_privkey")
+    return pubkey, privkey
+
+
+async def add_nodegroup(self, op_id, op_params, content):
+ self.logger.info(f"Add Nodegroup Enter. Operation {op_id}. Params: {op_params}")
+
+ db_nodegroup = content["nodegroup"]
+ db_cluster = content["cluster"]
+ db_vim_account = content["vim_account"]
+
+ workflow_template = "launcher-add-nodegroup.j2"
+ workflow_name = f"add-nodegroup-{db_nodegroup['_id']}"
+ nodegroup_name = db_nodegroup["git_name"].lower()
+ cluster_name = db_cluster["git_name"].lower()
+ configmap_name = f"{nodegroup_name}-subnet-parameters"
+
+ # Get age key
+ public_key_new_cluster, private_key_new_cluster = gather_age_key(db_cluster)
+
+ # Test kubectl connection
+ self.logger.debug(self._kubectl._get_kubectl_version())
+
+    # Create a temporary secret with the private age key; the workflow needs it
+    # to handle the new nodegroup's encrypted files (assumption), and
+    # clean_items_nodegroup_add removes it once the workflow has finished.
+ secret_name = f"secret-age-{nodegroup_name}"
+ secret_namespace = "osm-workflows"
+ secret_key = "agekey"
+ secret_value = private_key_new_cluster
+ try:
+ await self.create_secret(
+ secret_name,
+ secret_namespace,
+ secret_key,
+ secret_value,
+ )
+ except Exception as e:
+ self.logger.info(f"Cannot create secret {secret_name}: {e}")
+ return False, f"Cannot create secret {secret_name}: {e}"
+
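+    # Merge private and public subnets into a single list; the workflow reads
+    # it from the ConfigMap created below.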
+ private_subnet = op_params.get("private_subnet", [])
+ public_subnet = op_params.get("public_subnet", [])
+ subnet = private_subnet + public_subnet
+ self.logger.info(f"Subnets: {subnet}")
+ # Create the ConfigMap for the subnets
+ # TODO: this should be done in a declarative way, not imperative
+ try:
+ await self.create_configmap(
+ configmap_name,
+ "managed-resources",
+ {"subnet": subnet},
+ )
+ except Exception as e:
+ self.logger.info(f"Cannot create configmap {configmap_name}: {e}")
+ return False, f"Cannot create configmap {configmap_name}: {e}"
+
+ # Additional params for the workflow
+ nodegroup_kustomization_name = nodegroup_name
+ osm_project_name = "osm_admin" # TODO: get project name from content
+ vim_account_id = db_cluster["vim_account"]
+ providerconfig_name = f"{vim_account_id}-config"
+ vim_type = db_vim_account["vim_type"]
+ if db_cluster.get("bootstrap", True):
+ skip_bootstrap = "false"
+ else:
+ skip_bootstrap = "true"
+ if vim_type == "azure":
+ cluster_type = "aks"
+ elif vim_type == "aws":
+ cluster_type = "eks"
+ elif vim_type == "gcp":
+ cluster_type = "gke"
+ else:
+        raise Exception(f"VIM type '{vim_type}' is not suitable for adding a nodegroup")
+
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ nodegroup_name=nodegroup_name,
+ nodegroup_kustomization_name=nodegroup_kustomization_name,
+ cluster_name=cluster_name,
+ cluster_type=cluster_type,
+ role=db_nodegroup.get("iam_role", "default"),
+ providerconfig_name=providerconfig_name,
+ public_key_mgmt=self._pubkey,
+ public_key_new_cluster=public_key_new_cluster,
+ secret_name_private_key_new_cluster=secret_name,
+ configmap_name=configmap_name,
+ vm_size=db_nodegroup["node_size"],
+ node_count=db_nodegroup["node_count"],
+ cluster_location=db_cluster["region_name"],
+ osm_project_name=osm_project_name,
+ rg_name=db_cluster.get("resource_group", "''"),
+ preemptible_nodes=db_cluster.get("preemptible_nodes", "false"),
+ skip_bootstrap=skip_bootstrap,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.debug(f"Workflow manifest: {manifest}")
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return True, workflow_name
+
+
+async def scale_nodegroup(self, op_id, op_params, content):
+ self.logger.info(f"Scale nodegroup Enter. Operation {op_id}. Params: {op_params}")
+
+ db_nodegroup = content["nodegroup"]
+ db_cluster = content["cluster"]
+ db_vim_account = content["vim_account"]
+
+ workflow_template = "launcher-scale-nodegroup.j2"
+ workflow_name = f"scale-nodegroup-{db_nodegroup['_id']}"
+ nodegroup_name = db_nodegroup["git_name"].lower()
+ cluster_name = db_cluster["git_name"].lower()
+
+ # Get age key
+ public_key_new_cluster, private_key_new_cluster = gather_age_key(db_cluster)
+
+ # Test kubectl connection
+ self.logger.debug(self._kubectl._get_kubectl_version())
+
+    # Create a temporary secret with the age key
+ secret_name = f"secret-age-{nodegroup_name}"
+ secret_namespace = "osm-workflows"
+ secret_key = "agekey"
+ secret_value = private_key_new_cluster
+ try:
+ await self.create_secret(
+ secret_name,
+ secret_namespace,
+ secret_key,
+ secret_value,
+ )
+ except Exception as e:
+ self.logger.info(f"Cannot create secret {secret_name}: {e}")
+ return False, f"Cannot create secret {secret_name}: {e}"
+
+ # Additional params for the workflow
+ nodegroup_kustomization_name = nodegroup_name
+ osm_project_name = "osm_admin" # TODO: get project name from content
+ vim_type = db_vim_account["vim_type"]
+ if vim_type == "azure":
+ cluster_type = "aks"
+ elif vim_type == "aws":
+ cluster_type = "eks"
+ elif vim_type == "gcp":
+ cluster_type = "gke"
+ else:
+        raise Exception(f"VIM type '{vim_type}' is not suitable for scaling a nodegroup")
+
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ nodegroup_name=nodegroup_name,
+ nodegroup_kustomization_name=nodegroup_kustomization_name,
+ cluster_name=cluster_name,
+ cluster_type=cluster_type,
+ node_count=op_params["node_count"],
+ public_key_mgmt=self._pubkey,
+ public_key_new_cluster=public_key_new_cluster,
+ secret_name_private_key_new_cluster=secret_name,
+ osm_project_name=osm_project_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.debug(f"Workflow manifest: {manifest}")
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return True, workflow_name
+
+
+async def delete_nodegroup(self, op_id, op_params, content):
+ self.logger.info(f"Delete nodegroup Enter. Operation {op_id}. Params: {op_params}")
+
+ db_nodegroup = content["nodegroup"]
+ db_cluster = content["cluster"]
+
+ workflow_template = "launcher-delete-nodegroup.j2"
+ workflow_name = f"delete-nodegroup-{db_nodegroup['_id']}"
+    nodegroup_name = db_nodegroup["git_name"].lower()
+    cluster_name = db_cluster["git_name"].lower()
+
+ # Additional params for the workflow
+ nodegroup_kustomization_name = nodegroup_name
+ osm_project_name = "osm_admin" # TODO: get project name from DB
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ nodegroup_name=nodegroup_name,
+        cluster_name=cluster_name,
+ nodegroup_kustomization_name=nodegroup_kustomization_name,
+ osm_project_name=osm_project_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.info(f"Workflow Manifest: {manifest}")
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return True, workflow_name
+
+
+async def clean_items_nodegroup_add(self, op_id, op_params, content):
+ self.logger.info(
+ f"clean_items_nodegroup_add Enter. Operation {op_id}. Params: {op_params}"
+ )
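+    # The only temporary item is the age-key secret created by add_nodegroup
+    # in the osm-workflows namespace.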
+ items = {
+ "secrets": [
+ {
+ "name": f"secret-age-{content['nodegroup']['git_name'].lower()}",
+ "namespace": "osm-workflows",
+ }
+ ],
+ }
+ try:
+ await self.clean_items(items)
+ return True, "OK"
+ except Exception as e:
+ return False, f"Error while cleaning items: {e}"
+
+
+async def clean_items_nodegroup_delete(self, op_id, op_params, content):
+ self.logger.info(
+ f"clean_items_nodegroup_delete Enter. Operation {op_id}. Params: {op_params}"
+ )
+ self.logger.debug(f"Content: {content}")
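+    # Once the nodegroup is gone, the subnet ConfigMap created by
+    # add_nodegroup is no longer needed.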
+ items = {
+ "configmaps": [
+ {
+ "name": f"{content['nodegroup']['git_name'].lower()}-subnet-parameters",
+ "namespace": "managed-resources",
+ }
+ ],
+ }
+ try:
+ await self.clean_items(items)
+ return True, "OK"
+ except Exception as e:
+ return False, f"Error while cleaning items: {e}"
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters - Generic cluster creation
+ - name: nodegroup_name
+ value: {{ nodegroup_name }}
+ - name: nodegroup_kustomization_name
+ value: {{ nodegroup_kustomization_name }}
+ - name: cluster_name
+ value: {{ cluster_name }}
+ ## As of today, one among `aks`, `eks` or `gke`:
+ - name: cluster_type
+ value: {{ cluster_type }}
+ - name: vm_size
+ value: {{ vm_size }}
+ - name: node_count
+ value: "{{ node_count }}"
+ - name: cluster_location
+ value: {{ cluster_location }}
+ - name: providerconfig_name
+ value: {{ providerconfig_name }}
+ - name: configmap_name
+ value: {{ configmap_name }}
+ - name: role
+      value: {{ role }}
+
+ # Specific parameters - Bootstrap and credentials
+ - name: public_key_mgmt
+ value: "{{ public_key_mgmt }}"
+ - name: public_key_new_cluster
+ value: "{{ public_key_new_cluster }}"
+ - name: secret_name_private_age_key_for_new_cluster
+ value: "{{ secret_name_private_key_new_cluster }}"
+ - name: key_name_in_secret
+ value: "agekey"
+ - name: mgmt_project_name
+ value: "{{ osm_project_name }}"
+
+ # Specific parameters - AKS only
+ - name: rg_name
+ value: {{ rg_name }}
+
+ # Specific parameters - GKE only
+ - name: preemptible_nodes
+ value: "{{ preemptible_nodes }}"
+
+ # Advanced parameters - Recommended to keep defaults
+ - name: skip_bootstrap
+ value: "{{ skip_bootstrap }}"
+ - name: mgmt_cluster_name
+ value: "_management"
+ - name: base_templates_path
+ value: "cloud-resources"
+ - name: cloned_fleet_folder_name
+ value: "fleet-osm"
+ - name: cloned_sw_catalogs_folder_name
+ value: "sw-catalogs-osm"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
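+  # Hand off execution to a pre-existing WorkflowTemplate in the same
+  # namespace where this Workflow is submitted (osm-workflows).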
+ workflowTemplateRef:
+ name: full-create-nodegroup-wft
\ No newline at end of file
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters
+ - name: nodegroup_kustomization_name
+ value: {{ nodegroup_kustomization_name }}
+ - name: cluster_name
+ value: {{ cluster_name }}
+ - name: project_name
+ value: "{{ osm_project_name }}"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 1000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 500 # Time to live after workflow is successful
+ secondsAfterFailure: 500 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-delete-nodegroup-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters - Generic cluster creation
+ - name: nodegroup_name
+ value: {{ nodegroup_name }}
+ - name: nodegroup_kustomization_name
+ value: {{ nodegroup_kustomization_name }}
+ - name: cluster_name
+ value: {{ cluster_name }}
+ ## As of today, one among `aks`, `eks` or `gke`:
+ - name: cluster_type
+ value: {{ cluster_type }}
+ - name: node_count
+ value: "{{ node_count }}"
+ # Specific parameters - Bootstrap and credentials
+ - name: public_key_mgmt
+ value: "{{ public_key_mgmt }}"
+ - name: public_key_new_cluster
+ value: "{{ public_key_new_cluster }}"
+ - name: secret_name_private_age_key_for_new_cluster
+ value: "{{ secret_name_private_key_new_cluster }}"
+ - name: key_name_in_secret
+ value: "agekey"
+ - name: mgmt_project_name
+ value: "{{ osm_project_name }}"
+
+ # Advanced parameters - Recommended to keep defaults
+ - name: mgmt_cluster_name
+ value: "_management"
+ - name: base_templates_path
+ value: "cloud-resources"
+ - name: cloned_fleet_folder_name
+ value: "fleet-osm"
+ - name: cloned_sw_catalogs_folder_name
+ value: "sw-catalogs-osm"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-scale-nodegroup-wft
\ No newline at end of file
from osm_lcm.odu_libs import (
vim_mgmt as odu_vim_mgmt,
cluster_mgmt as odu_cluster_mgmt,
+ nodegroup as odu_nodegroup,
ksu as odu_ksu,
oka as odu_oka,
profiles as odu_profiles,
"dummy_operation": {
"workflow_function": self.dummy_operation,
},
+ "add_nodegroup": {
+ "workflow_function": self.add_nodegroup,
+ "clean_function": self.clean_items_nodegroup_add,
+ },
+ "scale_nodegroup": {
+ "workflow_function": self.scale_nodegroup,
+ },
+ "delete_nodegroup": {
+ "workflow_function": self.delete_nodegroup,
+ "clean_function": self.clean_items_nodegroup_delete,
+ },
}
super().__init__(msg, self.logger)
clean_items_cluster_register = odu_cluster_mgmt.clean_items_cluster_register
clean_items_cluster_deregister = odu_cluster_mgmt.clean_items_cluster_deregister
get_cluster_credentials = odu_cluster_mgmt.get_cluster_credentials
+ add_nodegroup = odu_nodegroup.add_nodegroup
+ scale_nodegroup = odu_nodegroup.scale_nodegroup
+ delete_nodegroup = odu_nodegroup.delete_nodegroup
+ clean_items_nodegroup_add = odu_nodegroup.clean_items_nodegroup_add
+ clean_items_nodegroup_delete = odu_nodegroup.clean_items_nodegroup_delete
create_ksus = odu_ksu.create_ksus
update_ksus = odu_ksu.update_ksus
delete_ksus = odu_ksu.delete_ksus