include README.rst
recursive-include osm_lcm *.py *.xml *.sh lcm.cfg *.txt
recursive-include devops-stages *
+recursive-include osm_lcm/odu_libs/templates *.j2
self.logger_name = "lcm.prometheus"
class MonitoringConfig(OsmConfigman):
    """Configuration section for the monitoring / service-KPI subsystem."""

    # Toggle for the legacy service-assurance stack.
    # NOTE(review): semantics inferred from the flag name only — confirm.
    old_sa: bool = True
+
+
class GitopsConfig(OsmConfigman):
    """Configuration section for the GitOps (ODU) integration."""

    # NOTE(review): the fields below are annotated `str` but default to None;
    # they are effectively optional strings expected to be filled in from the
    # LCM configuration file — confirm before tightening the annotations.
    repo_base_url: str = None
    repo_user: str = None
    pubkey: str = None
    # Kubeconfig used to reach the management cluster.
    mgmtcluster_kubeconfig: str = "/etc/osm/mgmtcluster-kubeconfig.yaml"
    loglevel: str = "DEBUG"
    logfile: str = None
    logger_name: str = None

    def transform(self):
        # Post-load hook: pin the logger name for this subsystem.
        self.logger_name = "lcm.gitops"
+
+
# Main configuration Template
storage: StorageConfig = StorageConfig()
message: MessageConfig = MessageConfig()
tsdb: TsdbConfig = TsdbConfig()
+ servicekpi: MonitoringConfig = MonitoringConfig()
+ gitops: GitopsConfig = GitopsConfig()
def transform(self):
for attribute in dir(self):
--- /dev/null
+# -*- coding: utf-8 -*-
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = (
+ "Shrinithi R <shrinithi.r@tataelxsi.co.in>",
+ "Shahithya Y <shahithya.y@tataelxsi.co.in>",
+)
+
+import logging
+from osm_lcm.lcm_utils import LcmBase
+from copy import deepcopy
+from osm_lcm import odu_workflows
+from osm_lcm import vim_sdn
+
+
class ClusterLcm(LcmBase):
    """LCM handler for the "clusters" collection.

    Each lifecycle operation (create/delete/register/deregister/update and
    profile attach/detach) follows the same pattern: launch an ODU workflow,
    wait for its status, persist the intermediate state to the DB, then — only
    if the workflow succeeded — check the resource status and persist the
    final state.
    """

    db_collection = "clusters"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """

        self.logger = logging.getLogger("lcm.clusterlcm")
        self.lcm_tasks = lcm_tasks
        # ODU workflow engine that executes the actual cluster operations.
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)
        # Legacy K8s-cluster registration handler.
        self.regist = vim_sdn.K8sClusterLcm(msg, self.lcm_tasks, config)

        super().__init__(msg, self.logger)

    async def create(self, op_id, op_params, content):
        """Create a cluster via the "create_cluster" workflow.

        :param op_id: operation identifier
        :param op_params: operation parameters for the workflow
        :param content: dict holding the "cluster" DB record under "cluster"
        """
        self.logger.info("cluster Create Enter")
        db_cluster = content["cluster"]

        workflow_name = await self.odu.launch_workflow(
            "create_cluster", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            db_cluster["state"] = "CREATED"
            db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_cluster["state"] = "FAILED_CREATION"
            db_cluster["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        db_cluster = self.update_operation_history(db_cluster, workflow_status, None)
        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "create_cluster", op_id, op_params, content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                db_cluster["resourceState"] = "READY"
            else:
                db_cluster["resourceState"] = "ERROR"

            db_cluster["operatingState"] = "IDLE"
            db_cluster = self.update_operation_history(
                db_cluster, workflow_status, resource_status
            )
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
            # Propagate the cluster state to its attached profiles.
            self.update_profile_state(db_cluster, workflow_status, resource_status)
        return

    def update_profile_state(self, db_cluster, workflow_status, resource_status):
        """Copy the cluster's state fields onto each attached profile record."""
        profiles = [
            "infra_controller_profiles",
            "infra_config_profiles",
            "app_profiles",
            "resource_profiles",
        ]
        # Maps profile-list field name -> DB collection holding that profile.
        profiles_collection = {
            "infra_controller_profiles": "k8sinfra_controller",
            "infra_config_profiles": "k8sinfra_config",
            "app_profiles": "k8sapp",
            "resource_profiles": "k8sresource",
        }
        for profile_type in profiles:
            profile_id = db_cluster[profile_type]
            self.logger.info("profile id is : {}".format(profile_id))
            db_collection = profiles_collection[profile_type]
            self.logger.info("the db_collection is :{}".format(db_collection))
            db_profile = self.db.get_one(db_collection, {"_id": profile_id})
            self.logger.info("the db_profile is :{}".format(db_profile))
            db_profile["state"] = db_cluster["state"]
            db_profile["resourceState"] = db_cluster["resourceState"]
            db_profile["operatingState"] = db_cluster["operatingState"]
            db_profile = self.update_operation_history(
                db_profile, workflow_status, resource_status
            )
            self.logger.info("the db_profile is :{}".format(db_profile))
            self.db.set_one(db_collection, {"_id": db_profile["_id"]}, db_profile)

    async def delete(self, op_id, op_params, content):
        """Delete a cluster via the "delete_cluster" workflow.

        On success the DB record (and its default profiles) is removed.
        """
        self.logger.info("cluster delete Enter")
        db_cluster = content["cluster"]

        workflow_name = await self.odu.launch_workflow(
            "delete_cluster", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            db_cluster["state"] = "DELETED"
            db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_cluster["state"] = "FAILED_DELETION"
            db_cluster["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        db_cluster = self.update_operation_history(db_cluster, workflow_status, None)
        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "delete_cluster", op_id, op_params, content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                db_cluster["resourceState"] = "READY"
            else:
                db_cluster["resourceState"] = "ERROR"

            db_cluster["operatingState"] = "IDLE"
            db_cluster = self.update_operation_history(
                db_cluster, workflow_status, resource_status
            )
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        # To delete it from DB
        if db_cluster["state"] == "DELETED":
            self.delete_cluster(db_cluster)
        return

    def delete_cluster(self, db_cluster):
        """Remove the cluster record and clean up its profile references.

        Profiles whose name matches the cluster name are treated as the
        cluster's default profiles and are deleted outright; any other profile
        is only detached (its id removed from the cluster's profile list).
        """
        # Actually, item_content is equal to db_cluster
        # item_content = self.db.get_one("clusters", {"_id": db_cluster["_id"]})
        # self.logger.debug("item_content is : {}".format(item_content))

        # detach profiles
        update_dict = None
        profiles_to_detach = [
            "infra_controller_profiles",
            "infra_config_profiles",
            "app_profiles",
            "resource_profiles",
        ]
        profiles_collection = {
            "infra_controller_profiles": "k8sinfra_controller",
            "infra_config_profiles": "k8sinfra_config",
            "app_profiles": "k8sapp",
            "resource_profiles": "k8sresource",
        }
        for profile_type in profiles_to_detach:
            if db_cluster.get(profile_type):
                self.logger.info("the profile_type is :{}".format(profile_type))
                profile_ids = db_cluster[profile_type]
                self.logger.info("the profile_ids is :{}".format(profile_ids))
                # Iterate over a copy so profile_ids can be mutated safely.
                profile_ids_copy = deepcopy(profile_ids)
                self.logger.info("the profile_ids_copy is :{}".format(profile_ids_copy))
                for profile_id in profile_ids_copy:
                    self.logger.info("the profile_id is :{}".format(profile_id))
                    db_collection = profiles_collection[profile_type]
                    self.logger.info("the db_collection is :{}".format(db_collection))
                    db_profile = self.db.get_one(db_collection, {"_id": profile_id})
                    self.logger.info("the db_profile is :{}".format(db_profile))
                    self.logger.info(
                        "the item_content name is :{}".format(db_cluster["name"])
                    )
                    self.logger.info(
                        "the db_profile name is :{}".format(db_profile["name"])
                    )
                    if db_cluster["name"] == db_profile["name"]:
                        # Default profile (same name as the cluster): delete it.
                        self.logger.info("it is getting into if default")
                        self.db.del_one(db_collection, {"_id": profile_id})
                    else:
                        # Non-default profile: only detach it from the cluster.
                        self.logger.info("it is getting into else non default")
                        profile_ids.remove(profile_id)
                        update_dict = {profile_type: profile_ids}
                        self.logger.info(f"the update dict is :{update_dict}")
                        self.db.set_one(
                            "clusters", {"_id": db_cluster["_id"]}, update_dict
                        )
        self.db.del_one("clusters", {"_id": db_cluster["_id"]})
        self.logger.info("the id is :{}".format(db_cluster["_id"]))

    async def attach_profile(self, op_id, op_params, content):
        """Attach a profile to a cluster via the corresponding workflow.

        :param content: dict with the "cluster" record and the "profile"
            record (the latter carries "profile_type" and "_id").
        """
        self.logger.info("profile attach Enter")
        db_cluster = content["cluster"]
        db_profile = content["profile"]
        profile_type = db_profile["profile_type"]
        profile_id = db_profile["_id"]
        self.logger.info("profile type is : {}".format(profile_type))
        self.logger.info("profile id is : {}".format(profile_id))

        workflow_name = await self.odu.launch_workflow(
            "attach_profile_to_cluster", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_cluster["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        db_cluster = self.update_operation_history(db_cluster, workflow_status, None)
        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "attach_profile_to_cluster", op_id, op_params, content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                db_cluster["resourceState"] = "READY"
            else:
                db_cluster["resourceState"] = "ERROR"

            db_cluster["operatingState"] = "IDLE"
            db_cluster = self.update_operation_history(
                db_cluster, workflow_status, resource_status
            )
            profile_list = db_cluster[profile_type]
            self.logger.info("profile list is : {}".format(profile_list))
            if resource_status:
                # Only record the attachment once the resource is confirmed.
                self.logger.info("it is getting into resource status true")
                profile_list.append(profile_id)
                self.logger.info("profile list is : {}".format(profile_list))
                db_cluster[profile_type] = profile_list
                self.logger.info("db cluster is : {}".format(db_cluster))
            # update_dict = {item: profile_list}
            # self.logger.info("the update_dict is :{}".format(update_dict))
            # self.db.set_one(self.topic, filter_q, update_dict)
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        return

    async def detach_profile(self, op_id, op_params, content):
        """Detach a profile from a cluster via the corresponding workflow."""
        self.logger.info("profile dettach Enter")
        db_cluster = content["cluster"]
        db_profile = content["profile"]
        profile_type = db_profile["profile_type"]
        profile_id = db_profile["_id"]
        self.logger.info("profile type is : {}".format(profile_type))
        self.logger.info("profile id is : {}".format(profile_id))

        workflow_name = await self.odu.launch_workflow(
            "detach_profile_from_cluster", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_cluster["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        db_cluster = self.update_operation_history(db_cluster, workflow_status, None)
        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "detach_profile_from_cluster", op_id, op_params, content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                db_cluster["resourceState"] = "READY"
            else:
                db_cluster["resourceState"] = "ERROR"

            db_cluster["operatingState"] = "IDLE"
            db_cluster = self.update_operation_history(
                db_cluster, workflow_status, resource_status
            )
            profile_list = db_cluster[profile_type]
            self.logger.info("profile list is : {}".format(profile_list))
            if resource_status:
                # Only drop the reference once the resource is confirmed gone.
                self.logger.info("it is getting into resource status true")
                profile_list.remove(profile_id)
                self.logger.info("profile list is : {}".format(profile_list))
                db_cluster[profile_type] = profile_list
                self.logger.info("db cluster is : {}".format(db_cluster))
            # update_dict = {item: profile_list}
            # self.logger.info("the update_dict is :{}".format(update_dict))
            # self.db.set_one(self.topic, filter_q, update_dict)
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        return

    async def register(self, op_id, op_params, content):
        """Register an existing (externally created) cluster."""
        self.logger.info("cluster register enter")
        db_cluster = content["cluster"]

        workflow_name = await self.odu.launch_workflow(
            "register_cluster", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            db_cluster["state"] = "CREATED"
            db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_cluster["state"] = "FAILED_CREATION"
            db_cluster["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        db_cluster = self.update_operation_history(db_cluster, workflow_status, None)
        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "register_cluster", op_id, op_params, content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                db_cluster["resourceState"] = "READY"
            else:
                db_cluster["resourceState"] = "ERROR"

            db_cluster["operatingState"] = "IDLE"
            db_cluster = self.update_operation_history(
                db_cluster, workflow_status, resource_status
            )
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
            self.update_profile_state(db_cluster, workflow_status, resource_status)
        return

    async def deregister(self, op_id, op_params, content):
        """Deregister a cluster; on success its DB record is removed."""
        self.logger.info("cluster deregister enter")
        db_cluster = content["cluster"]

        self.logger.info("db_cluster is : {}".format(db_cluster))

        workflow_name = await self.odu.launch_workflow(
            "deregister_cluster", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            db_cluster["state"] = "DELETED"
            db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_cluster["state"] = "FAILED_DELETION"
            db_cluster["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        db_cluster = self.update_operation_history(db_cluster, workflow_status, None)
        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "deregister_cluster", op_id, op_params, content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                db_cluster["resourceState"] = "READY"
            else:
                db_cluster["resourceState"] = "ERROR"

            db_cluster["operatingState"] = "IDLE"
            db_cluster = self.update_operation_history(
                db_cluster, workflow_status, resource_status
            )
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        # To delete it from DB
        if db_cluster["state"] == "DELETED":
            self.db.del_one("clusters", {"_id": db_cluster["_id"]})
        return

    async def get_creds(self, db_cluster):
        """Fetch the cluster credentials from ODU and persist them (best-effort)."""
        self.logger.info("Cluster get creds Enter")
        result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
        if result:
            db_cluster["credentials"] = cluster_creds
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
        return

    async def update(self, op_id, op_params, content):
        """Update a cluster (k8s version or node count) via the workflow."""
        self.logger.info("Cluster update Enter")
        db_cluster = content["cluster"]

        workflow_name = await self.odu.launch_workflow(
            "update_cluster", op_id, op_params, content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )

        if workflow_status:
            db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_cluster["resourceState"] = "ERROR"

        db_cluster = self.update_operation_history(db_cluster, workflow_status, None)
        # self.logger.info("Db content: {}".format(db_content))
        # self.db.set_one(self.db_collection, {"_id": _id}, db_cluster)
        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "update_cluster", op_id, op_params, content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )

            if resource_status:
                db_cluster["resourceState"] = "READY"
            else:
                db_cluster["resourceState"] = "ERROR"

            db_cluster["operatingState"] = "IDLE"
            db_cluster = self.update_operation_history(
                db_cluster, workflow_status, resource_status
            )
            # self.logger.info("db_cluster: {}".format(db_cluster))
            # TODO: verify exit condition
            # For the moment, if the workflow completed successfully, then we update the db accordingly.
            if workflow_status:
                # NOTE(review): elif means only one of the two fields is
                # applied per operation — presumably each update op carries a
                # single change; confirm against the caller.
                if "k8s_version" in op_params:
                    db_cluster["k8s_version"] = op_params["k8s_version"]
                elif "node_count" in op_params:
                    db_cluster["node_count"] = op_params["node_count"]
            # self.db.set_one(self.db_collection, {"_id": _id}, db_content)
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
        return
+
+
class CloudCredentialsLcm(LcmBase):
    """LCM handler for cloud credentials stored in "vim_accounts".

    Each operation launches the corresponding ODU workflow and, when the
    workflow succeeds, checks the status of the affected resource. The
    shared launch/check sequence lives in :meth:`_run_operation` (the three
    public methods previously triplicated it verbatim).
    """

    db_collection = "vim_accounts"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """

        self.logger = logging.getLogger("lcm.cloud_credentials")
        self.lcm_tasks = lcm_tasks
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)

        super().__init__(msg, self.logger)

    async def _run_operation(self, workflow_type, op_id, op_params, content):
        """Launch a workflow and, on success, check the resource status.

        :param workflow_type: ODU workflow identifier (e.g. "create_cloud_credentials")
        :return: tuple (workflow_status, resource_status); resource_status is
            None when the workflow itself failed and no check was performed.
        """
        workflow_name = await self.odu.launch_workflow(
            workflow_type, op_id, op_params, content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
        )

        resource_status = None
        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                workflow_type, op_id, op_params, content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )
        return workflow_status, resource_status

    async def add(self, op_id, op_params, content):
        """Create cloud credentials via the ODU workflow."""
        self.logger.info("Cloud Credentials create")
        await self._run_operation("create_cloud_credentials", op_id, op_params, content)
        return

    async def edit(self, op_id, op_params, content):
        """Update cloud credentials via the ODU workflow."""
        # Entry log added for consistency with add()/remove(), which both
        # announce the operation on entry.
        self.logger.info("Cloud Credentials update")
        await self._run_operation("update_cloud_credentials", op_id, op_params, content)
        return

    async def remove(self, op_id, op_params, content):
        """Delete cloud credentials and, on workflow success, drop the DB record."""
        self.logger.info("Cloud Credentials delete")
        workflow_status, _ = await self._run_operation(
            "delete_cloud_credentials", op_id, op_params, content
        )
        if workflow_status:
            # NOTE(review): as in the original flow, the record is removed on
            # workflow success even if the resource check reported a failure.
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        return
+
+
class K8sAppLcm(LcmBase):
    """LCM handler for application profiles in the "k8sapp" collection."""

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """

        self.logger = logging.getLogger("lcm.clusterlcm")
        self.lcm_tasks = lcm_tasks
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)

        super().__init__(msg, self.logger)

    async def create(self, op_id, op_params, content):
        """Create an app profile through the "create_profile" workflow."""
        self.logger.info("App Create Enter")

        wf_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(wf_name))

        wf_ok, wf_msg = await self.odu.check_workflow_status(wf_name)
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(wf_ok, wf_msg)
        )

        # Record the workflow outcome and persist it.
        content["state"] = "CREATED" if wf_ok else "FAILED_CREATION"
        content["resourceState"] = "IN_PROGRESS.GIT_SYNCED" if wf_ok else "ERROR"
        content = self.update_operation_history(content, wf_ok, None)
        self.db.set_one("k8sapp", {"_id": content["_id"]}, content)

        # Resource check only makes sense after a successful workflow.
        if not wf_ok:
            return

        res_ok, res_msg = await self.odu.check_resource_status(
            "create_profile", op_id, op_params, content
        )
        self.logger.info(
            "resource_status is :{} and resource_msg is :{}".format(res_ok, res_msg)
        )
        content["resourceState"] = "READY" if res_ok else "ERROR"
        content["operatingState"] = "IDLE"
        content = self.update_operation_history(content, wf_ok, res_ok)
        self.db.set_one("k8sapp", {"_id": content["_id"]}, content)

        return

    async def delete(self, op_id, op_params, content):
        """Delete an app profile; remove the DB record when deletion succeeded."""
        self.logger.info("App delete Enter")

        wf_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(wf_name))

        wf_ok, wf_msg = await self.odu.check_workflow_status(wf_name)
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(wf_ok, wf_msg)
        )

        # Record the workflow outcome and persist it.
        content["state"] = "DELETED" if wf_ok else "FAILED_DELETION"
        content["resourceState"] = "IN_PROGRESS.GIT_SYNCED" if wf_ok else "ERROR"
        content = self.update_operation_history(content, wf_ok, None)
        self.db.set_one("k8sapp", {"_id": content["_id"]}, content)

        if not wf_ok:
            return

        res_ok, res_msg = await self.odu.check_resource_status(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info(
            "resource_status is :{} and resource_msg is :{}".format(res_ok, res_msg)
        )
        content["resourceState"] = "READY" if res_ok else "ERROR"
        content["operatingState"] = "IDLE"
        content = self.update_operation_history(content, wf_ok, res_ok)
        self.db.set_one("k8sapp", {"_id": content["_id"]}, content)

        # To delete it from DB
        if content["state"] == "DELETED":
            self.db.del_one("k8sapp", {"_id": content["_id"]})
        return
+
+
class K8sResourceLcm(LcmBase):
    """LCM handler for resource profiles in the "k8sresource" collection."""

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """

        # NOTE(review): logger name is shared with the cluster LCM handler —
        # confirm this copy-pasted name is intended.
        self.logger = logging.getLogger("lcm.clusterlcm")
        self.lcm_tasks = lcm_tasks
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)

        super().__init__(msg, self.logger)

    async def create(self, op_id, op_params, content):
        """Create a resource profile through the "create_profile" workflow."""
        self.logger.info("Resource Create Enter")

        workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            content["state"] = "CREATED"
            content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            content["state"] = "FAILED_CREATION"
            content["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        content = self.update_operation_history(content, workflow_status, None)
        self.db.set_one("k8sresource", {"_id": content["_id"]}, content)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "create_profile", op_id, op_params, content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                content["resourceState"] = "READY"
            else:
                content["resourceState"] = "ERROR"

            content["operatingState"] = "IDLE"
            content = self.update_operation_history(
                content, workflow_status, resource_status
            )
            self.db.set_one("k8sresource", {"_id": content["_id"]}, content)

        return

    async def delete(self, op_id, op_params, content):
        """Delete a resource profile; remove the DB record on success."""
        self.logger.info("Resource delete Enter")
        # Refresh the record from the DB before launching the workflow.
        content = self.db.get_one("k8sresource", {"_id": content["_id"]})

        workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            content["state"] = "DELETED"
            content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            content["state"] = "FAILED_DELETION"
            content["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        content = self.update_operation_history(content, workflow_status, None)
        self.db.set_one("k8sresource", {"_id": content["_id"]}, content)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "delete_profile", op_id, op_params, content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                content["resourceState"] = "READY"
            else:
                content["resourceState"] = "ERROR"

            content["operatingState"] = "IDLE"
            content = self.update_operation_history(
                content, workflow_status, resource_status
            )
            self.db.set_one("k8sresource", {"_id": content["_id"]}, content)

        # To delete it from DB
        if content["state"] == "DELETED":
            self.db.del_one("k8sresource", {"_id": content["_id"]})
        return
+
+
class K8sInfraControllerLcm(LcmBase):
    """LCM handler for infra-controller profiles ("k8sinfra_controller")."""

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """

        # NOTE(review): logger name is shared with the cluster LCM handler —
        # confirm this copy-pasted name is intended.
        self.logger = logging.getLogger("lcm.clusterlcm")
        self.lcm_tasks = lcm_tasks
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)

        super().__init__(msg, self.logger)

    async def create(self, op_id, op_params, content):
        """Create an infra-controller profile via the "create_profile" workflow."""
        self.logger.info("Infra controller Create Enter")

        workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            content["state"] = "CREATED"
            content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            content["state"] = "FAILED_CREATION"
            content["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        content = self.update_operation_history(content, workflow_status, None)
        self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, content)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "create_profile", op_id, op_params, content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                content["resourceState"] = "READY"
            else:
                content["resourceState"] = "ERROR"

            content["operatingState"] = "IDLE"
            content = self.update_operation_history(
                content, workflow_status, resource_status
            )
            self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, content)

        return

    async def delete(self, op_id, op_params, content):
        """Delete an infra-controller profile; remove the DB record on success."""
        self.logger.info("Infra controller delete Enter")

        workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            content["state"] = "DELETED"
            content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            content["state"] = "FAILED_DELETION"
            content["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        content = self.update_operation_history(content, workflow_status, None)
        self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, content)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "delete_profile", op_id, op_params, content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                content["resourceState"] = "READY"
            else:
                content["resourceState"] = "ERROR"

            content["operatingState"] = "IDLE"
            content = self.update_operation_history(
                content, workflow_status, resource_status
            )
            self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, content)

        # To delete it from DB
        if content["state"] == "DELETED":
            self.db.del_one("k8sinfra_controller", {"_id": content["_id"]})
        return
+
+
+class K8sInfraConfigLcm(LcmBase):
    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """

        # NOTE(review): logger name is shared with the cluster LCM handler —
        # confirm this copy-pasted name is intended.
        self.logger = logging.getLogger("lcm.clusterlcm")
        self.lcm_tasks = lcm_tasks
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)

        super().__init__(msg, self.logger)
+
+ async def create(self, op_id, op_params, content):
+ self.logger.info("Infra config Create Enter")
+
+ workflow_name = await self.odu.launch_workflow(
+ "create_profile", op_id, op_params, content
+ )
+ self.logger.info("workflow_name is :{}".format(workflow_name))
+
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
+ self.logger.info(
+ "workflow_status is :{} and workflow_msg is :{}".format(
+ workflow_status, workflow_msg
+ )
+ )
+ if workflow_status:
+ content["state"] = "CREATED"
+ content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ else:
+ content["state"] = "FAILED_CREATION"
+ content["resourceState"] = "ERROR"
+ # has to call update_operation_history return content
+ content = self.update_operation_history(content, workflow_status, None)
+ self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, content)
+
+ if workflow_status:
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "create_profile", op_id, op_params, content
+ )
+ self.logger.info(
+ "resource_status is :{} and resource_msg is :{}".format(
+ resource_status, resource_msg
+ )
+ )
+ if resource_status:
+ content["resourceState"] = "READY"
+ else:
+ content["resourceState"] = "ERROR"
+
+ content["operatingState"] = "IDLE"
+ content = self.update_operation_history(
+ content, workflow_status, resource_status
+ )
+ self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, content)
+
+ return
+
+ async def delete(self, op_id, op_params, content):
+ self.logger.info("Infra config delete Enter")
+
+ workflow_name = await self.odu.launch_workflow(
+ "delete_profile", op_id, op_params, content
+ )
+ self.logger.info("workflow_name is :{}".format(workflow_name))
+
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
+ self.logger.info(
+ "workflow_status is :{} and workflow_msg is :{}".format(
+ workflow_status, workflow_msg
+ )
+ )
+ if workflow_status:
+ content["state"] = "DELETED"
+ content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ else:
+ content["state"] = "FAILED_DELETION"
+ content["resourceState"] = "ERROR"
+ # has to call update_operation_history return content
+ content = self.update_operation_history(content, workflow_status, None)
+ self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, content)
+
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "delete_profile", op_id, op_params, content
+ )
+ self.logger.info(
+ "resource_status is :{} and resource_msg is :{}".format(
+ resource_status, resource_msg
+ )
+ )
+ if resource_status:
+ content["resourceState"] = "READY"
+ else:
+ content["resourceState"] = "ERROR"
+
+ content["operatingState"] = "IDLE"
+ content = self.update_operation_history(
+ content, workflow_status, resource_status
+ )
+ self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, content)
+
+ # To delete it from DB
+ if content["state"] == "DELETED":
+ self.db.del_one("k8sinfra_config", {"_id": content["_id"]})
+ return
+
+
class OkaLcm(LcmBase):
    # DB collection holding OSM Knowledge Artifacts (OKA)
    db_collection = "okas"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """

        self.logger = logging.getLogger("lcm.oka")
        self.lcm_tasks = lcm_tasks
        # ODU drives the git-ops workflows that realise the OKA operations
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)

        super().__init__(msg, self.logger)

    async def create(self, op_id, op_params, content):
        """Create an OKA: launch the ODU workflow, persist its outcome, then
        (on success) poll the resource status and persist the final state.

        :param op_id: identifier of the operation being executed
        :param op_params: parameters of the operation
        :param content: OKA document from the DB (mutated and persisted here)
        :return: None
        """
        self.logger.info("OKA Create Enter")
        db_content = content  # alias, not a copy — mutations also touch *content*

        workflow_name = await self.odu.launch_workflow(
            "create_oka", op_id, op_params, db_content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )

        if workflow_status:
            db_content["state"] = "CREATED"
            db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_content["state"] = "FAILED_CREATION"
            db_content["resourceState"] = "ERROR"

        # Persist the workflow outcome before polling the resource status
        db_content = self.update_operation_history(db_content, workflow_status, None)
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "create_oka", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )

            if resource_status:
                db_content["resourceState"] = "READY"
            else:
                db_content["resourceState"] = "ERROR"

            # self.logger.info("Db content: {}".format(db_content))
            db_content = self.update_operation_history(
                db_content, workflow_status, resource_status
            )

        db_content["operatingState"] = "IDLE"
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)

        return

    async def edit(self, op_id, op_params, content):
        """Update an OKA: launch the ODU workflow, persist its outcome, then
        (on success) poll the resource status and persist the final state.

        :param op_id: identifier of the operation being executed
        :param op_params: parameters of the operation
        :param content: OKA document from the DB (mutated and persisted here)
        :return: None
        """
        self.logger.info("OKA Edit Enter")
        db_content = content  # alias, not a copy — mutations also touch *content*

        workflow_name = await self.odu.launch_workflow(
            "update_oka", op_id, op_params, content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )

        if workflow_status:
            db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_content["resourceState"] = "ERROR"

        # Persist the workflow outcome before polling the resource status
        db_content = self.update_operation_history(db_content, workflow_status, None)
        # self.logger.info("Db content: {}".format(db_content))
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "update_oka", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )

            if resource_status:
                db_content["resourceState"] = "READY"
            else:
                db_content["resourceState"] = "ERROR"

            db_content = self.update_operation_history(
                db_content, workflow_status, resource_status
            )

        db_content["operatingState"] = "IDLE"
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        return

    async def delete(self, op_id, op_params, content):
        """Delete an OKA: launch the ODU workflow, persist its outcome, poll
        the resource status on success, and drop the DB document once deleted.

        :param op_id: identifier of the operation being executed
        :param op_params: parameters of the operation
        :param content: OKA document from the DB (mutated and persisted here)
        :return: None
        """
        self.logger.info("OKA delete Enter")
        db_content = content  # alias, not a copy — mutations also touch *content*

        workflow_name = await self.odu.launch_workflow(
            "delete_oka", op_id, op_params, content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )

        if workflow_status:
            db_content["state"] = "DELETED"
            db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_content["state"] = "FAILED_DELETION"
            db_content["resourceState"] = "ERROR"

        # Persist the workflow outcome before polling the resource status
        db_content = self.update_operation_history(db_content, workflow_status, None)
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "delete_oka", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )

            if resource_status:
                db_content["resourceState"] = "READY"
            else:
                db_content["resourceState"] = "ERROR"

            db_content = self.update_operation_history(
                db_content, workflow_status, resource_status
            )

        db_content["operatingState"] = "IDLE"
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)

        # Remove the document only after a successful deletion workflow
        if db_content["state"] == "DELETED":
            self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
        return
+
+
class KsuLcm(LcmBase):
    """LCM handler for KSUs (Kustomize Software Units), collection "ksus".

    create/edit/delete operate on a *list* of KSU documents; clone/move
    operate on a single document.
    """

    db_collection = "ksus"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """

        self.logger = logging.getLogger("lcm.ksu")
        self.lcm_tasks = lcm_tasks
        # ODU drives the git-ops workflows that realise the KSU operations
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)

        super().__init__(msg, self.logger)

    async def create(self, op_id, op_params, content):
        """Create a batch of KSUs via one ODU workflow.

        :param op_id: identifier of the operation being executed
        :param op_params: list of per-KSU operation parameters
        :param content: list of KSU documents (each mutated and persisted)
        :return: None
        """
        self.logger.info("ksu Create Enter")

        workflow_name = await self.odu.launch_workflow(
            "create_ksus", op_id, op_params, content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )

        for db_ksu in content:
            if workflow_status:
                db_ksu["state"] = "CREATED"
                db_ksu["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
            else:
                db_ksu["state"] = "FAILED_CREATION"
                db_ksu["resourceState"] = "ERROR"

            # Persist the workflow outcome for every KSU in the batch
            db_ksu = self.update_operation_history(db_ksu, workflow_status, None)
            self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "create_ksus", op_id, op_params, content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )

            for db_ksu in content:
                if resource_status:
                    db_ksu["resourceState"] = "READY"
                else:
                    db_ksu["resourceState"] = "ERROR"

                db_ksu = self.update_operation_history(
                    db_ksu, workflow_status, resource_status
                )

        for db_ksu in content:
            db_ksu["operatingState"] = "IDLE"
            self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)

        return

    async def edit(self, op_id, op_params, content):
        """Update a batch of KSUs via one ODU workflow.

        On success, each KSU document is refreshed with the name, description,
        profile and OKA list requested in the matching op_params entry.

        :param op_id: identifier of the operation being executed
        :param op_params: list of per-KSU operation parameters (same order as content)
        :param content: list of KSU documents (each mutated and persisted)
        :return: None
        """
        self.logger.info("ksu edit Enter")

        workflow_name = await self.odu.launch_workflow(
            "update_ksus", op_id, op_params, content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )

        for db_ksu in content:
            if workflow_status:
                db_ksu["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
            else:
                db_ksu["resourceState"] = "ERROR"

            db_ksu = self.update_operation_history(db_ksu, workflow_status, None)
            self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "update_ksus", op_id, op_params, content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )

            for db_ksu in content:
                if resource_status:
                    db_ksu["resourceState"] = "READY"
                else:
                    db_ksu["resourceState"] = "ERROR"

                db_ksu = self.update_operation_history(
                    db_ksu, workflow_status, resource_status
                )

        for db_ksu, ksu_params in zip(content, op_params):
            db_ksu["operatingState"] = "IDLE"
            if workflow_status:
                # Apply the requested changes to the stored document
                db_ksu["name"] = ksu_params["name"]
                db_ksu["description"] = ksu_params["description"]
                db_ksu["profile"]["profile_type"] = ksu_params["profile"][
                    "profile_type"
                ]
                db_ksu["profile"]["_id"] = ksu_params["profile"]["_id"]
                db_ksu["oka"] = ksu_params["oka"]
            self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)

        return

    async def delete(self, op_id, op_params, content):
        """Delete a batch of KSUs via one ODU workflow; drop each document
        from the DB once its state is DELETED.

        :param op_id: identifier of the operation being executed
        :param op_params: list of per-KSU operation parameters
        :param content: list of KSU documents (each mutated and persisted)
        :return: None
        """
        self.logger.info("ksu delete Enter")

        workflow_name = await self.odu.launch_workflow(
            "delete_ksus", op_id, op_params, content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )

        for db_ksu in content:
            if workflow_status:
                db_ksu["state"] = "DELETED"
                db_ksu["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
            else:
                db_ksu["state"] = "FAILED_DELETION"
                db_ksu["resourceState"] = "ERROR"

            db_ksu = self.update_operation_history(db_ksu, workflow_status, None)
            self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)

        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "delete_ksus", op_id, op_params, content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )

            for db_ksu in content:
                if resource_status:
                    db_ksu["resourceState"] = "READY"
                else:
                    db_ksu["resourceState"] = "ERROR"

                db_ksu = self.update_operation_history(
                    db_ksu, workflow_status, resource_status
                )

        for db_ksu in content:
            db_ksu["operatingState"] = "IDLE"
            self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)

            if db_ksu["state"] == "DELETED":
                self.db.del_one(self.db_collection, {"_id": db_ksu["_id"]})
        return

    async def clone(self, op_id, op_params, db_content):
        """Clone a single KSU via the ODU workflow engine.

        :param op_id: identifier of the operation being executed
        :param op_params: parameters of the operation
        :param db_content: KSU document from the DB (mutated and persisted)
        :return: None
        """
        self.logger.info("ksu clone Enter")

        workflow_name = await self.odu.launch_workflow(
            "clone_ksus", op_id, op_params, db_content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )

        if workflow_status:
            db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(db_content, workflow_status, None)
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)

        # Bug fix: resource_status was only bound inside the guard below but
        # read unconditionally afterwards, raising NameError whenever the
        # workflow failed. Default it to None.
        resource_status = None
        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "clone_ksus", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )

            if resource_status:
                db_content["resourceState"] = "READY"
            else:
                db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(
            db_content, workflow_status, resource_status
        )

        db_content["operatingState"] = "IDLE"
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        return

    async def move(self, op_id, op_params, db_content):
        """Move a single KSU via the ODU workflow engine.

        :param op_id: identifier of the operation being executed
        :param op_params: parameters of the operation
        :param db_content: KSU document from the DB (mutated and persisted)
        :return: None
        """
        self.logger.info("ksu move Enter")

        workflow_name = await self.odu.launch_workflow(
            "move_ksus", op_id, op_params, db_content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )

        if workflow_status:
            db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(db_content, workflow_status, None)
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)

        # Bug fix: same NameError as in clone() — default resource_status.
        resource_status = None
        if workflow_status:
            resource_status, resource_msg = await self.odu.check_resource_status(
                "move_ksus", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                db_content["resourceState"] = "READY"
            else:
                db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(
            db_content, workflow_status, resource_status
        )

        db_content["operatingState"] = "IDLE"
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        return
+ return
uri: http://prometheus:9090/
# loglevel: DEBUG
# logfile: /var/log/osm/lcm-tsdb.log
+
+gitops:
+ mgmtcluster_kubeconfig: /etc/osm/mgmtcluster-kubeconfig.yaml
+ repo_user: osm-developer
+ # repo_base_url: http://git.<IP_ADDRESS>.nip.io
+ # pubkey: pubkey
+
# DEBUG WITH PDB
import pdb
+import os
import asyncio
import yaml
import logging
import sys
from random import SystemRandom
-from osm_lcm import ns, vim_sdn, netslice
+from osm_lcm import ns, vim_sdn, netslice, k8s
from osm_lcm.ng_ro import NgRoException, NgRoClient
from osm_lcm.ROclient import ROClient, ROClientException
from osm_lcm.data_utils.database.database import Database
from osm_lcm.data_utils.filesystem.filesystem import Filesystem
from osm_lcm.data_utils.lcm_config import LcmCfg
+from osm_lcm.data_utils.list_utils import find_in_list
from osm_lcm.lcm_hc import get_health_check_file
from os import path, getenv
from n2vc import version as n2vc_version
class Lcm:
+ profile_collection_mapping = {
+ "infra_controller_profiles": "k8sinfra_controller",
+ "infra_config_profiles": "k8sinfra_config",
+ "resource_profiles": "k8sresource",
+ "app_profiles": "k8sapp",
+ }
+
ping_interval_pace = (
120 # how many time ping is send once is confirmed all is running
)
self.netslice
) = (
self.vim
- ) = self.wim = self.sdn = self.k8scluster = self.vca = self.k8srepo = None
+ ) = (
+ self.wim
+ ) = (
+ self.sdn
+ ) = (
+ self.k8scluster
+ ) = (
+ self.vca
+ ) = (
+ self.k8srepo
+ ) = (
+ self.cluster
+ ) = (
+ self.k8s_app
+ ) = self.k8s_resource = self.k8s_infra_controller = self.k8s_infra_config = None
# logging
log_format_simple = (
# contains created tasks/futures to be able to cancel
self.lcm_tasks = TaskRegistry(self.worker_id, self.logger)
+ self.logger.info(
+ "Worker_id: {} main_config: {} lcm tasks: {}".format(
+ self.worker_id, self.main_config, self.lcm_tasks
+ )
+ )
+
async def check_RO_version(self):
tries = 14
last_error = None
wait_time = 2 if not first_start else 5
await asyncio.sleep(wait_time)
+ def get_operation_params(self, item, operation_id):
+ operation_history = item.get("operationHistory", [])
+ operation = find_in_list(
+ operation_history, lambda op: op["op_id"] == operation_id
+ )
+ return operation.get("operationParams", {})
+
async def kafka_read_callback(self, topic, command, params):
order_id = 1
-
+ self.logger.info(
+ "Topic: {} command: {} params: {} order ID: {}".format(
+ topic, command, params, order_id
+ )
+ )
if topic != "admin" and command != "ping":
self.logger.debug(
"Task kafka_read receives {} {}: {}".format(topic, command, params)
self.consecutive_errors = 0
self.first_start = False
order_id += 1
+ self.logger.info(
+ "Consecutive error: {} First start: {}".format(
+ self.consecutive_errors, self.first_start
+ )
+ )
if command == "exit":
raise LcmExceptionExit
elif command.startswith("#"):
elif topic == "ns":
if command == "instantiate":
# self.logger.debug("Deploying NS {}".format(nsr_id))
+ self.logger.info("NS instantiate")
nslcmop = params
nslcmop_id = nslcmop["_id"]
nsr_id = nslcmop["nsInstanceId"]
+ self.logger.info(
+ "NsLCMOP: {} NsLCMOP_ID:{} nsr_id: {}".format(
+ nslcmop, nslcmop_id, nsr_id
+ )
+ )
task = asyncio.ensure_future(self.ns.instantiate(nsr_id, nslcmop_id))
self.lcm_tasks.register(
"ns", nsr_id, nslcmop_id, "ns_instantiate", task
task = asyncio.ensure_future(self.ns.migrate(nsr_id, nslcmop_id))
self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_migrate", task)
return
- elif command == "verticalscale":
- nslcmop = params
- nslcmop_id = nslcmop["_id"]
- nsr_id = nslcmop["nsInstanceId"]
- task = asyncio.ensure_future(self.ns.vertical_scale(nsr_id, nslcmop_id))
- self.logger.debug(
- "nsr_id,nslcmop_id,task {},{},{}".format(nsr_id, nslcmop_id, task)
- )
- self.lcm_tasks.register(
- "ns", nsr_id, nslcmop_id, "ns_verticalscale", task
- )
- self.logger.debug(
- "LCM task registered {},{},{} ".format(nsr_id, nslcmop_id, task)
- )
- return
elif command == "show":
nsr_id = params
try:
return
elif topic == "vim_account":
vim_id = params["_id"]
+ op_id = vim_id
+ op_params = params
+ db_vim = self.db.get_one("vim_accounts", {"_id": vim_id})
+ vim_config = db_vim.get("config", {})
+ self.logger.debug("Db Vim: {}".format(db_vim))
if command in ("create", "created"):
+ self.logger.debug("Main config: {}".format(self.main_config.to_dict()))
+ if "credentials" in vim_config:
+ self.logger.info("Vim add cloud credentials")
+ task = asyncio.ensure_future(
+ self.cloud_credentials.add(op_id, op_params, db_vim)
+ )
+ self.lcm_tasks.register(
+ "vim_account", vim_id, op_id, "cloud_credentials_add", task
+ )
if not self.main_config.RO.ng:
+ self.logger.info("Calling RO to create VIM (no NG-RO)")
task = asyncio.ensure_future(self.vim.create(params, order_id))
self.lcm_tasks.register(
"vim_account", vim_id, order_id, "vim_create", task
return
elif command == "delete" or command == "deleted":
self.lcm_tasks.cancel(topic, vim_id)
+ if "credentials" in vim_config:
+ self.logger.info("Vim remove cloud credentials")
+ task = asyncio.ensure_future(
+ self.cloud_credentials.remove(op_id, op_params, db_vim)
+ )
+ self.lcm_tasks.register(
+ "vim_account", vim_id, op_id, "cloud_credentials_remove", task
+ )
task = asyncio.ensure_future(self.vim.delete(params, order_id))
self.lcm_tasks.register(
"vim_account", vim_id, order_id, "vim_delete", task
sys.stdout.flush()
return
elif command in ("edit", "edited"):
+ if "credentials" in vim_config:
+ self.logger.info("Vim update cloud credentials")
+ task = asyncio.ensure_future(
+ self.cloud_credentials.edit(op_id, op_params, db_vim)
+ )
+ self.lcm_tasks.register(
+ "vim_account", vim_id, op_id, "cloud_credentials_update", task
+ )
if not self.main_config.RO.ng:
task = asyncio.ensure_future(self.vim.edit(params, order_id))
self.lcm_tasks.register(
return
elif command == "deleted":
return # TODO cleaning of task just in case should be done
+ elif topic == "cluster":
+ if command != "get_creds":
+ op_id = params["operation_id"]
+ cluster_id = params["cluster_id"]
+ db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
+ op_params = self.get_operation_params(db_cluster, op_id)
+ db_content = {
+ "cluster": db_cluster,
+ }
+ if command == "create" or command == "created":
+ self.logger.debug("cluster_id = {}".format(cluster_id))
+ # db_vim = self.db.get_one("vim_accounts", {"_id": db_cluster["vim_account"]})
+ db_vim = self.db.get_one(
+ "vim_accounts", {"name": db_cluster["vim_account"]}
+ )
+ db_content["vim_account"] = db_vim
+ task = asyncio.ensure_future(
+ self.cluster.create(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register(
+ "cluster", cluster_id, op_id, "cluster_create", task
+ )
+ return
+ elif command == "delete" or command == "deleted":
+ task = asyncio.ensure_future(
+ self.cluster.delete(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register(
+ "cluster", cluster_id, op_id, "cluster_delete", task
+ )
+ return
+ elif command == "add" or command == "added":
+ profile_type = params["profile_type"]
+ profile_collection = self.profile_collection_mapping[profile_type]
+ db_profile = self.db.get_one(
+ profile_collection, {"_id": params["profile_id"]}
+ )
+ db_profile["profile_type"] = profile_type
+ db_content["profile"] = db_profile
+ task = asyncio.ensure_future(
+ self.cluster.attach_profile(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register(
+ "cluster", cluster_id, op_id, "profile_add", task
+ )
+ return
+ elif command == "remove" or command == "removed":
+ profile_type = params["profile_type"]
+ profile_collection = self.profile_collection_mapping[profile_type]
+ db_profile = self.db.get_one(
+ profile_collection, {"_id": params["profile_id"]}
+ )
+ db_profile["profile_type"] = profile_type
+ db_content["profile"] = db_profile
+ task = asyncio.ensure_future(
+ self.cluster.detach_profile(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register(
+ "cluster", cluster_id, op_id, "profile_remove", task
+ )
+ return
+ elif command == "register" or command == "registered":
+ task = asyncio.ensure_future(
+ self.cluster.register(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register(
+ "cluster", cluster_id, op_id, "cluster_register", task
+ )
+ return
+ elif command == "deregister" or command == "deregistered":
+ task = asyncio.ensure_future(
+ self.cluster.deregister(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register(
+ "cluster", cluster_id, op_id, "cluster_deregister", task
+ )
+ return
+ elif command == "get_creds":
+ cluster_id = params["_id"]
+ db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
+ task = asyncio.ensure_future(self.cluster.get_creds(db_cluster))
+ self.lcm_tasks.register(
+ "cluster", cluster_id, cluster_id, "cluster_get_credentials", task
+ )
+ return
+ elif command == "upgrade" or command == "scale":
+ # db_vim = self.db.get_one("vim_accounts", {"_id": db_cluster["vim_account"]})
+ db_vim = self.db.get_one(
+ "vim_accounts", {"name": db_cluster["vim_account"]}
+ )
+ db_content["vim_account"] = db_vim
+ task = asyncio.ensure_future(
+ self.cluster.update(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register(
+ "cluster", cluster_id, op_id, "cluster_update", task
+ )
+ return
+ elif topic == "k8s_app":
+ op_id = params["operation_id"]
+ profile_id = params["profile_id"]
+ db_profile = self.db.get_one("k8sapp", {"_id": profile_id})
+ db_profile["profile_type"] = "applications"
+ op_params = self.get_operation_params(db_profile, op_id)
+ if command == "profile_create" or command == "profile_created":
+ self.logger.debug("Create k8s_app_id = {}".format(profile_id))
+ task = asyncio.ensure_future(
+ self.k8s_app.create(op_id, op_params, db_profile)
+ )
+ self.lcm_tasks.register(
+ "k8s_app", profile_id, op_id, "k8s_app_create", task
+ )
+ return
+ elif command == "delete" or command == "deleted":
+ self.logger.debug("Delete k8s_app_id = {}".format(profile_id))
+ task = asyncio.ensure_future(
+ self.k8s_app.delete(op_id, op_params, db_profile)
+ )
+ self.lcm_tasks.register(
+ "k8s_app", profile_id, op_id, "k8s_app_delete", task
+ )
+ return
+ elif topic == "k8s_resource":
+ op_id = params["operation_id"]
+ profile_id = params["profile_id"]
+ db_profile = self.db.get_one("k8sresource", {"_id": profile_id})
+ db_profile["profile_type"] = "managed-resources"
+ op_params = self.get_operation_params(db_profile, op_id)
+ if command == "profile_create" or command == "profile_created":
+ self.logger.debug("Create k8s_resource_id = {}".format(profile_id))
+ task = asyncio.ensure_future(
+ self.k8s_resource.create(op_id, op_params, db_profile)
+ )
+ self.lcm_tasks.register(
+ "k8s_resource",
+ profile_id,
+ op_id,
+ "k8s_resource_create",
+ task,
+ )
+ return
+ elif command == "delete" or command == "deleted":
+ self.logger.debug("Delete k8s_resource_id = {}".format(profile_id))
+ task = asyncio.ensure_future(
+ self.k8s_resource.delete(op_id, op_params, db_profile)
+ )
+ self.lcm_tasks.register(
+ "k8s_resource",
+ profile_id,
+ op_id,
+ "k8s_resource_delete",
+ task,
+ )
+ return
+
+ elif topic == "k8s_infra_controller":
+ op_id = params["operation_id"]
+ profile_id = params["profile_id"]
+ db_profile = self.db.get_one("k8sinfra_controller", {"_id": profile_id})
+ db_profile["profile_type"] = "infra-controllers"
+ op_params = self.get_operation_params(db_profile, op_id)
+ if command == "profile_create" or command == "profile_created":
+ self.logger.debug(
+ "Create k8s_infra_controller_id = {}".format(profile_id)
+ )
+ task = asyncio.ensure_future(
+ self.k8s_infra_controller.create(op_id, op_params, db_profile)
+ )
+ self.lcm_tasks.register(
+ "k8s_infra_controller",
+ profile_id,
+ op_id,
+ "k8s_infra_controller_create",
+ task,
+ )
+ return
+ elif command == "delete" or command == "deleted":
+ self.logger.debug(
+ "Delete k8s_infra_controller_id = {}".format(profile_id)
+ )
+ task = asyncio.ensure_future(
+ self.k8s_infra_controller.delete(op_id, op_params, db_profile)
+ )
+ self.lcm_tasks.register(
+ "k8s_infra_controller",
+ profile_id,
+ op_id,
+ "k8s_infra_controller_delete",
+ task,
+ )
+ return
+
+ elif topic == "k8s_infra_config":
+ op_id = params["operation_id"]
+ profile_id = params["profile_id"]
+ db_profile = self.db.get_one("k8sinfra_config", {"_id": profile_id})
+ db_profile["profile_type"] = "infra-configs"
+ op_params = self.get_operation_params(db_profile, op_id)
+ if command == "profile_create" or command == "profile_created":
+ self.logger.debug("Create k8s_infra_config_id = {}".format(profile_id))
+ task = asyncio.ensure_future(
+ self.k8s_infra_config.create(op_id, op_params, db_profile)
+ )
+ self.lcm_tasks.register(
+ "k8s_infra_config",
+ profile_id,
+ op_id,
+ "k8s_infra_config_create",
+ task,
+ )
+ return
+ elif command == "delete" or command == "deleted":
+ self.logger.debug("Delete k8s_infra_config_id = {}".format(profile_id))
+ task = asyncio.ensure_future(
+ self.k8s_infra_config.delete(op_id, op_params, db_profile)
+ )
+ self.lcm_tasks.register(
+ "k8s_infra_config",
+ profile_id,
+ op_id,
+ "k8s_infra_config_delete",
+ task,
+ )
+ return
+ elif topic == "oka":
+ op_id = params["operation_id"]
+ oka_id = params["oka_id"]
+ db_oka = self.db.get_one("okas", {"_id": oka_id})
+ op_params = self.get_operation_params(db_oka, op_id)
+ if command == "create":
+ task = asyncio.ensure_future(self.oka.create(op_id, op_params, db_oka))
+ self.lcm_tasks.register("oka", oka_id, op_id, "oka_create", task)
+ return
+ elif command == "edit":
+ task = asyncio.ensure_future(self.oka.edit(op_id, op_params, db_oka))
+ self.lcm_tasks.register("oka", oka_id, op_id, "oka_edit", task)
+ return
+ elif command == "delete":
+ task = asyncio.ensure_future(self.oka.delete(op_id, op_params, db_oka))
+ self.lcm_tasks.register("oka", oka_id, op_id, "oka_delete", task)
+ return
+ elif topic == "ksu":
+ op_id = params["operation_id"]
+ op_params = None
+ db_content = None
+ if not (command == "clone" or command == "move"):
+ # op_params is a list
+ # db_content is a list of KSU
+ db_content = []
+ op_params = []
+ for ksu_id in params["ksus_list"]:
+ db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
+ db_content.append(db_ksu)
+ ksu_params = {}
+ if command == "delete":
+ ksu_params["profile"] = {}
+ ksu_params["profile"]["profile_type"] = db_ksu["profile"][
+ "profile_type"
+ ]
+ ksu_params["profile"]["_id"] = db_ksu["profile"]["_id"]
+ else:
+ ksu_params = self.get_operation_params(db_ksu, op_id)
+ # Update ksu_params["profile"] with profile name and age-pubkey
+ profile_type = ksu_params["profile"]["profile_type"]
+ profile_id = ksu_params["profile"]["_id"]
+ profile_collection = self.profile_collection_mapping[profile_type]
+ db_profile = self.db.get_one(
+ profile_collection, {"_id": profile_id}
+ )
+ ksu_params["profile"]["name"] = db_profile["name"]
+ ksu_params["profile"]["age_pubkey"] = db_profile.get(
+ "age_pubkey", ""
+ )
+ if command == "create" or command == "edit" or command == "edited":
+ # Update ksu_params["oka"] with sw_catalog_path (when missing)
+ for oka in ksu_params["oka"]:
+ if "sw_catalog_path" not in oka:
+ oka_id = oka["_id"]
+ db_oka = self.db.get_one("okas", {"_id": oka_id})
+ oka[
+ "sw_catalog_path"
+ ] = f"infra-controllers/{db_oka['git_name']}"
+ op_params.append(ksu_params)
+ else:
+ # db_content and op_params are single items
+ db_content = self.db.get_one("ksus", {"_id": params["_id"]})
+ db_content = db_ksu
+ op_params = self.get_operation_params(db_ksu, op_id)
+ if command == "create":
+ task = asyncio.ensure_future(
+ self.ksu.create(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register("ksu", ksu_id, op_id, "ksu_create", task)
+ return
+ elif command == "edit" or command == "edited":
+ task = asyncio.ensure_future(
+ self.ksu.edit(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register("ksu", ksu_id, op_id, "ksu_edit", task)
+ return
+ elif command == "delete":
+ task = asyncio.ensure_future(
+ self.ksu.delete(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register("ksu", ksu_id, op_id, "ksu_delete", task)
+ return
+ elif command == "clone":
+ task = asyncio.ensure_future(
+ self.ksu.clone(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register("ksu", ksu_id, op_id, "ksu_clone", task)
+ return
+ elif command == "move":
+ task = asyncio.ensure_future(
+ self.ksu.move(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register("ksu", ksu_id, op_id, "ksu_move", task)
+ return
+
self.logger.critical("unknown topic {} and command '{}'".format(topic, command))
async def kafka_read(self):
)
self.consecutive_errors = 0
self.first_start = True
+ self.logger.info(
+ "Consecutive errors: {} first start: {}".format(
+ self.consecutive_errors, self.first_start
+ )
+ )
while self.consecutive_errors < 10:
try:
topics = (
"k8srepo",
"pla",
"nslcmops",
+ "cluster",
+ "k8s_app",
+ "k8s_resource",
+ "k8s_infra_controller",
+ "k8s_infra_config",
+ "oka",
+ "ksu",
+ )
+ self.logger.info(
+ "Consecutive errors: {} first start: {}".format(
+ self.consecutive_errors, self.first_start
+ )
)
topics_admin = ("admin",)
await asyncio.gather(
await asyncio.gather(self.kafka_read(), self.kafka_ping())
async def start(self):
+ self.logger.info("Start LCM")
# check RO version
await self.check_RO_version()
self.k8srepo = vim_sdn.K8sRepoLcm(
self.msg, self.lcm_tasks, self.main_config.to_dict()
)
+ self.cluster = k8s.ClusterLcm(
+ self.msg, self.lcm_tasks, self.main_config.to_dict()
+ )
+ self.k8s_app = k8s.K8sAppLcm(
+ self.msg, self.lcm_tasks, self.main_config.to_dict()
+ )
+ self.k8s_resource = k8s.K8sResourceLcm(
+ self.msg, self.lcm_tasks, self.main_config.to_dict()
+ )
+ self.k8s_infra_controller = k8s.K8sInfraControllerLcm(
+ self.msg, self.lcm_tasks, self.main_config.to_dict()
+ )
+ self.k8s_infra_config = k8s.K8sInfraConfigLcm(
+ self.msg, self.lcm_tasks, self.main_config.to_dict()
+ )
+ self.cloud_credentials = k8s.CloudCredentialsLcm(
+ self.msg, self.lcm_tasks, self.main_config.to_dict()
+ )
+ self.oka = k8s.OkaLcm(self.msg, self.lcm_tasks, self.main_config.to_dict())
+ self.ksu = k8s.KsuLcm(self.msg, self.lcm_tasks, self.main_config.to_dict())
+
+ self.logger.info(
+ "Msg: {} lcm tasks: {} main config: {}".format(
+ self.msg, self.lcm_tasks, self.main_config
+ )
+ )
await self.kafka_read_ping()
file=sys.stderr,
)
exit(1)
+ config_file = os.path.realpath(os.path.normpath(os.path.abspath(config_file)))
lcm = Lcm(config_file)
asyncio.run(lcm.start())
except (LcmException, getopt.GetoptError) as e:
return
now = time()
_desc["_admin.modified"] = now
+ self.logger.info("Desc: {} Item: {} _id: {}".format(_desc, item, _id))
self.db.set_one(item, {"_id": _id}, _desc)
_desc.clear()
# except DbException as e:
# self.logger.error("Updating {} _id={} with '{}'. Error: {}".format(item, _id, _desc, e))
def update_operation_history(
    self, content, workflow_status=None, resource_status=None
):
    """Close out the history entry matching the record's current operation.

    Finds the entry in ``content["operationHistory"]`` whose ``op_id``
    equals ``content["current_operation"]`` and stamps it with the final
    workflow/resource states and an end date. Only the first match is
    updated; ``content`` is mutated in place and also returned.

    :param content: DB record holding ``current_operation`` and
        ``operationHistory`` (a list of dicts with at least ``op_id``)
    :param workflow_status: truthy -> ``workflowState`` = "COMPLETED",
        falsy/None -> "ERROR"
    :param resource_status: truthy -> ``resourceState`` = "READY",
        falsy/None -> "NOT_READY"
    :return: the (mutated) ``content`` dict
    """
    self.logger.info("Update Operation History in LcmBase")
    self.logger.info(
        "Content: {} Workflow Status: {} Resource Status: {}".format(
            content, workflow_status, resource_status
        )
    )

    op_id = content["current_operation"]
    self.logger.info("OP_id: {}".format(op_id))
    # enumerate() replaces the original hand-rolled position counter
    for index, operation in enumerate(content["operationHistory"]):
        self.logger.info("Operations: {}".format(operation))
        if operation["op_id"] == op_id:
            self.logger.info("Length: {}".format(index))
            # Mutate the matching entry directly (same dict object as
            # content["operationHistory"][index])
            operation["workflowState"] = (
                "COMPLETED" if workflow_status else "ERROR"
            )
            operation["resourceState"] = (
                "READY" if resource_status else "NOT_READY"
            )
            operation["endDate"] = time()
            break
    self.logger.info("content: {}".format(content))

    return content
+
@staticmethod
def calculate_charm_hash(zipped_file):
"""Calculate the hash of charm files which ends with .charm
# NS/NSI: "services" VIM/WIM/SDN: "accounts"
topic_service_list = ["ns", "nsi"]
- topic_account_list = ["vim", "wim", "sdn", "k8scluster", "vca", "k8srepo"]
+ topic_account_list = [
+ "vim",
+ "wim",
+ "sdn",
+ "k8scluster",
+ "vca",
+ "k8srepo",
+ "cluster",
+ "k8s_app",
+ "k8s_resource",
+ "k8s_infra_controller",
+ "k8s_infra_config",
+ "oka",
+ "ksu",
+ ]
# Map topic to InstanceID
topic2instid_dict = {"ns": "nsInstanceId", "nsi": "netsliceInstanceId"}
"k8scluster": "k8sclusters",
"vca": "vca",
"k8srepo": "k8srepos",
+ "cluster": "k8sclusters",
+ "k8s_app": "k8sapp",
+ "k8s_resource": "k8sresource",
+ "k8s_infra_controller": "k8sinfra_controller",
+ "k8s_infra_config": "k8sinfra_config",
+ "oka": "oka",
+ "ksu": "ksus",
}
def __init__(self, worker_id=None, logger=None):
"k8scluster": {},
"vca": {},
"k8srepo": {},
+ "cluster": {},
+ "k8s_app": {},
+ "k8s_resource": {},
+ "k8s_infra_controller": {},
+ "k8s_infra_config": {},
+ "oka": {},
+ "ksu": {},
+ "odu": {},
}
self.worker_id = worker_id
self.db = Database().instance.db
self.logger = logger
+ # self.logger.info("Task registry: {}".format(self.task_registry))
def register(self, topic, _id, op_id, task_name, task):
"""
:param task: Task class
:return: none
"""
+ self.logger.info(
+ "topic : {}, _id:{}, op_id:{}, taskname:{}, task:{}".format(
+ topic, _id, op_id, task_name, task
+ )
+ )
if _id not in self.task_registry[topic]:
self.task_registry[topic][_id] = OrderedDict()
if op_id not in self.task_registry[topic][_id]:
self.task_registry[topic][_id][op_id] = {task_name: task}
else:
self.task_registry[topic][_id][op_id][task_name] = task
+ self.logger.info("Task resgistry: {}".format(self.task_registry))
# print("registering task", topic, _id, op_id, task_name, task)
def remove(self, topic, _id, op_id, task_name=None):
"""
# Backward compatibility for VIM/WIM/SDN/k8scluster without op_id
+ self.logger.info("Lock_HA")
if self._is_account_type_HA(topic) and op_id is None:
return True
# Try to lock this task
db_table_name = self.topic2dbtable_dict[topic]
q_filter, update_dict = self._get_dbparams_for_lock_HA(topic, op_type, op_id)
+ self.logger.info(
+ "db table name: {} update dict: {}".format(db_table_name, update_dict)
+ )
db_lock_task = self.db.set_one(
db_table_name,
q_filter=q_filter,
:param op_id: Account ID + ':' + Operation Index
:return: nothing
"""
-
+ self.logger.info("Unlock HA")
# Backward compatibility
if not self._is_account_type_HA(topic) or not op_id:
return
# Get Account ID and Operation Index
account_id, op_index = self._get_account_and_op_HA(op_id)
db_table_name = self.topic2dbtable_dict[topic]
-
+ self.logger.info("db_table_name: {}".format(db_table_name))
# If this is a 'delete' operation, the account may have been deleted (SUCCESS) or may still exist (FAILED)
# If the account exist, register the HA task.
# Update DB for HA tasks
"_admin.operations.{}.worker".format(op_index): None,
"_admin.current_operation": None,
}
+ self.logger.info("Update dict: {}".format(update_dict))
self.db.set_one(
db_table_name,
q_filter=q_filter,
step = "Waiting for {} related tasks to be completed.".format(
new_num_related_tasks
)
+ self.logger.info("{}".format(step))
update_dict = {}
q_filter = {"_id": _id}
# NS/NSI
self.timeout = config.timeout
self.ro_config = config.RO
self.vca_config = config.VCA
+ self.service_kpi = config.servicekpi
# create N2VC connector
self.n2vc = N2VCJujuConnector(
nsr = self.db.get_one(table="nsrs", q_filter=filter)
current_ns_status = nsr.get("nsState")
- # get vca status for NS
+ # First, we need to verify if the current vcaStatus is null, because if that is the case,
+ # MongoDB will not be able to create the fields used within the update key in the database
+ if not nsr.get("vcaStatus"):
+ # Write an empty dictionary to the vcaStatus field, it its value is null
+ self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})
+
+ # Get vca status for NS
status_dict = await self.n2vc.get_status(
namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
)
- # vcaStatus
+ # Update the vcaStatus
+ db_key = f"vcaStatus.{nsr_id}.VNF"
db_dict = dict()
- db_dict["vcaStatus"] = status_dict
+
+ db_dict[db_key] = status_dict[nsr_id]
+ await self.n2vc.update_vca_status(db_dict[db_key], vca_id=vca_id)
# update configurationStatus for this VCA
try:
vca_id=vca_id,
)
- # vcaStatus
+ # First, we need to verify if the current vcaStatus is null, because if that is the case,
+ # MongoDB will not be able to create the fields used within the update key in the database
+ nsr = self.db.get_one(table="nsrs", q_filter=filter)
+ if not nsr.get("vcaStatus"):
+ # Write an empty dictionary to the vcaStatus field, it its value is null
+ self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})
+
+ # Update the vcaStatus
+ db_key = f"vcaStatus.{nsr_id}.KNF"
db_dict = dict()
- db_dict["vcaStatus"] = {nsr_id: vca_status}
+
+ db_dict[db_key] = vca_status
+
+ if cluster_type in ("juju-bundle", "juju"):
+ # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
+ # status in a similar way between Juju Bundles and Helm Charts on this side
+ await self.k8sclusterjuju.update_vca_status(
+ db_dict[db_key],
+ kdu_instance,
+ vca_id=vca_id,
+ )
self.logger.debug(
f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
vdur["additionalParams"]["OSM"][
"vim_flavor_id"
] = vdu_instantiation_params.get("vim-flavor-id")
+ vdur["additionalParams"]["OSM"][
+ "instance_name"
+ ] = vdu_instantiation_params.get("instance_name")
vdur_list.append(vdur)
target_vnf["vdur"] = vdur_list
target["vnf"].append(target_vnf)
for vdur in monitored_vdurs:
vdu_id = vdur["vdu-id-ref"]
metric_name = vnf_monitoring_param.get("performance-metric")
- metric_name = f"osm_{metric_name}"
+ if "exporters-endpoints" not in df:
+ metric_name = f"osm_{metric_name}"
vnf_member_index = vnfr["member-vnf-index-ref"]
scalein_threshold = scaling_criteria.get(
"scale-in-threshold"
)
metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
+ if (
+ "exporters-endpoints" in df
+ and metric_name.startswith("kpi_")
+ ):
+ new_metric_name = (
+ f'osm_{metric_name.replace("kpi_", "").strip()}'
+ )
+ metric_port = df["exporters-endpoints"].get(
+ "metric-port", 9100
+ )
+ vdu_ip = vdur["ip-address"]
+ ip_port = str(vdu_ip) + ":" + str(metric_port)
+ metric_selector = (
+ f'{new_metric_name}{{instance="{ip_port}"}}'
+ )
+ expression = f"({metric_selector} {rel_operator} {scalein_threshold})"
labels = {
"ns_id": nsr_id,
"vnf_member_index": vnf_member_index,
)
metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
+ if (
+ "exporters-endpoints" in df
+ and metric_name.startswith("kpi_")
+ ):
+ new_metric_name = (
+ f'osm_{metric_name.replace("kpi_", "").strip()}'
+ )
+ metric_port = df["exporters-endpoints"].get(
+ "metric-port", 9100
+ )
+ vdu_ip = vdur["ip-address"]
+ ip_port = str(vdu_ip) + ":" + str(metric_port)
+ metric_selector = (
+ f'{new_metric_name}{{instance="{ip_port}"}}'
+ )
+ expression = f"({metric_selector} {rel_operator} {scaleout_threshold})"
labels = {
"ns_id": nsr_id,
"vnf_member_index": vnf_member_index,
operation_state=nslcmop_operation_state,
other_update=db_nslcmop_update,
)
+ if nslcmop_operation_state == "COMPLETED":
+ self.db.del_list("prometheus_jobs", {"nsr_id": nsr_id})
if ns_state == "NOT_INSTANTIATED":
try:
self.db.set_list(
vca_id=vca_id,
cluster_type=cluster_type,
)
- else:
+ if db_nsr["_admin"]["deployed"]["VCA"]:
for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
table, filter = "nsrs", {"_id": nsr_id}
path = "_admin.deployed.VCA.{}.".format(vca_index)
if kdu_name and (
primitive_name in ("upgrade", "rollback", "status") or kdu_action
):
- # kdur and desc_params already set from before
- if primitive_params:
- desc_params.update(primitive_params)
# TODO Check if we will need something at vnf level
for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
if (
+ "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
)
step = "Executing kdu {}".format(primitive_name)
- if primitive_name == "upgrade":
- if desc_params.get("kdu_model"):
- kdu_model = desc_params.get("kdu_model")
- del desc_params["kdu_model"]
+ if primitive_name == "upgrade" and primitive_params:
+ if primitive_params.get("kdu_model"):
+ kdu_model = primitive_params.pop("kdu_model")
else:
kdu_model = kdu.get("kdu-model")
if kdu_model.count("/") < 2: # helm chart is not embedded
parts = kdu_model.split(sep=":")
if len(parts) == 2:
kdu_model = parts[0]
- if desc_params.get("kdu_atomic_upgrade"):
- atomic_upgrade = desc_params.get(
+ if primitive_params.get("kdu_atomic_upgrade"):
+ atomic_upgrade = primitive_params.get(
"kdu_atomic_upgrade"
).lower() in ("yes", "true", "1")
- del desc_params["kdu_atomic_upgrade"]
+ del primitive_params["kdu_atomic_upgrade"]
else:
atomic_upgrade = True
-
+ # Type of upgrade: reset, reuse, reset_then_reuse
+ reset_values = False
+ reuse_values = False
+ reset_then_reuse_values = False
+ # If no option is specified, default behaviour is reuse_values
+ # Otherwise, options will be parsed and used
+ if (
+ ("kdu_reset_values" not in primitive_params)
+ and ("kdu_reuse_values" not in primitive_params)
+ and ("kdu_reset_then_reuse_values" not in primitive_params)
+ ):
+ reuse_values = True
+ else:
+ if primitive_params.get("kdu_reset_values"):
+ reset_values = primitive_params.pop(
+ "kdu_reset_values"
+ ).lower() in ("yes", "true", "1")
+ if primitive_params.get("kdu_reuse_values"):
+ reuse_values = primitive_params.pop(
+ "kdu_reuse_values"
+ ).lower() in ("yes", "true", "1")
+ if primitive_params.get("kdu_reset_then_reuse_values"):
+ reset_then_reuse_values = primitive_params.get(
+ "kdu_reset_then_reuse_values"
+ ).lower() in ("yes", "true", "1")
+ # Two true options are not possible
+ if (
+ sum([reset_values, reuse_values, reset_then_reuse_values])
+ >= 2
+ ):
+ raise LcmException(
+ "Cannot upgrade the KDU simultaneously with two true options to handle values"
+ )
+ # kdur and desc_params already set from before
+ if reset_values:
+ desc_params = primitive_params
+ else:
+ desc_params.update(primitive_params)
detailed_status = await asyncio.wait_for(
self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
cluster_uuid=kdu.get("k8scluster-uuid"),
kdu_instance=kdu.get("kdu-instance"),
atomic=atomic_upgrade,
+ reset_values=reset_values,
+ reuse_values=reuse_values,
+ reset_then_reuse_values=reset_then_reuse_values,
kdu_model=kdu_model,
params=desc_params,
db_dict=db_dict,
nslcmop_operation_state, detailed_status
)
)
+ elif update_type == "VERTICAL_SCALE":
+ self.logger.debug(
+ "Prepare for VERTICAL_SCALE update operation {}".format(db_nslcmop)
+ )
+ # Get the input parameters given through update request
+ vnf_instance_id = db_nslcmop["operationParams"]["verticalScaleVnf"].get(
+ "vnfInstanceId"
+ )
+
+ vnfd_id = db_nslcmop["operationParams"]["verticalScaleVnf"].get(
+ "vnfdId"
+ )
+ step = "Getting vnfr from database"
+ db_vnfr = self.db.get_one(
+ "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
+ )
+ self.logger.debug(step)
+ step = "Getting vnfds from database"
+ self.logger.debug("Start" + step)
+ # Latest VNFD
+ latest_vnfd = self.db.get_one(
+ "vnfds", {"_id": vnfd_id}, fail_on_empty=False
+ )
+ latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
+ # Current VNFD
+ current_vnf_revision = db_vnfr.get("revision", 1)
+ current_vnfd = self.db.get_one(
+ "vnfds_revisions",
+ {"_id": vnfd_id + ":" + str(current_vnf_revision)},
+ fail_on_empty=False,
+ )
+ self.logger.debug("End" + step)
+ # verify flavor changes
+ step = "Checking for flavor change"
+ if find_software_version(current_vnfd) != find_software_version(
+ latest_vnfd
+ ):
+ self.logger.debug("Start" + step)
+ if current_vnfd.get("virtual-compute-desc") == latest_vnfd.get(
+ "virtual-compute-desc"
+ ) and current_vnfd.get("virtual-storage-desc") == latest_vnfd.get(
+ "virtual-storage-desc"
+ ):
+ raise LcmException(
+ "No change in flavor check vnfd {}".format(vnfd_id)
+ )
+ else:
+ raise LcmException(
+ "No change in software_version of vnfd {}".format(vnfd_id)
+ )
+
+ self.logger.debug("End" + step)
+
+ (result, detailed_status) = await self.vertical_scale(
+ nsr_id, nslcmop_id
+ )
+ self.logger.debug(
+ "vertical_scale result: {} detailed_status :{}".format(
+ result, detailed_status
+ )
+ )
+ if result == "FAILED":
+ nslcmop_operation_state = result
+ error_description_nslcmop = detailed_status
+ db_nslcmop_update["detailed-status"] = detailed_status
+ if not nslcmop_operation_state:
+ nslcmop_operation_state = "COMPLETED"
+ self.logger.debug(
+ logging_text
+ + " task Done with result {} {}".format(
+ nslcmop_operation_state, detailed_status
+ )
+ )
# If nslcmop_operation_state is None, so any operation is not failed.
# All operations are executed in overall.
scale_process = None
# POST-SCALE END
# Check if each vnf has exporter for metric collection if so update prometheus job records
- if scaling_type == "SCALE_OUT":
+ if scaling_type == "SCALE_OUT" and bool(self.service_kpi.old_sa):
if "exporters-endpoints" in db_vnfd.get("df")[0]:
vnfr_id = db_vnfr["id"]
db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
old_config_status = db_nsr["config-status"]
db_nsr_update = {
+ "operational-status": "healing",
"_admin.deployed.RO.operational-status": "healing",
}
self.update_db_2("nsrs", nsr_id, db_nsr_update)
task_instantiation_info=tasks_dict_info,
stage=stage,
)
-
except (
ROclient.ROClientException,
DbException,
)
finally:
error_list = list()
+ if db_vnfrs_list and target_list:
+ for vnfrs in db_vnfrs_list:
+ for vnf_instance in target_list:
+ if vnfrs["_id"] == vnf_instance.get("vnfInstanceId"):
+ self.db.set_list(
+ "vnfrs",
+ {"_id": vnfrs["_id"]},
+ {"_admin.modified": time()},
+ )
if exc:
error_list.append(str(exc))
try:
:param: nslcmop_id: nslcmop ID of migrate
"""
- # Try to lock HA task here
- task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
- if not task_is_locked_by_me:
- return
logging_text = "Task ns={} vertical scale ".format(nsr_id)
- self.logger.debug(logging_text + "Enter")
+ self.logger.info(logging_text + "Enter")
+ stage = ["Preparing the environment", ""]
# get all needed from database
db_nslcmop = None
- db_nslcmop_update = {}
- nslcmop_operation_state = None
- old_db_update = {}
- q_filter = {}
- old_vdu_index = None
- old_flavor_id = None
db_nsr_update = {}
target = {}
exc = None
start_deploy = time()
try:
+ db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+ operationParams = db_nslcmop.get("operationParams")
+ vertical_scale_data = operationParams["verticalScaleVnf"]
+ vnfd_id = vertical_scale_data["vnfdId"]
+ count_index = vertical_scale_data["countIndex"]
+ vdu_id_ref = vertical_scale_data["vduId"]
+ vnfr_id = vertical_scale_data["vnfInstanceId"]
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ db_flavor = db_nsr.get("flavor")
+ db_flavor_index = str(len(db_flavor))
+
+ def set_flavor_refrence_to_vdur(diff=0):
+ """
+ Utility function to add and remove the
+ ref to new ns-flavor-id to vdurs
+ :param: diff: default 0
+ """
+ q_filter = {}
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+ for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())):
+ if (
+ vdur.get("count-index") == count_index
+ and vdur.get("vdu-id-ref") == vdu_id_ref
+ ):
+ filter_text = {
+ "_id": vnfr_id,
+ "vdur.count-index": count_index,
+ "vdur.vdu-id-ref": vdu_id_ref,
+ }
+ q_filter.update(filter_text)
+ db_update = {}
+ db_update["vdur.{}.ns-flavor-id".format(vdu_index)] = str(
+ int(db_flavor_index) - diff
+ )
+ self.db.set_one(
+ "vnfrs",
+ q_filter=q_filter,
+ update_dict=db_update,
+ fail_on_empty=True,
+ )
+
# wait for any previous tasks in process
- step = "Waiting for previous operations to terminate"
+ stage[1] = "Waiting for previous operations to terminate"
await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
self._write_ns_status(
nsr_id=nsr_id,
ns_state=None,
- current_operation="VerticalScale",
+ current_operation="VERTICALSCALE",
current_operation_id=nslcmop_id,
)
- step = "Getting nslcmop from database"
+ self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
self.logger.debug(
- step + " after having waited for previous tasks to be completed"
+ stage[1] + " after having waited for previous tasks to be completed"
)
- db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
- operationParams = db_nslcmop.get("operationParams")
- # Update the VNFRS and NSRS with the requested flavour detail, So that ro tasks can function properly
- db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
- db_flavor = db_nsr.get("flavor")
- db_flavor_index = str(len(db_flavor))
- change_vnf_flavor_data = operationParams["changeVnfFlavorData"]
- flavor_dict = change_vnf_flavor_data["additionalParams"]
- count_index = flavor_dict["vduCountIndex"]
- vdu_id_ref = flavor_dict["vduid"]
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+ virtual_compute = vnfd["virtual-compute-desc"][0]
+ virtual_memory = round(
+ float(virtual_compute["virtual-memory"]["size"]) * 1024
+ )
+ virtual_cpu = virtual_compute["virtual-cpu"]["num-virtual-cpu"]
+ virtual_storage = vnfd["virtual-storage-desc"][0]["size-of-storage"]
flavor_dict_update = {
"id": db_flavor_index,
- "memory-mb": flavor_dict["virtualMemory"],
+ "memory-mb": virtual_memory,
"name": f"{vdu_id_ref}-{count_index}-flv",
- "storage-gb": flavor_dict["sizeOfStorage"],
- "vcpu-count": flavor_dict["numVirtualCpu"],
+ "storage-gb": str(virtual_storage),
+ "vcpu-count": virtual_cpu,
}
db_flavor.append(flavor_dict_update)
db_update = {}
db_update["flavor"] = db_flavor
- ns_q_filter = {
+ q_filter = {
"_id": nsr_id,
}
+ # Update the VNFRS and NSRS with the requested flavour detail, So that ro tasks can function properly
self.db.set_one(
"nsrs",
- q_filter=ns_q_filter,
+ q_filter=q_filter,
update_dict=db_update,
fail_on_empty=True,
)
- db_vnfr = self.db.get_one(
- "vnfrs", {"_id": change_vnf_flavor_data["vnfInstanceId"]}
- )
- for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())):
- if (
- vdur.get("count-index") == count_index
- and vdur.get("vdu-id-ref") == vdu_id_ref
- ):
- old_flavor_id = vdur.get("ns-flavor-id", 0)
- old_vdu_index = vdu_index
- filter_text = {
- "_id": change_vnf_flavor_data["vnfInstanceId"],
- "vdur.count-index": count_index,
- "vdur.vdu-id-ref": vdu_id_ref,
- }
- q_filter.update(filter_text)
- db_update = {}
- db_update[
- "vdur.{}.ns-flavor-id".format(vdu_index)
- ] = db_flavor_index
- self.db.set_one(
- "vnfrs",
- q_filter=q_filter,
- update_dict=db_update,
- fail_on_empty=True,
- )
+ set_flavor_refrence_to_vdur()
target = {}
- target.update(operationParams)
+ new_operationParams = {
+ "lcmOperationType": "verticalscale",
+ "verticalScale": "CHANGE_VNFFLAVOR",
+ "nsInstanceId": nsr_id,
+ "changeVnfFlavorData": {
+ "vnfInstanceId": vnfr_id,
+ "additionalParams": {
+ "vduid": vdu_id_ref,
+ "vduCountIndex": count_index,
+ "virtualMemory": virtual_memory,
+ "numVirtualCpu": int(virtual_cpu),
+ "sizeOfStorage": int(virtual_storage),
+ },
+ },
+ }
+ target.update(new_operationParams)
+
+ stage[1] = "Sending vertical scale request to RO... {}".format(target)
+ self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
+ self.logger.info("RO target > {}".format(target))
desc = await self.RO.vertical_scale(nsr_id, target)
- self.logger.debug("RO return > {}".format(desc))
+ self.logger.info("RO.vertical_scale return value - {}".format(desc))
action_id = desc["action_id"]
await self._wait_ng_ro(
nsr_id,
self.logger.error("Exit Exception {}".format(e))
exc = e
except asyncio.CancelledError:
- self.logger.error("Cancelled Exception while '{}'".format(step))
+ self.logger.error("Cancelled Exception while '{}'".format(stage))
exc = "Operation was cancelled"
except Exception as e:
exc = traceback.format_exc()
"Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
)
finally:
- self._write_ns_status(
- nsr_id=nsr_id,
- ns_state=None,
- current_operation="IDLE",
- current_operation_id=None,
- )
if exc:
- db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
- nslcmop_operation_state = "FAILED"
- old_db_update[
- "vdur.{}.ns-flavor-id".format(old_vdu_index)
- ] = old_flavor_id
- else:
- nslcmop_operation_state = "COMPLETED"
- db_nslcmop_update["detailed-status"] = "Done"
- db_nsr_update["detailed-status"] = "Done"
-
- self._write_op_status(
- op_id=nslcmop_id,
- stage="",
- error_message="",
- operation_state=nslcmop_operation_state,
- other_update=db_nslcmop_update,
- )
- if old_vdu_index and old_db_update != {}:
self.logger.critical(
- "Reverting Old Flavor -- : {}".format(old_db_update)
+ "Vertical-Scale operation Failed, cleaning up nsrs and vnfrs flavor detail"
)
self.db.set_one(
- "vnfrs",
- q_filter=q_filter,
- update_dict=old_db_update,
- fail_on_empty=True,
+ "nsrs",
+ {"_id": nsr_id},
+ None,
+ pull={"flavor": {"id": db_flavor_index}},
)
- if nslcmop_operation_state:
- try:
- msg = {
- "nsr_id": nsr_id,
- "nslcmop_id": nslcmop_id,
- "operationState": nslcmop_operation_state,
- }
- await self.msg.aiowrite("ns", "verticalscaled", msg)
- except Exception as e:
- self.logger.error(
- logging_text + "kafka_write notification Exception {}".format(e)
- )
- self.logger.debug(logging_text + "Exit")
- self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")
+ set_flavor_refrence_to_vdur(diff=1)
+ return "FAILED", "Error in verticalscale VNF {}".format(exc)
+ else:
+ return "COMPLETED", "Done"
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+from pyrage import x25519
+import yaml
+import base64
+
+
def gather_age_key(cluster):
    """Return the (public, private) age key pair stored in *cluster*.

    Either value is None when the corresponding field is absent.
    """
    return cluster.get("age_pubkey"), cluster.get("age_privkey")
+
+
def generate_age_key():
    """Generate a fresh age X25519 identity.

    Returns a ``(public_key, private_key)`` tuple; the private key is the
    string form of the identity, as expected by the age/sops tooling.
    """
    identity = x25519.Identity.generate()
    return identity.to_public(), str(identity)
+
+
async def create_cluster(self, op_id, op_params, content, bootstrap_only=False):
    """Render and submit the Argo workflow that creates and bootstraps a cluster.

    :param op_id: operation id (logged only)
    :param op_params: operation parameters (logged only)
    :param content: dict carrying the "cluster" and "vim_account" DB records
    :param bootstrap_only: when True, no crossplane provisioning parameters
        (cluster type / providerconfig) are passed to the workflow
    :return: name of the submitted workflow
    :raises Exception: when the VIM account type has no matching cluster type
    """
    self.logger.info("Create cluster workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    db_cluster = content["cluster"]
    db_vim_account = content["vim_account"]

    workflow_template = "launcher-create-crossplane-cluster-and-bootstrap.j2"
    workflow_name = f"create-cluster-{db_cluster['_id']}"
    cluster_name = db_cluster["git_name"].lower()

    # Reuse the age key pair already stored in the cluster record
    public_key_new_cluster, private_key_new_cluster = gather_age_key(db_cluster)
    self.logger.debug(f"public_key_new_cluster={public_key_new_cluster}")
    self.logger.debug(f"private_key_new_cluster={private_key_new_cluster}")

    # Sanity-check connectivity to the management cluster
    self.logger.debug(self._kubectl._get_kubectl_version())

    # The private key reaches the workflow through a K8s secret
    secret_name = f"secret-age-{cluster_name}"
    await self.create_secret(
        secret_name,
        "osm-workflows",
        "agekey",
        private_key_new_cluster,
    )

    # Additional params for the workflow
    cluster_kustomization_name = cluster_name
    osm_project_name = "osm_admin"  # TODO: get project name from content
    if bootstrap_only:
        cluster_type = ""
        providerconfig_name = ""
    else:
        providerconfig_name = f"{db_cluster['vim_account']}-config"
        # Map the VIM type to the managed-cluster flavor crossplane must create
        cluster_type = {"azure": "aks", "aws": "eks", "gcp": "gke"}.get(
            db_vim_account["vim_type"]
        )
        if cluster_type is None:
            raise Exception("Not suitable VIM account to register cluster")

    # Render the workflow manifest from its Jinja2 template
    manifest = self.render_jinja_template(
        workflow_template,
        output_file=None,
        workflow_name=workflow_name,
        git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        cluster_name=cluster_name,
        cluster_type=cluster_type,
        cluster_kustomization_name=cluster_kustomization_name,
        providerconfig_name=providerconfig_name,
        public_key_mgmt=self._pubkey,
        public_key_new_cluster=public_key_new_cluster,
        secret_name_private_key_new_cluster=secret_name,
        vm_size=db_cluster["node_size"],
        node_count=db_cluster["node_count"],
        k8s_version=db_cluster["k8s_version"],
        cluster_location=db_cluster["region_name"],
        osm_project_name=osm_project_name,
        rg_name=db_cluster["resource_group"],
        workflow_debug=self._workflow_debug,
        workflow_dry_run=self._workflow_dry_run,
    )
    self.logger.debug(f"Workflow manifest: {manifest}")

    # Submit the workflow to Argo on the management cluster
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def update_cluster(self, op_id, op_params, content):
    """Render and submit the Argo workflow that updates an existing cluster.

    :param op_id: operation id (logged only)
    :param op_params: operation parameters (logged only)
    :param content: dict carrying the "cluster" and "vim_account" DB records
    :return: name of the submitted workflow
    :raises Exception: when the VIM account type has no matching cluster type
    """
    self.logger.info("Update cluster eks workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    db_cluster = content["cluster"]
    db_vim_account = content["vim_account"]

    workflow_template = "launcher-update-crossplane-cluster.j2"
    # Fix: the name previously said "delete-cluster-..." (copy-paste from
    # delete_cluster). This workflow updates the cluster, and the wrong prefix
    # would both mislead operators and collide with a real delete workflow
    # submitted for the same cluster id.
    workflow_name = f"update-cluster-{db_cluster['_id']}"
    cluster_name = db_cluster["git_name"].lower()

    # Get the age key pair stored in the cluster record
    public_key_cluster, private_key_cluster = gather_age_key(db_cluster)
    # Fix: log labels previously said "public_key_new_cluster" although this
    # is the existing cluster's key pair.
    self.logger.debug(f"public_key_cluster={public_key_cluster}")
    self.logger.debug(f"private_key_cluster={private_key_cluster}")

    # Create secret with agekey
    secret_name = f"secret-age-{cluster_name}"
    secret_namespace = "osm-workflows"
    secret_key = "agekey"
    secret_value = private_key_cluster
    await self.create_secret(
        secret_name,
        secret_namespace,
        secret_key,
        secret_value,
    )

    # Additional params for the workflow
    cluster_kustomization_name = cluster_name
    osm_project_name = "osm_admin"  # TODO: get project name from db_cluster
    vim_account_id = db_cluster["vim_account"]
    providerconfig_name = f"{vim_account_id}-config"
    # Map the VIM type to the managed-cluster flavor crossplane must update
    cluster_type = {"azure": "aks", "aws": "eks", "gcp": "gke"}.get(
        db_vim_account["vim_type"]
    )
    if cluster_type is None:
        raise Exception("Not suitable VIM account to update cluster")

    # Render workflow
    manifest = self.render_jinja_template(
        workflow_template,
        output_file=None,
        workflow_name=workflow_name,
        git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        cluster_name=cluster_name,
        cluster_type=cluster_type,
        cluster_kustomization_name=cluster_kustomization_name,
        providerconfig_name=providerconfig_name,
        public_key_mgmt=self._pubkey,
        public_key_new_cluster=public_key_cluster,
        secret_name_private_key_new_cluster=secret_name,
        vm_size=db_cluster["node_size"],
        node_count=db_cluster["node_count"],
        k8s_version=db_cluster["k8s_version"],
        cluster_location=db_cluster["region_name"],
        osm_project_name=osm_project_name,
        workflow_debug=self._workflow_debug,
        workflow_dry_run=self._workflow_dry_run,
    )
    self.logger.info(manifest)

    # Submit workflow
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def delete_cluster(self, op_id, op_params, content):
    """Launch the Argo workflow that deletes a K8s cluster.

    Returns the name of the submitted Argo workflow.
    """
    self.logger.info("Delete cluster workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    db_cluster = content["cluster"]
    cluster_name = db_cluster["git_name"].lower()
    workflow_name = f"delete-cluster-{db_cluster['_id']}"

    # Parameters consumed by the Jinja workflow template
    render_args = {
        "workflow_name": workflow_name,
        "git_fleet_url": f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        "git_sw_catalogs_url": f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        "cluster_name": cluster_name,
        "cluster_kustomization_name": cluster_name,
        "osm_project_name": "osm_admin",  # TODO: get project name from DB
        "workflow_debug": self._workflow_debug,
        "workflow_dry_run": self._workflow_dry_run,
    }
    manifest = self.render_jinja_template(
        "launcher-delete-cluster.j2", output_file=None, **render_args
    )
    self.logger.info(manifest)

    # Submit the rendered Argo workflow to the management cluster
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def register_cluster(self, op_id, op_params, content):
    """Register an existing K8s cluster: store its kubeconfig as a secret and
    reuse the cluster-creation workflow in "register" mode.
    """
    self.logger.info("Register cluster workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    db_cluster = content["cluster"]
    cluster_name = db_cluster["git_name"].lower()

    # Persist the provided kubeconfig so the workflow can reach the cluster
    kubeconfig_yaml = yaml.safe_dump(
        db_cluster["credentials"], indent=4, default_flow_style=False, sort_keys=False
    )
    await self.create_secret(
        f"kubeconfig-{cluster_name}",
        "managed-resources",
        "kubeconfig",
        kubeconfig_yaml,
    )

    # Registration is delegated to create_cluster with the register flag set
    return await self.create_cluster(op_id, op_params, content, True)
+
+
async def deregister_cluster(self, op_id, op_params, content):
    """Deregister a cluster; implemented as a plain cluster deletion."""
    self.logger.info("Deregister cluster workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
    return await self.delete_cluster(op_id, op_params, content)
+
+
async def get_cluster_credentials(self, db_cluster):
    """
    returns the kubeconfig file of a K8s cluster in a dictionary
    """
    self.logger.info("Get cluster credentials Enter")
    self.logger.info(f"Content: {db_cluster}")

    secret_name = f"kubeconfig-{db_cluster['git_name'].lower()}"

    self.logger.info(f"Checking content of secret {secret_name} ...")
    try:
        # The kubeconfig is stored base64-encoded in the "kubeconfig" key
        secret_data = await self._kubectl.get_secret_content(
            name=secret_name,
            namespace="managed-resources",
        )
        decoded_kubeconfig = base64.b64decode(secret_data["kubeconfig"]).decode("utf-8")
        return True, yaml.safe_load(decoded_kubeconfig)
    except Exception as e:
        message = f"Not possible to get the credentials of the cluster. Exception: {e}"
        self.logger.critical(message)
        return False, message
+
+
async def check_create_cluster(self, op_id, op_params, content):
    """Pre-check hook for cluster creation; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
+
+
async def check_update_cluster(self, op_id, op_params, content):
    """Pre-check hook for cluster update; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
+
+
async def check_delete_cluster(self, op_id, op_params, content):
    """Pre-check hook for cluster deletion; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
+
+
async def check_register_cluster(self, op_id, op_params, content):
    """Pre-check hook for cluster registration; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
+
+
async def check_deregister_cluster(self, op_id, op_params, content):
    """Pre-check hook for cluster deregistration; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import base64
+
+
async def create_secret(self, secret_name, secret_namespace, secret_key, secret_value):
    """Create an Opaque K8s secret {secret_key: secret_value} and verify it.

    The value is base64-encoded as required by the K8s Secret "data" field,
    created through the N2VC kubectl wrapper, then read back and compared.

    Args:
        secret_name: name of the secret to create.
        secret_namespace: namespace where the secret is created.
        secret_key: key inside the secret's data map.
        secret_value: plain-text value to store.
    """

    async def check_secret(secret_name, secret_namespace, secret_key, secret_value):
        # Read the secret back and verify the stored value matches what was sent
        self.logger.info(f"Checking content of secret {secret_name} ...")
        returned_secret_data = await self._kubectl.get_secret_content(
            name=secret_name,
            namespace=secret_namespace,
        )
        returned_secret_value = base64.b64decode(
            returned_secret_data[secret_key]
        ).decode("utf-8")
        # SECURITY: log only the result of the comparison, never the
        # secret values themselves (the original logged both in clear).
        self.logger.info(
            f"Result of secret comparison: {secret_value==returned_secret_value} ..."
        )

    self.logger.info(
        f"Creating secret {secret_name} in namespace {secret_namespace} ..."
    )
    # K8s expects the data map values base64-encoded
    secret_data = {secret_key: base64.b64encode(secret_value.encode()).decode("utf-8")}
    self.logger.info(f"Secret name: {secret_name}")
    self.logger.info(f"Namespace: {secret_namespace}")
    self.logger.info("Calling N2VC kubectl to create secret...")
    await self._kubectl.create_secret(
        name=secret_name,
        data=secret_data,
        namespace=secret_namespace,
        secret_type="Opaque",
    )
    self.logger.info(f"Secret {secret_name} CREATED")

    await check_secret(secret_name, secret_namespace, secret_key, secret_value)
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import yaml
+
+
async def create_ksus(self, op_id, op_params_list, content_list):
    """Launch the Argo workflow that creates a KSU from an OKA.

    Args:
        op_id: identifier of the LCM operation.
        op_params_list: list with the params of each KSU (only one supported).
        content_list: list with the KSU DB records (only one supported).

    Returns:
        The name of the submitted Argo workflow.

    Raises:
        Exception: if more than one KSU or more than one OKA is requested.
    """
    self.logger.info("Create KSU workflow Enter")
    self.logger.info(
        f"Operation {op_id}. Params: {op_params_list}. Content: {content_list}"
    )

    if len(content_list) > 1:
        raise Exception("There is no ODU workflow yet able to manage multiple KSUs")
    db_ksu = content_list[0]
    ksu_params = op_params_list[0]
    oka_list = ksu_params["oka"]
    if len(oka_list) > 1:
        raise Exception(
            "There is no ODU workflow yet able to manage multiple OKAs for a KSU"
        )
    oka_path = oka_list[0]["sw_catalog_path"]

    workflow_template = "launcher-create-ksu-hr.j2"
    workflow_name = f"create-ksus-{op_id}"
    ksu_name = db_ksu["git_name"].lower()

    # Additional params for the workflow
    osm_project_name = "osm_admin"  # TODO: get project name from db_ksu
    kustomization_name = ksu_name
    helmrelease_name = ksu_name
    target_ns = ksu_params.get("namespace")
    profile_type = ksu_params.get("profile", {}).get("profile_type")
    profile_name = ksu_params.get("profile", {}).get("name")
    age_public_key = ksu_params.get("profile", {}).get("age_pubkey")
    substitute_environment = ksu_params.get("substitute_environment", "false")
    substitution_filter = ksu_params.get("substitution_filter", "")
    custom_env_vars = ksu_params.get("custom_env_vars", "")
    if custom_env_vars:
        # Render as YAML block scalar: KEY=VALUE lines indented 12 spaces
        custom_env_vars = "|\n" + "\n".join(
            [" " * 12 + f"{k}={v}" for k, v in custom_env_vars.items()]
        )
    inline_values = ksu_params.get("inline_values", "")
    if inline_values:
        yaml_string = yaml.safe_dump(
            inline_values, sort_keys=False, default_flow_style=False
        )
        inline_values = "|\n" + "\n".join(
            [" " * 8 + line for line in yaml_string.splitlines()]
        )
    is_preexisting_cm = "false"
    values_configmap_name = f"cm-{ksu_name}"
    cm_values = ksu_params.get("configmap_values", "")
    if cm_values:
        yaml_string = yaml.safe_dump(
            cm_values, sort_keys=False, default_flow_style=False
        )
        # Fix: assign the rendered block scalar to cm_values. The original
        # assigned it to custom_env_vars, clobbering the env vars computed
        # above and passing the raw dict as cm_values to the template.
        cm_values = "|\n" + "\n".join(
            [" " * 8 + line for line in yaml_string.splitlines()]
        )
    is_preexisting_secret = "false"
    secret_values = ksu_params.get("secret_values", "")
    if secret_values:
        values_secret_name = f"secret-{ksu_name}"
        reference_secret_for_values = f"ref-secret-{ksu_name}"
        reference_key_for_values = f"ref-key-{ksu_name}"
        secret_values = yaml.safe_dump(
            secret_values, sort_keys=False, default_flow_style=False
        )
    else:
        values_secret_name = ""
        reference_secret_for_values = ""
        reference_key_for_values = ""
    sync = "true"

    if secret_values:
        secret_namespace = "osm-workflows"
        # Create secret holding the sensitive values for the workflow
        await self.create_secret(
            reference_secret_for_values,
            secret_namespace,
            reference_key_for_values,
            secret_values,
        )

    # Render workflow
    manifest = self.render_jinja_template(
        workflow_template,
        output_file=None,
        workflow_name=workflow_name,
        git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        templates_path=oka_path,
        substitute_environment=substitute_environment,
        substitution_filter=substitution_filter,
        custom_env_vars=custom_env_vars,
        kustomization_name=kustomization_name,
        helmrelease_name=helmrelease_name,
        inline_values=inline_values,
        is_preexisting_secret=is_preexisting_secret,
        target_ns=target_ns,
        age_public_key=age_public_key,
        values_secret_name=values_secret_name,
        reference_secret_for_values=reference_secret_for_values,
        reference_key_for_values=reference_key_for_values,
        is_preexisting_cm=is_preexisting_cm,
        values_configmap_name=values_configmap_name,
        cm_values=cm_values,
        ksu_name=ksu_name,
        profile_name=profile_name,
        profile_type=profile_type,
        osm_project_name=osm_project_name,
        sync=sync,
        workflow_debug=self._workflow_debug,
        workflow_dry_run=self._workflow_dry_run,
    )
    self.logger.debug(f"Workflow manifest: {manifest}")

    # Submit workflow
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def update_ksus(self, op_id, op_params_list, content_list):
    """Launch the Argo workflow that updates a KSU from an OKA.

    Args:
        op_id: identifier of the LCM operation.
        op_params_list: list with the params of each KSU (only one supported).
        content_list: list with the KSU DB records (only one supported).

    Returns:
        The name of the submitted Argo workflow.

    Raises:
        Exception: if more than one KSU or more than one OKA is requested.
    """
    self.logger.info("Update KSU workflow Enter")
    self.logger.info(
        f"Operation {op_id}. Params: {op_params_list}. Content: {content_list}"
    )

    if len(content_list) > 1:
        raise Exception("There is no ODU workflow yet able to manage multiple KSUs")
    db_ksu = content_list[0]
    ksu_params = op_params_list[0]
    oka_list = ksu_params["oka"]
    if len(oka_list) > 1:
        raise Exception(
            "There is no ODU workflow yet able to manage multiple OKAs for a KSU"
        )
    oka_path = oka_list[0]["sw_catalog_path"]

    workflow_template = "launcher-update-ksu-hr.j2"
    workflow_name = f"update-ksus-{op_id}"
    ksu_name = db_ksu["git_name"].lower()

    # Additional params for the workflow
    osm_project_name = "osm_admin"  # TODO: get project name from db_ksu
    kustomization_name = ksu_name
    helmrelease_name = ksu_name
    target_ns = ksu_params.get("namespace")
    profile_type = ksu_params.get("profile", {}).get("profile_type")
    profile_name = ksu_params.get("profile", {}).get("name")
    age_public_key = ksu_params.get("profile", {}).get("age_pubkey")
    substitute_environment = ksu_params.get("substitute_environment", "false")
    substitution_filter = ksu_params.get("substitution_filter", "")
    custom_env_vars = ksu_params.get("custom_env_vars", "")
    if custom_env_vars:
        # Render as YAML block scalar: KEY=VALUE lines indented 12 spaces
        custom_env_vars = "|\n" + "\n".join(
            [" " * 12 + f"{k}={v}" for k, v in custom_env_vars.items()]
        )
    inline_values = ksu_params.get("inline_values", "")
    if inline_values:
        yaml_string = yaml.safe_dump(
            inline_values, sort_keys=False, default_flow_style=False
        )
        inline_values = "|\n" + "\n".join(
            [" " * 8 + line for line in yaml_string.splitlines()]
        )
    is_preexisting_cm = "false"
    values_configmap_name = f"cm-{ksu_name}"
    cm_values = ksu_params.get("configmap_values", "")
    if cm_values:
        yaml_string = yaml.safe_dump(
            cm_values, sort_keys=False, default_flow_style=False
        )
        # Fix: assign the rendered block scalar to cm_values. The original
        # assigned it to custom_env_vars, clobbering the env vars computed
        # above and passing the raw dict as cm_values to the template.
        cm_values = "|\n" + "\n".join(
            [" " * 8 + line for line in yaml_string.splitlines()]
        )
    is_preexisting_secret = "false"
    secret_values = ksu_params.get("secret_values", "")
    if secret_values:
        values_secret_name = f"secret-{ksu_name}"
        reference_secret_for_values = f"ref-secret-{ksu_name}"
        reference_key_for_values = f"ref-key-{ksu_name}"
        secret_values = yaml.safe_dump(
            secret_values, sort_keys=False, default_flow_style=False
        )
    else:
        values_secret_name = ""
        reference_secret_for_values = ""
        reference_key_for_values = ""

    if secret_values:
        secret_namespace = "osm-workflows"
        # Create secret holding the sensitive values for the workflow
        await self.create_secret(
            reference_secret_for_values,
            secret_namespace,
            reference_key_for_values,
            secret_values,
        )

    # Render workflow
    manifest = self.render_jinja_template(
        workflow_template,
        output_file=None,
        workflow_name=workflow_name,
        git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        templates_path=oka_path,
        substitute_environment=substitute_environment,
        substitution_filter=substitution_filter,
        custom_env_vars=custom_env_vars,
        kustomization_name=kustomization_name,
        helmrelease_name=helmrelease_name,
        inline_values=inline_values,
        is_preexisting_secret=is_preexisting_secret,
        target_ns=target_ns,
        age_public_key=age_public_key,
        values_secret_name=values_secret_name,
        reference_secret_for_values=reference_secret_for_values,
        reference_key_for_values=reference_key_for_values,
        is_preexisting_cm=is_preexisting_cm,
        values_configmap_name=values_configmap_name,
        cm_values=cm_values,
        ksu_name=ksu_name,
        profile_name=profile_name,
        profile_type=profile_type,
        osm_project_name=osm_project_name,
        workflow_debug=self._workflow_debug,
        workflow_dry_run=self._workflow_dry_run,
    )
    self.logger.debug(f"Workflow manifest: {manifest}")

    # Submit workflow
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def delete_ksus(self, op_id, op_params_list, content_list):
    """Launch the Argo workflow that deletes a single KSU."""
    self.logger.info("Delete KSU workflow Enter")
    self.logger.info(
        f"Operation {op_id}. Params: {op_params_list}. Content: {content_list}"
    )

    if len(content_list) > 1:
        raise Exception("There is no ODU workflow yet able to manage multiple KSUs")
    db_ksu = content_list[0]
    ksu_params = op_params_list[0]

    workflow_name = f"delete-ksus-{op_id}"
    ksu_name = db_ksu["git_name"].lower()
    profile_info = ksu_params.get("profile", {})

    # Parameters consumed by the Jinja workflow template
    render_args = {
        "workflow_name": workflow_name,
        "git_fleet_url": f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        "git_sw_catalogs_url": f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        "ksu_name": ksu_name,
        "profile_name": profile_info.get("name"),
        "profile_type": profile_info.get("profile_type"),
        "osm_project_name": "osm_admin",  # TODO: get project name from db_ksu
        "workflow_debug": self._workflow_debug,
        "workflow_dry_run": self._workflow_dry_run,
    }
    manifest = self.render_jinja_template(
        "launcher-delete-ksu.j2", output_file=None, **render_args
    )
    self.logger.debug(f"Workflow manifest: {manifest}")

    # Submit the rendered Argo workflow
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def clone_ksu(self, op_id, op_params, content):
    """Placeholder: the clone-KSU workflow is not implemented yet."""
    self.logger.info("Clone KSU workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
    return f"clone-ksu-{content['_id']}"
+
+
async def move_ksu(self, op_id, op_params, content):
    """Placeholder: the move-KSU workflow is not implemented yet."""
    self.logger.info("Move KSU workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
    return f"move-ksu-{content['_id']}"
+
+
async def check_create_ksus(self, op_id, op_params, content):
    """Pre-check hook for KSU creation; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
+
+
async def check_update_ksus(self, op_id, op_params, content):
    """Pre-check hook for KSU update; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
+
+
async def check_delete_ksus(self, op_id, op_params, content):
    """Pre-check hook for KSU deletion; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
+
+
async def check_clone_ksu(self, op_id, op_params, content):
    """Pre-check hook for KSU cloning; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
+
+
async def check_move_ksu(self, op_id, op_params, content):
    """Pre-check hook for KSU move; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import yaml
+from osm_lcm.lcm_utils import LcmException
+
+
async def create_oka(self, op_id, op_params, content):
    """Launch the Argo workflow that onboards an OKA package.

    Copies the OKA package from the OSM filesystem into a temporary PVC and
    submits the creation workflow. Raises LcmException if the package is
    missing.
    """
    self.logger.info("Create OKA workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    workflow_name = f"create-oka-{content['_id']}"
    oka_name = content["git_name"].lower()

    # Locate the OKA package in the shared filesystem
    oka_fs_info = content["_admin"]["storage"]
    oka_folder = f"{oka_fs_info['path']}{oka_fs_info['folder']}"
    oka_filename = oka_fs_info["zipfile"]
    self.fs.sync(oka_folder)
    if not self.fs.file_exists(f"{oka_folder}/{oka_filename}"):
        raise LcmException(message="Not able to find oka", bad_args=["oka_path"])

    # Stage the package into a temporary PVC reachable by the workflow
    temp_volume_name = f"temp-pvc-oka-{op_id}"
    await self._kubectl.create_pvc_with_content(
        name=temp_volume_name,
        namespace="osm-workflows",
        src_folder=oka_folder,
        filename=oka_filename,
    )

    # Render the workflow manifest from its Jinja template
    manifest = self.render_jinja_template(
        "launcher-create-oka.j2",
        output_file=None,
        workflow_name=workflow_name,
        git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        oka_name=oka_name,
        oka_type="infra-controllers",
        osm_project_name="osm_admin",  # TODO: get project name from content
        temp_volume_name=temp_volume_name,
        workflow_debug=self._workflow_debug,
        workflow_dry_run=self._workflow_dry_run,
    )
    self.logger.info(manifest)

    # Submit the rendered Argo workflow
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def update_oka(self, op_id, op_params, content):
    """Launch the Argo workflow that updates an onboarded OKA package.

    Args:
        op_id: identifier of the LCM operation.
        op_params: parameters of the operation.
        content: OKA DB record (with `_admin.storage` package location).

    Returns:
        The name of the submitted Argo workflow.

    Raises:
        LcmException: if the OKA package cannot be found in the filesystem.
    """
    self.logger.info("Update OKA workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    workflow_template = "launcher-update-oka.j2"
    workflow_name = f"update-oka-{content['_id']}"

    # Additional params for the workflow
    oka_name = content["git_name"].lower()
    oka_type = "infra-controllers"
    osm_project_name = "osm_admin"  # TODO: get project name from content

    # Get the OKA package. Fix: mirror create_oka — the folder must NOT
    # include the zipfile, the filename comes from the DB record (it was
    # hard-coded to "package.tar.gz"), and the filesystem must be synced
    # and the package existence checked before staging it.
    oka_fs_info = content["_admin"]["storage"]
    oka_folder = f"{oka_fs_info['path']}{oka_fs_info['folder']}"
    oka_filename = oka_fs_info["zipfile"]
    self.fs.sync(oka_folder)
    if not self.fs.file_exists(f"{oka_folder}/{oka_filename}"):
        raise LcmException(message="Not able to find oka", bad_args=["oka_path"])

    # Create temporary volume for the OKA package and copy the content
    temp_volume_name = f"temp-pvc-oka-{op_id}"
    await self._kubectl.create_pvc_with_content(
        name=temp_volume_name,
        namespace="osm-workflows",
        src_folder=oka_folder,
        filename=oka_filename,
    )

    # Render workflow
    manifest = self.render_jinja_template(
        workflow_template,
        output_file=None,
        workflow_name=workflow_name,
        git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        oka_name=oka_name,
        oka_type=oka_type,
        osm_project_name=osm_project_name,
        temp_volume_name=temp_volume_name,
        workflow_debug=self._workflow_debug,
        workflow_dry_run=self._workflow_dry_run,
    )
    self.logger.info(manifest)

    # Submit workflow
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def delete_oka(self, op_id, op_params, content):
    """Launch the Argo workflow that removes an onboarded OKA package."""
    self.logger.info("Delete OKA workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    workflow_name = f"delete-oka-{content['_id']}"

    # Parameters consumed by the Jinja workflow template
    render_args = {
        "workflow_name": workflow_name,
        "git_fleet_url": f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        "git_sw_catalogs_url": f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        "oka_name": content["git_name"].lower(),
        "oka_type": "infra-controllers",
        "osm_project_name": "osm_admin",  # TODO: get project name from content
        "workflow_debug": self._workflow_debug,
        "workflow_dry_run": self._workflow_dry_run,
    }
    manifest = self.render_jinja_template(
        "launcher-delete-oka.j2", output_file=None, **render_args
    )
    self.logger.info(manifest)

    # Submit the rendered Argo workflow
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def check_create_oka(self, op_id, op_params, content):
    """Pre-check hook for OKA creation; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
+
+
async def check_update_oka(self, op_id, op_params, content):
    """Pre-check hook for OKA update; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
+
+
async def check_delete_oka(self, op_id, op_params, content):
    """Pre-check hook for OKA deletion; currently always approves."""
    msg = f"Operation {op_id}. Params: {op_params}. Content: {content}"
    self.logger.info(msg)
    return (True, "OK")
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import yaml
+
+
async def create_profile(self, op_id, op_params, content):
    """Launch the Argo workflow that creates a profile."""
    self.logger.info("Create profile workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    workflow_name = f"create-profile-{content['_id']}"

    # Parameters consumed by the Jinja workflow template
    render_args = {
        "workflow_name": workflow_name,
        "git_fleet_url": f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        "git_sw_catalogs_url": f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        "profile_name": content["git_name"].lower(),
        "profile_type": content["profile_type"],
        "osm_project_name": "osm_admin",  # TODO: get project name from content
        "workflow_debug": self._workflow_debug,
        "workflow_dry_run": self._workflow_dry_run,
    }
    manifest = self.render_jinja_template(
        "launcher-create-profile.j2", output_file=None, **render_args
    )
    self.logger.info(manifest)

    # Submit the rendered Argo workflow
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def delete_profile(self, op_id, op_params, content):
    """Launch the Argo workflow that deletes a profile."""
    self.logger.info("Delete profile workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    workflow_name = f"delete-profile-{content['_id']}"

    # Parameters consumed by the Jinja workflow template
    render_args = {
        "workflow_name": workflow_name,
        "git_fleet_url": f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        "git_sw_catalogs_url": f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        "profile_name": content["git_name"].lower(),
        "profile_type": content["profile_type"],
        "osm_project_name": "osm_admin",  # TODO: get project name from content
        "workflow_debug": self._workflow_debug,
        "workflow_dry_run": self._workflow_dry_run,
    }
    manifest = self.render_jinja_template(
        "launcher-delete-profile.j2", output_file=None, **render_args
    )
    self.logger.info(manifest)

    # Submit the rendered Argo workflow
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def attach_profile_to_cluster(self, op_id, op_params, content):
    """Launch the Argo workflow that attaches a profile to a cluster."""
    self.logger.info("Attach profile to cluster workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    profile = content["profile"]
    cluster = content["cluster"]
    workflow_name = f"attach-profile-{op_id}"

    # Render the workflow manifest from its Jinja template
    manifest = self.render_jinja_template(
        "launcher-attach-profile.j2",
        output_file=None,
        workflow_name=workflow_name,
        git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        profile_name=profile["git_name"].lower(),
        profile_type=profile["profile_type"],
        cluster_kustomization_name=cluster["git_name"].lower(),
        osm_project_name="osm_admin",  # TODO: get project name from content
        workflow_debug=self._workflow_debug,
        workflow_dry_run=self._workflow_dry_run,
    )
    self.logger.info(manifest)

    # Submit the rendered Argo workflow
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def detach_profile_from_cluster(self, op_id, op_params, content):
    """Launch the Argo workflow that detaches a profile from a cluster.

    Args:
        op_id: identifier of the LCM operation.
        op_params: parameters of the operation.
        content: dict with the DB records "profile" and "cluster".

    Returns:
        The name of the submitted Argo workflow.
    """
    # Fix: log message said "Detach profile to cluster" (wrong preposition)
    self.logger.info("Detach profile from cluster workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    profile = content["profile"]
    cluster = content["cluster"]
    workflow_template = "launcher-detach-profile.j2"
    workflow_name = f"detach-profile-{op_id}"

    # Additional params for the workflow
    profile_name = profile["git_name"].lower()
    profile_type = profile["profile_type"]
    cluster_kustomization_name = cluster["git_name"].lower()
    osm_project_name = "osm_admin"  # TODO: get project name from content

    # Render workflow
    manifest = self.render_jinja_template(
        workflow_template,
        output_file=None,
        workflow_name=workflow_name,
        git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        profile_name=profile_name,
        profile_type=profile_type,
        cluster_kustomization_name=cluster_kustomization_name,
        osm_project_name=osm_project_name,
        workflow_debug=self._workflow_debug,
        workflow_dry_run=self._workflow_dry_run,
    )
    self.logger.info(manifest)

    # Submit workflow
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
+async def check_create_profile(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_delete_profile(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_attach_profile_to_cluster(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_detach_profile_from_cluster(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+from jinja2 import Environment, PackageLoader, select_autoescape
+import json
+import yaml
+
+
+def render_jinja_template(self, template_file, output_file=None, **kwargs):
+ """Renders a jinja template with the provided values
+
+ Args:
+ template_file: Jinja template to be rendered
+ output_file: Output file
+ kwargs: (key,value) pairs to be replaced in the template
+
+ Returns:
+ content: The content of the rendered template
+ """
+
+ # Load the template from file
+ # loader = FileSystemLoader("osm_lcm/odu_libs/templates")
+ loader = PackageLoader("osm_lcm", "odu_libs/templates")
+ self.logger.debug(f"Loader: {loader}")
+ env = Environment(loader=loader, autoescape=select_autoescape())
+ self.logger.debug(f"Env: {env}")
+
+ template_list = env.list_templates()
+ self.logger.debug(f"Template list: {template_list}")
+ template = env.get_template(template_file)
+ self.logger.debug(f"Template: {template}")
+
+ # Replace kwargs
+ self.logger.debug(f"Kwargs: {kwargs}")
+ content = template.render(kwargs)
+ if output_file:
+ with open(output_file, "w") as c_file:
+ c_file.write(content)
+ return content
+
+
+def render_yaml_template(self, template_file, output_file=None, **kwargs):
+ """Renders a YAML template with the provided values
+
+ Args:
+ template_file: Yaml template to be rendered
+ output_file: Output file
+ kwargs: (key,value) pairs to be replaced in the template
+
+ Returns:
+ content: The content of the rendered template
+ """
+
+ def print_yaml_json(document, to_json=False):
+ if to_json:
+ print(json.dumps(document, indent=4))
+ else:
+ print(
+ yaml.safe_dump(
+ document, indent=4, default_flow_style=False, sort_keys=False
+ )
+ )
+
+ # Load template in dictionary
+ with open(template_file, "r") as t_file:
+ content_dict = yaml.safe_load(t_file.read())
+ # Replace kwargs
+ self.self.logger.debug(f"Kwargs: {kwargs}")
+ for k, v in kwargs:
+ content_dict[k] = v
+
+ content = print_yaml_json(content_dict)
+ if output_file:
+ with open(output_file, "w") as c_file:
+ c_file.write(content)
+ return content
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # Specific parameters
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: cluster_kustomization_name
+ value: "{{ cluster_kustomization_name }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-attach-profile-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # Specific parameters - Source and destination KSU
+ ## Source KSU:
+ - name: source_ksu_name
+ value: "jenkins"
+ - name: source_profile_name
+ value: "myakscluster01"
+ - name: source_profile_type
+ value: "applications"
+ - name: source_project_name
+ value: "osm_admin"
+ ## Destination KSU:
+ ## - If any of the destination parameters are not specified, it will assume
+ ## they are the same as in source.
+ ## - It will reject if all are empty or equal to source, to avoid cloning a KSU over itself
+ - name: destination_ksu_name
+ value: ""
+ - name: destination_profile_name
+ value: "myprofile"
+ - name: destination_profile_type
+ value: "applications"
+ - name: destination_project_name
+ value: ""
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-clone-ksu-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters - AKS cluster
+ - name: cluster_kustomization_name
+ value: {{ cluster_kustomization_name }}
+ - name: cluster_name
+ value: {{ cluster_name }}
+ - name: vm_size
+ value: {{ vm_size }}
+ - name: node_count
+ value: "{{ node_count }}"
+ - name: cluster_location
+ value: {{ cluster_location }}
+ - name: rg_name
+ value: {{ rg_name }}
+ - name: k8s_version
+ value: "'{{ k8s_version }}'"
+ - name: providerconfig_name
+ value: {{ providerconfig_name }}
+
+ # Specific parameters - Bootstrap
+ - name: public_key_mgmt
+ value: "{{ public_key_mgmt }}"
+ - name: public_key_new_cluster
+ value: "{{ public_key_new_cluster }}"
+ - name: secret_name_private_age_key_for_new_cluster
+ value: "{{ secret_name_private_key_new_cluster }}"
+ - name: key_name_in_secret
+ value: "agekey"
+ - name: fleet_repo_url
+ value: {{ git_fleet_url }}
+ - name: sw_catalogs_repo_url
+ value: {{ git_sw_catalogs_url }}
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-aks-cluster-and-bootstrap-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters - Generic cluster creation
+ - name: cluster_kustomization_name
+ value: {{ cluster_kustomization_name }}
+ - name: cluster_name
+ value: {{ cluster_name }}
+ ## As of today, one among `aks`, `eks` or `gke`:
+ - name: cluster_type
+ value: {{ cluster_type }}
+ - name: vm_size
+ value: {{ vm_size }}
+ - name: node_count
+ value: "{{ node_count }}"
+ - name: cluster_location
+ value: {{ cluster_location }}
+ - name: k8s_version
+ value: "'{{ k8s_version }}'"
+ - name: providerconfig_name
+ value: {{ providerconfig_name }}
+
+ # Specific parameters - Bootstrap and credentials
+ - name: public_key_mgmt
+ value: "{{ public_key_mgmt }}"
+ - name: public_key_new_cluster
+ value: "{{ public_key_new_cluster }}"
+ - name: secret_name_private_age_key_for_new_cluster
+ value: "{{ secret_name_private_key_new_cluster }}"
+ - name: key_name_in_secret
+ value: "agekey"
+ - name: mgmt_project_name
+ value: "{{ osm_project_name }}"
+
+ # Specific parameters - AKS only
+ - name: rg_name
+ value: {{ rg_name }}
+
+ # Specific parameters - GKE only
+ - name: preemptible_nodes
+ value: "false"
+
+ # Advanced parameters - Recommended to keep defaults
+ - name: skip_bootstrap
+ value: "false"
+ - name: mgmt_cluster_name
+ value: "_management"
+ - name: base_templates_path
+ value: "cloud-resources"
+ - name: cloned_fleet_folder_name
+ value: "fleet-osm"
+ - name: cloned_sw_catalogs_folder_name
+ value: "sw-catalogs-osm"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-crossplane-cluster-and-bootstrap-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # HelmRelease generation
+ - name: helmrelease_name
+ value: "jenkins"
+ - name: chart_name
+ value: "jenkins"
+ - name: chart_version
+ value: '13.4.x'
+ - name: target_ns
+ value: "jenkins"
+ - name: create_ns
+ value: "true"
+ # Repo source generation
+ - name: is_preexisting_repo
+ value: "false"
+ - name: helmrepo_name
+ value: "bitnamicharts"
+ - name: helmrepo_url
+ value: oci://registry-1.docker.io/bitnamicharts
+ - name: helmrepo_ns
+ value: "jenkins"
+ - name: helmrepo_secret_ref
+ value: ""
+ # HelmRelease inline values (if any)
+ - name: inline_values
+ # Install some Jenkins plugins:
+ value: |
+ plugins:
+ - kubernetes:3852.v41ea_166a_ed1b_
+ - workflow-aggregator:590.v6a_d052e5a_a_b_5
+ - git:4.13.0
+ - configuration-as-code:1569.vb_72405b_80249
+ # overridePlugins: true
+ # Secret reference and generation (if required)
+ - name: is_preexisting_secret
+ value: "false"
+ - name: values_secret_name
+ value: "mysecret"
+ - name: secret_key
+ value: "values.yaml"
+ - name: age_public_key
+ value: "age1s236gmpr7myjjyqfrl6hwz0npqjgxa9t6tjj46yq28j2c4nk653saqreav"
+ - name: reference_secret_for_values
+ value: "jenkins-credentials"
+ - name: reference_key_for_values
+ value: "creds"
+ # ConfigMap reference and generation (if required)
+ - name: is_preexisting_cm
+ value: "false"
+ - name: values_cm_name
+ value: ""
+ - name: cm_key
+ value: "values.yaml"
+ - name: cm_values
+ value: ""
+ # value: |
+ # cm-key1: cm-value1
+ # cm-key2: cm-value2
+ # KSU rendering
+ - name: ksu_name
+ value: "jenkins"
+ - name: profile_name
+ value: "myakscluster01"
+ - name: profile_type
+ value: "applications"
+ - name: project_name
+ value: "osm_admin"
+    # Will it synchronize the KSU folder with the results of the rendering?
+    ## If left empty, it does not synchronize, so that we can easily accumulate more than
+    ## one Helm chart into the same KSU if desired
+    ## In this example, we will synchronize explicitly to make the example easier to follow.
+ - name: sync
+ value: "true"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-ksu-generated-hr-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+ # Specific parameters - Base KSU generation from template
+ ## Relative path from "SW Catalogs" repo root
+ - name: templates_path
+ value: "{{ templates_path }}"
+ ## Should substitute environment variables in the template?
+ - name: substitute_environment
+ value: "{{ substitute_environment }}"
+ ## Filter for substitution of environment variables
+ - name: substitution_filter
+ value: "{{ substitution_filter }}"
+ ## Custom environment variables (formatted as .env), to be used for template parametrization
+ - name: custom_env_vars
+ value: "{{ custom_env_vars }}"
+ # value: |
+ # KEY1=value1
+ # KEY2=value2
+ # Specific parameters - Patch HelmRelease in KSU with inline values
+ - name: kustomization_name
+ value: "{{ kustomization_name }}"
+ - name: helmrelease_name
+ value: "{{ helmrelease_name }}"
+ - name: inline_values
+ # Install some Jenkins plugins:
+ value: {{ inline_values }}
+ # Specific parameters - Secret generation
+ - name: is_preexisting_secret
+ value: "{{ is_preexisting_secret }}"
+ - name: target_ns
+ value: "{{ target_ns }}"
+ - name: age_public_key
+ value: "{{ age_public_key }}"
+ - name: values_secret_name
+ value: "{{ values_secret_name }}"
+ - name: secret_key
+ value: "values.yaml"
+ ################################################################
+ # This temporary secret (in the example, `jenkins-credentials`) should exist already
+ # in the `osm-workflows` namespace and contain the desired Jenkins credentials in
+ # a well-known key (in the example, `creds`).
+ #
+ # For instance:
+ #
+ # creds: |
+ # jenkinsUser: admin
+ # jenkinsPassword: myJ3nk1n2P2ssw0rd
+ - name: reference_secret_for_values
+ value: "{{ reference_secret_for_values }}"
+ - name: reference_key_for_values
+ value: "{{ reference_key_for_values }}"
+ # Specific parameters - Configmap generation
+ - name: is_preexisting_cm
+      value: "{{ is_preexisting_cm }}"
+ - name: values_cm_name
+ value: "{{ values_configmap_name }}"
+ - name: cm_key
+ value: "values.yaml"
+ - name: cm_values
+ value: {{ cm_values }}
+ # value: |
+ # cm-key1: cm-value1
+ # cm-key2: cm-value2
+ # Specific parameters - KSU rendering
+ - name: ksu_name
+ value: "{{ ksu_name }}"
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ - name: sync
+ value: "{{ sync }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-ksu-hr-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+ # Temporary volume with OKA contents
+ - name: temp_volume_name
+ value: "{{ temp_volume_name }}"
+ # Specific parameters - OKA
+ - name: oka_name
+ value: "{{ oka_name }}"
+ ## Choose among `infra-controllers`, `infra-configs`, `cloud-resources`, `apps`:
+ - name: oka_type
+ value: "{{ oka_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ - name: tarball_file
+ value: "true"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-oka-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # Specific parameters
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-profile-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters
+ - name: providerconfig_name
+ value: "{{ providerconfig_name }}"
+ ## As of today, one among `azure`, `aws` or `gcp`
+ - name: provider_type
+ value: "{{ provider_type }}"
+ ## Final secret to reference from the `ProviderConfig`
+ - name: cred_secret_name
+ value: "{{ cred_secret_name }}"
+ ## Temporary secret with secret contents for the workflow
+ ## - If `temp_cred_secret_name` is empty, assumes that the final secret already exists
+ - name: temp_cred_secret_name
+ value: "{{ temp_cred_secret_name }}"
+ - name: temp_cred_secret_key
+ value: "creds"
+ - name: age_public_key_mgmt
+ value: "{{ public_key_mgmt }}"
+ - name: osm_project_name
+ value: "{{ osm_project_name }}"
+ ## Specific parameters - GCP only
+ - name: target_gcp_project
+ value: "{{ target_gcp_project }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 6000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 6000 # Time to live after workflow is successful
+ secondsAfterFailure: 9000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-crossplane-providerconfig
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters
+ - name: cluster_kustomization_name
+ value: {{ cluster_kustomization_name }}
+ - name: project_name
+ value: "{{ osm_project_name }}"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 1000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 500 # Time to live after workflow is successful
+ secondsAfterFailure: 500 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-delete-cluster-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+ # Specific parameters - KSU id
+ - name: ksu_name
+ value: "{{ ksu_name }}"
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-delete-ksu-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+ # Specific parameters - OKA
+ - name: oka_name
+ value: "{{ oka_name }}"
+ ## Choose among `infra-controllers`, `infra-configs`, `cloud-resources`, `apps`:
+ - name: oka_type
+ value: "{{ oka_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-delete-oka-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # Specific parameters
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-delete-profile-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters
+ - name: providerconfig_name
+ value: "{{ providerconfig_name }}"
+ ## As of today, one among `azure`, `aws` or `gcp`
+ - name: provider_type
+ value: "{{ provider_type }}"
+ - name: osm_project_name
+ value: "{{ osm_project_name }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 6000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 6000 # Time to live after workflow is successful
+ secondsAfterFailure: 9000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-delete-crossplane-providerconfig
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # Specific parameters
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: cluster_kustomization_name
+ value: "{{ cluster_kustomization_name }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-detach-profile-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters - AKS cluster
+ - name: cluster_name
+ value: myakscluster01
+ - name: vm_size
+ value: Standard_D2_v2
+ - name: node_count
+ value: "1"
+ - name: cluster_location
+ value: "North Europe"
+ - name: rg_name
+ value: CloudNative-OSM
+ - name: k8s_version
+ value: "'1.28'"
+ - name: providerconfig_name
+ value: default
+ - name: cluster_kustomization_name
+ value: myakscluster01
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-update-aks-cluster-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters - Generic cluster creation
+ - name: cluster_kustomization_name
+ value: {{ cluster_kustomization_name }}
+ - name: cluster_name
+ value: {{ cluster_name }}
+ ## As of today, one among `aks`, `eks` or `gke`:
+ - name: cluster_type
+ value: {{ cluster_type }}
+ - name: providerconfig_name
+ value: {{ providerconfig_name }}
+ - name: vm_size
+ value: {{ vm_size }}
+ - name: node_count
+ value: "{{ node_count }}"
+ - name: cluster_location
+ value: {{ cluster_location }}
+ - name: k8s_version
+ value: "'{{ k8s_version }}'"
+
+ # Specific parameters - Bootstrap and credentials
+ - name: public_key_mgmt
+ value: "{{ public_key_mgmt }}"
+ - name: public_key_new_cluster
+ value: "{{ public_key_new_cluster }}"
+ - name: secret_name_private_age_key_for_new_cluster
+ value: "{{ secret_name_private_key_new_cluster }}"
+ - name: key_name_in_secret
+ value: "agekey"
+ - name: mgmt_project_name
+ value: "{{ osm_project_name }}"
+
+ # Specific parameters - AKS only
+ - name: rg_name
+ value: CloudNative-OSM
+
+ # Specific parameters - GKE only
+ - name: preemptible_nodes
+ value: "false"
+
+ # Advanced parameters - Recommended to keep defaults
+ - name: mgmt_cluster_name
+ value: "_management"
+ - name: base_templates_path
+ value: "cloud-resources"
+ - name: cloned_fleet_folder_name
+ value: "fleet-osm"
+ - name: cloned_sw_catalogs_folder_name
+ value: "sw-catalogs-osm"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-update-crossplane-cluster-and-bootstrap-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # HelmRelease generation
+ - name: helmrelease_name
+ value: "jenkins"
+ - name: chart_name
+ value: "jenkins"
+ - name: chart_version
+ value: '13.4.x'
+ - name: target_ns
+ value: "jenkins"
+ - name: create_ns
+ value: "true"
+ # Repo source generation
+ - name: is_preexisting_repo
+ value: "false"
+ - name: helmrepo_name
+ value: "bitnamicharts"
+ - name: helmrepo_url
+ value: oci://registry-1.docker.io/bitnamicharts
+ - name: helmrepo_ns
+ value: "jenkins"
+ - name: helmrepo_secret_ref
+ value: ""
+ # HelmRelease inline values (if any)
+ - name: inline_values
+ # Install some Jenkins plugins:
+ value: |
+ plugins:
+ - kubernetes:3852.v41ea_166a_ed1b_
+ - workflow-aggregator:590.v6a_d052e5a_a_b_5
+ - git:4.13.0
+ - configuration-as-code:1569.vb_72405b_80249
+ # overridePlugins: true
+ # Secret reference and generation (if required)
+ - name: is_preexisting_secret
+ value: "false"
+ - name: values_secret_name
+ value: "mysecret"
+ - name: secret_key
+ value: "values.yaml"
+ - name: age_public_key
+ value: "age1s236gmpr7myjjyqfrl6hwz0npqjgxa9t6tjj46yq28j2c4nk653saqreav"
+ - name: reference_secret_for_values
+ value: "jenkins-credentials"
+ - name: reference_key_for_values
+ value: "creds"
+ # ConfigMap reference and generation (if required)
+ - name: is_preexisting_cm
+ value: "false"
+ - name: values_cm_name
+ value: ""
+ - name: cm_key
+ value: "values.yaml"
+ - name: cm_values
+ value: ""
+ # value: |
+ # cm-key1: cm-value1
+ # cm-key2: cm-value2
+ # KSU rendering
+ - name: ksu_name
+ value: "jenkins"
+ - name: profile_name
+ value: "myakscluster01"
+ - name: profile_type
+ value: "applications"
+ - name: project_name
+ value: "osm_admin"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-update-ksu-generated-hr-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+ # Specific parameters - Base KSU generation from template
+ ## Relative path from "SW Catalogs" repo root
+ - name: templates_path
+ value: "{{ templates_path }}"
+ ## Should substitute environment variables in the template?
+ - name: substitute_environment
+ value: "{{ substitute_environment }}"
+ ## Filter for substitution of environment variables
+ - name: substitution_filter
+ value: "{{ substitution_filter }}"
+ ## Custom environment variables (formatted as .env), to be used for template parametrization
+ - name: custom_env_vars
+ value: "{custom_env_vars}"
+ # Specific parameters - Patch HelmRelease in KSU with inline values
+ - name: kustomization_name
+ value: "{{ kustomization_name }}"
+ - name: helmrelease_name
+ value: "{{ helmrelease_name }}"
+ - name: inline_values
+ value: {{ inline_values }}
+ # Specific parameters - Secret generation
+ - name: is_preexisting_secret
+ value: "{{ is_preexisting_secret }}"
+ - name: target_ns
+ value: "{{ target_ns }}"
+ - name: age_public_key
+ value: "{{ age_public_key }}"
+ - name: values_secret_name
+ value: "{{ values_secret_name }}"
+ - name: secret_key
+ value: "values.yaml"
+ - name: reference_secret_for_values
+ value: "{{ reference_secret_for_values }}"
+ - name: reference_key_for_values
+ value: "{{ reference_key_for_values }}"
+ # Specific parameters - Configmap generation
+ - name: is_preexisting_cm
+ value: "false"
+ - name: values_cm_name
+ value: "{{ values_configmap_name }}"
+ - name: cm_key
+ value: "values.yaml"
+ - name: cm_values
+ value: "{{ cm_values }}"
+ # Specific parameters - KSU rendering
+ - name: ksu_name
+ value: "{{ ksu_name }}"
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-update-ksu-hr-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+ # Temporary volume with OKA contents
+ - name: temp_volume_name
+ value: "{{ temp_volume_name }}"
+ # Specific parameters - OKA
+ - name: oka_name
+ value: "{{ oka_name }}"
+ ## Choose among `infra-controllers`, `infra-configs`, `cloud-resources`, `apps`:
+ - name: oka_type
+ value: "{{ oka_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ - name: tarball_file
+ value: "true"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-update-oka-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters
+ - name: providerconfig_name
+ value: "{{ providerconfig_name }}"
+ ## As of today, one among `azure`, `aws` or `gcp`
+ - name: provider_type
+ value: "{{ provider_type }}"
+ ## Final secret to reference from the `ProviderConfig`
+ - name: cred_secret_name
+ value: "{{ cred_secret_name }}"
+ ## Temporary secret with secret contents for the workflow
+ ## - If `temp_cred_secret_name` is empty, assumes that the final secret already exists
+ - name: temp_cred_secret_name
+ value: "{{ temp_cred_secret_name }}"
+ - name: temp_cred_secret_key
+ value: "creds"
+ - name: age_public_key_mgmt
+ value: "{{ public_key_mgmt }}"
+ - name: osm_project_name
+ value: "{{ osm_project_name }}"
+ ## Specific parameters - GCP only
+ - name: target_gcp_project
+ value: "{{ target_gcp_project }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 6000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 6000 # Time to live after workflow is successful
+ secondsAfterFailure: 9000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-update-crossplane-providerconfig
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import yaml
+import json
+
+
async def create_cloud_credentials(self, op_id, op_params, content):
    """Launch the Argo workflow that creates a Crossplane ProviderConfig
    (and its credentials secret) for a new cloud/VIM account.

    :param op_id: identifier of the LCM operation being processed
    :param op_params: operation parameters (currently only logged)
    :param content: VIM content from the database; must include `_id`,
        `name`, `vim_type` and `config.credentials`
    :return: name of the submitted Argo workflow
    """
    self.logger.info("Create cloud_credentials workflow Enter")
    # NOTE(review): `content` carries raw credentials; logging it at INFO
    # level leaks secrets into the logs — consider redacting.
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    workflow_template = "launcher-create-providerconfig.j2"
    workflow_name = f"create-providerconfig-{content['_id']}"
    # Prefer the git-safe name when available
    vim_name = content.get("git_name", content["name"]).lower()

    # Test kubectl connection
    self.logger.debug(self._kubectl._get_kubectl_version())

    # Create a temporary secret holding the raw credentials for the workflow
    secret_name = workflow_name
    secret_namespace = "osm-workflows"
    secret_key = "creds"
    secret_value = json.dumps(content["config"]["credentials"], indent=2)
    await self.create_secret(
        secret_name,
        secret_namespace,
        secret_key,
        secret_value,
    )

    # Additional params for the workflow
    providerconfig_name = f"{vim_name}-config"
    provider_type = content["vim_type"]
    osm_project_name = "osm_admin"  # TODO: get project name from content
    # Only GCP needs a target project (taken from the VIM tenant)
    vim_tenant = content["vim_tenant_name"] if provider_type == "gcp" else ""

    # Render the workflow manifest from the Jinja2 template
    manifest = self.render_jinja_template(
        workflow_template,
        output_file=None,
        workflow_name=workflow_name,
        git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        providerconfig_name=providerconfig_name,
        provider_type=provider_type,
        cred_secret_name=vim_name,
        temp_cred_secret_name=secret_name,
        public_key_mgmt=self._pubkey,
        osm_project_name=osm_project_name,
        target_gcp_project=vim_tenant,
        workflow_debug=self._workflow_debug,
        workflow_dry_run=self._workflow_dry_run,
    )
    self.logger.debug(f"Workflow manifest: {manifest}")

    # Submit workflow (reuse secret_namespace instead of repeating the literal)
    # NOTE(review): if create_generic_object is a coroutine it should be
    # awaited here — confirm against the kubectl client API.
    self._kubectl.create_generic_object(
        namespace=secret_namespace,
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def delete_cloud_credentials(self, op_id, op_params, content):
    """Submit the Argo workflow that removes the Crossplane ProviderConfig
    of a cloud/VIM account.

    :param op_id: identifier of the LCM operation being processed
    :param op_params: operation parameters (currently only logged)
    :param content: VIM content from the database (`_id`, `name`, `vim_type`)
    :return: name of the submitted Argo workflow
    """
    self.logger.info("Delete cloud_credentials workflow Enter")
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    # Derive the workflow identity from the VIM record
    template_file = "launcher-delete-providerconfig.j2"
    wf_name = f"delete-providerconfig-{content['_id']}"
    vim_name = content.get("git_name", content["name"]).lower()

    # Parameters handed over to the Jinja2 template
    render_args = {
        "workflow_name": wf_name,
        "git_fleet_url": f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        "git_sw_catalogs_url": f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        "providerconfig_name": f"{vim_name}-config",
        "provider_type": content["vim_type"],
        "osm_project_name": "osm_admin",  # TODO: get project name from content
        "workflow_debug": self._workflow_debug,
        "workflow_dry_run": self._workflow_dry_run,
    }
    manifest = self.render_jinja_template(template_file, output_file=None, **render_args)
    self.logger.debug(f"Workflow manifest: {manifest}")

    # Hand the rendered manifest to Kubernetes; Argo picks it up from there
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return wf_name
+
+
async def update_cloud_credentials(self, op_id, op_params, content):
    """Launch the Argo workflow that updates the Crossplane ProviderConfig
    (and its credentials secret) of an existing cloud/VIM account.

    :param op_id: identifier of the LCM operation being processed
    :param op_params: operation parameters (currently only logged)
    :param content: VIM content from the database; must include `_id`,
        `name`, `vim_type` and `config.credentials`
    :return: name of the submitted Argo workflow
    """
    self.logger.info("Update cloud_credentials workflow Enter")
    # NOTE(review): `content` carries raw credentials; logging it at INFO
    # level leaks secrets into the logs — consider redacting.
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")

    workflow_template = "launcher-update-providerconfig.j2"
    workflow_name = f"update-providerconfig-{content['_id']}"
    # vim_name = content["name"].lower()
    # Prefer the git-safe name when present
    vim_name = content.get("git_name", content["name"]).lower()
    # workflow_name = f"{op_id}-update-credentials-{vim_name}"

    # Create a temporary secret holding the new credentials for the workflow
    secret_name = workflow_name
    secret_namespace = "osm-workflows"
    secret_key = "creds"
    secret_value = json.dumps(content["config"]["credentials"], indent=2)
    await self.create_secret(
        secret_name,
        secret_namespace,
        secret_key,
        secret_value,
    )
    # Additional params for the workflow
    providerconfig_name = f"{vim_name}-config"
    provider_type = content["vim_type"]
    osm_project_name = "osm_admin"  # TODO: get project name from content
    if provider_type == "gcp":
        # Only GCP needs a target project (taken from the VIM tenant)
        vim_tenant = content["vim_tenant_name"]
    else:
        vim_tenant = ""

    # Render the workflow manifest from the Jinja2 template
    manifest = self.render_jinja_template(
        workflow_template,
        output_file=None,
        workflow_name=workflow_name,
        git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
        git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
        providerconfig_name=providerconfig_name,
        provider_type=provider_type,
        cred_secret_name=vim_name,
        temp_cred_secret_name=secret_name,
        public_key_mgmt=self._pubkey,
        osm_project_name=osm_project_name,
        target_gcp_project=vim_tenant,
        workflow_debug=self._workflow_debug,
        workflow_dry_run=self._workflow_dry_run,
    )
    self.logger.debug(f"Workflow manifest: {manifest}")

    # Submit workflow
    # NOTE(review): if create_generic_object is a coroutine it should be
    # awaited — confirm against the kubectl client API.
    self._kubectl.create_generic_object(
        namespace="osm-workflows",
        manifest_dict=yaml.safe_load(manifest),
        api_group="argoproj.io",
        api_plural="workflows",
        api_version="v1alpha1",
    )
    return workflow_name
+
+
async def check_create_cloud_credentials(self, op_id, op_params, content):
    """Check the create-credentials operation; currently always succeeds.

    :return: tuple (success: bool, message: str)
    """
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
    outcome = (True, "OK")
    return outcome
+
+
async def check_update_cloud_credentials(self, op_id, op_params, content):
    """Check the update-credentials operation; currently always succeeds.

    :return: tuple (success: bool, message: str)
    """
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
    outcome = (True, "OK")
    return outcome
+
+
async def check_delete_cloud_credentials(self, op_id, op_params, content):
    """Check the delete-credentials operation; currently always succeeds.

    :return: tuple (success: bool, message: str)
    """
    self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
    outcome = (True, "OK")
    return outcome
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import asyncio
+from time import time
+
+
+async def check_workflow_status(self, workflow_name):
+ self.logger.info(f"Async check workflow status Enter: {workflow_name}")
+ start_time = time()
+ timeout = 300
+ retry_time = 15
+ # TODO: Maybe it's better not to measure time, but controlling retries
+ # retries = 0
+ # total_retries = int(timeout/retry_time)
+ while time() <= start_time + timeout:
+ # workflow_list = await self._kubectl.list_generic_object(
+ # api_group="argoproj.io",
+ # api_plural="workflows",
+ # api_version="v1alpha1",
+ # namespace="osm-workflows",
+ # )
+ # self.logger.info(f"Workflow_list: { workflow_list }")
+ # kubectl get workflow/${WORKFLOW_NAME} -n osm-workflows -o jsonpath='{.status.conditions}' | jq -r '.[] | select(.type=="Completed").status'
+ workflow = await self._kubectl.get_generic_object(
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ namespace="osm-workflows",
+ name=workflow_name,
+ )
+ # self.logger.info(f"Workflow: {workflow}")
+ # self.logger.info(f"Workflow status: {workflow.get('status')}")
+ conditions = workflow.get("status", {}).get("conditions", [])
+ self.logger.info(f"Workflow status conditions: {conditions}")
+ result = next((item for item in conditions if item["type"] == "Completed"), {})
+ if result.get("status", "False") == "True":
+ self.logger.info(
+ f"Workflow {workflow_name} completed in {time() - start_time} seconds"
+ )
+ return True, "COMPLETED"
+ await asyncio.sleep(retry_time)
+    return False, f"Workflow {workflow_name} did not complete in {timeout} seconds"
--- /dev/null
+#######################################################################################
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import logging
+from osm_lcm.lcm_utils import LcmBase
+
+from n2vc import kubectl
+
+
+class OduWorkflow(LcmBase):
+ def __init__(self, msg, lcm_tasks, config):
+ """
+ Init, Connect to database, filesystem storage, and messaging
+ :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
+ :return: None
+ """
+
+ self.logger = logging.getLogger("lcm.odu")
+ self.lcm_tasks = lcm_tasks
+ self.logger.info("Msg: {} lcm_tasks: {} ".format(msg, lcm_tasks))
+
+ # self._kubeconfig = kubeconfig # TODO: get it from config
+ self._kubeconfig = "/etc/osm/mgmtcluster-kubeconfig.yaml"
+ self._kubectl = kubectl.Kubectl(config_file=self._kubeconfig)
+ # self._repo_base_url = repo_base_url
+ # self._repo_user = repo_user
+ # self._pubkey = pubkey
+ self._repo_base_url = (
+ "http://git.172.21.249.24.nip.io" # TODO: get it from config
+ )
+ self._repo_user = "osm-developer" # TODO: get it from config
+ self._pubkey = "age1wnfvymrm4w9kfz8vn98lmu8c4w9e2wjd2v7u9lx7m3gn6patc4vqpralhx" # TODO: get it from config
+ self._workflow_debug = "true"
+ self._workflow_dry_run = "false"
+ self._workflows = {
+ "create_cluster": {
+ "workflow_function": self.create_cluster,
+ "check_resource_function": self.check_create_cluster,
+ },
+ "update_cluster": {
+ "workflow_function": self.update_cluster,
+ "check_resource_function": self.check_update_cluster,
+ },
+ "delete_cluster": {
+ "workflow_function": self.delete_cluster,
+ "check_resource_function": self.check_delete_cluster,
+ },
+ "register_cluster": {
+ "workflow_function": self.register_cluster,
+ "check_resource_function": self.check_register_cluster,
+ },
+ "deregister_cluster": {
+ "workflow_function": self.deregister_cluster,
+ "check_resource_function": self.check_deregister_cluster,
+ },
+ "create_profile": {
+ "workflow_function": self.create_profile,
+ "check_resource_function": self.check_create_profile,
+ },
+ "delete_profile": {
+ "workflow_function": self.delete_profile,
+ "check_resource_function": self.check_delete_profile,
+ },
+ "attach_profile_to_cluster": {
+ "workflow_function": self.attach_profile_to_cluster,
+ "check_resource_function": self.check_attach_profile_to_cluster,
+ },
+ "detach_profile_from_cluster": {
+ "workflow_function": self.detach_profile_from_cluster,
+ "check_resource_function": self.check_detach_profile_from_cluster,
+ },
+ "create_oka": {
+ "workflow_function": self.create_oka,
+ "check_resource_function": self.check_create_oka,
+ },
+ "update_oka": {
+ "workflow_function": self.update_oka,
+ "check_resource_function": self.check_update_oka,
+ },
+ "delete_oka": {
+ "workflow_function": self.delete_oka,
+ "check_resource_function": self.check_delete_oka,
+ },
+ "create_ksus": {
+ "workflow_function": self.create_ksus,
+ "check_resource_function": self.check_create_ksus,
+ },
+ "update_ksus": {
+ "workflow_function": self.update_ksus,
+ "check_resource_function": self.check_update_ksus,
+ },
+ "delete_ksus": {
+ "workflow_function": self.delete_ksus,
+ "check_resource_function": self.check_delete_ksus,
+ },
+ "clone_ksu": {
+ "workflow_function": self.clone_ksu,
+ "check_resource_function": self.check_clone_ksu,
+ },
+ "move_ksu": {
+ "workflow_function": self.move_ksu,
+ "check_resource_function": self.check_move_ksu,
+ },
+ "create_cloud_credentials": {
+ "workflow_function": self.create_cloud_credentials,
+ "check_resource_function": self.check_create_cloud_credentials,
+ },
+ "update_cloud_credentials": {
+ "workflow_function": self.update_cloud_credentials,
+ "check_resource_function": self.check_update_cloud_credentials,
+ },
+ "delete_cloud_credentials": {
+ "workflow_function": self.delete_cloud_credentials,
+ "check_resource_function": self.check_delete_cloud_credentials,
+ },
+ "dummy_operation": {
+ "workflow_function": self.dummy_operation,
+ "check_resource_function": self.check_dummy_operation,
+ },
+ }
+
+ super().__init__(msg, self.logger)
+
+ @property
+ def kubeconfig(self):
+ return self._kubeconfig
+
+ # Imported methods
+ from osm_lcm.odu_libs.vim_mgmt import (
+ create_cloud_credentials,
+ update_cloud_credentials,
+ delete_cloud_credentials,
+ check_create_cloud_credentials,
+ check_update_cloud_credentials,
+ check_delete_cloud_credentials,
+ )
+ from osm_lcm.odu_libs.cluster_mgmt import (
+ create_cluster,
+ update_cluster,
+ delete_cluster,
+ register_cluster,
+ deregister_cluster,
+ check_create_cluster,
+ check_update_cluster,
+ check_delete_cluster,
+ check_register_cluster,
+ check_deregister_cluster,
+ get_cluster_credentials,
+ )
+ from osm_lcm.odu_libs.ksu import (
+ create_ksus,
+ update_ksus,
+ delete_ksus,
+ clone_ksu,
+ move_ksu,
+ check_create_ksus,
+ check_update_ksus,
+ check_delete_ksus,
+ check_clone_ksu,
+ check_move_ksu,
+ )
+ from osm_lcm.odu_libs.oka import (
+ create_oka,
+ update_oka,
+ delete_oka,
+ check_create_oka,
+ check_update_oka,
+ check_delete_oka,
+ )
+ from osm_lcm.odu_libs.profiles import (
+ create_profile,
+ delete_profile,
+ attach_profile_to_cluster,
+ detach_profile_from_cluster,
+ check_create_profile,
+ check_delete_profile,
+ check_attach_profile_to_cluster,
+ check_detach_profile_from_cluster,
+ )
+ from osm_lcm.odu_libs.workflows import (
+ check_workflow_status,
+ )
+ from osm_lcm.odu_libs.render import (
+ render_jinja_template,
+ render_yaml_template,
+ )
+ from osm_lcm.odu_libs.common import create_secret
+
+ async def launch_workflow(self, key, op_id, op_params, content):
+ self.logger.info(
+ f"Workflow is getting into launch. Key: {key}. Operation: {op_id}. Params: {op_params}. Content: {content}"
+ )
+ workflow_function = self._workflows[key]["workflow_function"]
+ self.logger.info("workflow function : {}".format(workflow_function))
+ return await workflow_function(op_id, op_params, content)
+
+ async def dummy_operation(self, op_id, op_params, content):
+ self.logger.info("Empty operation status Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return content["workflow_name"]
+
+ async def check_resource_status(self, key, op_id, op_params, content):
+ self.logger.info(
+ f"Check resource status. Key: {key}. Operation: {op_id}. Params: {op_params}. Content: {content}"
+ )
+ check_resource_function = self._workflows[key]["check_resource_function"]
+ self.logger.info("check_resource function : {}".format(check_resource_function))
+ return await check_resource_function(op_id, op_params, content)
+
+ async def check_dummy_operation(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
in str(context.exception)
)
- # test vertical scale executes sucessfully
- # @patch("osm_lcm.ng_ro.status.response")
- @asynctest.fail_on(active_handles=True)
- async def test_vertical_scaling(self):
- nsr_id = descriptors.test_ids["TEST-V-SCALE"]["ns"]
- nslcmop_id = descriptors.test_ids["TEST-V-SCALE"]["instantiate"]
-
- # calling the vertical scale fucntion
- # self.my_ns.RO.status = asynctest.CoroutineMock(self.my_ns.RO.status, side_effect=self._ro_status("update"))
- mock_wait_ng_ro = asynctest.CoroutineMock()
- with patch("osm_lcm.ns.NsLcm._wait_ng_ro", mock_wait_ng_ro):
- await self.my_ns.vertical_scale(nsr_id, nslcmop_id)
- return_value = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get(
- "operationState"
- )
- expected_value = "COMPLETED"
- self.assertNotEqual(return_value, expected_value)
-
- # test vertical scale executes fail
- @asynctest.fail_on(active_handles=True)
- async def test_vertical_scaling_fail(self):
- # get th nsr nad nslcmops id from descriptors
- nsr_id = descriptors.test_ids["TEST-V-SCALE"]["ns"]
- nslcmop_id = descriptors.test_ids["TEST-V-SCALE"]["instantiate-1"]
-
- # calling the vertical scale fucntion
- await self.my_ns.vertical_scale(nsr_id, nslcmop_id)
- return_value = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get(
- "operationState"
- )
- expected_value = "FAILED"
- self.assertEqual(return_value, expected_value)
-
# async def test_instantiate_pdu(self):
# nsr_id = descriptors.test_ids["TEST-A"]["ns"]
# nslcmop_id = descriptors.test_ids["TEST-A"]["instantiate"]
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
-aiokafka==0.8.1
+aiokafka==0.11.0
# via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
async-timeout==4.0.3
# via
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
# aiokafka
# retrying-async
-bcrypt==4.0.1
+bcrypt==4.2.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# paramiko
-cachetools==5.3.1
+cachetools==5.4.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# google-auth
-certifi==2023.7.22
+certifi==2024.7.4
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# kubernetes
# requests
-cffi==1.16.0
+cffi==1.17.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# cryptography
# pynacl
-charset-normalizer==3.2.0
+charset-normalizer==3.3.2
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# requests
-cryptography==41.0.4
+cryptography==43.0.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# paramiko
dataclasses==0.6
# via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-dnspython==2.4.2
+dnspython==2.6.1
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# kubernetes
-idna==3.4
+idna==3.7
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# requests
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# theblues
-kafka-python==2.0.2
- # via
- # -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
- # aiokafka
-kubernetes==26.1.0
+kubernetes==30.1.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# juju
-macaroonbakery==1.3.1
+macaroonbakery==1.3.4
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# juju
# theblues
-motor==3.3.1
+motor==3.5.1
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
oauthlib==3.2.2
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
+ # kubernetes
# requests-oauthlib
osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git@master
# via -r requirements-dev.in
-packaging==23.1
+packaging==24.1
# via
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
# aiokafka
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# macaroonbakery
-pyasn1==0.5.0
+pyasn1==0.6.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# juju
# pyasn1-modules
# rsa
-pyasn1-modules==0.3.0
+pyasn1-modules==0.4.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# google-auth
-pycparser==2.21
+pycparser==2.22
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# cffi
-pycryptodome==3.19.0
+pycryptodome==3.20.0
# via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
pymacaroons==0.13.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# macaroonbakery
-pymongo==4.5.0
+pymongo==4.8.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# juju
# macaroonbakery
-python-dateutil==2.8.2
+python-dateutil==2.9.0.post0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# kubernetes
-pytz==2023.3.post1
+pytz==2024.1
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# pyrfc3339
-pyyaml==6.0.1
+pyyaml==6.0.2
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
# juju
# jujubundlelib
# kubernetes
-requests==2.31.0
+requests==2.32.3
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# kubernetes
# macaroonbakery
# requests-oauthlib
# theblues
-requests-oauthlib==1.3.1
+requests-oauthlib==2.0.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# kubernetes
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# juju
-typing-extensions==4.8.0
+typing-extensions==4.12.2
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
+ # -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+ # aiokafka
# typing-inspect
typing-inspect==0.9.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# juju
-urllib3==2.0.5
+urllib3==2.2.2
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# kubernetes
# requests
-websocket-client==1.6.3
+websocket-client==1.8.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# kubernetes
-websockets==11.0.3
+websockets==12.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
# juju
-
-# The following packages are considered to be unsafe in a requirements file:
-# setuptools
#######################################################################################
asynctest==0.13.0
# via -r requirements-test.in
-coverage==7.3.1
+coverage==7.6.1
# via -r requirements-test.in
mock==5.1.0
# via -r requirements-test.in
-nose2==0.13.0
+nose2==0.15.1
# via -r requirements-test.in
async-timeout
checksumdir
config-man
-grpcio-tools<1.48.2
+grpcio-tools
grpclib
-idna
jinja2
+protobuf==3.20.3
+pyrage
pyyaml>6
pydantic
-protobuf==3.20.3
+randomname
+retrying-async
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
-aiohttp==3.8.5
+aiohappyeyeballs==2.3.4
+ # via aiohttp
+aiohttp==3.10.1
# via -r requirements.in
aiosignal==1.3.1
# via aiohttp
-annotated-types==0.5.0
+annotated-types==0.7.0
# via pydantic
async-timeout==4.0.3
# via
# -r requirements.in
# aiohttp
-attrs==23.1.0
+ # retrying-async
+attrs==24.2.0
# via
# aiohttp
# glom
-boltons==23.0.0
+boltons==24.0.0
# via
# face
# glom
-charset-normalizer==3.2.0
- # via aiohttp
checksumdir==1.2.0
# via -r requirements.in
config-man==0.0.4
# via -r requirements.in
face==20.1.1
# via glom
-frozenlist==1.4.0
+fire==0.6.0
+ # via randomname
+frozenlist==1.4.1
# via
# aiohttp
# aiosignal
-glom==23.3.0
+glom==23.5.0
# via config-man
-grpcio==1.58.0
+grpcio==1.65.4
# via grpcio-tools
-grpcio-tools==1.48.1
+grpcio-tools==1.48.2
# via -r requirements.in
-grpclib==0.4.5
+grpclib==0.4.7
# via -r requirements.in
h2==4.1.0
# via grpclib
# via h2
hyperframe==6.0.1
# via h2
-idna==3.4
- # via
- # -r requirements.in
- # yarl
-jinja2==3.1.2
+idna==3.7
+ # via yarl
+jinja2==3.1.4
# via -r requirements.in
-markupsafe==2.1.3
+markupsafe==2.1.5
# via jinja2
-multidict==6.0.4
+multidict==6.0.5
# via
# aiohttp
# grpclib
# via
# -r requirements.in
# grpcio-tools
-pydantic==2.4.2
+pydantic==2.8.2
# via -r requirements.in
-pydantic-core==2.10.1
+pydantic-core==2.20.1
# via pydantic
-pyyaml==6.0.1
+pyrage==1.1.2
+ # via -r requirements.in
+pyyaml==6.0.2
+ # via -r requirements.in
+randomname==0.2.1
+ # via -r requirements.in
+retrying-async==2.0.0
# via -r requirements.in
-typing-extensions==4.8.0
+six==1.16.0
+ # via fire
+termcolor==2.4.0
+ # via fire
+typing-extensions==4.12.2
# via
# pydantic
# pydantic-core
-yarl==1.9.2
+yarl==1.9.4
# via aiohttp
# The following packages are considered to be unsafe in a requirements file:
deps = {[testenv]deps}
-r{toxinidir}/requirements-dev.txt
-r{toxinidir}/requirements-test.txt
- pylint
+ pylint==3.1.1
commands =
pylint -E osm_lcm --extension-pkg-allow-list=pydantic # issue with pydantic (https://github.com/pydantic/pydantic/issues/1961)