include README.rst
recursive-include osm_lcm *.py *.xml *.sh lcm.cfg *.txt
recursive-include devops-stages *
+recursive-include osm_lcm/odu_libs/templates *.j2
old_sa: bool = True
+class GitopsConfig(OsmConfigman):
+ repo_base_url: str = None
+ repo_user: str = None
+ pubkey: str = None
+ mgmtcluster_kubeconfig: str = "/etc/osm/mgmtcluster-kubeconfig.yaml"
+ loglevel: str = "DEBUG"
+ logfile: str = None
+ logger_name: str = None
+
+ def transform(self):
+ self.logger_name = "lcm.gitops"
+
+
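A minimal sketch of how the new section is meant to be used (assuming GitopsConfig is exported by osm_lcm.data_utils.lcm_config alongside LcmCfg; the sample values mirror the lcm.cfg excerpt further below):

# Illustrative only; attribute assignment follows the OsmConfigman style used above
from osm_lcm.data_utils.lcm_config import GitopsConfig

gitops = GitopsConfig()
gitops.repo_user = "osm-developer"
gitops.repo_base_url = "http://git.<IP_ADDRESS>.nip.io"
gitops.transform()  # derives the logger name used by the GitOps/ODU code
assert gitops.logger_name == "lcm.gitops"
assert gitops.mgmtcluster_kubeconfig == "/etc/osm/mgmtcluster-kubeconfig.yaml"  # default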
# Main configuration Template
message: MessageConfig = MessageConfig()
tsdb: TsdbConfig = TsdbConfig()
servicekpi: MonitoringConfig = MonitoringConfig()
+ gitops: GitopsConfig = GitopsConfig()
def transform(self):
for attribute in dir(self):
class ClusterLcm(LcmBase):
- db_topic = "clusters"
+ db_collection = "clusters"
def __init__(self, msg, lcm_tasks, config):
"""
super().__init__(msg, self.logger)
- async def create(self, content, order_id):
+ async def create(self, op_id, op_params, content):
self.logger.info("cluster Create Enter")
+ db_cluster = content["cluster"]
- workflow_name = self.odu.launch_workflow("create_cluster", content)
+ workflow_name = await self.odu.launch_workflow(
+ "create_cluster", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
)
)
if workflow_status:
- content["state"] = "CREATED"
- content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ db_cluster["state"] = "CREATED"
+ db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
else:
- content["state"] = "FAILED_CREATION"
- content["resourceState"] = "ERROR"
+ db_cluster["state"] = "FAILED_CREATION"
+ db_cluster["resourceState"] = "ERROR"
# update_operation_history has to be called; it returns the updated content
- content = self.update_operation_history(content, workflow_status, None)
- self.db.set_one("clusters", {"_id": content["_id"]}, content)
+ db_cluster = self.update_operation_history(db_cluster, workflow_status, None)
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "create_cluster", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "create_cluster", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
)
)
if resource_status:
- content["resourceState"] = "READY"
+ db_cluster["resourceState"] = "READY"
else:
- content["resourceState"] = "ERROR"
+ db_cluster["resourceState"] = "ERROR"
- content["operatingState"] = "IDLE"
- content = self.update_operation_history(
- content, workflow_status, resource_status
+ db_cluster["operatingState"] = "IDLE"
+ db_cluster = self.update_operation_history(
+ db_cluster, workflow_status, resource_status
)
- self.db.set_one("clusters", {"_id": content["_id"]}, content)
- self.profile_state(content, workflow_status, resource_status)
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
+ self.update_profile_state(db_cluster, workflow_status, resource_status)
return
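A hedged sketch of how the new (op_id, op_params, content) convention could be driven from the Lcm kafka handler (the cluster branch of kafka_read_callback is not shown in this excerpt, so the attribute and field names below are assumptions; the pattern mirrors the cloud_credentials wiring near the end of the change):

# Hypothetical caller inside Lcm.kafka_read_callback
db_cluster = self.db.get_one("clusters", {"_id": params["_id"]})
op_id = params["operation_id"]  # assumed field carrying the operation id
op_params = self.get_operation_params(db_cluster, op_id)
content = {"cluster": db_cluster}
task = asyncio.ensure_future(self.cluster.create(op_id, op_params, content))
self.lcm_tasks.register("cluster", db_cluster["_id"], op_id, "cluster_create", task)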
- def profile_state(self, content, workflow_status, resource_status):
+ def update_profile_state(self, db_cluster, workflow_status, resource_status):
profiles = [
"infra_controller_profiles",
"infra_config_profiles",
"resource_profiles": "k8sresource",
}
for profile_type in profiles:
- profile_id = content[profile_type]
+ profile_id = db_cluster[profile_type]
self.logger.info("profile id is : {}".format(profile_id))
db_collection = profiles_collection[profile_type]
self.logger.info("the db_collection is :{}".format(db_collection))
db_profile = self.db.get_one(db_collection, {"_id": profile_id})
self.logger.info("the db_profile is :{}".format(db_profile))
- db_profile["state"] = content["state"]
- db_profile["resourceState"] = content["resourceState"]
- db_profile["operatingState"] = content["operatingState"]
+ db_profile["state"] = db_cluster["state"]
+ db_profile["resourceState"] = db_cluster["resourceState"]
+ db_profile["operatingState"] = db_cluster["operatingState"]
db_profile = self.update_operation_history(
db_profile, workflow_status, resource_status
)
self.logger.info("the db_profile is :{}".format(db_profile))
self.db.set_one(db_collection, {"_id": db_profile["_id"]}, db_profile)
- async def delete(self, content, order_id):
+ async def delete(self, op_id, op_params, content):
self.logger.info("cluster delete Enter")
- items = self.db.get_one("clusters", {"_id": content["_id"]})
+ db_cluster = content["cluster"]
- workflow_name = self.odu.launch_workflow("delete_cluster", content)
+ workflow_name = await self.odu.launch_workflow(
+ "delete_cluster", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
)
)
if workflow_status:
- items["state"] = "DELETED"
- items["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ db_cluster["state"] = "DELETED"
+ db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
else:
- items["state"] = "FAILED_DELETION"
- items["resourceState"] = "ERROR"
+ db_cluster["state"] = "FAILED_DELETION"
+ db_cluster["resourceState"] = "ERROR"
# update_operation_history has to be called; it returns the updated content
- items = self.update_operation_history(items, workflow_status, None)
- self.db.set_one("clusters", {"_id": content["_id"]}, items)
+ db_cluster = self.update_operation_history(db_cluster, workflow_status, None)
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "delete_cluster", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "delete_cluster", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
)
)
if resource_status:
- items["resourceState"] = "READY"
+ db_cluster["resourceState"] = "READY"
else:
- items["resourceState"] = "ERROR"
+ db_cluster["resourceState"] = "ERROR"
- items["operatingState"] = "IDLE"
- items = self.update_operation_history(items, workflow_status, resource_status)
- self.db.set_one("clusters", {"_id": content["_id"]}, items)
+ db_cluster["operatingState"] = "IDLE"
+ db_cluster = self.update_operation_history(
+ db_cluster, workflow_status, resource_status
+ )
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
- # To delete it from dB
- if items["state"] == "DELETED":
- self.delete_cluster(content, order_id)
+ # To delete it from DB
+ if db_cluster["state"] == "DELETED":
+ self.delete_cluster(db_cluster)
return
- def delete_cluster(self, content, order_id):
- item_content = self.db.get_one("clusters", {"_id": content["_id"]})
- self.logger.info("1_the item_content is : {}".format(item_content))
+ def delete_cluster(self, db_cluster):
+ # item_content would be identical to db_cluster, so the extra DB read is no longer needed
+ # item_content = self.db.get_one("clusters", {"_id": db_cluster["_id"]})
+ # self.logger.debug("item_content is : {}".format(item_content))
- self.logger.info("it is getting into if item_content state")
# detach profiles
update_dict = None
profiles_to_detach = [
"resource_profiles": "k8sresource",
}
for profile_type in profiles_to_detach:
- if item_content.get(profile_type):
+ if db_cluster.get(profile_type):
self.logger.info("the profile_type is :{}".format(profile_type))
- profile_ids = item_content[profile_type]
+ profile_ids = db_cluster[profile_type]
self.logger.info("the profile_ids is :{}".format(profile_ids))
profile_ids_copy = deepcopy(profile_ids)
self.logger.info("the profile_ids_copy is :{}".format(profile_ids_copy))
db_profile = self.db.get_one(db_collection, {"_id": profile_id})
self.logger.info("the db_profile is :{}".format(db_profile))
self.logger.info(
- "the item_content name is :{}".format(item_content["name"])
+ "the item_content name is :{}".format(db_cluster["name"])
)
self.logger.info(
"the db_profile name is :{}".format(db_profile["name"])
)
- if item_content["name"] == db_profile["name"]:
+ if db_cluster["name"] == db_profile["name"]:
self.logger.info("it is getting into if default")
self.db.del_one(db_collection, {"_id": profile_id})
else:
update_dict = {profile_type: profile_ids}
self.logger.info(f"the update dict is :{update_dict}")
self.db.set_one(
- "clusters", {"_id": content["_id"]}, update_dict
+ "clusters", {"_id": db_cluster["_id"]}, update_dict
)
- self.db.del_one("clusters", {"_id": item_content["_id"]})
- self.logger.info("the id is :{}".format(content["_id"]))
+ self.db.del_one("clusters", {"_id": db_cluster["_id"]})
+ self.logger.info("the id is :{}".format(db_cluster["_id"]))
- async def add(self, content, order_id):
+ async def attach_profile(self, op_id, op_params, content):
self.logger.info("profile attach Enter")
- db_cluster = self.db.get_one("clusters", {"_id": content["_id"]})
- profile_type = content["profile_type"]
+ db_cluster = content["cluster"]
+ db_profile = content["profile"]
+ profile_type = db_profile["profile_type"]
+ profile_id = db_profile["_id"]
self.logger.info("profile type is : {}".format(profile_type))
- profile_id = content["profile_id"]
self.logger.info("profile id is : {}".format(profile_id))
- workflow_name = self.odu.launch_workflow("attach_profile_to_cluster", content)
+ workflow_name = await self.odu.launch_workflow(
+ "attach_profile_to_cluster", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "attach_profile_to_cluster", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "attach_profile_to_cluster", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
db_cluster = self.update_operation_history(
db_cluster, workflow_status, resource_status
)
- profiles_collection = {
- "infra_controller_profiles": "k8sinfra_controller",
- "infra_config_profiles": "k8sinfra_config",
- "app_profiles": "k8sapp",
- "resource_profiles": "k8sresource",
- }
- db_collection = profiles_collection[profile_type]
- self.logger.info("db_collection is : {}".format(db_collection))
profile_list = db_cluster[profile_type]
self.logger.info("profile list is : {}".format(profile_list))
if resource_status:
return
- async def remove(self, content, order_id):
+ async def detach_profile(self, op_id, op_params, content):
self.logger.info("profile dettach Enter")
- db_cluster = self.db.get_one("clusters", {"_id": content["_id"]})
- profile_type = content["profile_type"]
+ db_cluster = content["cluster"]
+ db_profile = content["profile"]
+ profile_type = db_profile["profile_type"]
+ profile_id = db_profile["_id"]
self.logger.info("profile type is : {}".format(profile_type))
- profile_id = content["profile_id"]
self.logger.info("profile id is : {}".format(profile_id))
- workflow_name = self.odu.launch_workflow("detach_profile_from_cluster", content)
+ workflow_name = await self.odu.launch_workflow(
+ "detach_profile_from_cluster", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "detach_profile_from_cluster", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "detach_profile_from_cluster", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
db_cluster = self.update_operation_history(
db_cluster, workflow_status, resource_status
)
- profiles_collection = {
- "infra_controller_profiles": "k8sinfra_controller",
- "infra_config_profiles": "k8sinfra_config",
- "app_profiles": "k8sapp",
- "resource_profiles": "k8sresource",
- }
- db_collection = profiles_collection[profile_type]
- self.logger.info("db_collection is : {}".format(db_collection))
profile_list = db_cluster[profile_type]
self.logger.info("profile list is : {}".format(profile_list))
if resource_status:
return
- async def register(self, content, order_id):
+ async def register(self, op_id, op_params, content):
self.logger.info("cluster register enter")
+ db_cluster = content["cluster"]
- workflow_name = self.odu.launch_workflow("register_cluster", content)
+ workflow_name = await self.odu.launch_workflow(
+ "register_cluster", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
)
)
if workflow_status:
- content["state"] = "CREATED"
- content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ db_cluster["state"] = "CREATED"
+ db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
else:
- content["state"] = "FAILED_CREATION"
- content["resourceState"] = "ERROR"
+ db_cluster["state"] = "FAILED_CREATION"
+ db_cluster["resourceState"] = "ERROR"
# update_operation_history has to be called; it returns the updated content
- content = self.update_operation_history(content, workflow_status, None)
- self.db.set_one("clusters", {"_id": content["_id"]}, content)
+ db_cluster = self.update_operation_history(db_cluster, workflow_status, None)
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "register_cluster", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "register_cluster", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
)
)
if resource_status:
- content["resourceState"] = "READY"
+ db_cluster["resourceState"] = "READY"
else:
- content["resourceState"] = "ERROR"
+ db_cluster["resourceState"] = "ERROR"
- content["operatingState"] = "IDLE"
- content = self.update_operation_history(
- content, workflow_status, resource_status
+ db_cluster["operatingState"] = "IDLE"
+ db_cluster = self.update_operation_history(
+ db_cluster, workflow_status, resource_status
)
- self.db.set_one("clusters", {"_id": content["_id"]}, content)
- self.profile_state(content, workflow_status, resource_status)
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
+ self.update_profile_state(db_cluster, workflow_status, resource_status)
return
- async def deregister(self, content, order_id):
+ async def deregister(self, op_id, op_params, content):
self.logger.info("cluster deregister enter")
+ db_cluster = content["cluster"]
- items = self.db.get_one("clusters", {"_id": content["_id"]})
- self.logger.info("the items is : {}".format(items))
+ self.logger.info("db_cluster is : {}".format(db_cluster))
- workflow_name = self.odu.launch_workflow("deregister_cluster", content)
+ workflow_name = await self.odu.launch_workflow(
+ "deregister_cluster", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
)
)
if workflow_status:
- items["state"] = "DELETED"
- items["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ db_cluster["state"] = "DELETED"
+ db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
else:
- items["state"] = "FAILED_DELETION"
- items["resourceState"] = "ERROR"
+ db_cluster["state"] = "FAILED_DELETION"
+ db_cluster["resourceState"] = "ERROR"
# update_operation_history has to be called; it returns the updated content
- items = self.update_operation_history(items, workflow_status, None)
- self.db.set_one("clusters", {"_id": content["_id"]}, items)
+ db_cluster = self.update_operation_history(db_cluster, workflow_status, None)
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "deregister_cluster", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "deregister_cluster", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
)
)
if resource_status:
- items["resourceState"] = "READY"
+ db_cluster["resourceState"] = "READY"
else:
- items["resourceState"] = "ERROR"
+ db_cluster["resourceState"] = "ERROR"
- items["operatingState"] = "IDLE"
- items = self.update_operation_history(items, workflow_status, resource_status)
- self.db.set_one("clusters", {"_id": content["_id"]}, items)
+ db_cluster["operatingState"] = "IDLE"
+ db_cluster = self.update_operation_history(
+ db_cluster, workflow_status, resource_status
+ )
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
- # To delete it from dB
- if items["state"] == "DELETED":
- self.db.del_one("clusters", {"_id": items["_id"]})
+ # To delete it from DB
+ if db_cluster["state"] == "DELETED":
+ self.db.del_one("clusters", {"_id": db_cluster["_id"]})
return
- async def get_creds(self, content, order_id):
- # self.logger.info("Cluster get creds Enter")
- # self.logger.info("Content: {} order_id: {}".format(content, order_id))
- db_content = self.db.get_one(self.db_topic, content)
- # self.logger.info("Content: {}".format(db_content))
-
- odu_workflow = self.odu.launch_workflow("get_creds_cluster", db_content)
- workflow_status, workflow_msg = self.odu.check_workflow_status(odu_workflow)
- # self.logger.info(
- # "Workflow Status: {} Workflow Message: {}".format(
- # workflow_status, workflow_msg
- # )
- # )
+ async def get_creds(self, db_cluster):
+ self.logger.info("Cluster get creds Enter")
+ result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
+ if result:
+ db_cluster["credentials"] = cluster_creds
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
return
- async def update(self, content, order_id):
- # self.logger.info("Cluster update Enter")
- _id = content["_id"]
- # self.logger.info("Content: {} order_id: {}".format(content, order_id))
- # self.logger.info("Cluster ID: {}".format(_id))
- db_content = self.db.get_one(self.db_topic, {"_id": _id})
- # self.logger.info("Content: {}".format(db_content))
-
- odu_workflow = self.odu.launch_workflow("update_cluster", db_content)
- workflow_status, workflow_msg = self.odu.check_workflow_status(odu_workflow)
- # self.logger.info(
- # "Workflow Status: {} Workflow Message: {}".format(
- # workflow_status, workflow_msg
- # )
- # )
+ async def update(self, op_id, op_params, content):
+ self.logger.info("Cluster update Enter")
+ db_cluster = content["cluster"]
+
+ workflow_name = await self.odu.launch_workflow(
+ "update_cluster", op_id, op_params, content
+ )
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
+ self.logger.info(
+ "Workflow Status: {} Workflow Message: {}".format(
+ workflow_status, workflow_msg
+ )
+ )
if workflow_status:
- db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
else:
- db_content["resourceState"] = "ERROR"
+ db_cluster["resourceState"] = "ERROR"
- db_content = self.update_operation_history(db_content, workflow_status, None)
+ db_cluster = self.update_operation_history(db_cluster, workflow_status, None)
# self.logger.info("Db content: {}".format(db_content))
- self.db.set_one(self.db_topic, {"_id": _id}, db_content)
+ # self.db.set_one(self.db_collection, {"_id": _id}, db_cluster)
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "update_cluster", db_content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "update_cluster", op_id, op_params, content
+ )
+ self.logger.info(
+ "Resource Status: {} Resource Message: {}".format(
+ resource_status, resource_msg
+ )
)
- # self.logger.info(
- # "Resource Status: {} Resource Message: {}".format(
- # resource_status, resource_msg
- # )
- # )
if resource_status:
- db_content["resourceState"] = "READY"
+ db_cluster["resourceState"] = "READY"
else:
- db_content["resourceState"] = "ERROR"
-
- db_content = self.update_operation_history(
- db_content, workflow_status, resource_status
- )
+ db_cluster["resourceState"] = "ERROR"
- db_content["operatingState"] = "IDLE"
- if db_content["operatingState"] == "IDLE":
- # self.logger.info("Content: {}".format(db_content))
- if "k8s_version" in content:
- db_content["k8s_version"] = content["k8s_version"]
- elif "node_count" in content:
- db_content["node_count"] = content["node_count"]
- self.db.set_one(self.db_topic, {"_id": _id}, db_content)
+ db_cluster["operatingState"] = "IDLE"
+ db_cluster = self.update_operation_history(
+ db_cluster, workflow_status, resource_status
+ )
+ # self.logger.info("db_cluster: {}".format(db_cluster))
+ # TODO: verify the exit condition
+ # For the moment, if the workflow completed successfully, then we update the db accordingly.
+ if workflow_status:
+ if "k8s_version" in op_params:
+ db_cluster["k8s_version"] = op_params["k8s_version"]
+ elif "node_count" in op_params:
+ db_cluster["node_count"] = op_params["node_count"]
+ # self.db.set_one(self.db_collection, {"_id": _id}, db_content)
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
return
super().__init__(msg, self.logger)
- async def add(self, content, order_id):
+ async def add(self, op_id, op_params, content):
self.logger.info("Cloud Credentials create")
workflow_name = await self.odu.launch_workflow(
- "create_cloud_credentials", content
+ "create_cloud_credentials", op_id, op_params, content
)
workflow_status, workflow_msg = await self.odu.check_workflow_status(
if workflow_status:
resource_status, resource_msg = await self.odu.check_resource_status(
- "create_cloud_credentials", content
+ "create_cloud_credentials", op_id, op_params, content
)
self.logger.info(
"Resource Status: {} Resource Message: {}".format(
)
return
- async def edit(self, content, order_id):
+ async def edit(self, op_id, op_params, content):
workflow_name = await self.odu.launch_workflow(
- "update_cloud_credentials", content
+ "update_cloud_credentials", op_id, op_params, content
)
workflow_status, workflow_msg = await self.odu.check_workflow_status(
workflow_name
if workflow_status:
resource_status, resource_msg = await self.odu.check_resource_status(
- "update_cloud_credentials", content
+ "update_cloud_credentials", op_id, op_params, content
)
self.logger.info(
"Resource Status: {} Resource Message: {}".format(
)
return
- async def remove(self, content, order_id):
+ async def remove(self, op_id, op_params, content):
self.logger.info("Cloud Credentials delete")
workflow_name = await self.odu.launch_workflow(
- "delete_cloud_credentials", content
+ "delete_cloud_credentials", op_id, op_params, content
)
workflow_status, workflow_msg = await self.odu.check_workflow_status(
workflow_name
if workflow_status:
resource_status, resource_msg = await self.odu.check_resource_status(
- "delete_cloud_credentials", content
+ "delete_cloud_credentials", op_id, op_params, content
)
self.logger.info(
"Resource Status: {} Resource Message: {}".format(
super().__init__(msg, self.logger)
- async def create(self, content, order_id):
+ async def create(self, op_id, op_params, content):
self.logger.info("App Create Enter")
- workflow_name = self.odu.launch_workflow("create_profile", content)
+ workflow_name = await self.odu.launch_workflow(
+ "create_profile", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
self.db.set_one("k8sapp", {"_id": content["_id"]}, content)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "create_profile", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "create_profile", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
return
- async def delete(self, content, order_id):
+ async def delete(self, op_id, op_params, content):
self.logger.info("App delete Enter")
- items = self.db.get_one("k8sapp", {"_id": content["_id"]})
- workflow_name = self.odu.launch_workflow("delete_profile", content)
+ workflow_name = await self.odu.launch_workflow(
+ "delete_profile", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
)
)
if workflow_status:
- items["state"] = "DELETED"
- items["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ content["state"] = "DELETED"
+ content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
else:
- items["state"] = "FAILED_DELETION"
- items["resourceState"] = "ERROR"
+ content["state"] = "FAILED_DELETION"
+ content["resourceState"] = "ERROR"
# update_operation_history has to be called; it returns the updated content
- items = self.update_operation_history(items, workflow_status, None)
+ content = self.update_operation_history(content, workflow_status, None)
self.db.set_one("k8sapp", {"_id": content["_id"]}, content)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "delete_profile", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "delete_profile", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
)
)
if resource_status:
- items["resourceState"] = "READY"
+ content["resourceState"] = "READY"
else:
- items["resourceState"] = "ERROR"
+ content["resourceState"] = "ERROR"
- items["operatingState"] = "IDLE"
- items = self.update_operation_history(items, workflow_status, resource_status)
- self.db.set_one("k8sapp", {"_id": content["_id"]}, items)
+ content["operatingState"] = "IDLE"
+ content = self.update_operation_history(
+ content, workflow_status, resource_status
+ )
+ self.db.set_one("k8sapp", {"_id": content["_id"]}, content)
- # To delete it from dB
- if items["state"] == "DELETED":
+ # To delete it from DB
+ if content["state"] == "DELETED":
self.db.del_one("k8sapp", {"_id": content["_id"]})
return
super().__init__(msg, self.logger)
- async def create(self, content, order_id):
+ async def create(self, op_id, op_params, content):
self.logger.info("Resource Create Enter")
- workflow_name = self.odu.launch_workflow("create_profile", content)
+ workflow_name = await self.odu.launch_workflow(
+ "create_profile", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
self.db.set_one("k8sresource", {"_id": content["_id"]}, content)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "create_profile", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "create_profile", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
return
- async def delete(self, content, order_id):
+ async def delete(self, op_id, op_params, content):
self.logger.info("Resource delete Enter")
- items = self.db.get_one("k8sresource", {"_id": content["_id"]})
+ content = self.db.get_one("k8sresource", {"_id": content["_id"]})
- workflow_name = self.odu.launch_workflow("delete_profile", content)
+ workflow_name = await self.odu.launch_workflow(
+ "delete_profile", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
)
)
if workflow_status:
- items["state"] = "DELETED"
- items["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ content["state"] = "DELETED"
+ content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
else:
- items["state"] = "FAILED_DELETION"
- items["resourceState"] = "ERROR"
+ content["state"] = "FAILED_DELETION"
+ content["resourceState"] = "ERROR"
# update_operation_history has to be called; it returns the updated content
- items = self.update_operation_history(items, workflow_status, None)
- self.db.set_one("k8sresource", {"_id": content["_id"]}, items)
+ content = self.update_operation_history(content, workflow_status, None)
+ self.db.set_one("k8sresource", {"_id": content["_id"]}, content)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "delete_profile", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "delete_profile", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
)
)
if resource_status:
- items["resourceState"] = "READY"
+ content["resourceState"] = "READY"
else:
- items["resourceState"] = "ERROR"
+ content["resourceState"] = "ERROR"
- items["operatingState"] = "IDLE"
- items = self.update_operation_history(items, workflow_status, resource_status)
- self.db.set_one("k8sresource", {"_id": content["_id"]}, items)
+ content["operatingState"] = "IDLE"
+ content = self.update_operation_history(
+ content, workflow_status, resource_status
+ )
+ self.db.set_one("k8sresource", {"_id": content["_id"]}, content)
- # To delete it from dB
- if items["state"] == "DELETED":
+ # To delete it from DB
+ if content["state"] == "DELETED":
self.db.del_one("k8sresource", {"_id": content["_id"]})
return
super().__init__(msg, self.logger)
- async def create(self, content, order_id):
+ async def create(self, op_id, op_params, content):
self.logger.info("Infra controller Create Enter")
- workflow_name = self.odu.launch_workflow("create_profile", content)
+ workflow_name = await self.odu.launch_workflow(
+ "create_profile", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, content)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "create_profile", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "create_profile", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
return
- async def delete(self, content, order_id):
+ async def delete(self, op_id, op_params, content):
self.logger.info("Infra controller delete Enter")
- items = self.db.get_one("k8sinfra_controller", {"_id": content["_id"]})
- workflow_name = self.odu.launch_workflow("delete_profile", content)
+ workflow_name = await self.odu.launch_workflow(
+ "delete_profile", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
)
)
if workflow_status:
- items["state"] = "DELETED"
- items["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ content["state"] = "DELETED"
+ content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
else:
- items["state"] = "FAILED_DELETION"
- items["resourceState"] = "ERROR"
+ content["state"] = "FAILED_DELETION"
+ content["resourceState"] = "ERROR"
# update_operation_history has to be called; it returns the updated content
- items = self.update_operation_history(items, workflow_status, None)
- self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, items)
+ content = self.update_operation_history(content, workflow_status, None)
+ self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, content)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "delete_profile", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "delete_profile", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
)
)
if resource_status:
- items["resourceState"] = "READY"
+ content["resourceState"] = "READY"
else:
- items["resourceState"] = "ERROR"
+ content["resourceState"] = "ERROR"
- items["operatingState"] = "IDLE"
- items = self.update_operation_history(items, workflow_status, resource_status)
- self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, items)
+ content["operatingState"] = "IDLE"
+ content = self.update_operation_history(
+ content, workflow_status, resource_status
+ )
+ self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, content)
- # To delete it from dB
- if items["state"] == "DELETED":
+ # To delete it from DB
+ if content["state"] == "DELETED":
self.db.del_one("k8sinfra_controller", {"_id": content["_id"]})
return
super().__init__(msg, self.logger)
- async def create(self, content, order_id):
+ async def create(self, op_id, op_params, content):
self.logger.info("Infra config Create Enter")
- workflow_name = self.odu.launch_workflow("create_profile", content)
+ workflow_name = await self.odu.launch_workflow(
+ "create_profile", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, content)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "create_profile", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "create_profile", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
return
- async def delete(self, content, order_id):
+ async def delete(self, op_id, op_params, content):
self.logger.info("Infra config delete Enter")
- workflow_name = self.odu.launch_workflow("delete_profile", content)
+ workflow_name = await self.odu.launch_workflow(
+ "delete_profile", op_id, op_params, content
+ )
self.logger.info("workflow_name is :{}".format(workflow_name))
- items = self.db.get_one("k8sinfra_config", {"_id": content["_id"]})
- workflow_status, workflow_msg = self.odu.check_workflow_status(workflow_name)
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
self.logger.info(
"workflow_status is :{} and workflow_msg is :{}".format(
workflow_status, workflow_msg
)
)
if workflow_status:
- items["state"] = "DELETED"
- items["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ content["state"] = "DELETED"
+ content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
else:
- items["state"] = "FAILED_DELETION"
- items["resourceState"] = "ERROR"
+ content["state"] = "FAILED_DELETION"
+ content["resourceState"] = "ERROR"
# update_operation_history has to be called; it returns the updated content
- items = self.update_operation_history(items, workflow_status, None)
- self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, items)
+ content = self.update_operation_history(content, workflow_status, None)
+ self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, content)
- resource_status, resource_msg = self.odu.check_resource_status(
- "delete_profile", content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "delete_profile", op_id, op_params, content
)
self.logger.info(
"resource_status is :{} and resource_msg is :{}".format(
)
)
if resource_status:
- items["resourceState"] = "READY"
+ content["resourceState"] = "READY"
else:
- items["resourceState"] = "ERROR"
+ content["resourceState"] = "ERROR"
- items["operatingState"] = "IDLE"
- items = self.update_operation_history(items, workflow_status, resource_status)
- self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, items)
+ content["operatingState"] = "IDLE"
+ content = self.update_operation_history(
+ content, workflow_status, resource_status
+ )
+ self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, content)
- # To delete it from dB
- if items["state"] == "DELETED":
+ # To delete it from DB
+ if content["state"] == "DELETED":
self.db.del_one("k8sinfra_config", {"_id": content["_id"]})
return
super().__init__(msg, self.logger)
- async def create(self, content, order_id):
- # self.logger.info("OKA Create Enter")
- # self.logger.info("Content: {}".format(content))
-
- db_content = self.db.get_one(self.db_collection, content)
- # self.logger.info("Content: {}".format(db_content))
+ async def create(self, op_id, op_params, content):
+ self.logger.info("OKA Create Enter")
+ db_content = content
- odu_workflow = self.odu.launch_workflow("create_oka", db_content)
- # self.logger.info("ODU workflow: {}".format(odu_workflow))
- workflow_status, workflow_msg = self.odu.check_workflow_status(odu_workflow)
- # self.logger.info(
- # "Workflow Status: {} Workflow Message: {}".format(
- # workflow_status, workflow_msg
- # )
- # )
+ workflow_name = await self.odu.launch_workflow(
+ "create_oka", op_id, op_params, db_content
+ )
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
+ self.logger.info(
+ "Workflow Status: {} Workflow Message: {}".format(
+ workflow_status, workflow_msg
+ )
+ )
if workflow_status:
db_content["state"] = "CREATED"
db_content["resourceState"] = "ERROR"
db_content = self.update_operation_history(db_content, workflow_status, None)
- # self.logger.info("Db content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "create_oka", db_content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "create_oka", op_id, op_params, db_content
+ )
+ self.logger.info(
+ "Resource Status: {} Resource Message: {}".format(
+ resource_status, resource_msg
+ )
)
- # self.logger.info(
- # "Resource Status: {} Resource Message: {}".format(
- # resource_status, resource_msg
- # )
- # )
if resource_status:
db_content["resourceState"] = "READY"
)
db_content["operatingState"] = "IDLE"
- # self.logger.info("Content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
return
- async def edit(self, content, order_id):
- # self.logger.info("OKA Edit Enter")
- db_content = self.db.get_one(self.db_collection, content)
- # self.logger.info("Content: {}".format(db_content))
+ async def edit(self, op_id, op_params, content):
+ self.logger.info("OKA Edit Enter")
+ db_content = content
- odu_workflow = self.odu.launch_workflow("update_oka", db_content)
- workflow_status, workflow_msg = self.odu.check_workflow_status(odu_workflow)
- # self.logger.info(
- # "Workflow Status: {} Workflow Message: {}".format(
- # workflow_status, workflow_msg
- # )
- # )
+ workflow_name = await self.odu.launch_workflow(
+ "update_oka", op_id, op_params, content
+ )
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
+ self.logger.info(
+ "Workflow Status: {} Workflow Message: {}".format(
+ workflow_status, workflow_msg
+ )
+ )
if workflow_status:
db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
db_content = self.update_operation_history(db_content, workflow_status, None)
# self.logger.info("Db content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "update_oka", db_content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "update_oka", op_id, op_params, db_content
+ )
+ self.logger.info(
+ "Resource Status: {} Resource Message: {}".format(
+ resource_status, resource_msg
+ )
)
- # self.logger.info(
- # "Resource Status: {} Resource Message: {}".format(
- # resource_status, resource_msg
- # )
- # )
if resource_status:
db_content["resourceState"] = "READY"
)
db_content["operatingState"] = "IDLE"
- # self.logger.info("Content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
return
- async def delete(self, content, order_id):
- # self.logger.info("OKA delete Enter")
-
- db_content = self.db.get_one(self.db_collection, content)
- # self.logger.info("Content: {}".format(db_content))
+ async def delete(self, op_id, op_params, content):
+ self.logger.info("OKA delete Enter")
+ db_content = content
- odu_workflow = self.odu.launch_workflow("delete_oka", db_content)
- workflow_status, workflow_msg = self.odu.check_workflow_status(odu_workflow)
- # self.logger.info(
- # "Workflow Status: {} Workflow Message: {}".format(
- # workflow_status, workflow_msg
- # )
- # )
+ workflow_name = await self.odu.launch_workflow(
+ "delete_oka", op_id, op_params, content
+ )
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
+ self.logger.info(
+ "Workflow Status: {} Workflow Message: {}".format(
+ workflow_status, workflow_msg
+ )
+ )
if workflow_status:
db_content["state"] = "DELETED"
db_content["resourceState"] = "ERROR"
db_content = self.update_operation_history(db_content, workflow_status, None)
- # self.logger.info("Db content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "delete_oka", db_content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "delete_oka", op_id, op_params, db_content
+ )
+ self.logger.info(
+ "Resource Status: {} Resource Message: {}".format(
+ resource_status, resource_msg
+ )
)
- # self.logger.info(
- # "Resource Status: {} Resource Message: {}".format(
- # resource_status, resource_msg
- # )
- # )
if resource_status:
db_content["resourceState"] = "READY"
)
db_content["operatingState"] = "IDLE"
- # self.logger.info("Content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
if db_content["state"] == "DELETED":
- self.db.del_one(self.db_collection, content)
+ self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
return
super().__init__(msg, self.logger)
- async def create(self, content, order_id):
- # self.logger.info("ksu Create Enter")
- # self.logger.info("Content: {}".format(content))
-
- db_content = self.db.get_one(self.db_collection, content)
- # self.logger.info("Content: {}".format(db_content))
+ async def create(self, op_id, op_params, content):
+ self.logger.info("ksu Create Enter")
- odu_workflow = self.odu.launch_workflow("create_ksus", db_content)
- workflow_status, workflow_msg = self.odu.check_workflow_status(odu_workflow)
- # self.logger.info(
- # "Workflow Status: {} Workflow Message: {}".format(
- # workflow_status, workflow_msg
- # )
- # )
+ workflow_name = await self.odu.launch_workflow(
+ "create_ksus", op_id, op_params, content
+ )
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
+ self.logger.info(
+ "Workflow Status: {} Workflow Message: {}".format(
+ workflow_status, workflow_msg
+ )
+ )
- if workflow_status:
- db_content["state"] = "CREATED"
- db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
- else:
- db_content["state"] = "FAILED_CREATION"
- db_content["resourceState"] = "ERROR"
+ for db_ksu in content:
+ if workflow_status:
+ db_ksu["state"] = "CREATED"
+ db_ksu["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ else:
+ db_ksu["state"] = "FAILED_CREATION"
+ db_ksu["resourceState"] = "ERROR"
- db_content = self.update_operation_history(db_content, workflow_status, None)
- # self.logger.info("Db content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ db_ksu = self.update_operation_history(db_ksu, workflow_status, None)
+ self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "create_ksus", db_content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "create_ksus", op_id, op_params, content
+ )
+ self.logger.info(
+ "Resource Status: {} Resource Message: {}".format(
+ resource_status, resource_msg
+ )
)
- # self.logger.info(
- # "Resource Status: {} Resource Message: {}".format(
- # resource_status, resource_msg
- # )
- # )
- if resource_status:
- db_content["resourceState"] = "READY"
- else:
- db_content["resourceState"] = "ERROR"
+ for db_ksu in content:
+ if resource_status:
+ db_ksu["resourceState"] = "READY"
+ else:
+ db_ksu["resourceState"] = "ERROR"
- db_content = self.update_operation_history(
- db_content, workflow_status, resource_status
- )
+ db_ksu = self.update_operation_history(
+ db_ksu, workflow_status, resource_status
+ )
- db_content["operatingState"] = "IDLE"
- # self.logger.info("Content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ for db_ksu in content:
+ db_ksu["operatingState"] = "IDLE"
+ self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
return
- async def edit(self, content, order_id):
- # self.logger.info("ksu edit Enter")
+ async def edit(self, op_id, op_params, content):
+ self.logger.info("ksu edit Enter")
- db_content = self.db.get_one(self.db_collection, {"_id": content["_id"]})
- # self.logger.info("Content: {}".format(db_content))
-
- odu_workflow = self.odu.launch_workflow("update_ksus", db_content)
- workflow_status, workflow_msg = self.odu.check_workflow_status(odu_workflow)
- # self.logger.info(
- # "Workflow Status: {} Workflow Message: {}".format(
- # workflow_status, workflow_msg
- # )
- # )
+ workflow_name = await self.odu.launch_workflow(
+ "update_ksus", op_id, op_params, content
+ )
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
+ self.logger.info(
+ "Workflow Status: {} Workflow Message: {}".format(
+ workflow_status, workflow_msg
+ )
+ )
- if workflow_status:
- db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
- else:
- db_content["resourceState"] = "ERROR"
+ for db_ksu in content:
+ if workflow_status:
+ db_ksu["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ else:
+ db_ksu["resourceState"] = "ERROR"
- db_content = self.update_operation_history(db_content, workflow_status, None)
- # self.logger.info("Db content: {}".format(db_content))
- self.db.set_one(self.db_collection, {"_id": content["_id"]}, db_content)
+ db_ksu = self.update_operation_history(db_ksu, workflow_status, None)
+ self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "update_ksus", db_content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "update_ksus", op_id, op_params, content
+ )
+ self.logger.info(
+ "Resource Status: {} Resource Message: {}".format(
+ resource_status, resource_msg
+ )
)
- # self.logger.info(
- # "Resource Status: {} Resource Message: {}".format(
- # resource_status, resource_msg
- # )
- # )
- if resource_status:
- db_content["resourceState"] = "READY"
- else:
- db_content["resourceState"] = "ERROR"
+ for db_ksu in content:
+ if resource_status:
+ db_ksu["resourceState"] = "READY"
+ else:
+ db_ksu["resourceState"] = "ERROR"
- db_content = self.update_operation_history(
- db_content, workflow_status, resource_status
- )
+ db_ksu = self.update_operation_history(
+ db_ksu, workflow_status, resource_status
+ )
- db_content["operatingState"] = "IDLE"
- # self.logger.info("Content: {}".format(db_content))
- self.db.set_one(self.db_collection, {"_id": content["_id"]}, db_content)
- return
+ for db_ksu, ksu_params in zip(content, op_params):
+ db_ksu["operatingState"] = "IDLE"
+ if workflow_status:
+ db_ksu["name"] = ksu_params["name"]
+ db_ksu["description"] = ksu_params["description"]
+ db_ksu["profile"]["profile_type"] = ksu_params["profile"][
+ "profile_type"
+ ]
+ db_ksu["profile"]["_id"] = ksu_params["profile"]["_id"]
+ db_ksu["oka"] = ksu_params["oka"]
+ self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
- async def delete(self, content, order_id):
- # self.logger.info("ksu delete Enter")
+ return
- db_content = self.db.get_one(self.db_collection, content)
- # self.logger.info("Content: {}".format(db_content))
+ async def delete(self, op_id, op_params, content):
+ self.logger.info("ksu delete Enter")
- odu_workflow = self.odu.launch_workflow("delete_ksus", db_content)
- workflow_status, workflow_msg = self.odu.check_workflow_status(odu_workflow)
- # self.logger.info(
- # "Workflow Status: {} Workflow Message: {}".format(
- # workflow_status, workflow_msg
- # )
- # )
+ workflow_name = await self.odu.launch_workflow(
+ "delete_ksus", op_id, op_params, content
+ )
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
+ self.logger.info(
+ "Workflow Status: {} Workflow Message: {}".format(
+ workflow_status, workflow_msg
+ )
+ )
- if workflow_status:
- db_content["state"] = "DELETED"
- db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
- else:
- db_content["state"] = "FAILED_DELETION"
- db_content["resourceState"] = "ERROR"
+ for db_ksu in content:
+ if workflow_status:
+ db_ksu["state"] = "DELETED"
+ db_ksu["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
+ else:
+ db_ksu["state"] = "FAILED_DELETION"
+ db_ksu["resourceState"] = "ERROR"
- db_content = self.update_operation_history(db_content, workflow_status, None)
- # self.logger.info("Db content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ db_ksu = self.update_operation_history(db_ksu, workflow_status, None)
+ self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "delete_ksus", db_content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "delete_ksus", op_id, op_params, content
+ )
+ self.logger.info(
+ "Resource Status: {} Resource Message: {}".format(
+ resource_status, resource_msg
+ )
)
- # self.logger.info(
- # "Resource Status: {} Resource Message: {}".format(
- # resource_status, resource_msg
- # )
- # )
- if resource_status:
- db_content["resourceState"] = "READY"
- else:
- db_content["resourceState"] = "ERROR"
+ for db_ksu in content:
+ if resource_status:
+ db_ksu["resourceState"] = "READY"
+ else:
+ db_ksu["resourceState"] = "ERROR"
- db_content = self.update_operation_history(
- db_content, workflow_status, resource_status
- )
+ db_ksu = self.update_operation_history(
+ db_ksu, workflow_status, resource_status
+ )
- db_content["operatingState"] = "IDLE"
- # self.logger.info("Content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ for db_ksu in content:
+ db_ksu["operatingState"] = "IDLE"
+ self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
- if db_content["state"] == "DELETED":
- self.db.del_one(self.db_collection, content)
+ if db_ksu["state"] == "DELETED":
+ self.db.del_one(self.db_collection, {"_id": db_ksu["_id"]})
return
- async def clone(self, content, order_id):
- # self.logger.info("ksu clone Enter")
- db_content = self.db.get_one(self.db_collection, content)
- # self.logger.info("Content: {}".format(db_content))
+ async def clone(self, op_id, op_params, db_content):
+ self.logger.info("ksu clone Enter")
- odu_workflow = self.odu.launch_workflow("clone_ksus", db_content)
- workflow_status, workflow_msg = self.odu.check_workflow_status(odu_workflow)
- # self.logger.info(
- # "Workflow Status: {} Workflow Message: {}".format(
- # workflow_status, workflow_msg
- # )
- # )
+ workflow_name = await self.odu.launch_workflow(
+ "clone_ksus", op_id, op_params, db_content
+ )
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
+ self.logger.info(
+ "Workflow Status: {} Workflow Message: {}".format(
+ workflow_status, workflow_msg
+ )
+ )
if workflow_status:
db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
db_content["resourceState"] = "ERROR"
db_content = self.update_operation_history(db_content, workflow_status, None)
- # self.logger.info("Db content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "clone_ksus", db_content
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "clone_ksus", op_id, op_params, db_content
+ )
+ self.logger.info(
+ "Resource Status: {} Resource Message: {}".format(
+ resource_status, resource_msg
+ )
)
- # self.logger.info(
- # "Resource Status: {} Resource Message: {}".format(
- # resource_status, resource_msg
- # )
- # )
if resource_status:
db_content["resourceState"] = "READY"
)
db_content["operatingState"] = "IDLE"
- # self.logger.info("Content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
return
- async def move(self, content, order_id):
- # self.logger.info("ksu move Enter")
- db_content = self.db.get_one(self.db_collection, content)
- # self.logger.info("Content: {}".format(db_content))
+ async def move(self, op_id, op_params, db_content):
+ self.logger.info("ksu move Enter")
- odu_workflow = self.odu.launch_workflow("move_ksus", db_content)
- workflow_status, workflow_msg = self.odu.check_workflow_status(odu_workflow)
- # self.logger.info(
- # "Workflow Status: {} Workflow Message: {}".format(
- # workflow_status, workflow_msg
- # )
- # )
+ workflow_name = await self.odu.launch_workflow(
+ "move_ksus", op_id, op_params, db_content
+ )
+ workflow_status, workflow_msg = await self.odu.check_workflow_status(
+ workflow_name
+ )
+ self.logger.info(
+ "Workflow Status: {} Workflow Message: {}".format(
+ workflow_status, workflow_msg
+ )
+ )
if workflow_status:
db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
db_content["resourceState"] = "ERROR"
db_content = self.update_operation_history(db_content, workflow_status, None)
- # self.logger.info("Db content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
if workflow_status:
- resource_status, resource_msg = self.odu.check_resource_status(
- "move_ksus", db_content
- )
- # self.logger.info(
- # "Resource Status: {} Resource Message: {}".format(
- # resource_status, resource_msg
- # )
- # )
+ resource_status, resource_msg = await self.odu.check_resource_status(
+ "move_ksus", op_id, op_params, db_content
+ )
+ self.logger.info(
+ "Resource Status: {} Resource Message: {}".format(
+ resource_status, resource_msg
+ )
+ )
if resource_status:
db_content["resourceState"] = "READY"
else:
db_content["resourceState"] = "ERROR"
db_content = self.update_operation_history(
db_content, workflow_status, resource_status
)
db_content["operatingState"] = "IDLE"
- # self.logger.info("Content: {}".format(db_content))
- self.db.set_one(self.db_collection, content, db_content)
+ self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
return
uri: http://prometheus:9090/
# loglevel: DEBUG
# logfile: /var/log/osm/lcm-tsdb.log
+
+gitops:
+ mgmtcluster_kubeconfig: /etc/osm/mgmtcluster-kubeconfig.yaml
+ repo_user: osm-developer
+ # repo_base_url: http://git.<IP_ADDRESS>.nip.io
+ # pubkey: pubkey
+
from osm_lcm.data_utils.database.database import Database
from osm_lcm.data_utils.filesystem.filesystem import Filesystem
from osm_lcm.data_utils.lcm_config import LcmCfg
+from osm_lcm.data_utils.list_utils import find_in_list
from osm_lcm.lcm_hc import get_health_check_file
from os import path, getenv
from n2vc import version as n2vc_version
class Lcm:
+ profile_collection_mapping = {
+ "infra_controller_profiles": "k8sinfra_controller",
+ "infra_config_profiles": "k8sinfra_config",
+ "resource_profiles": "k8sresource",
+ "app_profiles": "k8sapp",
+ }
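+ # e.g. a message carrying profile_type "app_profiles" is resolved against the
+ # "k8sapp" DB collection (used by the cluster add/remove and KSU handlers below).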
+
ping_interval_pace = (
120 # how many time ping is send once is confirmed all is running
)
wait_time = 2 if not first_start else 5
await asyncio.sleep(wait_time)
+ def get_operation_params(self, item, operation_id):
+ operation_history = item.get("operationHistory", [])
+ operation = find_in_list(
+ operation_history, lambda op: op["op_id"] == operation_id
+ )
+ return operation.get("operationParams", {})
+
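+ # Illustrative (assumed) shape of an "operationHistory" entry consumed above:
+ #   {"op_id": "<operation-uuid>", "operationParams": {...}, ...}
+ # get_operation_params() returns the "operationParams" of the matching entry.
+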
async def kafka_read_callback(self, topic, command, params):
order_id = 1
self.logger.info(
return
elif topic == "vim_account":
vim_id = params["_id"]
- op_id = params["_id"]
+ op_id = vim_id
+ op_params = params
db_vim = self.db.get_one("vim_accounts", {"_id": vim_id})
- vim_config = db_vim["config"]
+ vim_config = db_vim.get("config", {})
+ self.logger.debug("Db Vim: {}".format(db_vim))
if command in ("create", "created"):
- self.logger.info("Command : {}".format(command))
+ self.logger.debug("Main config: {}".format(self.main_config.to_dict()))
+ if "credentials" in vim_config:
+ self.logger.info("Vim add cloud credentials")
+ task = asyncio.ensure_future(
+ self.cloud_credentials.add(op_id, op_params, db_vim)
+ )
+ self.lcm_tasks.register(
+ "vim_account", vim_id, op_id, "cloud_credentials_add", task
+ )
if not self.main_config.RO.ng:
- self.logger.info("Vim create")
+ self.logger.info("Calling RO to create VIM (no NG-RO)")
task = asyncio.ensure_future(self.vim.create(params, order_id))
self.lcm_tasks.register(
"vim_account", vim_id, order_id, "vim_create", task
)
- return
- elif "credentials" in vim_config.keys():
- task = asyncio.ensure_future(
- self.cloud_credentials.add(params, order_id)
- )
- self.lcm_tasks.register(
- "vim-account", vim_id, op_id, "cloud_credentials_add", task
- )
- return
+ return
elif command == "delete" or command == "deleted":
- if "credentials" in vim_config.keys():
+ self.lcm_tasks.cancel(topic, vim_id)
+ if "credentials" in vim_config:
+ self.logger.info("Vim remove cloud credentials")
task = asyncio.ensure_future(
- self.cloud_credentials.remove(params, order_id)
+ self.cloud_credentials.remove(op_id, op_params, db_vim)
)
self.lcm_tasks.register(
- "vim-account", vim_id, op_id, "cloud_credentials_remove", task
+ "vim_account", vim_id, op_id, "cloud_credentials_remove", task
)
- return
- else:
- self.lcm_tasks.cancel(topic, vim_id)
- task = asyncio.ensure_future(self.vim.delete(params, order_id))
- self.lcm_tasks.register(
- "vim_account", vim_id, order_id, "vim_delete", task
- )
- return
+ task = asyncio.ensure_future(self.vim.delete(params, order_id))
+ self.lcm_tasks.register(
+ "vim_account", vim_id, order_id, "vim_delete", task
+ )
+ return
elif command == "show":
print("not implemented show with vim_account")
sys.stdout.flush()
return
elif command in ("edit", "edited"):
- if "credentials" in vim_config.keys():
- self.logger.info("Vim Edit")
+ if "credentials" in vim_config:
+ self.logger.info("Vim update cloud credentials")
task = asyncio.ensure_future(
- self.cloud_credentials.edit(params, order_id)
+ self.cloud_credentials.edit(op_id, op_params, db_vim)
)
self.lcm_tasks.register(
"vim_account", vim_id, op_id, "cloud_credentials_update", task
)
- return
if not self.main_config.RO.ng:
task = asyncio.ensure_future(self.vim.edit(params, order_id))
self.lcm_tasks.register(
elif command == "deleted":
return # TODO cleaning of task just in case should be done
elif topic == "cluster":
- cluster_id = params["_id"]
+ if command != "get_creds":
+ op_id = params["operation_id"]
+ cluster_id = params["cluster_id"]
+ db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
+ op_params = self.get_operation_params(db_cluster, op_id)
+ db_content = {
+ "cluster": db_cluster,
+ }
if command == "create" or command == "created":
self.logger.debug("cluster_id = {}".format(cluster_id))
- task = asyncio.ensure_future(self.cluster.create(params, order_id))
+ # db_vim = self.db.get_one("vim_accounts", {"_id": db_cluster["vim_account"]})
+ db_vim = self.db.get_one(
+ "vim_accounts", {"name": db_cluster["vim_account"]}
+ )
+ db_content["vim_account"] = db_vim
+ task = asyncio.ensure_future(
+ self.cluster.create(op_id, op_params, db_content)
+ )
self.lcm_tasks.register(
- "cluster", cluster_id, order_id, "cluster_create", task
+ "cluster", cluster_id, op_id, "cluster_create", task
)
return
elif command == "delete" or command == "deleted":
- task = asyncio.ensure_future(self.cluster.delete(params, order_id))
+ task = asyncio.ensure_future(
+ self.cluster.delete(op_id, op_params, db_content)
+ )
self.lcm_tasks.register(
- "cluster", cluster_id, order_id, "cluster_delete", task
+ "cluster", cluster_id, op_id, "cluster_delete", task
)
return
elif command == "add" or command == "added":
- add_id = params.get("_id")
- task = asyncio.ensure_future(self.cluster.add(params, order_id))
+ profile_type = params["profile_type"]
+ profile_collection = self.profile_collection_mapping[profile_type]
+ db_profile = self.db.get_one(
+ profile_collection, {"_id": params["profile_id"]}
+ )
+ db_profile["profile_type"] = profile_type
+ db_content["profile"] = db_profile
+ task = asyncio.ensure_future(
+ self.cluster.attach_profile(op_id, op_params, db_content)
+ )
self.lcm_tasks.register(
- "cluster", add_id, order_id, "profile_add", task
+ "cluster", cluster_id, op_id, "profile_add", task
)
return
elif command == "remove" or command == "removed":
- remove_id = params.get("_id")
- task = asyncio.ensure_future(self.cluster.remove(params, order_id))
+ profile_type = params["profile_type"]
+ profile_collection = self.profile_collection_mapping[profile_type]
+ db_profile = self.db.get_one(
+ profile_collection, {"_id": params["profile_id"]}
+ )
+ db_profile["profile_type"] = profile_type
+ db_content["profile"] = db_profile
+ task = asyncio.ensure_future(
+ self.cluster.detach_profile(op_id, op_params, db_content)
+ )
self.lcm_tasks.register(
- "cluster", remove_id, order_id, "profile_remove", task
+ "cluster", cluster_id, op_id, "profile_remove", task
)
return
elif command == "register" or command == "registered":
- cluster_id = params.get("_id")
- task = asyncio.ensure_future(self.cluster.register(params, order_id))
+ task = asyncio.ensure_future(
+ self.cluster.register(op_id, op_params, db_content)
+ )
self.lcm_tasks.register(
- "cluster", cluster_id, order_id, "cluster_register", task
+ "cluster", cluster_id, op_id, "cluster_register", task
)
return
elif command == "deregister" or command == "deregistered":
- cluster_id = params.get("_id")
- task = asyncio.ensure_future(self.cluster.deregister(params, order_id))
+ task = asyncio.ensure_future(
+ self.cluster.deregister(op_id, op_params, db_content)
+ )
self.lcm_tasks.register(
- "cluster", cluster_id, order_id, "cluster_deregister", task
+ "cluster", cluster_id, op_id, "cluster_deregister", task
)
return
elif command == "get_creds":
- task = asyncio.ensure_future(self.cluster.get_creds(params, order_id))
- # self.logger.info("task: {}".format(task))
+ cluster_id = params["_id"]
+ db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
+ task = asyncio.ensure_future(self.cluster.get_creds(db_cluster))
self.lcm_tasks.register(
- "k8sclus", cluster_id, order_id, "k8sclus_get_creds", task
+ "cluster", cluster_id, cluster_id, "cluster_get_credentials", task
)
return
elif command == "upgrade" or command == "scale":
- task = asyncio.ensure_future(self.cluster.update(params, order_id))
- # self.logger.info("task: {}".format(task))
- if command == "upgrade":
- self.lcm_tasks.register(
- "k8sclus", cluster_id, order_id, "k8sclus_upgrade", task
- )
+ # db_vim = self.db.get_one("vim_accounts", {"_id": db_cluster["vim_account"]})
+ db_vim = self.db.get_one(
+ "vim_accounts", {"name": db_cluster["vim_account"]}
+ )
+ db_content["vim_account"] = db_vim
+ task = asyncio.ensure_future(
+ self.cluster.update(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register(
+ "cluster", cluster_id, op_id, "cluster_update", task
+ )
return
elif topic == "k8s_app":
+ op_id = params["operation_id"]
+ profile_id = params["profile_id"]
+ db_profile = self.db.get_one("k8sapp", {"_id": profile_id})
+ db_profile["profile_type"] = "applications"
+ op_params = self.get_operation_params(db_profile, op_id)
if command == "profile_create" or command == "profile_created":
- k8s_app_id = params.get("_id")
- self.logger.debug("k8s_app_id = {}".format(k8s_app_id))
- task = asyncio.ensure_future(self.k8s_app.create(params, order_id))
+ self.logger.debug("Create k8s_app_id = {}".format(profile_id))
+ task = asyncio.ensure_future(
+ self.k8s_app.create(op_id, op_params, db_profile)
+ )
self.lcm_tasks.register(
- "k8s_app", k8s_app_id, order_id, "k8s_app_create", task
+ "k8s_app", profile_id, op_id, "k8s_app_create", task
)
return
elif command == "delete" or command == "deleted":
- k8s_app_id = params.get("_id")
- task = asyncio.ensure_future(self.k8s_app.delete(params, order_id))
+ self.logger.debug("Delete k8s_app_id = {}".format(profile_id))
+ task = asyncio.ensure_future(
+ self.k8s_app.delete(op_id, op_params, db_profile)
+ )
self.lcm_tasks.register(
- "k8s_app", k8s_app_id, order_id, "k8s_app_delete", task
+ "k8s_app", profile_id, op_id, "k8s_app_delete", task
)
return
elif topic == "k8s_resource":
+ op_id = params["operation_id"]
+ profile_id = params["profile_id"]
+ db_profile = self.db.get_one("k8sresource", {"_id": profile_id})
+ db_profile["profile_type"] = "managed-resources"
+ op_params = self.get_operation_params(db_profile, op_id)
if command == "profile_create" or command == "profile_created":
- k8s_resource_id = params.get("_id")
- self.logger.debug("k8s_resource_id = {}".format(k8s_resource_id))
- task = asyncio.ensure_future(self.k8s_resource.create(params, order_id))
+ self.logger.debug("Create k8s_resource_id = {}".format(profile_id))
+ task = asyncio.ensure_future(
+ self.k8s_resource.create(op_id, op_params, db_profile)
+ )
self.lcm_tasks.register(
"k8s_resource",
- k8s_resource_id,
- order_id,
+ profile_id,
+ op_id,
"k8s_resource_create",
task,
)
return
elif command == "delete" or command == "deleted":
- k8s_resource_id = params.get("_id")
- task = asyncio.ensure_future(self.k8s_resource.delete(params, order_id))
+ self.logger.debug("Delete k8s_resource_id = {}".format(profile_id))
+ task = asyncio.ensure_future(
+ self.k8s_resource.delete(op_id, op_params, db_profile)
+ )
self.lcm_tasks.register(
"k8s_resource",
- k8s_resource_id,
- order_id,
+ profile_id,
+ op_id,
"k8s_resource_delete",
task,
)
return
elif topic == "k8s_infra_controller":
+ op_id = params["operation_id"]
+ profile_id = params["profile_id"]
+ db_profile = self.db.get_one("k8sinfra_controller", {"_id": profile_id})
+ db_profile["profile_type"] = "infra-controllers"
+ op_params = self.get_operation_params(db_profile, op_id)
if command == "profile_create" or command == "profile_created":
- k8s_infra_controller_id = params.get("_id")
self.logger.debug(
- "k8s_infra_controller_id = {}".format(k8s_infra_controller_id)
+ "Create k8s_infra_controller_id = {}".format(profile_id)
)
task = asyncio.ensure_future(
- self.k8s_infra_controller.create(params, order_id)
+ self.k8s_infra_controller.create(op_id, op_params, db_profile)
)
self.lcm_tasks.register(
"k8s_infra_controller",
- k8s_infra_controller_id,
- order_id,
+ profile_id,
+ op_id,
"k8s_infra_controller_create",
task,
)
return
elif command == "delete" or command == "deleted":
- k8s_infra_controller_id = params.get("_id")
+ self.logger.debug(
+ "Delete k8s_infra_controller_id = {}".format(profile_id)
+ )
task = asyncio.ensure_future(
- self.k8s_infra_controller.delete(params, order_id)
+ self.k8s_infra_controller.delete(op_id, op_params, db_profile)
)
self.lcm_tasks.register(
"k8s_infra_controller",
- k8s_infra_controller_id,
- order_id,
+ profile_id,
+ op_id,
"k8s_infra_controller_delete",
task,
)
return
elif topic == "k8s_infra_config":
+ op_id = params["operation_id"]
+ profile_id = params["profile_id"]
+ db_profile = self.db.get_one("k8sinfra_config", {"_id": profile_id})
+ db_profile["profile_type"] = "infra-configs"
+ op_params = self.get_operation_params(db_profile, op_id)
if command == "profile_create" or command == "profile_created":
- k8s_infra_config_id = params.get("_id")
- self.logger.debug(
- "k8s_infra_config_id = {}".format(k8s_infra_config_id)
- )
+ self.logger.debug("Create k8s_infra_config_id = {}".format(profile_id))
task = asyncio.ensure_future(
- self.k8s_infra_config.create(params, order_id)
+ self.k8s_infra_config.create(op_id, op_params, db_profile)
)
self.lcm_tasks.register(
"k8s_infra_config",
- k8s_infra_config_id,
- order_id,
+ profile_id,
+ op_id,
"k8s_infra_config_create",
task,
)
return
elif command == "delete" or command == "deleted":
- k8s_infra_config_id = params.get("_id")
+ self.logger.debug("Delete k8s_infra_config_id = {}".format(profile_id))
task = asyncio.ensure_future(
- self.k8s_infra_config.delete(params, order_id)
+ self.k8s_infra_config.delete(op_id, op_params, db_profile)
)
self.lcm_tasks.register(
"k8s_infra_config",
- k8s_infra_config_id,
- order_id,
+ profile_id,
+ op_id,
"k8s_infra_config_delete",
task,
)
return
elif topic == "oka":
- # self.logger.info("Oka Elif")
- oka_id = params["_id"]
- # self.logger.info("Command: {}".format(command))
+ op_id = params["operation_id"]
+ oka_id = params["oka_id"]
+ db_oka = self.db.get_one("okas", {"_id": oka_id})
+ op_params = self.get_operation_params(db_oka, op_id)
if command == "create":
- task = asyncio.ensure_future(self.oka.create(params, order_id))
- # self.logger.info("Task: {}".format(task))
- self.lcm_tasks.register("oka", oka_id, order_id, "oka_create", task)
+ task = asyncio.ensure_future(self.oka.create(op_id, op_params, db_oka))
+ self.lcm_tasks.register("oka", oka_id, op_id, "oka_create", task)
return
elif command == "edit":
- task = asyncio.ensure_future(self.oka.edit(params, order_id))
- # self.logger.info("Task: {}".format(task))
- self.lcm_tasks.register("oka", oka_id, order_id, "oka_edit", task)
+ task = asyncio.ensure_future(self.oka.edit(op_id, op_params, db_oka))
+ self.lcm_tasks.register("oka", oka_id, op_id, "oka_edit", task)
return
elif command == "delete":
- task = asyncio.ensure_future(self.oka.delete(params, order_id))
- # self.logger.info("Task: {}".format(task))
- self.lcm_tasks.register("oka", oka_id, order_id, "oka_delete", task)
+ task = asyncio.ensure_future(self.oka.delete(op_id, op_params, db_oka))
+ self.lcm_tasks.register("oka", oka_id, op_id, "oka_delete", task)
return
elif topic == "ksu":
- # self.logger.info("Ksu Elif")
- ksu_id = params["_id"]
+ op_id = params["operation_id"]
+ op_params = None
+ db_content = None
+ if not (command == "clone" or command == "move"):
+ # op_params is a list
+ # db_content is a list of KSU
+ db_content = []
+ op_params = []
+ for ksu_id in params["ksus_list"]:
+ db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
+ db_content.append(db_ksu)
+ ksu_params = {}
+ if command == "delete":
+ ksu_params["profile"] = {}
+ ksu_params["profile"]["profile_type"] = db_ksu["profile"][
+ "profile_type"
+ ]
+ ksu_params["profile"]["_id"] = db_ksu["profile"]["_id"]
+ else:
+ ksu_params = self.get_operation_params(db_ksu, op_id)
+ # Update ksu_params["profile"] with profile name and age-pubkey
+ profile_type = ksu_params["profile"]["profile_type"]
+ profile_id = ksu_params["profile"]["_id"]
+ profile_collection = self.profile_collection_mapping[profile_type]
+ db_profile = self.db.get_one(
+ profile_collection, {"_id": profile_id}
+ )
+ ksu_params["profile"]["name"] = db_profile["name"]
+ ksu_params["profile"]["age_pubkey"] = db_profile.get(
+ "age_pubkey", ""
+ )
+ if command == "create" or command == "edit" or command == "edited":
+ # Update ksu_params["oka"] with sw_catalog_path (when missing)
+ for oka in ksu_params["oka"]:
+ if "sw_catalog_path" not in oka:
+ oka_id = oka["_id"]
+ db_oka = self.db.get_one("okas", {"_id": oka_id})
+ oka[
+ "sw_catalog_path"
+ ] = f"infra-controllers/{db_oka['git_name']}"
+ op_params.append(ksu_params)
+ else:
+ # db_content and op_params are single items
+ db_content = self.db.get_one("ksus", {"_id": params["_id"]})
+ db_content = db_ksu
+ op_params = self.get_operation_params(db_ksu, op_id)
if command == "create":
- task = asyncio.ensure_future(self.ksu.create(params, order_id))
- # self.logger.info("task: {}".format(task))
- self.lcm_tasks.register("ksu", ksu_id, order_id, "ksu_create", task)
+ task = asyncio.ensure_future(
+ self.ksu.create(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register("ksu", ksu_id, op_id, "ksu_create", task)
return
elif command == "edit" or command == "edited":
- task = asyncio.ensure_future(self.ksu.edit(params, order_id))
- # self.logger.info("Task: {}".format(task))
- self.lcm_tasks.register("ksu", ksu_id, order_id, "ksu_edit", task)
+ task = asyncio.ensure_future(
+ self.ksu.edit(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register("ksu", ksu_id, op_id, "ksu_edit", task)
return
elif command == "delete":
- task = asyncio.ensure_future(self.ksu.delete(params, order_id))
- # self.logger.info("Task: {}".format(task))
- self.lcm_tasks.register("ksu", ksu_id, order_id, "ksu_delete", task)
+ task = asyncio.ensure_future(
+ self.ksu.delete(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register("ksu", ksu_id, op_id, "ksu_delete", task)
return
elif command == "clone":
- # self.logger.info("KSU clone")
- task = asyncio.ensure_future(self.ksu.edit(params, order_id))
- # self.logger.info("Task: {}".format(task))
- self.lcm_tasks.register("ksu", ksu_id, order_id, "ksu_clone", task)
+ task = asyncio.ensure_future(
+ self.ksu.clone(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register("ksu", ksu_id, op_id, "ksu_clone", task)
return
elif command == "move":
- # self.logger.info("KSU move")
- task = asyncio.ensure_future(self.ksu.edit(params, order_id))
- # self.logger.info("Task: {}".format(task))
- self.lcm_tasks.register("ksu", ksu_id, order_id, "ksu_move", task)
+ task = asyncio.ensure_future(
+ self.ksu.move(op_id, op_params, db_content)
+ )
+ self.lcm_tasks.register("ksu", ksu_id, op_id, "ksu_move", task)
return
self.logger.critical("unknown topic {} and command '{}'".format(topic, command))
self.consecutive_errors, self.first_start
)
)
- # self.logger.info(
- # "Consecutive errors: {} first start: {}".format(
- # self.consecutive_errors, self.first_start
- # )
- # )
topics_admin = ("admin",)
await asyncio.gather(
self.msg.aioread(
op_id = content["current_operation"]
self.logger.info("OP_id: {}".format(op_id))
length = 0
- for op in content["operationHistory"]:
- self.logger.info("Operations: {}".format(op))
- if op["op_id"] == op_id:
+ for operation in content["operationHistory"]:
+ self.logger.info("Operations: {}".format(operation))
+ if operation["op_id"] == op_id:
self.logger.info("Length: {}".format(length))
now = time()
if workflow_status:
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+from pyrage import x25519
+import yaml
+import base64
+
+
+def gather_age_key(cluster):
+ pubkey = cluster.get("age_pubkey")
+ privkey = cluster.get("age_privkey")
+ # return both public and private key
+ return pubkey, privkey
+
+
+def generate_age_key():
+ ident = x25519.Identity.generate()
+ # gets the public key
+ pubkey = ident.to_public()
+ # gets the private key
+ privkey = str(ident)
+ # return both public and private key
+ return pubkey, privkey
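+
+
+# Illustrative usage of the helpers above (key formats assumed from the age tooling):
+#   pub, priv = generate_age_key()   # pub ~ "age1...", priv ~ "AGE-SECRET-KEY-1..."
+# gather_age_key() simply re-reads a pair that is already stored in the cluster record.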
+
+
+async def create_cluster(self, op_id, op_params, content, bootstrap_only=False):
+ self.logger.info("Create cluster workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ db_cluster = content["cluster"]
+ db_vim_account = content["vim_account"]
+
+ # workflow_template = "launcher-create-aks-cluster-and-bootstrap.j2"
+ workflow_template = "launcher-create-crossplane-cluster-and-bootstrap.j2"
+ workflow_name = f"create-cluster-{db_cluster['_id']}"
+ # cluster_name = db_cluster["name"].lower()
+ cluster_name = db_cluster["git_name"].lower()
+
+ # Generate age key
+ # public_key_new_cluster, private_key_new_cluster = generate_age_key()
+ # Get age key
+ public_key_new_cluster, private_key_new_cluster = gather_age_key(db_cluster)
+ self.logger.debug(f"public_key_new_cluster={public_key_new_cluster}")
+ self.logger.debug(f"private_key_new_cluster={private_key_new_cluster}")
+
+ # Test kubectl connection
+ self.logger.debug(self._kubectl._get_kubectl_version())
+
+ # Create secret with agekey
+ secret_name = f"secret-age-{cluster_name}"
+ secret_namespace = "osm-workflows"
+ secret_key = "agekey"
+ secret_value = private_key_new_cluster
+ await self.create_secret(
+ secret_name,
+ secret_namespace,
+ secret_key,
+ secret_value,
+ )
+
+ # Additional params for the workflow
+ cluster_kustomization_name = cluster_name
+ osm_project_name = "osm_admin" # TODO: get project name from content
+ if bootstrap_only:
+ cluster_type = ""
+ providerconfig_name = ""
+ else:
+ vim_account_id = db_cluster["vim_account"]
+ providerconfig_name = f"{vim_account_id}-config"
+ vim_type = db_vim_account["vim_type"]
+ if vim_type == "azure":
+ cluster_type = "aks"
+ elif vim_type == "aws":
+ cluster_type = "eks"
+ elif vim_type == "gcp":
+ cluster_type = "gke"
+ else:
+ raise Exception("Not suitable VIM account to register cluster")
+
+ # Render workflow
+ # workflow_kwargs = {
+ # "git_fleet_url": f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ # "git_sw_catalogs_url": f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ # }
+ # manifest = self.render_jinja_template(
+ # workflow_template,
+ # output_file=None,
+ # **workflow_kwargs
+ # )
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ cluster_name=cluster_name,
+ cluster_type=cluster_type,
+ cluster_kustomization_name=cluster_kustomization_name,
+ providerconfig_name=providerconfig_name,
+ public_key_mgmt=self._pubkey,
+ public_key_new_cluster=public_key_new_cluster,
+ secret_name_private_key_new_cluster=secret_name,
+ vm_size=db_cluster["node_size"],
+ node_count=db_cluster["node_count"],
+ k8s_version=db_cluster["k8s_version"],
+ cluster_location=db_cluster["region_name"],
+ osm_project_name=osm_project_name,
+ rg_name=db_cluster["resource_group"],
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.debug(f"Workflow manifest: {manifest}")
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
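+
+
+# Note: the workflow name returned here is what the LCM handlers later pass to
+# check_workflow_status() to track the submitted Argo Workflow (see the
+# launch_workflow()/check_workflow_status() pattern used by the handlers).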
+
+
+async def update_cluster(self, op_id, op_params, content):
+ self.logger.info("Update cluster eks workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ db_cluster = content["cluster"]
+ db_vim_account = content["vim_account"]
+
+ workflow_template = "launcher-update-crossplane-cluster.j2"
+ workflow_name = f"delete-cluster-{db_cluster['_id']}"
+ # cluster_name = db_cluster["name"].lower()
+ cluster_name = db_cluster["git_name"].lower()
+
+ # Get age key
+ public_key_cluster, private_key_cluster = gather_age_key(db_cluster)
+ self.logger.debug(f"public_key_new_cluster={public_key_cluster}")
+ self.logger.debug(f"private_key_new_cluster={private_key_cluster}")
+
+ # Create secret with agekey
+ secret_name = f"secret-age-{cluster_name}"
+ secret_namespace = "osm-workflows"
+ secret_key = "agekey"
+ secret_value = private_key_cluster
+ await self.create_secret(
+ secret_name,
+ secret_namespace,
+ secret_key,
+ secret_value,
+ )
+
+ # Additional params for the workflow
+ cluster_kustomization_name = cluster_name
+ osm_project_name = "osm_admin" # TODO: get project name from db_cluster
+ vim_account_id = db_cluster["vim_account"]
+ providerconfig_name = f"{vim_account_id}-config"
+ vim_type = db_vim_account["vim_type"]
+ if vim_type == "azure":
+ cluster_type = "aks"
+ elif vim_type == "aws":
+ cluster_type = "eks"
+ elif vim_type == "gcp":
+ cluster_type = "gke"
+ else:
+ raise Exception("Not suitable VIM account to update cluster")
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ cluster_name=cluster_name,
+ cluster_type=cluster_type,
+ cluster_kustomization_name=cluster_kustomization_name,
+ providerconfig_name=providerconfig_name,
+ public_key_mgmt=self._pubkey,
+ public_key_new_cluster=public_key_cluster,
+ secret_name_private_key_new_cluster=secret_name,
+ vm_size=db_cluster["node_size"],
+ node_count=db_cluster["node_count"],
+ k8s_version=db_cluster["k8s_version"],
+ cluster_location=db_cluster["region_name"],
+ osm_project_name=osm_project_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.info(manifest)
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def delete_cluster(self, op_id, op_params, content):
+ self.logger.info("Delete cluster workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ db_cluster = content["cluster"]
+
+ workflow_template = "launcher-delete-cluster.j2"
+ workflow_name = f"delete-cluster-{db_cluster['_id']}"
+ # cluster_name = db_cluster["name"].lower()
+ cluster_name = db_cluster["git_name"].lower()
+
+ # Additional params for the workflow
+ cluster_kustomization_name = cluster_name
+ osm_project_name = "osm_admin" # TODO: get project name from DB
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ cluster_name=cluster_name,
+ cluster_kustomization_name=cluster_kustomization_name,
+ osm_project_name=osm_project_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.info(manifest)
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def register_cluster(self, op_id, op_params, content):
+ self.logger.info("Register cluster workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ db_cluster = content["cluster"]
+ # cluster_name = db_cluster["name"].lower()
+ cluster_name = db_cluster["git_name"].lower()
+
+ # Create secret with kubeconfig
+ secret_name = f"kubeconfig-{cluster_name}"
+ secret_namespace = "managed-resources"
+ secret_key = "kubeconfig"
+ secret_value = yaml.safe_dump(
+ db_cluster["credentials"], indent=4, default_flow_style=False, sort_keys=False
+ )
+ await self.create_secret(
+ secret_name,
+ secret_namespace,
+ secret_key,
+ secret_value,
+ )
+
+ workflow_name = await self.create_cluster(op_id, op_params, content, True)
+ return workflow_name
+
+
+async def deregister_cluster(self, op_id, op_params, content):
+ self.logger.info("Deregister cluster workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ workflow_name = await self.delete_cluster(op_id, op_params, content)
+ return workflow_name
+
+
+async def get_cluster_credentials(self, db_cluster):
+ """
+ returns the kubeconfig file of a K8s cluster in a dictionary
+ """
+ self.logger.info("Get cluster credentials Enter")
+ self.logger.info(f"Content: {db_cluster}")
+
+ secret_name = f"kubeconfig-{db_cluster['git_name'].lower()}"
+ secret_namespace = "managed-resources"
+ secret_key = "kubeconfig"
+
+ self.logger.info(f"Checking content of secret {secret_name} ...")
+ try:
+ returned_secret_data = await self._kubectl.get_secret_content(
+ name=secret_name,
+ namespace=secret_namespace,
+ )
+ returned_secret_value = base64.b64decode(
+ returned_secret_data[secret_key]
+ ).decode("utf-8")
+ return True, yaml.safe_load(returned_secret_value)
+ except Exception as e:
+ message = f"Not possible to get the credentials of the cluster. Exception: {e}"
+ self.logger.critical(message)
+ return False, message
+
+
+async def check_create_cluster(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_update_cluster(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_delete_cluster(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_register_cluster(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_deregister_cluster(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import base64
+
+
+async def create_secret(self, secret_name, secret_namespace, secret_key, secret_value):
+ async def check_secret(secret_name, secret_namespace, secret_key, secret_value):
+ self.logger.info(f"Checking content of secret {secret_name} ...")
+ returned_secret_data = await self._kubectl.get_secret_content(
+ name=secret_name,
+ namespace=secret_namespace,
+ )
+ self.logger.debug(f"Result from async call: { returned_secret_data }")
+
+ self.logger.debug("Comparing secret values")
+ returned_secret_value = base64.b64decode(
+ returned_secret_data[secret_key]
+ ).decode("utf-8")
+ self.logger.debug(f"secret_data_original={secret_value}")
+ self.logger.debug(f"secret_data_received={returned_secret_value}")
+ self.logger.info(
+ f"Result of secret comparison: {secret_value==returned_secret_value} ..."
+ )
+
+ self.logger.info(
+ f"Creating secret {secret_name} in namespace {secret_namespace} ..."
+ )
+ secret_data = {secret_key: base64.b64encode(secret_value.encode()).decode("utf-8")}
+ self.logger.info(f"Secret name: {secret_name}")
+ self.logger.info(f"Secret data {secret_data}")
+ self.logger.info(f"Namespace: {secret_namespace}")
+ self.logger.info("Calling N2VC kubectl to create secret...")
+ await self._kubectl.create_secret(
+ name=secret_name,
+ data=secret_data,
+ namespace=secret_namespace,
+ secret_type="Opaque",
+ )
+ self.logger.info(f"Secret {secret_name} CREATED")
+
+ await check_secret(secret_name, secret_namespace, secret_key, secret_value)
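+
+
+# Illustrative call (example values, as used by the cluster and KSU workflow launchers):
+#   await self.create_secret(
+#       secret_name="secret-age-mycluster",   # assumed example name
+#       secret_namespace="osm-workflows",
+#       secret_key="agekey",
+#       secret_value=private_key,
+#   )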
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import yaml
+
+
+async def create_ksus(self, op_id, op_params_list, content_list):
+ self.logger.info("Create KSU workflow Enter")
+ self.logger.info(
+ f"Operation {op_id}. Params: {op_params_list}. Content: {content_list}"
+ )
+
+ if len(content_list) > 1:
+ raise Exception("There is no ODU workflow yet able to manage multiple KSUs")
+ db_ksu = content_list[0]
+ ksu_params = op_params_list[0]
+ oka_list = ksu_params["oka"]
+ if len(oka_list) > 1:
+ raise Exception(
+ "There is no ODU workflow yet able to manage multiple OKAs for a KSU"
+ )
+ oka_path = oka_list[0]["sw_catalog_path"]
+
+ workflow_template = "launcher-create-ksu-hr.j2"
+ workflow_name = f"create-ksus-{op_id}"
+ ksu_name = db_ksu["git_name"].lower()
+
+ # Additional params for the workflow
+ osm_project_name = "osm_admin" # TODO: get project name from db_ksu
+ kustomization_name = ksu_name
+ helmrelease_name = ksu_name
+ target_ns = ksu_params.get("namespace")
+ profile_type = ksu_params.get("profile", {}).get("profile_type")
+ profile_name = ksu_params.get("profile", {}).get("name")
+ age_public_key = ksu_params.get("profile", {}).get("age_pubkey")
+ substitute_environment = ksu_params.get("substitute_environment", "false")
+ substitution_filter = ksu_params.get("substitution_filter", "")
+ custom_env_vars = ksu_params.get("custom_env_vars", "")
+ if custom_env_vars:
+ custom_env_vars = "|\n" + "\n".join(
+ [" " * 12 + f"{k}={v}" for k, v in custom_env_vars.items()]
+ )
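+ # e.g. (assumed input) {"HTTP_PROXY": "http://proxy:8080"} becomes a YAML literal
+ # block ("|") whose "KEY=value" lines are indented 12 spaces, so it can be inlined
+ # verbatim into the Argo workflow manifest rendered below.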
+ inline_values = ksu_params.get("inline_values", "")
+ if inline_values:
+ yaml_string = yaml.safe_dump(
+ inline_values, sort_keys=False, default_flow_style=False
+ )
+ inline_values = "|\n" + "\n".join(
+ [" " * 8 + line for line in yaml_string.splitlines()]
+ )
+ is_preexisting_cm = "false"
+ values_configmap_name = f"cm-{ksu_name}"
+ cm_values = ksu_params.get("configmap_values", "")
+ if cm_values:
+ yaml_string = yaml.safe_dump(
+ cm_values, sort_keys=False, default_flow_style=False
+ )
+ custom_env_vars = "|\n" + "\n".join(
+ [" " * 8 + line for line in yaml_string.splitlines()]
+ )
+ is_preexisting_secret = "false"
+ secret_values = ksu_params.get("secret_values", "")
+ if secret_values:
+ values_secret_name = f"secret-{ksu_name}"
+ reference_secret_for_values = f"ref-secret-{ksu_name}"
+ reference_key_for_values = f"ref-key-{ksu_name}"
+ secret_values = yaml.safe_dump(
+ secret_values, sort_keys=False, default_flow_style=False
+ )
+ else:
+ values_secret_name = ""
+ reference_secret_for_values = ""
+ reference_key_for_values = ""
+ sync = "true"
+
+ if secret_values:
+ secret_namespace = "osm-workflows"
+ # Create secret
+ await self.create_secret(
+ reference_secret_for_values,
+ secret_namespace,
+ reference_key_for_values,
+ secret_values,
+ )
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ templates_path=oka_path,
+ substitute_environment=substitute_environment,
+ substitution_filter=substitution_filter,
+ custom_env_vars=custom_env_vars,
+ kustomization_name=kustomization_name,
+ helmrelease_name=helmrelease_name,
+ inline_values=inline_values,
+ is_preexisting_secret=is_preexisting_secret,
+ target_ns=target_ns,
+ age_public_key=age_public_key,
+ values_secret_name=values_secret_name,
+ reference_secret_for_values=reference_secret_for_values,
+ reference_key_for_values=reference_key_for_values,
+ is_preexisting_cm=is_preexisting_cm,
+ values_configmap_name=values_configmap_name,
+ cm_values=cm_values,
+ ksu_name=ksu_name,
+ profile_name=profile_name,
+ profile_type=profile_type,
+ osm_project_name=osm_project_name,
+ sync=sync,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.debug(f"Workflow manifest: {manifest}")
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def update_ksus(self, op_id, op_params_list, content_list):
+ self.logger.info("Update KSU workflow Enter")
+ self.logger.info(
+ f"Operation {op_id}. Params: {op_params_list}. Content: {content_list}"
+ )
+
+ if len(content_list) > 1:
+ raise Exception("There is no ODU workflow yet able to manage multiple KSUs")
+ db_ksu = content_list[0]
+ ksu_params = op_params_list[0]
+ oka_list = ksu_params["oka"]
+ if len(oka_list) > 1:
+ raise Exception(
+ "There is no ODU workflow yet able to manage multiple OKAs for a KSU"
+ )
+ oka_path = oka_list[0]["sw_catalog_path"]
+
+ workflow_template = "launcher-update-ksu-hr.j2"
+ workflow_name = f"update-ksus-{op_id}"
+ ksu_name = db_ksu["git_name"].lower()
+
+ # Additional params for the workflow
+ osm_project_name = "osm_admin" # TODO: get project name from db_ksu
+ kustomization_name = ksu_name
+ helmrelease_name = ksu_name
+ target_ns = ksu_params.get("namespace")
+ profile_type = ksu_params.get("profile", {}).get("profile_type")
+ profile_name = ksu_params.get("profile", {}).get("name")
+ age_public_key = ksu_params.get("profile", {}).get("age_pubkey")
+ substitute_environment = ksu_params.get("substitute_environment", "false")
+ substitution_filter = ksu_params.get("substitution_filter", "")
+ custom_env_vars = ksu_params.get("custom_env_vars", "")
+ if custom_env_vars:
+ custom_env_vars = "|\n" + "\n".join(
+ [" " * 12 + f"{k}={v}" for k, v in custom_env_vars.items()]
+ )
+ inline_values = ksu_params.get("inline_values", "")
+ if inline_values:
+ yaml_string = yaml.safe_dump(
+ inline_values, sort_keys=False, default_flow_style=False
+ )
+ inline_values = "|\n" + "\n".join(
+ [" " * 8 + line for line in yaml_string.splitlines()]
+ )
+ is_preexisting_cm = "false"
+ values_configmap_name = f"cm-{ksu_name}"
+ cm_values = ksu_params.get("configmap_values", "")
+ if cm_values:
+ yaml_string = yaml.safe_dump(
+ cm_values, sort_keys=False, default_flow_style=False
+ )
+ custom_env_vars = "|\n" + "\n".join(
+ [" " * 8 + line for line in yaml_string.splitlines()]
+ )
+ is_preexisting_secret = "false"
+ secret_values = ksu_params.get("secret_values", "")
+ if secret_values:
+ values_secret_name = f"secret-{ksu_name}"
+ reference_secret_for_values = f"ref-secret-{ksu_name}"
+ reference_key_for_values = f"ref-key-{ksu_name}"
+ secret_values = yaml.safe_dump(
+ secret_values, sort_keys=False, default_flow_style=False
+ )
+ else:
+ values_secret_name = ""
+ reference_secret_for_values = ""
+ reference_key_for_values = ""
+
+ if secret_values:
+ secret_namespace = "osm-workflows"
+ # Create secret
+ await self.create_secret(
+ reference_secret_for_values,
+ secret_namespace,
+ reference_key_for_values,
+ secret_values,
+ )
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ templates_path=oka_path,
+ substitute_environment=substitute_environment,
+ substitution_filter=substitution_filter,
+ custom_env_vars=custom_env_vars,
+ kustomization_name=kustomization_name,
+ helmrelease_name=helmrelease_name,
+ inline_values=inline_values,
+ is_preexisting_secret=is_preexisting_secret,
+ target_ns=target_ns,
+ age_public_key=age_public_key,
+ values_secret_name=values_secret_name,
+ reference_secret_for_values=reference_secret_for_values,
+ reference_key_for_values=reference_key_for_values,
+ is_preexisting_cm=is_preexisting_cm,
+ values_configmap_name=values_configmap_name,
+ cm_values=cm_values,
+ ksu_name=ksu_name,
+ profile_name=profile_name,
+ profile_type=profile_type,
+ osm_project_name=osm_project_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.debug(f"Workflow manifest: {manifest}")
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def delete_ksus(self, op_id, op_params_list, content_list):
+ self.logger.info("Delete KSU workflow Enter")
+ self.logger.info(
+ f"Operation {op_id}. Params: {op_params_list}. Content: {content_list}"
+ )
+
+ if len(content_list) > 1:
+ raise Exception("There is no ODU workflow yet able to manage multiple KSUs")
+ db_ksu = content_list[0]
+ ksu_params = op_params_list[0]
+
+ workflow_template = "launcher-delete-ksu.j2"
+ workflow_name = f"delete-ksus-{op_id}"
+ ksu_name = db_ksu["git_name"].lower()
+
+ # Additional params for the workflow
+ osm_project_name = "osm_admin" # TODO: get project name from db_ksu
+ profile_name = ksu_params.get("profile", {}).get("name")
+ profile_type = ksu_params.get("profile", {}).get("profile_type")
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ ksu_name=ksu_name,
+ profile_name=profile_name,
+ profile_type=profile_type,
+ osm_project_name=osm_project_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.debug(f"Workflow manifest: {manifest}")
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def clone_ksu(self, op_id, op_params, content):
+ self.logger.info("Clone KSU workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ workflow_name = f"clone-ksu-{content['_id']}"
+ return workflow_name
+
+
+async def move_ksu(self, op_id, op_params, content):
+ self.logger.info("Move KSU workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ workflow_name = f"move-ksu-{content['_id']}"
+ return workflow_name
+
+
+async def check_create_ksus(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_update_ksus(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_delete_ksus(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_clone_ksu(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_move_ksu(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
--- /dev/null
+#######################################################################################
+# Copyright 2020 Canonical Ltd.
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+import base64
+import logging
+from typing import Dict
+import typing
+import uuid
+import json
+
+from distutils.version import LooseVersion
+
+from kubernetes import client, config
+from kubernetes.client.api import VersionApi
+from kubernetes.client.models import (
+ V1ClusterRole,
+ V1Role,
+ V1ObjectMeta,
+ V1PolicyRule,
+ V1ServiceAccount,
+ V1ClusterRoleBinding,
+ V1RoleBinding,
+ V1RoleRef,
+ RbacV1Subject,
+ V1Secret,
+ V1SecretReference,
+ V1Namespace,
+)
+from kubernetes.client.rest import ApiException
+from n2vc.libjuju import retry_callback
+from retrying_async import retry
+
+
+SERVICE_ACCOUNT_TOKEN_KEY = "token"
+SERVICE_ACCOUNT_ROOT_CA_KEY = "ca.crt"
+# clients
+CORE_CLIENT = "core_v1"
+RBAC_CLIENT = "rbac_v1"
+STORAGE_CLIENT = "storage_v1"
+CUSTOM_OBJECT_CLIENT = "custom_object"
+
+
+class Kubectl:
+ def __init__(self, config_file=None):
+ config.load_kube_config(config_file=config_file)
+ self._clients = {
+ CORE_CLIENT: client.CoreV1Api(),
+ RBAC_CLIENT: client.RbacAuthorizationV1Api(),
+ STORAGE_CLIENT: client.StorageV1Api(),
+ CUSTOM_OBJECT_CLIENT: client.CustomObjectsApi(),
+ }
+ self._configuration = config.kube_config.Configuration.get_default_copy()
+ self.logger = logging.getLogger("lcm.odu")
+
+ @property
+ def configuration(self):
+ return self._configuration
+
+ @property
+ def clients(self):
+ return self._clients
+
+ def get_services(
+ self,
+ field_selector: str = None,
+ label_selector: str = None,
+ ) -> typing.List[typing.Dict]:
+ """
+ Get the list of services matching the given selectors (all namespaces)
+
+ :param: field_selector: Kubernetes field selector to filter the services
+ :param: label_selector: Kubernetes label selector to filter the services
+
+ :return: List of the services matching the selectors specified
+ """
+ kwargs = {}
+ if field_selector:
+ kwargs["field_selector"] = field_selector
+ if label_selector:
+ kwargs["label_selector"] = label_selector
+ try:
+ result = self.clients[CORE_CLIENT].list_service_for_all_namespaces(**kwargs)
+ return [
+ {
+ "name": i.metadata.name,
+ "cluster_ip": i.spec.cluster_ip,
+ "type": i.spec.type,
+ "ports": (
+ [
+ {
+ "name": p.name,
+ "node_port": p.node_port,
+ "port": p.port,
+ "protocol": p.protocol,
+ "target_port": p.target_port,
+ }
+ for p in i.spec.ports
+ ]
+ if i.spec.ports
+ else []
+ ),
+ "external_ip": [i.ip for i in i.status.load_balancer.ingress]
+ if i.status.load_balancer.ingress
+ else None,
+ }
+ for i in result.items
+ ]
+ except ApiException as e:
+ self.logger.error("Error calling get services: {}".format(e))
+ raise e
+
+ def get_default_storage_class(self) -> str:
+ """
+ Default storage class
+
+ :return: Returns the default storage class name, if it exists.
+ If not, it returns the first storage class.
+ If there are no storage classes, returns None
+ """
+ storage_classes = self.clients[STORAGE_CLIENT].list_storage_class()
+ selected_sc = None
+ default_sc_annotations = {
+ "storageclass.kubernetes.io/is-default-class": "true",
+ # Older clusters still use the beta annotation.
+ "storageclass.beta.kubernetes.io/is-default-class": "true",
+ }
+ for sc in storage_classes.items:
+ if not selected_sc:
+ # Select the first storage class in case there is no default class
+ selected_sc = sc.metadata.name
+ annotations = sc.metadata.annotations or {}
+ if any(
+ k in annotations and annotations[k] == v
+ for k, v in default_sc_annotations.items()
+ ):
+ # Default storage
+ selected_sc = sc.metadata.name
+ break
+ return selected_sc
+
+ def create_cluster_role(
+ self,
+ name: str,
+ labels: Dict[str, str],
+ namespace: str = "kube-system",
+ ):
+ """
+ Create a cluster role
+
+ :param: name: Name of the cluster role
+ :param: labels: Labels for cluster role metadata
+ :param: namespace: Kubernetes namespace for cluster role metadata
+ Default: kube-system
+ """
+ cluster_roles = self.clients[RBAC_CLIENT].list_cluster_role(
+ field_selector="metadata.name={}".format(name)
+ )
+
+ if len(cluster_roles.items) > 0:
+ raise Exception("Role with metadata.name={} already exists".format(name))
+
+ metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace)
+ # Cluster role
+ cluster_role = V1ClusterRole(
+ metadata=metadata,
+ rules=[
+ V1PolicyRule(api_groups=["*"], resources=["*"], verbs=["*"]),
+ V1PolicyRule(non_resource_ur_ls=["*"], verbs=["*"]),
+ ],
+ )
+
+ self.clients[RBAC_CLIENT].create_cluster_role(cluster_role)
+
+ async def create_role(
+ self,
+ name: str,
+ labels: Dict[str, str],
+ api_groups: list,
+ resources: list,
+ verbs: list,
+ namespace: str,
+ ):
+ """
+ Create a role with one PolicyRule
+
+ :param: name: Name of the namespaced Role
+ :param: labels: Labels for namespaced Role metadata
+ :param: api_groups: List with api-groups allowed in the policy rule
+ :param: resources: List with resources allowed in the policy rule
+ :param: verbs: List with verbs allowed in the policy rule
+ :param: namespace: Kubernetes namespace for Role metadata
+
+ :return: None
+ """
+
+ roles = self.clients[RBAC_CLIENT].list_namespaced_role(
+ namespace, field_selector="metadata.name={}".format(name)
+ )
+
+ if len(roles.items) > 0:
+ raise Exception("Role with metadata.name={} already exists".format(name))
+
+ metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace)
+
+ role = V1Role(
+ metadata=metadata,
+ rules=[
+ V1PolicyRule(api_groups=api_groups, resources=resources, verbs=verbs),
+ ],
+ )
+
+ self.clients[RBAC_CLIENT].create_namespaced_role(namespace, role)
+
+ def delete_cluster_role(self, name: str):
+ """
+ Delete a cluster role
+
+ :param: name: Name of the cluster role
+ """
+ self.clients[RBAC_CLIENT].delete_cluster_role(name)
+
+ def _get_kubectl_version(self):
+ version = VersionApi().get_code()
+ return "{}.{}".format(version.major, version.minor)
+
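+ # From Kubernetes 1.24 onwards, token secrets are no longer auto-generated for
+ # service accounts, so the secret has to be created explicitly (see
+ # _create_service_account_secret below).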
+ def _need_to_create_new_secret(self):
+ min_k8s_version = "1.24"
+ current_k8s_version = self._get_kubectl_version()
+ return LooseVersion(min_k8s_version) <= LooseVersion(current_k8s_version)
+
+ def _get_secret_name(self, service_account_name: str):
+ random_alphanum = str(uuid.uuid4())[:5]
+ return "{}-token-{}".format(service_account_name, random_alphanum)
+
+ def _create_service_account_secret(
+ self,
+ service_account_name: str,
+ namespace: str,
+ secret_name: str,
+ ):
+ """
+ Create a secret for the service account. K8s version >= 1.24
+
+ :param: service_account_name: Name of the service account
+ :param: namespace: Kubernetes namespace for service account metadata
+ :param: secret_name: Name of the secret
+ """
+ v1_core = self.clients[CORE_CLIENT]
+ secrets = v1_core.list_namespaced_secret(
+ namespace, field_selector="metadata.name={}".format(secret_name)
+ ).items
+
+ if len(secrets) > 0:
+ raise Exception(
+ "Secret with metadata.name={} already exists".format(secret_name)
+ )
+
+ annotations = {"kubernetes.io/service-account.name": service_account_name}
+ metadata = V1ObjectMeta(
+ name=secret_name, namespace=namespace, annotations=annotations
+ )
+ type = "kubernetes.io/service-account-token"
+ secret = V1Secret(metadata=metadata, type=type)
+ v1_core.create_namespaced_secret(namespace, secret)
+
+ def _get_secret_reference_list(self, namespace: str, secret_name: str):
+ """
+ Return a secret reference list with one secret.
+ K8s version >= 1.24
+
+ :param: namespace: Kubernetes namespace for service account metadata
+ :param: secret_name: Name of the secret
+ :rtype: list[V1SecretReference]
+ """
+ return [V1SecretReference(name=secret_name, namespace=namespace)]
+
+ def create_service_account(
+ self,
+ name: str,
+ labels: Dict[str, str],
+ namespace: str = "kube-system",
+ ):
+ """
+ Create a service account
+
+ :param: name: Name of the service account
+ :param: labels: Labels for service account metadata
+ :param: namespace: Kubernetes namespace for service account metadata
+ Default: kube-system
+ """
+ v1_core = self.clients[CORE_CLIENT]
+ service_accounts = v1_core.list_namespaced_service_account(
+ namespace, field_selector="metadata.name={}".format(name)
+ )
+ if len(service_accounts.items) > 0:
+ raise Exception(
+ "Service account with metadata.name={} already exists".format(name)
+ )
+
+ metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace)
+
+ if self._need_to_create_new_secret():
+ secret_name = self._get_secret_name(name)
+ secrets = self._get_secret_reference_list(namespace, secret_name)
+ service_account = V1ServiceAccount(metadata=metadata, secrets=secrets)
+ v1_core.create_namespaced_service_account(namespace, service_account)
+ self._create_service_account_secret(name, namespace, secret_name)
+ else:
+ service_account = V1ServiceAccount(metadata=metadata)
+ v1_core.create_namespaced_service_account(namespace, service_account)
+
+ def delete_secret(self, name: str, namespace: str = "kube-system"):
+ """
+ Delete a secret
+
+ :param: name: Name of the secret
+ :param: namespace: Kubernetes namespace
+ Default: kube-system
+ """
+ self.clients[CORE_CLIENT].delete_namespaced_secret(name, namespace)
+
+ def delete_service_account(self, name: str, namespace: str = "kube-system"):
+ """
+ Delete a service account
+
+ :param: name: Name of the service account
+ :param: namespace: Kubernetes namespace for service account metadata
+ Default: kube-system
+ """
+ self.clients[CORE_CLIENT].delete_namespaced_service_account(name, namespace)
+
+ def create_cluster_role_binding(
+ self, name: str, labels: Dict[str, str], namespace: str = "kube-system"
+ ):
+ """
+ Create a cluster role binding
+
+        :param: name: Name of the cluster role binding (also used as the name of the
+            bound ClusterRole and ServiceAccount)
+        :param: labels: Labels for cluster role binding metadata
+        :param: namespace: Kubernetes namespace of the ServiceAccount subject
+            Default: kube-system
+ """
+ role_bindings = self.clients[RBAC_CLIENT].list_cluster_role_binding(
+ field_selector="metadata.name={}".format(name)
+ )
+ if len(role_bindings.items) > 0:
+ raise Exception("Generated rbac id already exists")
+
+ role_binding = V1ClusterRoleBinding(
+ metadata=V1ObjectMeta(name=name, labels=labels),
+ role_ref=V1RoleRef(kind="ClusterRole", name=name, api_group=""),
+ subjects=[
+ RbacV1Subject(kind="ServiceAccount", name=name, namespace=namespace)
+ ],
+ )
+ self.clients[RBAC_CLIENT].create_cluster_role_binding(role_binding)
+
+ async def create_role_binding(
+ self,
+ name: str,
+ role_name: str,
+ sa_name: str,
+ labels: Dict[str, str],
+ namespace: str,
+ ):
+ """
+        Create a namespaced role binding
+
+ :param: name: Name of the namespaced Role Binding
+ :param: role_name: Name of the namespaced Role to be bound
+ :param: sa_name: Name of the Service Account to be bound
+ :param: labels: Labels for Role Binding metadata
+ :param: namespace: Kubernetes namespace for Role Binding metadata
+
+ :return: None
+ """
+ role_bindings = self.clients[RBAC_CLIENT].list_namespaced_role_binding(
+ namespace, field_selector="metadata.name={}".format(name)
+ )
+ if len(role_bindings.items) > 0:
+ raise Exception(
+ "Role Binding with metadata.name={} already exists".format(name)
+ )
+
+ role_binding = V1RoleBinding(
+ metadata=V1ObjectMeta(name=name, labels=labels),
+ role_ref=V1RoleRef(kind="Role", name=role_name, api_group=""),
+ subjects=[
+ RbacV1Subject(kind="ServiceAccount", name=sa_name, namespace=namespace)
+ ],
+ )
+ self.clients[RBAC_CLIENT].create_namespaced_role_binding(
+ namespace, role_binding
+ )
+
+ def delete_cluster_role_binding(self, name: str):
+ """
+ Delete a cluster role binding
+
+ :param: name: Name of the cluster role binding
+ """
+ self.clients[RBAC_CLIENT].delete_cluster_role_binding(name)
+
+ @retry(
+ attempts=10,
+ delay=1,
+ fallback=Exception("Failed getting the secret from service account"),
+ callback=retry_callback,
+ )
+ async def get_secret_data(
+ self, name: str, namespace: str = "kube-system"
+    ) -> typing.Tuple[str, str]:
+ """
+ Get secret data
+
+        :param: name: Name of the service account that owns the secret
+        :param: namespace: Name of the namespace where the secret is stored
+
+        :return: Tuple with the decoded token and CA certificate
+ """
+ v1_core = self.clients[CORE_CLIENT]
+
+ secret_name = None
+
+ service_accounts = v1_core.list_namespaced_service_account(
+ namespace, field_selector="metadata.name={}".format(name)
+ )
+ if len(service_accounts.items) == 0:
+ raise Exception(
+ "Service account not found with metadata.name={}".format(name)
+ )
+ service_account = service_accounts.items[0]
+ if service_account.secrets and len(service_account.secrets) > 0:
+ secret_name = service_account.secrets[0].name
+ if not secret_name:
+ raise Exception(
+ "Failed getting the secret from service account {}".format(name)
+ )
+ # TODO: refactor to use get_secret_content
+ secret = v1_core.list_namespaced_secret(
+ namespace, field_selector="metadata.name={}".format(secret_name)
+ ).items[0]
+
+ token = secret.data[SERVICE_ACCOUNT_TOKEN_KEY]
+ client_certificate_data = secret.data[SERVICE_ACCOUNT_ROOT_CA_KEY]
+
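+        # Secret data values are base64-encoded, so both are decoded here and returned
+        # as plain strings (e.g. ready to be written into a kubeconfig).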
+ return (
+ base64.b64decode(token).decode("utf-8"),
+ base64.b64decode(client_certificate_data).decode("utf-8"),
+ )
+
+ @retry(
+ attempts=10,
+ delay=1,
+ fallback=Exception("Failed getting data from the secret"),
+ )
+ async def get_secret_content(
+ self,
+ name: str,
+ namespace: str,
+ ) -> dict:
+ """
+ Get secret data
+
+ :param: name: Name of the secret
+ :param: namespace: Name of the namespace where the secret is stored
+
+ :return: Dictionary with secret's data
+ """
+ v1_core = self.clients[CORE_CLIENT]
+
+ secret = v1_core.read_namespaced_secret(name, namespace)
+
+ return secret.data
+
+ @retry(
+ attempts=10,
+ delay=1,
+ fallback=Exception("Failed creating the secret"),
+ )
+ async def create_secret(
+ self, name: str, data: dict, namespace: str, secret_type: str
+ ):
+ """
+ Create secret with data
+
+ :param: name: Name of the secret
+ :param: data: Dict with data content. Values must be already base64 encoded
+ :param: namespace: Name of the namespace where the secret will be stored
+ :param: secret_type: Type of the secret, e.g., Opaque, kubernetes.io/service-account-token, kubernetes.io/tls
+
+ :return: None
+ """
+ self.logger.info("Enter create_secret function")
+ v1_core = self.clients[CORE_CLIENT]
+ self.logger.info(f"v1_core: {v1_core}")
+ metadata = V1ObjectMeta(name=name, namespace=namespace)
+ self.logger.info(f"metadata: {metadata}")
+ secret = V1Secret(metadata=metadata, data=data, type=secret_type)
+ self.logger.info(f"secret: {secret}")
+ v1_core.create_namespaced_secret(namespace, secret)
+ self.logger.info("Namespaced secret was created")
+
+ async def create_certificate(
+ self,
+ namespace: str,
+ name: str,
+ dns_prefix: str,
+ secret_name: str,
+ usages: list,
+ issuer_name: str,
+ ):
+ """
+ Creates cert-manager certificate object
+
+        :param: namespace: Name of the namespace where the certificate and secret are stored
+        :param: name: Name of the certificate object
+        :param: dns_prefix: Prefix for the dnsNames. It will be prepended to the standard Kubernetes service DNS suffixes
+ :param: secret_name: Name of the secret created by cert-manager
+ :param: usages: List of X.509 key usages
+ :param: issuer_name: Name of the cert-manager's Issuer or ClusterIssuer object
+
+ """
+ certificate_body = {
+ "apiVersion": "cert-manager.io/v1",
+ "kind": "Certificate",
+ "metadata": {"name": name, "namespace": namespace},
+ "spec": {
+ "secretName": secret_name,
+ "privateKey": {
+ "rotationPolicy": "Always",
+ "algorithm": "ECDSA",
+ "size": 256,
+ },
+ "duration": "8760h", # 1 Year
+ "renewBefore": "2208h", # 9 months
+ "subject": {"organizations": ["osm"]},
+ "commonName": "osm",
+ "isCA": False,
+ "usages": usages,
+ "dnsNames": [
+ "{}.{}".format(dns_prefix, namespace),
+ "{}.{}.svc".format(dns_prefix, namespace),
+ "{}.{}.svc.cluster".format(dns_prefix, namespace),
+ "{}.{}.svc.cluster.local".format(dns_prefix, namespace),
+ ],
+ "issuerRef": {"name": issuer_name, "kind": "ClusterIssuer"},
+ },
+ }
+ client = self.clients[CUSTOM_OBJECT_CLIENT]
+ try:
+ client.create_namespaced_custom_object(
+ group="cert-manager.io",
+ plural="certificates",
+ version="v1",
+ body=certificate_body,
+ namespace=namespace,
+ )
+ except ApiException as e:
+ info = json.loads(e.body)
+ if info.get("reason").lower() == "alreadyexists":
+ self.logger.warning("Certificate already exists: {}".format(e))
+ else:
+ raise e
+
+ async def delete_certificate(self, namespace, object_name):
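+        """
+        Deletes cert-manager certificate object
+
+        :param: namespace: Name of the namespace where the certificate is stored
+        :param: object_name: Name of the certificate object
+        """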
+ client = self.clients[CUSTOM_OBJECT_CLIENT]
+ try:
+ client.delete_namespaced_custom_object(
+ group="cert-manager.io",
+ plural="certificates",
+ version="v1",
+ name=object_name,
+ namespace=namespace,
+ )
+ except ApiException as e:
+ info = json.loads(e.body)
+ if info.get("reason").lower() == "notfound":
+ self.logger.warning("Certificate already deleted: {}".format(e))
+ else:
+ raise e
+
+ @retry(
+ attempts=10,
+ delay=1,
+ fallback=Exception("Failed creating the namespace"),
+ )
+ async def create_namespace(self, name: str, labels: dict = None):
+ """
+ Create a namespace
+
+ :param: name: Name of the namespace to be created
+ :param: labels: Dictionary with labels for the new namespace
+
+ """
+ v1_core = self.clients[CORE_CLIENT]
+ metadata = V1ObjectMeta(name=name, labels=labels)
+ namespace = V1Namespace(
+ metadata=metadata,
+ )
+
+ try:
+ v1_core.create_namespace(namespace)
+ self.logger.debug("Namespace created: {}".format(name))
+ except ApiException as e:
+ info = json.loads(e.body)
+ if info.get("reason").lower() == "alreadyexists":
+ self.logger.warning("Namespace already exists: {}".format(e))
+ else:
+ raise e
+
+ @retry(
+ attempts=10,
+ delay=1,
+ fallback=Exception("Failed deleting the namespace"),
+ )
+ async def delete_namespace(self, name: str):
+ """
+ Delete a namespace
+
+ :param: name: Name of the namespace to be deleted
+
+ """
+ try:
+ self.clients[CORE_CLIENT].delete_namespace(name)
+        except ApiException as e:
+            if e.reason == "Not Found":
+                self.logger.warning("Namespace already deleted: {}".format(e))
+            else:
+                raise e
+
+ def get_secrets(
+ self,
+ namespace: str,
+ field_selector: str = None,
+ ) -> typing.List[typing.Dict]:
+ """
+ Get Secret list from a namespace
+
+ :param: namespace: Kubernetes namespace
+ :param: field_selector: Kubernetes field selector
+
+ :return: List of the secrets matching the selectors specified
+ """
+ try:
+ v1_core = self.clients[CORE_CLIENT]
+ secrets = v1_core.list_namespaced_secret(
+ namespace=namespace,
+ field_selector=field_selector,
+ ).items
+ return secrets
+ except ApiException as e:
+ self.logger.error("Error calling get secrets: {}".format(e))
+ raise e
+
+ def create_generic_object(
+ self,
+ api_group: str,
+ api_plural: str,
+ api_version: str,
+ namespace: str,
+ manifest_dict: dict,
+ ):
+ """
+ Creates generic object
+
+ :param: api_group: API Group
+ :param: api_plural: API Plural
+ :param: api_version: API Version
+ :param: namespace: Namespace
+ :param: manifest_dict: Dictionary with the content of the Kubernetes manifest
+
+ """
+ client = self.clients[CUSTOM_OBJECT_CLIENT]
+ try:
+ client.create_namespaced_custom_object(
+ group=api_group,
+ plural=api_plural,
+ version=api_version,
+ body=manifest_dict,
+ namespace=namespace,
+ )
+ except ApiException as e:
+ info = json.loads(e.body)
+ if info.get("reason").lower() == "alreadyexists":
+ self.logger.warning("Object already exists: {}".format(e))
+ else:
+ raise e
+
+ def delete_generic_object(
+ self,
+ api_group: str,
+ api_plural: str,
+ api_version: str,
+ namespace: str,
+ name: str,
+ ):
+ """
+ Deletes generic object
+
+ :param: api_group: API Group
+ :param: api_plural: API Plural
+ :param: api_version: API Version
+ :param: namespace: Namespace
+ :param: name: Name of the object
+
+ """
+ client = self.clients[CUSTOM_OBJECT_CLIENT]
+ try:
+ client.delete_namespaced_custom_object(
+ group=api_group,
+ plural=api_plural,
+ version=api_version,
+ name=name,
+ namespace=namespace,
+ )
+ except ApiException as e:
+ info = json.loads(e.body)
+ if info.get("reason").lower() == "notfound":
+ self.logger.warning("Object already deleted: {}".format(e))
+ else:
+ raise e
+
+ async def get_generic_object(
+ self,
+ api_group: str,
+ api_plural: str,
+ api_version: str,
+ namespace: str,
+ name: str,
+ ):
+ """
+ Gets generic object
+
+ :param: api_group: API Group
+ :param: api_plural: API Plural
+ :param: api_version: API Version
+ :param: namespace: Namespace
+ :param: name: Name of the object
+
+ """
+ client = self.clients[CUSTOM_OBJECT_CLIENT]
+ try:
+ object_dict = client.list_namespaced_custom_object(
+ group=api_group,
+ plural=api_plural,
+ version=api_version,
+ namespace=namespace,
+ field_selector=f"metadata.name={name}",
+ )
+ if len(object_dict.get("items")) == 0:
+ return None
+ return object_dict.get("items")[0]
+ except ApiException as e:
+ info = json.loads(e.body)
+ if info.get("reason").lower() == "notfound":
+ self.logger.warning("Cannot get custom object: {}".format(e))
+ else:
+ raise e
+
+ async def list_generic_object(
+ self,
+ api_group: str,
+ api_plural: str,
+ api_version: str,
+ namespace: str,
+ ):
+ """
+ Lists all generic objects of the requested API group
+
+ :param: api_group: API Group
+ :param: api_plural: API Plural
+ :param: api_version: API Version
+ :param: namespace: Namespace
+
+ """
+ client = self.clients[CUSTOM_OBJECT_CLIENT]
+ try:
+ object_dict = client.list_namespaced_custom_object(
+ group=api_group,
+ plural=api_plural,
+ version=api_version,
+ namespace=namespace,
+ )
+ self.logger.debug(f"Object-list: {object_dict.get('items')}")
+ return object_dict.get("items")
+ except ApiException as e:
+ info = json.loads(e.body)
+ if info.get("reason").lower() == "notfound":
+ self.logger.warning(
+ "Cannot retrieve list of custom objects: {}".format(e)
+ )
+ else:
+ raise e
+
+ @retry(
+ attempts=10,
+ delay=1,
+ fallback=Exception("Failed creating the secret"),
+ )
+ async def create_secret_string(
+ self, name: str, string_data: str, namespace: str, secret_type: str
+ ):
+ """
+        Create a secret from string data
+
+ :param: name: Name of the secret
+ :param: string_data: String with data content
+ :param: namespace: Name of the namespace where the secret will be stored
+ :param: secret_type: Type of the secret, e.g., Opaque, kubernetes.io/service-account-token, kubernetes.io/tls
+
+ :return: None
+ """
+ v1_core = self.clients[CORE_CLIENT]
+ metadata = V1ObjectMeta(name=name, namespace=namespace)
+ secret = V1Secret(metadata=metadata, string_data=string_data, type=secret_type)
+ v1_core.create_namespaced_secret(namespace, secret)
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import yaml
+from osm_lcm.lcm_utils import LcmException
+
+
+async def create_oka(self, op_id, op_params, content):
+ self.logger.info("Create OKA workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ workflow_template = "launcher-create-oka.j2"
+ workflow_name = f"create-oka-{content['_id']}"
+
+ # Additional params for the workflow
+ oka_name = content["git_name"].lower()
+ oka_type = "infra-controllers"
+ osm_project_name = "osm_admin" # TODO: get project name from content
+
+ # Get the OKA package
+ oka_fs_info = content["_admin"]["storage"]
+ oka_folder = f"{oka_fs_info['path']}{oka_fs_info['folder']}"
+ oka_filename = oka_fs_info["zipfile"]
+ self.fs.sync(oka_folder)
+ if not self.fs.file_exists(f"{oka_folder}/{oka_filename}"):
+        raise LcmException(
+            message=f"Unable to find the OKA package {oka_folder}/{oka_filename}",
+            bad_args=["oka_path"],
+        )
+
+ # Create temporary volume for the OKA package and copy the content
+ temp_volume_name = f"temp-pvc-oka-{op_id}"
+ await self._kubectl.create_pvc_with_content(
+ name=temp_volume_name,
+ namespace="osm-workflows",
+ src_folder=oka_folder,
+ filename=oka_filename,
+ )
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ oka_name=oka_name,
+ oka_type=oka_type,
+ osm_project_name=osm_project_name,
+ temp_volume_name=temp_volume_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.info(manifest)
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def update_oka(self, op_id, op_params, content):
+ self.logger.info("Update OKA workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ workflow_template = "launcher-update-oka.j2"
+ workflow_name = f"update-oka-{content['_id']}"
+
+ # Additional params for the workflow
+ oka_name = content["git_name"].lower()
+ oka_type = "infra-controllers"
+ osm_project_name = "osm_admin" # TODO: get project name from content
+
+ # Get the OKA package
+ oka_fs_info = content["_admin"]["storage"]
+    oka_folder = f"{oka_fs_info['path']}{oka_fs_info['folder']}"
+    oka_filename = oka_fs_info["zipfile"]
+    self.fs.sync(oka_folder)
+    if not self.fs.file_exists(f"{oka_folder}/{oka_filename}"):
+        raise LcmException(
+            message=f"Unable to find the OKA package {oka_folder}/{oka_filename}",
+            bad_args=["oka_path"],
+        )
+
+ # Create temporary volume for the OKA package and copy the content
+ temp_volume_name = f"temp-pvc-oka-{op_id}"
+ await self._kubectl.create_pvc_with_content(
+ name=temp_volume_name,
+ namespace="osm-workflows",
+ src_folder=oka_folder,
+ filename=oka_filename,
+ )
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ oka_name=oka_name,
+ oka_type=oka_type,
+ osm_project_name=osm_project_name,
+ temp_volume_name=temp_volume_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.info(manifest)
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def delete_oka(self, op_id, op_params, content):
+ self.logger.info("Delete OKA workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ workflow_template = "launcher-delete-oka.j2"
+ workflow_name = f"delete-oka-{content['_id']}"
+
+ # Additional params for the workflow
+ oka_name = content["git_name"].lower()
+ oka_type = "infra-controllers"
+ osm_project_name = "osm_admin" # TODO: get project name from content
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ oka_name=oka_name,
+ oka_type=oka_type,
+ osm_project_name=osm_project_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.info(manifest)
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def check_create_oka(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_update_oka(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_delete_oka(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import yaml
+
+
+async def create_profile(self, op_id, op_params, content):
+ self.logger.info("Create profile workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ workflow_template = "launcher-create-profile.j2"
+ workflow_name = f"create-profile-{content['_id']}"
+
+ # Additional params for the workflow
+ profile_name = content["git_name"].lower()
+ profile_type = content["profile_type"]
+ osm_project_name = "osm_admin" # TODO: get project name from content
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ profile_name=profile_name,
+ profile_type=profile_type,
+ osm_project_name=osm_project_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.info(manifest)
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def delete_profile(self, op_id, op_params, content):
+ self.logger.info("Delete profile workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ workflow_template = "launcher-delete-profile.j2"
+ workflow_name = f"delete-profile-{content['_id']}"
+
+ # Additional params for the workflow
+ profile_name = content["git_name"].lower()
+ profile_type = content["profile_type"]
+ osm_project_name = "osm_admin" # TODO: get project name from content
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ profile_name=profile_name,
+ profile_type=profile_type,
+ osm_project_name=osm_project_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.info(manifest)
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def attach_profile_to_cluster(self, op_id, op_params, content):
+ self.logger.info("Attach profile to cluster workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ profile = content["profile"]
+ cluster = content["cluster"]
+ workflow_template = "launcher-attach-profile.j2"
+ workflow_name = f"attach-profile-{op_id}"
+
+ # Additional params for the workflow
+ profile_name = profile["git_name"].lower()
+ profile_type = profile["profile_type"]
+ cluster_kustomization_name = cluster["git_name"].lower()
+ osm_project_name = "osm_admin" # TODO: get project name from content
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ profile_name=profile_name,
+ profile_type=profile_type,
+ cluster_kustomization_name=cluster_kustomization_name,
+ osm_project_name=osm_project_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.info(manifest)
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def detach_profile_from_cluster(self, op_id, op_params, content):
+ self.logger.info("Detach profile to cluster workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ profile = content["profile"]
+ cluster = content["cluster"]
+ workflow_template = "launcher-detach-profile.j2"
+ workflow_name = f"detach-profile-{op_id}"
+
+    # Additional params for the workflow
+ profile_name = profile["git_name"].lower()
+ profile_type = profile["profile_type"]
+ cluster_kustomization_name = cluster["git_name"].lower()
+ osm_project_name = "osm_admin" # TODO: get project name from content
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ profile_name=profile_name,
+ profile_type=profile_type,
+ cluster_kustomization_name=cluster_kustomization_name,
+ osm_project_name=osm_project_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.info(manifest)
+
+ # Submit workflow
+ self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def check_create_profile(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_delete_profile(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_attach_profile_to_cluster(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_detach_profile_from_cluster(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+from jinja2 import Environment, PackageLoader, select_autoescape
+import json
+import yaml
+
+
+def render_jinja_template(self, template_file, output_file=None, **kwargs):
+ """Renders a jinja template with the provided values
+
+ Args:
+ template_file: Jinja template to be rendered
+ output_file: Output file
+ kwargs: (key,value) pairs to be replaced in the template
+
+ Returns:
+ content: The content of the rendered template
+ """
+
+ # Load the template from file
+ # loader = FileSystemLoader("osm_lcm/odu_libs/templates")
+ loader = PackageLoader("osm_lcm", "odu_libs/templates")
+ self.logger.debug(f"Loader: {loader}")
+ env = Environment(loader=loader, autoescape=select_autoescape())
+ self.logger.debug(f"Env: {env}")
+
+ template_list = env.list_templates()
+ self.logger.debug(f"Template list: {template_list}")
+ template = env.get_template(template_file)
+ self.logger.debug(f"Template: {template}")
+
+ # Replace kwargs
+ self.logger.debug(f"Kwargs: {kwargs}")
+ content = template.render(kwargs)
+ if output_file:
+ with open(output_file, "w") as c_file:
+ c_file.write(content)
+ return content
+
+
+def render_yaml_template(self, template_file, output_file=None, **kwargs):
+ """Renders a YAML template with the provided values
+
+ Args:
+ template_file: Yaml template to be rendered
+ output_file: Output file
+ kwargs: (key,value) pairs to be replaced in the template
+
+ Returns:
+ content: The content of the rendered template
+ """
+
+    def dump_yaml_json(document, to_json=False):
+        # Serialize the document as JSON or YAML and return it as a string
+        if to_json:
+            return json.dumps(document, indent=4)
+        return yaml.safe_dump(
+            document, indent=4, default_flow_style=False, sort_keys=False
+        )
+
+    # Load template in dictionary
+    with open(template_file, "r") as t_file:
+        content_dict = yaml.safe_load(t_file.read())
+    # Replace kwargs
+    self.logger.debug(f"Kwargs: {kwargs}")
+    for k, v in kwargs.items():
+        content_dict[k] = v
+
+    content = dump_yaml_json(content_dict)
+ if output_file:
+ with open(output_file, "w") as c_file:
+ c_file.write(content)
+ return content
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # Specific parameters
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: cluster_kustomization_name
+ value: "{{ cluster_kustomization_name }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-attach-profile-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # Specific parameters - Source and destination KSU
+ ## Source KSU:
+ - name: source_ksu_name
+ value: "jenkins"
+ - name: source_profile_name
+ value: "myakscluster01"
+ - name: source_profile_type
+ value: "applications"
+ - name: source_project_name
+ value: "osm_admin"
+ ## Destination KSU:
+ ## - If any of the destination parameters are not specified, it will assume
+ ## they are the same as in source.
+ ## - It will reject if all are empty or equal to source, to avoid cloning a KSU over itself
+ - name: destination_ksu_name
+ value: ""
+ - name: destination_profile_name
+ value: "myprofile"
+ - name: destination_profile_type
+ value: "applications"
+ - name: destination_project_name
+ value: ""
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-clone-ksu-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters - AKS cluster
+ - name: cluster_kustomization_name
+ value: {{ cluster_kustomization_name }}
+ - name: cluster_name
+ value: {{ cluster_name }}
+ - name: vm_size
+ value: {{ vm_size }}
+ - name: node_count
+ value: "{{ node_count }}"
+ - name: cluster_location
+ value: {{ cluster_location }}
+ - name: rg_name
+ value: {{ rg_name }}
+ - name: k8s_version
+ value: "'{{ k8s_version }}'"
+ - name: providerconfig_name
+ value: {{ providerconfig_name }}
+
+ # Specific parameters - Bootstrap
+ - name: public_key_mgmt
+ value: "{{ public_key_mgmt }}"
+ - name: public_key_new_cluster
+ value: "{{ public_key_new_cluster }}"
+ - name: secret_name_private_age_key_for_new_cluster
+ value: "{{ secret_name_private_key_new_cluster }}"
+ - name: key_name_in_secret
+ value: "agekey"
+ - name: fleet_repo_url
+ value: {{ git_fleet_url }}
+ - name: sw_catalogs_repo_url
+ value: {{ git_sw_catalogs_url }}
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-aks-cluster-and-bootstrap-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters - Generic cluster creation
+ - name: cluster_kustomization_name
+ value: {{ cluster_kustomization_name }}
+ - name: cluster_name
+ value: {{ cluster_name }}
+ ## As of today, one among `aks`, `eks` or `gke`:
+ - name: cluster_type
+ value: {{ cluster_type }}
+ - name: vm_size
+ value: {{ vm_size }}
+ - name: node_count
+ value: "{{ node_count }}"
+ - name: cluster_location
+ value: {{ cluster_location }}
+ - name: k8s_version
+ value: "'{{ k8s_version }}'"
+ - name: providerconfig_name
+ value: {{ providerconfig_name }}
+
+ # Specific parameters - Bootstrap and credentials
+ - name: public_key_mgmt
+ value: "{{ public_key_mgmt }}"
+ - name: public_key_new_cluster
+ value: "{{ public_key_new_cluster }}"
+ - name: secret_name_private_age_key_for_new_cluster
+ value: "{{ secret_name_private_key_new_cluster }}"
+ - name: key_name_in_secret
+ value: "agekey"
+ - name: mgmt_project_name
+ value: "{{ osm_project_name }}"
+
+ # Specific parameters - AKS only
+ - name: rg_name
+ value: {{ rg_name }}
+
+ # Specific parameters - GKE only
+ - name: preemptible_nodes
+ value: "false"
+
+ # Advanced parameters - Recommended to keep defaults
+ - name: skip_bootstrap
+ value: "false"
+ - name: mgmt_cluster_name
+ value: "_management"
+ - name: base_templates_path
+ value: "cloud-resources"
+ - name: cloned_fleet_folder_name
+ value: "fleet-osm"
+ - name: cloned_sw_catalogs_folder_name
+ value: "sw-catalogs-osm"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-crossplane-cluster-and-bootstrap-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # HelmRelease generation
+ - name: helmrelease_name
+ value: "jenkins"
+ - name: chart_name
+ value: "jenkins"
+ - name: chart_version
+ value: '13.4.x'
+ - name: target_ns
+ value: "jenkins"
+ - name: create_ns
+ value: "true"
+ # Repo source generation
+ - name: is_preexisting_repo
+ value: "false"
+ - name: helmrepo_name
+ value: "bitnamicharts"
+ - name: helmrepo_url
+ value: oci://registry-1.docker.io/bitnamicharts
+ - name: helmrepo_ns
+ value: "jenkins"
+ - name: helmrepo_secret_ref
+ value: ""
+ # HelmRelease inline values (if any)
+ - name: inline_values
+ # Install some Jenkins plugins:
+ value: |
+ plugins:
+ - kubernetes:3852.v41ea_166a_ed1b_
+ - workflow-aggregator:590.v6a_d052e5a_a_b_5
+ - git:4.13.0
+ - configuration-as-code:1569.vb_72405b_80249
+ # overridePlugins: true
+ # Secret reference and generation (if required)
+ - name: is_preexisting_secret
+ value: "false"
+ - name: values_secret_name
+ value: "mysecret"
+ - name: secret_key
+ value: "values.yaml"
+ - name: age_public_key
+ value: "age1s236gmpr7myjjyqfrl6hwz0npqjgxa9t6tjj46yq28j2c4nk653saqreav"
+ - name: reference_secret_for_values
+ value: "jenkins-credentials"
+ - name: reference_key_for_values
+ value: "creds"
+ # ConfigMap reference and generation (if required)
+ - name: is_preexisting_cm
+ value: "false"
+ - name: values_cm_name
+ value: ""
+ - name: cm_key
+ value: "values.yaml"
+ - name: cm_values
+ value: ""
+ # value: |
+ # cm-key1: cm-value1
+ # cm-key2: cm-value2
+ # KSU rendering
+ - name: ksu_name
+ value: "jenkins"
+ - name: profile_name
+ value: "myakscluster01"
+ - name: profile_type
+ value: "applications"
+ - name: project_name
+ value: "osm_admin"
+      # Should the KSU folder be synchronized with the results of the rendering?
+      ## If left empty, it does not synchronize, so that more than one Helm chart can
+      ## easily be accumulated into the same KSU if desired.
+      ## In this example, we synchronize explicitly to make the example easier to follow.
+ - name: sync
+ value: "true"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-ksu-generated-hr-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+ # Specific parameters - Base KSU generation from template
+ ## Relative path from "SW Catalogs" repo root
+ - name: templates_path
+ value: "{{ templates_path }}"
+ ## Should substitute environment variables in the template?
+ - name: substitute_environment
+ value: "{{ substitute_environment }}"
+ ## Filter for substitution of environment variables
+ - name: substitution_filter
+ value: "{{ substitution_filter }}"
+ ## Custom environment variables (formatted as .env), to be used for template parametrization
+ - name: custom_env_vars
+ value: "{{ custom_env_vars }}"
+ # value: |
+ # KEY1=value1
+ # KEY2=value2
+ # Specific parameters - Patch HelmRelease in KSU with inline values
+ - name: kustomization_name
+ value: "{{ kustomization_name }}"
+ - name: helmrelease_name
+ value: "{{ helmrelease_name }}"
+ - name: inline_values
+ value: {{ inline_values }}
+ # Specific parameters - Secret generation
+ - name: is_preexisting_secret
+ value: "{{ is_preexisting_secret }}"
+ - name: target_ns
+ value: "{{ target_ns }}"
+ - name: age_public_key
+ value: "{{ age_public_key }}"
+ - name: values_secret_name
+ value: "{{ values_secret_name }}"
+ - name: secret_key
+ value: "values.yaml"
+ ################################################################
+ # This temporary secret (in the example, `jenkins-credentials`) should exist already
+ # in the `osm-workflows` namespace and contain the desired Jenkins credentials in
+ # a well-known key (in the example, `creds`).
+ #
+ # For instance:
+ #
+ # creds: |
+ # jenkinsUser: admin
+ # jenkinsPassword: myJ3nk1n2P2ssw0rd
+ - name: reference_secret_for_values
+ value: "{{ reference_secret_for_values }}"
+ - name: reference_key_for_values
+ value: "{{ reference_key_for_values }}"
+ # Specific parameters - Configmap generation
+ - name: is_preexisting_cm
+ value: "{is_preexisting_cm}"
+ - name: values_cm_name
+ value: "{{ values_configmap_name }}"
+ - name: cm_key
+ value: "values.yaml"
+ - name: cm_values
+ value: {{ cm_values }}
+ # value: |
+ # cm-key1: cm-value1
+ # cm-key2: cm-value2
+ # Specific parameters - KSU rendering
+ - name: ksu_name
+ value: "{{ ksu_name }}"
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ - name: sync
+ value: "{{ sync }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-ksu-hr-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+ # Temporary volume with OKA contents
+ - name: temp_volume_name
+ value: "{{ temp_volume_name }}"
+ # Specific parameters - OKA
+ - name: oka_name
+ value: "{{ oka_name }}"
+ ## Choose among `infra-controllers`, `infra-configs`, `cloud-resources`, `apps`:
+ - name: oka_type
+ value: "{{ oka_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ - name: tarball_file
+ value: "true"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-oka-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # Specific parameters
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-profile-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters
+ - name: providerconfig_name
+ value: "{{ providerconfig_name }}"
+ ## As of today, one among `azure`, `aws` or `gcp`
+ - name: provider_type
+ value: "{{ provider_type }}"
+ ## Final secret to reference from the `ProviderConfig`
+ - name: cred_secret_name
+ value: "{{ cred_secret_name }}"
+ ## Temporary secret with secret contents for the workflow
+ ## - If `temp_cred_secret_name` is empty, assumes that the final secret already exists
+ - name: temp_cred_secret_name
+ value: "{{ temp_cred_secret_name }}"
+ - name: temp_cred_secret_key
+ value: "creds"
+ - name: age_public_key_mgmt
+ value: "{{ public_key_mgmt }}"
+ - name: osm_project_name
+ value: "{{ osm_project_name }}"
+ ## Specific parameters - GCP only
+ - name: target_gcp_project
+ value: "{{ target_gcp_project }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 6000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 6000 # Time to live after workflow is successful
+ secondsAfterFailure: 9000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-create-crossplane-providerconfig
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters
+ - name: cluster_kustomization_name
+ value: {{ cluster_kustomization_name }}
+ - name: project_name
+ value: "{{ osm_project_name }}"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 1000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 500 # Time to live after workflow is successful
+ secondsAfterFailure: 500 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-delete-cluster-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+ # Specific parameters - KSU id
+ - name: ksu_name
+ value: "{{ ksu_name }}"
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-delete-ksu-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+ # Specific parameters - OKA
+ - name: oka_name
+ value: "{{ oka_name }}"
+ ## Choose among `infra-controllers`, `infra-configs`, `cloud-resources`, `apps`:
+ - name: oka_type
+ value: "{{ oka_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-delete-oka-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # Specific parameters
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-delete-profile-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters
+ - name: providerconfig_name
+ value: "{{ providerconfig_name }}"
+ ## As of today, one among `azure`, `aws` or `gcp`
+ - name: provider_type
+ value: "{{ provider_type }}"
+ - name: osm_project_name
+ value: "{{ osm_project_name }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 6000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 6000 # Time to live after workflow is successful
+ secondsAfterFailure: 9000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-delete-crossplane-providerconfig
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # Specific parameters
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: cluster_kustomization_name
+ value: "{{ cluster_kustomization_name }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-detach-profile-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters - AKS cluster
+ - name: cluster_name
+ value: myakscluster01
+ - name: vm_size
+ value: Standard_D2_v2
+ - name: node_count
+ value: "1"
+ - name: cluster_location
+ value: "North Europe"
+ - name: rg_name
+ value: CloudNative-OSM
+ - name: k8s_version
+ value: "'1.28'"
+ - name: providerconfig_name
+ value: default
+ - name: cluster_kustomization_name
+ value: myakscluster01
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-update-aks-cluster-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters - Generic cluster creation
+ - name: cluster_kustomization_name
+ value: {{ cluster_kustomization_name }}
+ - name: cluster_name
+ value: {{ cluster_name }}
+ ## As of today, one among `aks`, `eks` or `gke`:
+ - name: cluster_type
+ value: {{ cluster_type }}
+ - name: providerconfig_name
+ value: {{ providerconfig_name }}
+ - name: vm_size
+ value: {{ vm_size }}
+ - name: node_count
+ value: "{{ node_count }}"
+ - name: cluster_location
+ value: {{ cluster_location }}
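+ ## Nested quotes keep the Kubernetes version as a literal string (e.g. "1.28") once rendered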
+ - name: k8s_version
+ value: "'{{ k8s_version }}'"
+
+ # Specific parameters - Bootstrap and credentials
+ - name: public_key_mgmt
+ value: "{{ public_key_mgmt }}"
+ - name: public_key_new_cluster
+ value: "{{ public_key_new_cluster }}"
+ - name: secret_name_private_age_key_for_new_cluster
+ value: "{{ secret_name_private_key_new_cluster }}"
+ - name: key_name_in_secret
+ value: "agekey"
+ - name: mgmt_project_name
+ value: "{{ osm_project_name }}"
+
+ # Specific parameters - AKS only
+ - name: rg_name
+ value: CloudNative-OSM
+
+ # Specific parameters - GKE only
+ - name: preemptible_nodes
+ value: "false"
+
+ # Advanced parameters - Recommended to keep defaults
+ - name: mgmt_cluster_name
+ value: "_management"
+ - name: base_templates_path
+ value: "cloud-resources"
+ - name: cloned_fleet_folder_name
+ value: "fleet-osm"
+ - name: cloned_sw_catalogs_folder_name
+ value: "sw-catalogs-osm"
+
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 2000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 1000 # Time to live after workflow is successful
+ secondsAfterFailure: 1000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-update-crossplane-cluster-and-bootstrap-wft
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # HelmRelease generation
+ - name: helmrelease_name
+ value: "jenkins"
+ - name: chart_name
+ value: "jenkins"
+ - name: chart_version
+ value: '13.4.x'
+ - name: target_ns
+ value: "jenkins"
+ - name: create_ns
+ value: "true"
+ # Repo source generation
+ - name: is_preexisting_repo
+ value: "false"
+ - name: helmrepo_name
+ value: "bitnamicharts"
+ - name: helmrepo_url
+ value: oci://registry-1.docker.io/bitnamicharts
+ - name: helmrepo_ns
+ value: "jenkins"
+ - name: helmrepo_secret_ref
+ value: ""
+ # HelmRelease inline values (if any)
+ - name: inline_values
+ # Install some Jenkins plugins:
+ value: |
+ plugins:
+ - kubernetes:3852.v41ea_166a_ed1b_
+ - workflow-aggregator:590.v6a_d052e5a_a_b_5
+ - git:4.13.0
+ - configuration-as-code:1569.vb_72405b_80249
+ # overridePlugins: true
+ # Secret reference and generation (if required)
+ - name: is_preexisting_secret
+ value: "false"
+ - name: values_secret_name
+ value: "mysecret"
+ - name: secret_key
+ value: "values.yaml"
+ - name: age_public_key
+ value: "age1s236gmpr7myjjyqfrl6hwz0npqjgxa9t6tjj46yq28j2c4nk653saqreav"
+ - name: reference_secret_for_values
+ value: "jenkins-credentials"
+ - name: reference_key_for_values
+ value: "creds"
+ # ConfigMap reference and generation (if required)
+ - name: is_preexisting_cm
+ value: "false"
+ - name: values_cm_name
+ value: ""
+ - name: cm_key
+ value: "values.yaml"
+ - name: cm_values
+ value: ""
+ # value: |
+ # cm-key1: cm-value1
+ # cm-key2: cm-value2
+ # KSU rendering
+ - name: ksu_name
+ value: "jenkins"
+ - name: profile_name
+ value: "myakscluster01"
+ - name: profile_type
+ value: "applications"
+ - name: project_name
+ value: "osm_admin"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-update-ksu-generated-hr-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+ # Specific parameters - Base KSU generation from template
+ ## Relative path from "SW Catalogs" repo root
+ - name: templates_path
+ value: "{{ templates_path }}"
+ ## Should substitute environment variables in the template?
+ - name: substitute_environment
+ value: "{{ substitute_environment }}"
+ ## Filter for substitution of environment variables
+ - name: substitution_filter
+ value: "{{ substitution_filter }}"
+ ## Custom environment variables (formatted as .env), to be used for template parametrization
+ - name: custom_env_vars
+ value: "{custom_env_vars}"
+ # Specific parameters - Patch HelmRelease in KSU with inline values
+ - name: kustomization_name
+ value: "{{ kustomization_name }}"
+ - name: helmrelease_name
+ value: "{{ helmrelease_name }}"
+ - name: inline_values
+ value: {{ inline_values }}
+ # Specific parameters - Secret generation
+ - name: is_preexisting_secret
+ value: "{{ is_preexisting_secret }}"
+ - name: target_ns
+ value: "{{ target_ns }}"
+ - name: age_public_key
+ value: "{{ age_public_key }}"
+ - name: values_secret_name
+ value: "{{ values_secret_name }}"
+ - name: secret_key
+ value: "values.yaml"
+ - name: reference_secret_for_values
+ value: "{{ reference_secret_for_values }}"
+ - name: reference_key_for_values
+ value: "{{ reference_key_for_values }}"
+ # Specific parameters - Configmap generation
+ - name: is_preexisting_cm
+ value: "false"
+ - name: values_cm_name
+ value: "{{ values_configmap_name }}"
+ - name: cm_key
+ value: "values.yaml"
+ - name: cm_values
+ value: "{{ cm_values }}"
+ # Specific parameters - KSU rendering
+ - name: ksu_name
+ value: "{{ ksu_name }}"
+ - name: profile_name
+ value: "{{ profile_name }}"
+ - name: profile_type
+ value: "{{ profile_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-update-ksu-hr-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+ # Temporary volume with OKA contents
+ - name: temp_volume_name
+ value: "{{ temp_volume_name }}"
+ # Specific parameters - OKA
+ - name: oka_name
+ value: "{{ oka_name }}"
+ ## Choose among `infra-controllers`, `infra-configs`, `cloud-resources`, `apps`:
+ - name: oka_type
+ value: "{{ oka_type }}"
+ - name: project_name
+ value: "{{ osm_project_name }}"
+ - name: tarball_file
+ value: "true"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 600 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 600 # Time to live after workflow is successful
+ secondsAfterFailure: 900 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-update-oka-wtf
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: {{ workflow_name }}
+spec:
+ arguments:
+ parameters:
+ # Fleet repo
+ - name: git_fleet_url
+ value: "{{ git_fleet_url }}"
+ - name: fleet_destination_folder
+ value: "/fleet/fleet-osm"
+ - name: git_fleet_cred_secret
+ value: fleet-repo
+ # SW-Catalogs repo
+ - name: git_sw_catalogs_url
+ value: "{{ git_sw_catalogs_url }}"
+ - name: sw_catalogs_destination_folder
+ value: "/sw-catalogs/sw-catalogs-osm"
+ - name: git_sw_catalogs_cred_secret
+ value: sw-catalogs
+
+ # Specific parameters
+ - name: providerconfig_name
+ value: "{{ providerconfig_name }}"
+ ## As of today, one among `azure`, `aws` or `gcp`
+ - name: provider_type
+ value: "{{ provider_type }}"
+ ## Final secret to reference from the `ProviderConfig`
+ - name: cred_secret_name
+ value: "{{ cred_secret_name }}"
+ ## Temporary secret with secret contents for the workflow
+ ## - If `temp_cred_secret_name` is empty, assumes that the final secret already exists
+ - name: temp_cred_secret_name
+ value: "{{ temp_cred_secret_name }}"
+ - name: temp_cred_secret_key
+ value: "creds"
+ - name: age_public_key_mgmt
+ value: "{{ public_key_mgmt }}"
+ - name: osm_project_name
+ value: "{{ osm_project_name }}"
+ ## Specific parameters - GCP only
+ - name: target_gcp_project
+ value: "{{ target_gcp_project }}"
+ # Debug/dry run?
+ - name: debug
+ value: "{{ workflow_debug }}"
+ - name: dry_run
+ value: "{{ workflow_dry_run }}"
+
+ # Cleanup policy
+ ttlStrategy:
+ secondsAfterCompletion: 6000 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished
+ secondsAfterSuccess: 6000 # Time to live after workflow is successful
+ secondsAfterFailure: 9000 # Time to live after workflow fails
+
+ workflowTemplateRef:
+ name: full-update-crossplane-providerconfig
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import yaml
+import json
+
+
+async def create_cloud_credentials(self, op_id, op_params, content):
+ self.logger.info("Create cloud_credentials workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ workflow_template = "launcher-create-providerconfig.j2"
+ workflow_name = f"create-providerconfig-{content['_id']}"
+ # vim_name = content["name"].lower()
+ vim_name = content.get("git_name", content["name"]).lower()
+ # workflow_name = f"{op_id}-create-credentials-{vim_name}"
+
+ # Test kubectl connection
+ self.logger.debug(self._kubectl._get_kubectl_version())
+
+ # Create secret with creds
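+ # The VIM credentials are passed to the workflow through a temporary secret in the osm-workflows namespace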
+ secret_name = workflow_name
+ secret_namespace = "osm-workflows"
+ secret_key = "creds"
+ secret_value = json.dumps(content["config"]["credentials"], indent=2)
+ await self.create_secret(
+ secret_name,
+ secret_namespace,
+ secret_key,
+ secret_value,
+ )
+
+ # Additional params for the workflow
+ providerconfig_name = f"{vim_name}-config"
+ provider_type = content["vim_type"]
+ osm_project_name = "osm_admin" # TODO: get project name from content
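+ # GCP provider configs also need the target project; the VIM tenant name is reused for it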
+ if provider_type == "gcp":
+ vim_tenant = content["vim_tenant_name"]
+ else:
+ vim_tenant = ""
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ providerconfig_name=providerconfig_name,
+ provider_type=provider_type,
+ cred_secret_name=vim_name,
+ temp_cred_secret_name=secret_name,
+ public_key_mgmt=self._pubkey,
+ osm_project_name=osm_project_name,
+ target_gcp_project=vim_tenant,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.debug(f"Workflow manifest: {manifest}")
+
+ # Submit workflow
+ await self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def delete_cloud_credentials(self, op_id, op_params, content):
+ self.logger.info("Delete cloud_credentials workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ workflow_template = "launcher-delete-providerconfig.j2"
+ workflow_name = f"delete-providerconfig-{content['_id']}"
+ # vim_name = content["name"].lower()
+ vim_name = content.get("git_name", content["name"]).lower()
+ # workflow_name = f"{op_id}-delete-credentials-{vim_name}"
+
+ # Additional params for the workflow
+ providerconfig_name = f"{vim_name}-config"
+ provider_type = content["vim_type"]
+ osm_project_name = "osm_admin" # TODO: get project name from content
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ providerconfig_name=providerconfig_name,
+ provider_type=provider_type,
+ osm_project_name=osm_project_name,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.debug(f"Workflow manifest: {manifest}")
+
+ # Submit workflow
+ await self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def update_cloud_credentials(self, op_id, op_params, content):
+ self.logger.info("Update cloud_credentials workflow Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+
+ workflow_template = "launcher-update-providerconfig.j2"
+ workflow_name = f"update-providerconfig-{content['_id']}"
+ # vim_name = content["name"].lower()
+ vim_name = content.get("git_name", content["name"]).lower()
+ # workflow_name = f"{op_id}-update-credentials-{vim_name}"
+
+ # Create secret with creds
+ secret_name = workflow_name
+ secret_namespace = "osm-workflows"
+ secret_key = "creds"
+ secret_value = json.dumps(content["config"]["credentials"], indent=2)
+ await self.create_secret(
+ secret_name,
+ secret_namespace,
+ secret_key,
+ secret_value,
+ )
+ # Additional params for the workflow
+ providerconfig_name = f"{vim_name}-config"
+ provider_type = content["vim_type"]
+ osm_project_name = "osm_admin" # TODO: get project name from content
+ if provider_type == "gcp":
+ vim_tenant = content["vim_tenant_name"]
+ else:
+ vim_tenant = ""
+
+ # Render workflow
+ manifest = self.render_jinja_template(
+ workflow_template,
+ output_file=None,
+ workflow_name=workflow_name,
+ git_fleet_url=f"{self._repo_base_url}/{self._repo_user}/fleet-osm.git",
+ git_sw_catalogs_url=f"{self._repo_base_url}/{self._repo_user}/sw-catalogs-osm.git",
+ providerconfig_name=providerconfig_name,
+ provider_type=provider_type,
+ cred_secret_name=vim_name,
+ temp_cred_secret_name=secret_name,
+ public_key_mgmt=self._pubkey,
+ osm_project_name=osm_project_name,
+ target_gcp_project=vim_tenant,
+ workflow_debug=self._workflow_debug,
+ workflow_dry_run=self._workflow_dry_run,
+ )
+ self.logger.debug(f"Workflow manifest: {manifest}")
+
+ # Submit workflow
+ await self._kubectl.create_generic_object(
+ namespace="osm-workflows",
+ manifest_dict=yaml.safe_load(manifest),
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ )
+ return workflow_name
+
+
+async def check_create_cloud_credentials(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_update_cloud_credentials(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
+
+
+async def check_delete_cloud_credentials(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return True, "OK"
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+
+
+import asyncio
+from time import time
+
+
+async def check_workflow_status(self, workflow_name):
+ self.logger.info(f"Async check workflow status Enter: {workflow_name}")
+ start_time = time()
+ timeout = 300
+ retry_time = 15
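+ # Poll the Workflow object until its "Completed" condition turns True or the timeout expires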
+ # TODO: Maybe it is better to count retries instead of measuring elapsed time
+ # retries = 0
+ # total_retries = int(timeout/retry_time)
+ while time() <= start_time + timeout:
+ # workflow_list = await self._kubectl.list_generic_object(
+ # api_group="argoproj.io",
+ # api_plural="workflows",
+ # api_version="v1alpha1",
+ # namespace="osm-workflows",
+ # )
+ # self.logger.info(f"Workflow_list: { workflow_list }")
+ # kubectl get workflow/${WORKFLOW_NAME} -n osm-workflows -o jsonpath='{.status.conditions}' | jq -r '.[] | select(.type=="Completed").status'
+ workflow = await self._kubectl.get_generic_object(
+ api_group="argoproj.io",
+ api_plural="workflows",
+ api_version="v1alpha1",
+ namespace="osm-workflows",
+ name=workflow_name,
+ )
+ # self.logger.info(f"Workflow: {workflow}")
+ # self.logger.info(f"Workflow status: {workflow.get('status')}")
+ conditions = workflow.get("status", {}).get("conditions", [])
+ self.logger.info(f"Workflow status conditions: {conditions}")
+ result = next((item for item in conditions if item["type"] == "Completed"), {})
+ if result.get("status", "False") == "True":
+ self.logger.info(
+ f"Workflow {workflow_name} completed in {time() - start_time} seconds"
+ )
+ return True, "COMPLETED"
+ await asyncio.sleep(retry_time)
+ return False, "Workflow {workflow_name} did not complete in {timeout} seconds"
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
+#######################################################################################
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
import logging
from osm_lcm.lcm_utils import LcmBase
+# from n2vc import kubectl
+from osm_lcm.odu_libs import kubectl
+
class OduWorkflow(LcmBase):
def __init__(self, msg, lcm_tasks, config):
self.lcm_tasks = lcm_tasks
self.logger.info("Msg: {} lcm_tasks: {} ".format(msg, lcm_tasks))
+ # self._kubeconfig = kubeconfig # TODO: get it from config
+ self._kubeconfig = "/etc/osm/mgmtcluster-kubeconfig.yaml"
+ self._kubectl = kubectl.Kubectl(config_file=self._kubeconfig)
+ # self._repo_base_url = repo_base_url
+ # self._repo_user = repo_user
+ # self._pubkey = pubkey
+ self._repo_base_url = (
+ "http://git.172.21.249.24.nip.io" # TODO: get it from config
+ )
+ self._repo_user = "osm-developer" # TODO: get it from config
+ self._pubkey = "age1wnfvymrm4w9kfz8vn98lmu8c4w9e2wjd2v7u9lx7m3gn6patc4vqpralhx" # TODO: get it from config
+ self._workflow_debug = "true"
+ self._workflow_dry_run = "false"
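+ # Dispatch table: maps each operation key to the coroutine that launches its
+ # Argo workflow and the coroutine that checks the resulting resources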
+ self._workflows = {
+ "create_cluster": {
+ "workflow_function": self.create_cluster,
+ "check_resource_function": self.check_create_cluster,
+ },
+ "update_cluster": {
+ "workflow_function": self.update_cluster,
+ "check_resource_function": self.check_update_cluster,
+ },
+ "delete_cluster": {
+ "workflow_function": self.delete_cluster,
+ "check_resource_function": self.check_delete_cluster,
+ },
+ "register_cluster": {
+ "workflow_function": self.register_cluster,
+ "check_resource_function": self.check_register_cluster,
+ },
+ "deregister_cluster": {
+ "workflow_function": self.deregister_cluster,
+ "check_resource_function": self.check_deregister_cluster,
+ },
+ "create_profile": {
+ "workflow_function": self.create_profile,
+ "check_resource_function": self.check_create_profile,
+ },
+ "delete_profile": {
+ "workflow_function": self.delete_profile,
+ "check_resource_function": self.check_delete_profile,
+ },
+ "attach_profile_to_cluster": {
+ "workflow_function": self.attach_profile_to_cluster,
+ "check_resource_function": self.check_attach_profile_to_cluster,
+ },
+ "detach_profile_from_cluster": {
+ "workflow_function": self.detach_profile_from_cluster,
+ "check_resource_function": self.check_detach_profile_from_cluster,
+ },
+ "create_oka": {
+ "workflow_function": self.create_oka,
+ "check_resource_function": self.check_create_oka,
+ },
+ "update_oka": {
+ "workflow_function": self.update_oka,
+ "check_resource_function": self.check_update_oka,
+ },
+ "delete_oka": {
+ "workflow_function": self.delete_oka,
+ "check_resource_function": self.check_delete_oka,
+ },
+ "create_ksus": {
+ "workflow_function": self.create_ksus,
+ "check_resource_function": self.check_create_ksus,
+ },
+ "update_ksus": {
+ "workflow_function": self.update_ksus,
+ "check_resource_function": self.check_update_ksus,
+ },
+ "delete_ksus": {
+ "workflow_function": self.delete_ksus,
+ "check_resource_function": self.check_delete_ksus,
+ },
+ "clone_ksu": {
+ "workflow_function": self.clone_ksu,
+ "check_resource_function": self.check_clone_ksu,
+ },
+ "move_ksu": {
+ "workflow_function": self.move_ksu,
+ "check_resource_function": self.check_move_ksu,
+ },
+ "create_cloud_credentials": {
+ "workflow_function": self.create_cloud_credentials,
+ "check_resource_function": self.check_create_cloud_credentials,
+ },
+ "update_cloud_credentials": {
+ "workflow_function": self.update_cloud_credentials,
+ "check_resource_function": self.check_update_cloud_credentials,
+ },
+ "delete_cloud_credentials": {
+ "workflow_function": self.delete_cloud_credentials,
+ "check_resource_function": self.check_delete_cloud_credentials,
+ },
+ "dummy_operation": {
+ "workflow_function": self.dummy_operation,
+ "check_resource_function": self.check_dummy_operation,
+ },
+ }
+
super().__init__(msg, self.logger)
- def launch_workflow(self, key, content):
+ @property
+ def kubeconfig(self):
+ return self._kubeconfig
+
+ # Imported methods
+ from osm_lcm.odu_libs.vim_mgmt import (
+ create_cloud_credentials,
+ update_cloud_credentials,
+ delete_cloud_credentials,
+ check_create_cloud_credentials,
+ check_update_cloud_credentials,
+ check_delete_cloud_credentials,
+ )
+ from osm_lcm.odu_libs.cluster_mgmt import (
+ create_cluster,
+ update_cluster,
+ delete_cluster,
+ register_cluster,
+ deregister_cluster,
+ check_create_cluster,
+ check_update_cluster,
+ check_delete_cluster,
+ check_register_cluster,
+ check_deregister_cluster,
+ get_cluster_credentials,
+ )
+ from osm_lcm.odu_libs.ksu import (
+ create_ksus,
+ update_ksus,
+ delete_ksus,
+ clone_ksu,
+ move_ksu,
+ check_create_ksus,
+ check_update_ksus,
+ check_delete_ksus,
+ check_clone_ksu,
+ check_move_ksu,
+ )
+ from osm_lcm.odu_libs.oka import (
+ create_oka,
+ update_oka,
+ delete_oka,
+ check_create_oka,
+ check_update_oka,
+ check_delete_oka,
+ )
+ from osm_lcm.odu_libs.profiles import (
+ create_profile,
+ delete_profile,
+ attach_profile_to_cluster,
+ detach_profile_from_cluster,
+ check_create_profile,
+ check_delete_profile,
+ check_attach_profile_to_cluster,
+ check_detach_profile_from_cluster,
+ )
+ from osm_lcm.odu_libs.workflows import (
+ check_workflow_status,
+ )
+ from osm_lcm.odu_libs.render import (
+ render_jinja_template,
+ render_yaml_template,
+ )
+ from osm_lcm.odu_libs.common import create_secret
+
+ async def launch_workflow(self, key, op_id, op_params, content):
self.logger.info(
- f"Workflow is getting into launch. Key: {key}. Content: {content}"
+ f"Workflow is getting into launch. Key: {key}. Operation: {op_id}. Params: {op_params}. Content: {content}"
)
- return f"workflow-{key}-{content['_id']}"
+ workflow_function = self._workflows[key]["workflow_function"]
+ self.logger.info("workflow function : {}".format(workflow_function))
+ return await workflow_function(op_id, op_params, content)
- def check_workflow_status(self, workflow_name):
- self.logger.info(f"Check workflow status {workflow_name}")
- return True, "OK"
+ async def dummy_operation(self, op_id, op_params, content):
+ self.logger.info("Empty operation status Enter")
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
+ return content["workflow_name"]
+
+ async def check_resource_status(self, key, op_id, op_params, content):
+ self.logger.info(
+ f"Check resource status. Key: {key}. Operation: {op_id}. Params: {op_params}. Content: {content}"
+ )
+ check_resource_function = self._workflows[key]["check_resource_function"]
+ self.logger.info("check_resource function : {}".format(check_resource_function))
+ return await check_resource_function(op_id, op_params, content)
- def check_resource_status(self, key, content):
- self.logger.info(f"Check resource status {key}: {content}")
+ async def check_dummy_operation(self, op_id, op_params, content):
+ self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
return True, "OK"
async-timeout
checksumdir
config-man
-grpcio-tools<1.48.2
+grpcio-tools
grpclib
-idna
jinja2
+protobuf==3.20.3
+pyrage
pyyaml>6
pydantic
-protobuf==3.20.3
+randomname
+retrying-async
# via
# -r requirements.in
# aiohttp
+ # retrying-async
attrs==24.2.0
# via
# aiohttp
# via -r requirements.in
face==20.1.1
# via glom
+fire==0.6.0
+ # via randomname
frozenlist==1.4.1
# via
# aiohttp
# via config-man
grpcio==1.65.4
# via grpcio-tools
-grpcio-tools==1.48.1
+grpcio-tools==1.48.2
# via -r requirements.in
grpclib==0.4.7
# via -r requirements.in
hyperframe==6.0.1
# via h2
idna==3.7
- # via
- # -r requirements.in
- # yarl
+ # via yarl
jinja2==3.1.4
# via -r requirements.in
markupsafe==2.1.5
# via -r requirements.in
pydantic-core==2.20.1
# via pydantic
+pyrage==1.1.2
+ # via -r requirements.in
pyyaml==6.0.2
# via -r requirements.in
+randomname==0.2.1
+ # via -r requirements.in
+retrying-async==2.0.0
+ # via -r requirements.in
+six==1.16.0
+ # via fire
+termcolor==2.4.0
+ # via fire
typing-extensions==4.12.2
# via
# pydantic