blob: cf09e8fa5b44dcd698ed3fbddb9ee8544deaf71c [file] [log] [blame]
#######################################################################################
# Copyright ETSI Contributors and Others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
import asyncio
from math import ceil
from jsonpath_ng.ext import parse
async def check_workflow_status(self, workflow_name):
    """Wait for an Argo workflow in the 'osm-workflows' namespace to complete.

    Args:
        workflow_name: Name of the workflow to poll; falsy means the
            workflow was never launched.

    Returns:
        tuple[bool, str]: (success, status message). Never raises: any
        exception from the readiness loop is converted into a failure tuple.
    """
    self.logger.info(f"check_workflow_status Enter: {workflow_name}")
    # No workflow name means nothing was ever started — fail fast.
    if not workflow_name:
        return False, "Workflow was not launched"
    # Completed condition of the Argo Workflow CR must report status "True".
    completion_condition = {
        "jsonpath_filter": "status.conditions[?(@.type=='Completed')].status",
        "value": "True",
    }
    try:
        result = await self.readiness_loop(
            item="workflow",
            name=workflow_name,
            namespace="osm-workflows",
            condition=completion_condition,
            deleted=False,
            timeout=300,
        )
    except Exception as e:
        # Boundary handler: surface any unexpected error as a failure tuple.
        return False, f"Unexpected exception: {e}"
    return result
async def readiness_loop(
    self, item, name, namespace, condition, deleted, timeout, kubectl=None
):
    """Poll a Kubernetes custom object until it is ready or deleted.

    The object identified by (item, name, namespace) is fetched every
    ``self._odu_checkloop_retry_time`` seconds until the requested state is
    observed or roughly ``timeout`` seconds have elapsed.

    Args:
        item (str): Kind selector; one of the keys of the internal API map
            ("workflow", "kustomization", "cluster_aws", "cluster_azure",
            "cluster_gcp", "nodepool_aws", "nodepool_gcp").
        name (str): Name of the object to watch.
        namespace (str): Namespace of the object.
        condition (dict | None): Readiness test with keys
            "jsonpath_filter" (JSONPath into the fetched object) and
            "value" (expected value). An optional "function" key supplies
            a comparator ``f(expected, actual) -> bool`` (defaults to
            equality). Ignored when ``deleted`` is True; when falsy and
            ``deleted`` is False the loop succeeds immediately.
        deleted (bool): When True, wait for the object to disappear
            instead of waiting for the condition.
        timeout (int | float): Maximum total wait, in seconds.
        kubectl: Client used to fetch the object; defaults to
            ``self._kubectl``.

    Returns:
        tuple[bool, str]: (success, human-readable status message).
    """
    if kubectl is None:
        kubectl = self._kubectl
    self.logger.info("readiness_loop Enter")
    self.logger.info(
        f"{item} {name}. Namespace: '{namespace}'. Condition: {condition}. Deleted: {deleted}. Timeout: {timeout}"
    )
    # Map the abstract item kind to its custom-resource API coordinates.
    item_api_map = {
        "workflow": {
            "api_group": "argoproj.io",
            "api_plural": "workflows",
            "api_version": "v1alpha1",
        },
        "kustomization": {
            "api_group": "kustomize.toolkit.fluxcd.io",
            "api_plural": "kustomizations",
            "api_version": "v1",
        },
        "cluster_aws": {
            "api_group": "eks.aws.upbound.io",
            "api_plural": "clusters",
            "api_version": "v1beta1",
        },
        "cluster_azure": {
            "api_group": "containerservice.azure.upbound.io",
            "api_plural": "kubernetesclusters",
            "api_version": "v1beta1",
        },
        "cluster_gcp": {
            "api_group": "container.gcp.upbound.io",
            "api_plural": "clusters",
            "api_version": "v1beta2",
        },
        "nodepool_aws": {
            "api_group": "eks.aws.upbound.io",
            "api_plural": "nodegroups",
            "api_version": "v1beta1",
        },
        "nodepool_gcp": {
            "api_group": "container.gcp.upbound.io",
            "api_plural": "nodepools",
            "api_version": "v1beta2",
        },
    }
    counter = 1
    retry_time = self._odu_checkloop_retry_time
    # Number of fetch attempts needed to cover the requested timeout.
    max_iterations = ceil(timeout / retry_time)
    api_group = item_api_map[item]["api_group"]
    api_plural = item_api_map[item]["api_plural"]
    api_version = item_api_map[item]["api_version"]
    while counter <= max_iterations:
        try:
            self.logger.info(f"Iteration {counter}/{max_iterations}")
            generic_object = await kubectl.get_generic_object(
                api_group=api_group,
                api_plural=api_plural,
                api_version=api_version,
                namespace=namespace,
                name=name,
            )
            if deleted:
                if generic_object:
                    self.logger.info(
                        f"Found {api_plural}. Name: {name}. Namespace: '{namespace}'. API: {api_group}/{api_version}"
                    )
                else:
                    # Object is gone: the deletion we were waiting for happened.
                    self.logger.info(
                        f"{item} {name} deleted after {counter} iterations (aprox {counter*retry_time} seconds)"
                    )
                    return True, "COMPLETED"
            else:
                if not condition:
                    return True, "Nothing to check"
                if generic_object:
                    conditions = generic_object.get("status", {}).get("conditions", [])
                    self.logger.info(f"{item} status conditions: {conditions}")
                else:
                    self.logger.info(
                        f"Could not find {api_plural}. Name: {name}. Namespace: '{namespace}'. API: {api_group}/{api_version}"
                    )
                    conditions = []
                jsonpath_expr = parse(condition["jsonpath_filter"])
                match = jsonpath_expr.find(generic_object)
                if match:
                    value = match[0].value
                    # Caller may supply a custom comparator; default is equality
                    # between the expected and the observed value.
                    condition_function = condition.get("function", lambda x, y: x == y)
                    if condition_function(condition["value"], value):
                        self.logger.info(
                            f"{item} {name} met the condition {condition} in {counter} iterations (aprox {counter*retry_time} seconds)"
                        )
                        return True, "COMPLETED"
        except Exception as e:
            # Transient errors (API hiccups, malformed objects) are logged and
            # retried until the iteration budget is exhausted.
            self.logger.error(f"Exception: {e}")
        await asyncio.sleep(retry_time)
        counter += 1
    # Bug fix: this message was a plain string literal, so the {placeholders}
    # were returned verbatim to callers; it must be an f-string.
    return (
        False,
        f"{item} {name} was not ready after {max_iterations} iterations (aprox {timeout} seconds)",
    )