From: garciadeblas Date: Wed, 3 Jul 2024 07:17:56 +0000 (+0200) Subject: Feature 11019: Workflow for cloud-native operations in OSM following Gitops model X-Git-Tag: release-v16.0-start~16 X-Git-Url: https://osm.etsi.org/gitweb/?a=commitdiff_plain;h=70461c5ebfedb37ed8686b28a91513cf7cb38733;p=osm%2Fdevops.git Feature 11019: Workflow for cloud-native operations in OSM following Gitops model Change-Id: Ie763936b095715669741197e36456d8e644c7456 Signed-off-by: garciadeblas --- diff --git a/docker/osm-krm-functions/Dockerfile b/docker/osm-krm-functions/Dockerfile new file mode 100644 index 00000000..ca7c90f4 --- /dev/null +++ b/docker/osm-krm-functions/Dockerfile @@ -0,0 +1,58 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +FROM alpine:3.20 +# FROM bash:3.1.23-alpine3.20 + +# Ensure compatibility with any script +RUN ln -s /usr/local/bin/bash /bin/bash + +# Install packages available at Alpine repos +RUN apk add --no-cache \ + age \ + bash \ + curl \ + envsubst \ + git \ + kubectl \ + kustomize \ + rsync \ + sops \ + yq +#\ +# apg \ +# gnupg \ +# gpg \ +# openssh-client \ +# sshpass + +# Install other dependencies +RUN (curl -s https://fluxcd.io/install.sh | bash) && \ + curl https://github.com/GoogleContainerTools/kpt/releases/download/v1.0.0-beta.44/kpt_linux_amd64 -Lo kpt && \ + chmod +x kpt && \ + mv kpt /usr/local/bin/ + +# Create new user and log in as it +RUN addgroup -g 10000 -S app && \ + adduser -h /app -s /bin/false -D -u 10000 -S -G app app +USER app +WORKDIR /app + +# Add helper scripts +COPY --chown=app:app scripts/docker-entrypoint.sh /app/scripts/entrypoint.sh +COPY --chown=app:app scripts/library /app/scripts/library + +ENTRYPOINT [ "/app/scripts/entrypoint.sh" ] + +CMD ["bash"] diff --git a/docker/osm-krm-functions/scripts/docker-entrypoint.sh b/docker/osm-krm-functions/scripts/docker-entrypoint.sh new file mode 100755 index 00000000..cab54e4f --- /dev/null +++ b/docker/osm-krm-functions/scripts/docker-entrypoint.sh @@ -0,0 +1,68 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
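+#
+# Usage sketch (illustrative only; the image tag and the volume mount are assumptions,
+# not defined in this repository):
+#
+#   docker build -t osm-krm-functions docker/osm-krm-functions
+#   docker run --rm -v "$PWD/fleet:/fleet" osm-krm-functions \
+#       safe_name "My_Cluster.01"
+#
+# The container arguments are executed by this entrypoint with the KRM/helper libraries
+# already sourced, reading from INFILE (default: stdin) and writing to OUTFILE
+# (default: stdout).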
+# + +# "Debug mode" variable +DEBUG="${DEBUG:-}" +[[ "${DEBUG,,}" == "true" ]] && set -x + +# If there is an input stream, dumps it into a temporary file and sets it as INFILE +if [[ -n "${INSTREAM}" ]]; +then + # Save input stream to temporary file + TMPFILE=$(mktemp /tmp/INSTREAM.XXXXXXXXXX) || exit 1 + echo "${INSTREAM}" > "${TMPFILE}" + export INFILE="${TMPFILE}" +fi + +# Sets default INPUT and OUTPUT +INFILE="${INFILE:-/dev/stdin}" +OUTFILE="${OUTFILE:-/dev/stdout}" + +# Loads helper functions and KRM functions +source /app/scripts/library/helper-functions.rc +source /app/scripts/library/krm-functions.rc + +# If applicable, loads additional environment variables +if [[ -n "${CUSTOM_ENV}" ]]; +then + set -a + source <(echo "${CUSTOM_ENV}") + set +a +fi + +# In case INFILE and OUTFILE are the same, it uses a temporary output file +if [[ "${INFILE}" == "${OUTFILE}" ]]; +then + TMPOUTFILE="$(mktemp "/results/OUTFILE.XXXXXXXXXX")" || exit 1 +else + TMPOUTFILE="${OUTFILE}" +fi + +#################### EXECUTION #################### +# Debug mode: +if [[ "${DEBUG,,}" == "true" ]]; +then + "$@" < "${INFILE}" | tee "${TMPOUTFILE}" +# Normal mode: +else + "$@" < "${INFILE}" > "${TMPOUTFILE}" +fi +################################################### + +# In case INFILE and OUTFILE are the same, it renames the temporary file over the OUTFILE (i.e., the same as INFILE) +if [[ "${INFILE}" == "${OUTFILE}" ]]; +then + mv -f "${TMPOUTFILE}" "${OUTFILE}" +fi diff --git a/docker/osm-krm-functions/scripts/library/helper-functions.rc b/docker/osm-krm-functions/scripts/library/helper-functions.rc new file mode 100644 index 00000000..29e00ffd --- /dev/null +++ b/docker/osm-krm-functions/scripts/library/helper-functions.rc @@ -0,0 +1,632 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
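+#
+# Most functions in this library read a kpt `ResourceList` on stdin and write the
+# transformed list on stdout, so they can be chained with pipes. Illustrative sketch
+# (paths and values are hypothetical):
+#
+#   folder2list "./templates/my-oka" | \
+#       set_label "osm_profile_type" "apps" | \
+#       list2folder_cp_over "./rendered"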
+# + +# Convert input string to a safe name for K8s resources +function safe_name() { + local INPUT="$1" + + echo "${INPUT,,}" | \ + sed '/\.\// s|./||' | \ + sed 's|\.|-|g' | \ + sed 's|/|-|g' | \ + sed 's|_|-|g' | \ + sed 's| |-|g' +} + + +# Helper function to create a new age key pair +function create_age_keypair() { + local AGE_KEY_NAME="$1" + local CREDENTIALS_DIR="${2:-"${CREDENTIALS_DIR}"}" + + # Delete the keys in case they existed already + rm -f "${CREDENTIALS_DIR}/${AGE_KEY_NAME}.key" "${CREDENTIALS_DIR}/${AGE_KEY_NAME}.pub" + + # Private key + age-keygen -o "${CREDENTIALS_DIR}/${AGE_KEY_NAME}.key" + + # Public key (extracted from comment at private key) + age-keygen -y "${CREDENTIALS_DIR}/${AGE_KEY_NAME}.key" > "${CREDENTIALS_DIR}/${AGE_KEY_NAME}.pub" +} + + +# Helper function to in-place encrypt secrets in manifest +function encrypt_secret_inplace() { + local FILE="$1" + local PUBLIC_KEY="$2" + + sops \ + --age=${PUBLIC_KEY} \ + --encrypt \ + --encrypted-regex '^(data|stringData)$' \ + --in-place "${FILE}" +} + + +# Helper function to encrypt secrets from stdin +function encrypt_secret_from_stdin() { + local PUBLIC_KEY="$1" + + # Save secret manifest to temporary file + local TMPFILE=$(mktemp /tmp/secret.XXXXXXXXXX) || exit 1 + cat > "${TMPFILE}" + # NOTE: Required workaround for busybox's version of `mktemp`, which is quite limited and does not support temporary files with extensions. + # `.yaml` is required for proper `sops` behaviour. + mv "${TMPFILE}" "${TMPFILE}.yaml" + + # Encrypt + sops \ + --age=${PUBLIC_KEY} \ + --encrypt \ + --encrypted-regex '^(data|stringData)$' \ + --in-place "${TMPFILE}.yaml" + + # Outputs the result and removes the temporary file + cat "${TMPFILE}.yaml" && rm -f "${TMPFILE}.yaml" +} + + +# Helper function to create secret manifest and encrypt with public key +function kubectl_encrypt() { + local PUBLIC_KEY="$1" + + # Gathers all optional parameters for transformer funcion (if any) and puts them into an array for further use + local ALL_PARAMS=( "${@}" ) + local PARAMS=( "${ALL_PARAMS[@]:1}" ) + + kubectl \ + "${PARAMS[@]}" | \ + encrypt_secret_from_stdin \ + "${PUBLIC_KEY}" +} + + +# Generator function to convert source folder to `ResourceList` +function folder2list_generator() { + local FOLDER="${1:-}" + local SUBSTENV="${2:-"false"}" + local FILTER="${3:-""}" + + if [[ "${SUBSTENV,,}" == "true" ]]; + then + # Mix input with new generated manifests and replace environment variables + join_lists \ + <(cat) \ + <( + kpt fn source "${FOLDER}" | \ + replace_env_vars "${FILTER}" + ) + else + # Mix input with new generated manifests + join_lists \ + <(cat) \ + <( + kpt fn source "${FOLDER}" + ) + fi + +} + + +# Function to convert source folder to `ResourceList` (no generator) +function folder2list() { + local FOLDER="${1:-}" + + kpt fn source "${FOLDER}" +} + + +# Helper function to convert manifest to `ResourceList` +function manifest2list() { + kustomize cfg cat --wrap-kind ResourceList +} + + +# Helper function to convert `ResourceList` to manifests in folder structure. +# - New folder must be created to render the manifests. +function list2folder() { + local FOLDER="${1:-}" + local DRY_RUN="${2:-${DRY_RUN:-false}}" + + if [[ "${DRY_RUN,,}" == "true" ]]; + then + cat + else + kpt fn sink "${FOLDER}" + fi +} + + +# Helper function to convert `ResourceList` to manifests in folder structure. +# - It copies (cp) the generated files/subfolders over the target folder. +# - Pre-existing files and subfolder structure in target folder is preserved. 
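+# Illustrative usage (hypothetical names and paths), mirroring `render_manifest_over_ksu`
+# below: render a single manifest into a KSU subfolder of a profile while keeping any
+# files already present in the target folder:
+#   manifest2list < "./my-configmap.yaml" | \
+#       set_filename_to_items "my-configmap.yaml" | \
+#       prepend_folder_path "my-ksu/" | \
+#       list2folder_cp_over "${TARGET_PROFILE_FOLDER}"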
+function list2folder_cp_over() { + local FOLDER="${1:-}" + local DRY_RUN="${2:-${DRY_RUN:-false}}" + + if [[ "${DRY_RUN,,}" == "true" ]]; + then + cat + else + local TMPFOLDER=$(mktemp -d) || exit 1 + kpt fn sink "${TMPFOLDER}/manifests" + + # Copy the generated files over the target folder + mkdir -p "${FOLDER}/" + cp -r "${TMPFOLDER}/manifests/"* "${FOLDER}/" + + # Delete temporary folder + rm -rf "${TMPFOLDER}" + fi +} + + +# Helper function to convert `ResourceList` to manifests in folder structure. +# - It syncs the generated files/subfolders over the target folder. +# - Pre-existing files and subfolder structure in target folder is deleted if not present in `ResourceList`. +function list2folder_sync_replace() { + local FOLDER="${1:-}" + local DRY_RUN="${2:-${DRY_RUN:-false}}" + + if [[ "${DRY_RUN,,}" == "true" ]]; + then + cat + else + local TMPFOLDER=$(mktemp -d) || exit 1 + kpt fn sink "${TMPFOLDER}/manifests" + + # Copy the generated files over the target folder + mkdir -p "${FOLDER}/" + rsync -arh --exclude ".git" --exclude ".*" --delete \ + "${TMPFOLDER}/manifests/" "${FOLDER}/" + + # Delete temporary folder + rm -rf "${TMPFOLDER}" + fi +} + + +# Helper function to render **SAFELY** a single manifest coming from stdin into a profile, with a proper KSU subfolder +function render_manifest_over_ksu() { + local KSU_NAME="$1" + local TARGET_PROFILE_FOLDER="$2" + local MANIFEST_FILENAME="$3" + + manifest2list | \ + set_filename_to_items \ + "${MANIFEST_FILENAME}" | \ + prepend_folder_path \ + "${KSU_NAME}/" | \ + list2folder_cp_over \ + "${TARGET_PROFILE_FOLDER}" +} + + +# Set filename to `ResourceList` item +function set_filename_to_items() { + local FILENAME="$1" + + yq "(.items[]).metadata.annotations.\"config.kubernetes.io/path\" |= \"${FILENAME}\"" | \ + yq "(.items[]).metadata.annotations.\"internal.config.kubernetes.io/path\" |= \"${FILENAME}\"" +} + + +# Prepend folder path to `ResourceList` +function prepend_folder_path() { + local PREFIX="$1" + + if [[ (-z "${PREFIX}") || ("${PREFIX}" == ".") ]]; + then + cat + else + yq "(.items[]).metadata.annotations.\"config.kubernetes.io/path\" |= \"${PREFIX}\" + ." | \ + yq "(.items[]).metadata.annotations.\"internal.config.kubernetes.io/path\" |= \"${PREFIX}\" + ." + fi +} + + +# Rename file in `ResourceList` +function rename_file_in_items() { + local SOURCE_NAME="$1" + local DEST_NAME="$2" + + yq "(.items[].metadata.annotations | select (.\"config.kubernetes.io/path\" == \"${SOURCE_NAME}\")).\"config.kubernetes.io/path\" = \"${DEST_NAME}\"" | \ + yq "(.items[].metadata.annotations | select (.\"internal.config.kubernetes.io/path\" == \"${SOURCE_NAME}\")).\"internal.config.kubernetes.io/path\" = \"${DEST_NAME}\"" +} + + +# Get value from key in object in `ResourceList` +function get_value_from_resourcelist() { + local KEY_PATH="$1" + local TARGET_FILTERS="${2:-}" + # Example: To get a specific kind ("ProviderConfig") with a specific name ("default"). (TIP: Note the escaped double quotes). + # TARGET_FILTERS="| select(.kind == \"ProviderConfig\") | select(.metadata.name == \"default\")" + + yq "(.items[]${TARGET_FILTERS})${KEY_PATH}" +} + + +# Patch "replace" to item in `ResourceList` +function patch_replace() { + local KEY_PATH="$1" + local VALUE="$2" + local TARGET_FILTERS="${3:-}" + # Example: To only patch a specific kind ("ProviderConfig") with a specific name ("default"). (TIP: Note the escaped double quotes). 
+ # TARGET_FILTERS="| select(.kind == \"ProviderConfig\") | select(.metadata.name == \"default\")" + + yq "(.items[]${TARGET_FILTERS})${KEY_PATH} = \"${VALUE}\"" +} + + +# Add label to item in `ResourceList` +function set_label() { + local KEY="$1" + local VALUE="$2" + local TARGET_FILTERS="${3:-}" + # Example: To only patch a specific kind ("ProviderConfig") with a specific name ("default"). (TIP: Note the escaped double quotes). + # TARGET_FILTERS="| select(.kind == \"ProviderConfig\") | select(.metadata.name == \"default\")" + + yq "(.items[]${TARGET_FILTERS}).metadata.labels.${KEY} = \"${VALUE}\"" +} + + +# Patch which "appends" to list existing in item in `ResourceList` +function patch_add_to_list() { + local KEY_PATH="$1" + local VALUE="$2" + local TARGET_FILTERS="${3:-}" + # Example: To only patch a specific kind ("ProviderConfig") with a specific name ("default"). (TIP: Note the escaped double quotes). + # TARGET_FILTERS="| select(.kind == \"ProviderConfig\") | select(.metadata.name == \"default\")" + + local VALUE_AS_JSON="$(echo "${VALUE}" | yq -o json -I0)" + + yq "(.items[]${TARGET_FILTERS})${KEY_PATH} += ${VALUE_AS_JSON}" +} + + +# Patch which removes from list, existing in item in `ResourceList` +function patch_delete_from_list() { + local KEY_PATH="$1" + local TARGET_FILTERS="${2:-}" + + # local VALUE_AS_JSON="$(echo "${VALUE}" | yq -o json -I0)" + + yq "del((.items[]${TARGET_FILTERS})${KEY_PATH})" +} + + +# Check if an element/value is in a given list, existing in item in `ResourceList` +function is_element_on_list() { + local KEY_PATH="$1" + local VALUE="$2" + local TARGET_FILTERS="${3:-}" + # Example: To only patch a specific kind ("ProviderConfig") with a specific name ("default"). (TIP: Note the escaped double quotes). + # TARGET_FILTERS="| select(.kind == \"ProviderConfig\") | select(.metadata.name == \"default\")" + + TEST_RESULT=$( + cat | \ + yq "(.items[]${TARGET_FILTERS})${KEY_PATH} == \"${VALUE}\"" | grep "true" + ) + + if [[ "${TEST_RESULT}" != "true" ]] + then + echo "false" + else + echo "true" + fi +} + + +# Patch "replace" to item in `ResourceList` using a JSON as value +function patch_replace_inline_json() { + local KEY_PATH="$1" + local VALUE="$2" + local TARGET_FILTERS="${3:-}" + # Example: To only patch a specific kind ("ProviderConfig") with a specific name ("default"). (TIP: Note the escaped double quotes). 
+ # TARGET_FILTERS="| select(.kind == \"ProviderConfig\") | select(.metadata.name == \"default\")" + + VALUE_AS_JSON="$(echo "${VALUE}" | yq -o=json)" yq "(.items[]${TARGET_FILTERS})${KEY_PATH} = strenv(VALUE_AS_JSON)" +} + + +# Delete full object from `ResourceList` +function delete_object() { + local OBJECT_NAME="$1" + local KIND_NAME="$2" + local API_VERSION="${3:-""}" + + # Calculated inputs + if [[ -z "${API_VERSION}" ]] + then + # If `apiVersion` is not specified + local TARGET_FILTER="| select(.kind == \"${KIND_NAME}\") | select(.metadata.name == \"${OBJECT_NAME}\")" + else + # Otherwise, it is taken into account + local TARGET_FILTER="| select(.kind == \"${KIND_NAME}\") | select(.apiVersion == \"${API_VERSION}\") | select(.metadata.name == \"${OBJECT_NAME}\")" + fi + + # Delete object + yq "del((.items[]${TARGET_FILTER}))" +} + + +# Empty transformer function +function noop_transformer() { + cat +} + + +# Add patch to `Kustomization` item in `ResourceList` +function add_patch_to_kustomization() { + local KUSTOMIZATION_NAME="$1" + local FULL_PATCH_CONTENT="$2" + + patch_add_to_list \ + ".spec.patches" \ + "${FULL_PATCH_CONTENT}" \ + "| select(.kind == \"Kustomization\") | select(.metadata.name == \"${KUSTOMIZATION_NAME}\")" +} + + +# Helper function to produce a JSON Patch as specified in RFC 6902 +function as_json_patch() { + local OPERATION="$1" + local PATCH_PATH="$2" + local VALUES="$3" + + # Convert to JSON dictionary to insert as map instead of string + local VALUES_AS_DICT=$(echo "${VALUES}" | yq -o=json) + + # Generate a patch list + cat < "${CLUSTER_FOLDER}/.sops.yaml" + + # Add also the public SOPS key to the repository so that others who clone the repo can encrypt new files + # NOTE: This file cannot be generated by pure KRM functions since it begins by a dot ('.') + echo "${PUBLIC_KEY_NEW_CLUSTER}" \ + > "${CLUSTER_FOLDER}/.sops.pub.asc" + + # Prepare everything to perform a Flux bootstrap of the new remote cluster from the management cluster. + # Here we also add the `age` private key to the **management cluster** as secret. 
# This one will be used during bootstrap to inject the key into the new cluster
+    local CLUSTER_AGE_SECRET_NAME=$(safe_name "sops-age-${CLUSTER_KUSTOMIZATION_NAME}")
+    echo "" |
+        generator_bootstrap_new_cluster \
+            "${CLUSTER_NAME}" \
+            "${CLUSTER_KUSTOMIZATION_NAME}" \
+            "${CLUSTER_AGE_SECRET_NAME}" \
+            "${SW_CATALOGS_REPO_DIR}" | \
+        generator_k8s_age_secret_new_cluster \
+            "${PRIVATE_KEY_NEW_CLUSTER}" \
+            "${PUBLIC_KEY_MGMT}" \
+            "${CLUSTER_AGE_SECRET_NAME}" | \
+        prepend_folder_path "${CLUSTER_KUSTOMIZATION_NAME}/" | \
+        list2folder_cp_over \
+            "${MGMT_RESOURCES_DIR}"
+}
+
+
+# Create remote CrossPlane cluster (generic for any cloud)
+function create_crossplane_cluster() {
+    local CLUSTER_KUSTOMIZATION_NAME="$1"
+    local CLUSTER_NAME="$2"
+    # As of today, one among `aks`, `eks` or `gke`:
+    local CLUSTER_TYPE="$3"
+    local PROVIDERCONFIG_NAME="${4:-default}"
+    local VM_SIZE="$5"
+    local NODE_COUNT="$6"
+    local CLUSTER_LOCATION="$7"
+    local K8S_VERSION="${8:-"'1.28'"}"
+    local PUBLIC_KEY_MGMT="${9:-"${PUBLIC_KEY_MGMT}"}"
+    local PUBLIC_KEY_NEW_CLUSTER="${10}"
+    local PRIVATE_KEY_NEW_CLUSTER="${11:-"${PRIVATE_KEY_NEW_CLUSTER}"}"
+    # AKS only
+    local AKS_RG_NAME="${12:-""}"
+    # GKE only
+    local GKE_PREEMPTIBLE_NODES="${13:-""}"
+    ## `FLEET_REPO_DIR` is the result of:
+    ## "{{inputs.parameters.fleet_mount_path}}/{{inputs.parameters.cloned_fleet_folder_name}}"
+    local FLEET_REPO_DIR="${14:-"${FLEET_REPO_DIR}"}"
+    local FLEET_REPO_URL="${15:-""}"
+    ## `SW_CATALOGS_REPO_DIR` is the result of:
+    ## "{{inputs.parameters.sw_catalogs_mount_path}}/{{inputs.parameters.cloned_sw_catalogs_folder_name}}"
+    local SW_CATALOGS_REPO_DIR="${16:-"${SW_CATALOGS_REPO_DIR}"}"
+    local SW_CATALOGS_REPO_URL="${17:-""}"
+    # Perform bootstrap unless asked otherwise
+    local SKIP_BOOTSTRAP="${18:-"false"}"
+    # Only change if absolutely needed
+    local MGMT_PROJECT_NAME="${19:-"osm_admin"}"
+    local MGMT_CLUSTER_NAME="${20:-"_management"}"
+    local BASE_TEMPLATES_PATH="${21:-"cloud-resources"}"
+    local TEMPLATE_MANIFEST_FILENAME="${22:-"${CLUSTER_TYPE,,}01.yaml"}"
+    local MANIFEST_FILENAME="${23:-"${CLUSTER_TYPE,,}-${CLUSTER_NAME}.yaml"}"
+
+
+    # Is the provider type supported?
+    local VALID_PROVIDERS=("eks" "aks" "gke")
+    CLUSTER_TYPE="${CLUSTER_TYPE,,}"
+    [[ ! ($(echo ${VALID_PROVIDERS[@]} | grep -w "${CLUSTER_TYPE}")) ]] && return 1
+
+    # Determines the source dir for the templates and the target folder in Fleet
+    local TEMPLATES_DIR="${SW_CATALOGS_REPO_DIR}/${BASE_TEMPLATES_PATH}/${CLUSTER_TYPE}/templates"
+    local TARGET_FOLDER="${FLEET_REPO_DIR}/${MGMT_PROJECT_NAME}/managed-resources/${MGMT_CLUSTER_NAME}"
+
+    # Determine which optional steps may be needed
+    local IS_AKS=$([[ "${CLUSTER_TYPE}" == "aks" ]]; echo $?)
+    local IS_GKE=$([[ "${CLUSTER_TYPE}" == "gke" ]]; echo $?)
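+
+    # NOTE: IS_AKS and IS_GKE hold shell exit codes (0 means "true"), which is the flag
+    # format that `transform_if` consumes in the pipeline below. Illustrative example:
+    #   transform_if "${IS_AKS}" patch_replace ".spec.postBuild.substitute.rg_name" "my-rg"
+    # applies the patch only for AKS clusters and passes the ResourceList through untouched
+    # otherwise ("my-rg" is a hypothetical value).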
+ + # Pipeline of transformations to create the cluster resource + export CLUSTER_KUSTOMIZATION_NAME + folder2list \ + "${TEMPLATES_DIR}" | \ + replace_env_vars \ + '${CLUSTER_KUSTOMIZATION_NAME}' | \ + patch_replace \ + ".spec.postBuild.substitute.cluster_name" \ + "${CLUSTER_NAME}" \ + "| select(.kind == \"Kustomization\") | select(.metadata.name == \"${CLUSTER_KUSTOMIZATION_NAME}\")" | \ + patch_replace \ + ".spec.postBuild.substitute.vm_size" \ + "${VM_SIZE}" \ + "| select(.kind == \"Kustomization\") | select(.metadata.name == \"${CLUSTER_KUSTOMIZATION_NAME}\")" | \ + patch_replace \ + ".spec.postBuild.substitute.node_count" \ + "${NODE_COUNT}" \ + "| select(.kind == \"Kustomization\") | select(.metadata.name == \"${CLUSTER_KUSTOMIZATION_NAME}\")" | \ + patch_replace \ + ".spec.postBuild.substitute.cluster_location" \ + "${CLUSTER_LOCATION}" \ + "| select(.kind == \"Kustomization\") | select(.metadata.name == \"${CLUSTER_KUSTOMIZATION_NAME}\")" | \ + patch_replace \ + ".spec.postBuild.substitute.k8s_version" \ + "${K8S_VERSION}" \ + "| select(.kind == \"Kustomization\") | select(.metadata.name == \"${CLUSTER_KUSTOMIZATION_NAME}\")" | \ + patch_replace \ + ".spec.postBuild.substitute.providerconfig_name" \ + "${PROVIDERCONFIG_NAME}" \ + "| select(.kind == \"Kustomization\") | select(.metadata.name == \"${CLUSTER_KUSTOMIZATION_NAME}\")" | \ + transform_if \ + "${IS_AKS}" \ + patch_replace \ + ".spec.postBuild.substitute.rg_name" \ + "${AKS_RG_NAME}" \ + "| select(.kind == \"Kustomization\") | select(.metadata.name == \"${CLUSTER_KUSTOMIZATION_NAME}\")" | \ + transform_if \ + "${IS_GKE}" \ + patch_replace \ + ".spec.postBuild.substitute.preemptible_nodes" \ + "${GKE_PREEMPTIBLE_NODES}" \ + "| select(.kind == \"Kustomization\") | select(.metadata.name == \"${CLUSTER_KUSTOMIZATION_NAME}\")" | \ + rename_file_in_items \ + "${TEMPLATE_MANIFEST_FILENAME}" \ + "${MANIFEST_FILENAME}" | \ + prepend_folder_path "${CLUSTER_KUSTOMIZATION_NAME}/" | \ + list2folder_cp_over \ + "${TARGET_FOLDER}" + + # Bootstrap (unless asked to skip) + if [[ "${SKIP_BOOTSTRAP,,}" == "true" ]]; then + return 0 + fi + create_bootstrap_for_remote_cluster \ + "${CLUSTER_NAME}" \ + "${CLUSTER_KUSTOMIZATION_NAME}" \ + "${FLEET_REPO_DIR}" \ + "${SW_CATALOGS_REPO_DIR}" \ + "${FLEET_REPO_URL}" \ + "${SW_CATALOGS_REPO_URL}" \ + "${MGMT_PROJECT_NAME}" \ + "${PUBLIC_KEY_MGMT}" \ + "${PUBLIC_KEY_NEW_CLUSTER}" \ + "${PRIVATE_KEY_NEW_CLUSTER}" +} + + +# Delete remote cluster (generic for any cloud) +function delete_remote_cluster() { + local CLUSTER_KUSTOMIZATION_NAME="$1" + local PROJECT_NAME="${2:-"${MGMT_PROJECT_NAME}"}" + local FLEET_REPO_DIR="${3:-"${FLEET_REPO_DIR}"}" + local MGMT_RESOURCES_DIR="${4:-"${MGMT_RESOURCES_DIR}"}" + + # Optional inputs: Paths for each profile in the Git repo + local INFRA_CONTROLLERS_DIR="${5:-"${FLEET_REPO_DIR}/${PROJECT_NAME}/infra-controller-profiles/${CLUSTER_KUSTOMIZATION_NAME}"}" + local INFRA_CONFIGS_DIR="${6:-"${FLEET_REPO_DIR}/${PROJECT_NAME}/infra-config-profiles/${CLUSTER_KUSTOMIZATION_NAME}"}" + local MANAGED_RESOURCES_DIR="${7:-"${FLEET_REPO_DIR}/${PROJECT_NAME}/managed-resources/${CLUSTER_KUSTOMIZATION_NAME}"}" + local APPS_DIR="${8:-"${FLEET_REPO_DIR}/${PROJECT_NAME}/app-profiles/${CLUSTER_KUSTOMIZATION_NAME}"}" + local CLUSTER_DIR="${9:-"${FLEET_REPO_DIR}/clusters/${CLUSTER_KUSTOMIZATION_NAME}"}" + + # Delete profile folders + rm -rf "${INFRA_CONTROLLERS_DIR}" + rm -rf "${INFRA_CONFIGS_DIR}" + rm -rf "${MANAGED_RESOURCES_DIR}" + rm -rf "${APPS_DIR}" + + # Delete base cluster 
# Kustomizations
+    rm -rf "${CLUSTER_DIR}"
+
+    # Delete cluster resources
+    rm -rf "${MGMT_RESOURCES_DIR}/${CLUSTER_KUSTOMIZATION_NAME}"
+}
+
+
+# Update remote CrossPlane cluster (generic for any cloud)
+function update_crossplane_cluster() {
+    local CLUSTER_KUSTOMIZATION_NAME="$1"
+    local CLUSTER_NAME="$2"
+    # As of today, one among `aks`, `eks` or `gke`:
+    local CLUSTER_TYPE="$3"
+    local PROVIDERCONFIG_NAME="${4:-default}"
+    local VM_SIZE="$5"
+    local NODE_COUNT="$6"
+    local CLUSTER_LOCATION="$7"
+    local K8S_VERSION="${8:-"'1.28'"}"
+    local PUBLIC_KEY_MGMT="${9:-"${PUBLIC_KEY_MGMT}"}"
+    local PUBLIC_KEY_NEW_CLUSTER="${10}"
+    local PRIVATE_KEY_NEW_CLUSTER="${11:-"${PRIVATE_KEY_NEW_CLUSTER}"}"
+    # AKS only
+    local AKS_RG_NAME="${12:-""}"
+    # GKE only
+    local GKE_PREEMPTIBLE_NODES="${13:-""}"
+    ## `FLEET_REPO_DIR` is the result of:
+    ## "{{inputs.parameters.fleet_mount_path}}/{{inputs.parameters.cloned_fleet_folder_name}}"
+    local FLEET_REPO_DIR="${14:-"${FLEET_REPO_DIR}"}"
+    local FLEET_REPO_URL="${15:-""}"
+    ## `SW_CATALOGS_REPO_DIR` is the result of:
+    ## "{{inputs.parameters.sw_catalogs_mount_path}}/{{inputs.parameters.cloned_sw_catalogs_folder_name}}"
+    local SW_CATALOGS_REPO_DIR="${16:-"${SW_CATALOGS_REPO_DIR}"}"
+    local SW_CATALOGS_REPO_URL="${17:-""}"
+    # Prevent a new bootstrap by default
+    local SKIP_BOOTSTRAP="${18:-"true"}"
+    # Only change if absolutely needed
+    local MGMT_PROJECT_NAME="${19:-"osm_admin"}"
+    local MGMT_CLUSTER_NAME="${20:-"_management"}"
+    local BASE_TEMPLATES_PATH="${21:-"cloud-resources"}"
+    local TEMPLATE_MANIFEST_FILENAME="${22:-"${CLUSTER_TYPE,,}01.yaml"}"
+    local MANIFEST_FILENAME="${23:-"${CLUSTER_TYPE,,}-${CLUSTER_NAME}.yaml"}"
+
+
+    # Is the provider type supported?
+    local VALID_PROVIDERS=("eks" "aks" "gke")
+    CLUSTER_TYPE="${CLUSTER_TYPE,,}"
+    [[ ! ($(echo ${VALID_PROVIDERS[@]} | grep -w "${CLUSTER_TYPE}")) ]] && return 1
+
+    # Determine key folders in Fleet
+    local MGMT_RESOURCES_DIR="${FLEET_REPO_DIR}/${MGMT_PROJECT_NAME}/managed-resources/${MGMT_CLUSTER_NAME}"
+
+    # First, delete cluster's CrossPlane resources
+    # NOTE: We only delete the Kustomization referring to CrossPlane resources,
+    # not the bootstrap resources or the profiles. This way, KSUs are not
+    # affected and a potential second unnecessary bootstrap is avoided.
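+    # (Only the "${MANIFEST_FILENAME}" file under that folder is removed below; the
+    # bootstrap manifests generated in the same folder and the cluster profiles are
+    # left in place.)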
+ rm -rf "${MGMT_RESOURCES_DIR}/${CLUSTER_KUSTOMIZATION_NAME}/${MANIFEST_FILENAME}" + + # Then, recreate the manifests with updated values + create_crossplane_cluster \ + "${CLUSTER_KUSTOMIZATION_NAME}" \ + "${CLUSTER_NAME}" \ + "${CLUSTER_TYPE}" \ + "${PROVIDERCONFIG_NAME}" \ + "${VM_SIZE}" \ + "${NODE_COUNT}" \ + "${CLUSTER_LOCATION}" \ + "${K8S_VERSION}" \ + "${PUBLIC_KEY_MGMT}" \ + "${PUBLIC_KEY_NEW_CLUSTER}" \ + "${PRIVATE_KEY_NEW_CLUSTER}" \ + "${AKS_RG_NAME}" \ + "${GKE_PREEMPTIBLE_NODES}" \ + "${FLEET_REPO_DIR}" \ + "${FLEET_REPO_URL}" \ + "${SW_CATALOGS_REPO_DIR}" \ + "${SW_CATALOGS_REPO_URL}" \ + "${SKIP_BOOTSTRAP}" \ + "${MGMT_PROJECT_NAME}" \ + "${MGMT_CLUSTER_NAME}" \ + "${BASE_TEMPLATES_PATH}" \ + "${TEMPLATE_MANIFEST_FILENAME}" \ + "${MANIFEST_FILENAME}" +} + + +# ----- Helper functions for adding/removing a profile from a cluster ----- + +# Helper function to find profiles of a given type already used in the cluster +function profiles_of_type_in_cluster() { + local CLUSTER_KUSTOMIZATION_NAME="$1" + local RELEVANT_PROFILE_TYPE="$2" + local FLEET_REPO_DIR="${3:-"${FLEET_REPO_DIR}"}" + + # Calculated fields + local CLUSTER_FOLDER="${FLEET_REPO_DIR}/clusters/${CLUSTER_KUSTOMIZATION_NAME}" + + # Processing (echoes the list) + folder2list \ + "${CLUSTER_FOLDER}" | \ + get_value_from_resourcelist \ + ".metadata.name" \ + "| select(.kind == \"Kustomization\") + | select(.metadata.labels.osm_profile_type == \"${RELEVANT_PROFILE_TYPE}\")" | \ + multiline2commalist +} + + +# Function to list the profiles **this profile depends on** +function profiles_this_one_depends_on() { + local CLUSTER_KUSTOMIZATION_NAME="$1" + local PROFILE_TYPE="$2" + local FLEET_REPO_DIR="${3:-"${FLEET_REPO_DIR}"}" + + case "${PROFILE_TYPE,,}" in + + "controller" | "infra-controller" | "infra-controllers" | "infra_controller" | "infra_controllers") + # Controllers do not depend on any other type of profiles + echo "" + return 0 + ;; + + "config" | "infra-config" | "infra-configs" | "infra_config" | "infra_configs") + # Infra configs depend on controllers + profiles_of_type_in_cluster \ + "${CLUSTER_KUSTOMIZATION_NAME}" \ + "infra-controllers" \ + "${FLEET_REPO_DIR}" + return 0 + ;; + + "managed" | "resources" | "managed-resources" | "managed_resources") + # Managed resources depend on infra configs + profiles_of_type_in_cluster \ + "${CLUSTER_KUSTOMIZATION_NAME}" \ + "infra-configs" \ + "${FLEET_REPO_DIR}" + return 0 + ;; + + "app" |"apps" | "applications" | "cnf" | "cnfs" | "nf" | "nfs") + # Apps (also) depend on infra configs + profiles_of_type_in_cluster \ + "${CLUSTER_KUSTOMIZATION_NAME}" \ + "infra-configs" \ + "${FLEET_REPO_DIR}" + return 0 + ;; + + *) + echo -n "------------ ERROR ------------" + return 1 + ;; + esac +} + + +# Function to list the profiles that **depend on this profile** +function profiles_depend_on_this_one() { + local CLUSTER_KUSTOMIZATION_NAME="$1" + local PROFILE_TYPE="$2" + local FLEET_REPO_DIR="${3:-"${FLEET_REPO_DIR}"}" + + case "${PROFILE_TYPE,,}" in + + "controller" | "infra-controller" | "infra-controllers" | "infra_controller" | "infra_controllers") + # Infra configs depend on infra controllers + profiles_of_type_in_cluster \ + "${CLUSTER_KUSTOMIZATION_NAME}" \ + "infra-configs" \ + "${FLEET_REPO_DIR}" + return 0 + ;; + + "config" | "infra-config" | "infra-configs" | "infra_config" | "infra_configs") + # Both managed resources and apps depend on configs + local PROFILES=( + $( + profiles_of_type_in_cluster \ + "${CLUSTER_KUSTOMIZATION_NAME}" \ + "managed-resources" \ + 
"${FLEET_REPO_DIR}" + ) \ + $( + profiles_of_type_in_cluster \ + "${CLUSTER_KUSTOMIZATION_NAME}" \ + "apps" \ + "${FLEET_REPO_DIR}" + ) + ) + printf '%s,' "${PROFILES[@]}" | sed 's/,$//g' + return 0 + ;; + + "managed" | "resources" | "managed-resources" | "managed_resources") + # No other profiles depend on managed resources + echo "" + return 0 + ;; + + "app" |"apps" | "applications" | "cnf" | "cnfs" | "nf" | "nfs") + # No other profiles depend on apps + echo "" + return 0 + ;; + + *) + echo -n "------------ ERROR ------------" + return 1 + ;; + esac +} + + +# Helper function to add a dependency to a Kustomization only if it does not exist already +function add_dependency_to_kustomization_safely() { + local KUSTOMIZATION_NAME="$1" + local KUSTOMIZATION_TO_ADD_AS_DEP="$2" + + local INPUT=$(cat) + local FILTER="| select(.kind == \"Kustomization\") | select(.metadata.name == \"${KUSTOMIZATION_NAME}\")" + + # Check if the dependency was added already + local TEST_RESULT=$( + echo "${INPUT}" | \ + is_element_on_list \ + ".spec.dependsOn[].name" \ + "${KUSTOMIZATION_TO_ADD_AS_DEP}" \ + "${FILTER}" + ) + + # If it existed already, returns the stream as is + if [[ "${TEST_RESULT}" == "true" ]] + then + echo "${INPUT}" + # Otherwise, processes the stream to add it + else + echo "${INPUT}" | \ + patch_add_to_list \ + ".spec.dependsOn" \ + "{name: ${KUSTOMIZATION_TO_ADD_AS_DEP}}" \ + "${FILTER}" + fi +} + + +# Helper function to remove a dependency from a Kustomization +function remove_dependency_from_kustomization_safely() { + local KUSTOMIZATION_NAME="$1" + local KUSTOMIZATION_TO_REMOVE_AS_DEP="$2" + + # Calculated inputs + local KEY_PATH=".spec.dependsOn[] | select(.name == \"${KUSTOMIZATION_TO_REMOVE_AS_DEP}\")" + local FILTER="| select(.kind == \"Kustomization\") | select(.metadata.name == \"${KUSTOMIZATION_NAME}\")" + + # Remove the entry from the dependency list (if it exists) + yq "del((.items[]${FILTER})${KEY_PATH})" +} + + +# Ensure list of Kustomizations depend on a given Kustomization +function add_dependency_to_set_of_kustomizations_safely() { + local KS_NAME="$1" + local THEY_DEPEND_ON_THIS="$2" + + local INPUT="$(cat)" + local OUTPUT="" + + # For each of the Kustomizations on the comma-separated list, adds `KS_NAME` as one of their dependencies + for KUST in ${THEY_DEPEND_ON_THIS//,/ } + do + local OUTPUT="$( + echo "${INPUT}" | \ + add_dependency_to_kustomization_safely \ + "${KUST}" \ + "${KS_NAME}" + )" + local INPUT="${OUTPUT}" + done + + # Return the final `ResultList`, after all iterations + echo "${OUTPUT}" +} + + +# Ensure list of Kustomizations no longer depend on a given Kustomization +function remove_dependency_from_set_of_kustomizations_safely() { + local KS_NAME="$1" + local THEY_NO_LONGER_DEPEND_ON_THIS="$2" + + local INPUT="$(cat)" + local OUTPUT="" + + # For each of the Kustomizations on the comma-separated list, removes `KS_NAME` from their dependencies + for KUST in ${THEY_NO_LONGER_DEPEND_ON_THIS//,/ } + do + local OUTPUT="$( + echo "${INPUT}" | \ + remove_dependency_from_kustomization_safely \ + "${KUST}" \ + "${KS_NAME}" + )" + local INPUT="${OUTPUT}" + done + + # Return the final `ResultList`, after all iterations + echo "${OUTPUT}" +} + +# ----- END of Helper functions for adding/removing a profile from a cluster ----- + + +# Add an existing profile to a cluster +function attach_profile_to_cluster() { + local PROFILE_NAME="$1" + local PROFILE_TYPE="$2" + local PROJECT_NAME="$3" + local CLUSTER_KUSTOMIZATION_NAME="$4" + local 
FLEET_REPO_DIR="${5:-"${FLEET_REPO_DIR}"}" + + # Calculated inputs + local CLUSTER_FOLDER="${FLEET_REPO_DIR}/clusters/${CLUSTER_KUSTOMIZATION_NAME}" + local TARGET_PROFILE_PATH="$( + path_to_profile \ + "${PROFILE_NAME}" \ + "${PROFILE_TYPE}" \ + "${PROJECT_NAME}" + )" + + # Finds out which profiles it should depend on... and which profiles should depend on it + local DEPENDS_ON=$( + profiles_this_one_depends_on \ + "${CLUSTER_KUSTOMIZATION_NAME}" \ + "${PROFILE_TYPE}" \ + "${FLEET_REPO_DIR}" + ) + + local THEY_DEPEND_ON_THIS=$( + profiles_depend_on_this_one \ + "${CLUSTER_KUSTOMIZATION_NAME}" \ + "${PROFILE_TYPE}" \ + "${FLEET_REPO_DIR}" + ) + + # Parameters for the new Kustomization object to point to the profile + local KS_NAME="$(safe_name "${PROFILE_TYPE}-${PROFILE_NAME}")" + local MANIFEST_FILENAME="${KS_NAME}.yaml" + local KS_NS=flux-system + local MANIFESTS_PATH="${TARGET_PROFILE_PATH}" + local SOURCE_REPO=GitRepository/fleet-repo.flux-system + local SOURCE_SYNC_INTERVAL="60m" + local HEALTH_CHECK_TO="3m" + local RETRY_INTERVAL="1m" + local TIMEOUT="5m" + local OPTIONS="\ + --decryption-provider=sops \ + --decryption-secret=sops-age \ + --prune=true \ + --timeout="${TIMEOUT}" \ + --retry-interval="${RETRY_INTERVAL}" \ + --label osm_profile_type="${PROFILE_TYPE}" + " + + # Finally, we update the folder with all the required changes: + # - Update pre-existing Kustomizations that should depend on the new profile (besides others). + # - Create a new Kustomization pointing to the profile. + # - Update Kustomize's `kustomization.yaml` at the root of the cluster folder to take into account the new Kustomization pointing to the profile. + # - Update the cluster folder accordingly. + folder2list \ + "${CLUSTER_FOLDER}" | + add_dependency_to_set_of_kustomizations_safely \ + "${KS_NAME}" \ + "${THEY_DEPEND_ON_THIS}" | \ + generator_kustomization \ + "${MANIFEST_FILENAME}" \ + "${KS_NAME}" \ + "${KS_NS}" \ + "${SOURCE_REPO}" \ + "${MANIFESTS_PATH}" \ + "${SOURCE_SYNC_INTERVAL}" \ + "${HEALTH_CHECK_TO}" \ + "${DEPENDS_ON}" \ + "${OPTIONS}" | \ + patch_add_to_list \ + ".resources" \ + "${MANIFEST_FILENAME}" \ + "| select(.kind == \"Kustomization\") | select(.apiVersion == \"kustomize.config.k8s.io/v1beta1\") | select(.metadata.annotations.\"config.kubernetes.io/path\" == \"kustomization.yaml\")" | \ + list2folder_sync_replace \ + "${CLUSTER_FOLDER}" +} + + +# Remove an existing profile from a cluster +function detach_profile_from_cluster() { + local PROFILE_NAME="$1" + local PROFILE_TYPE="$2" + local PROJECT_NAME="$3" + local CLUSTER_KUSTOMIZATION_NAME="$4" + local FLEET_REPO_DIR="${5:-"${FLEET_REPO_DIR}"}" + + # Calculated inputs + local CLUSTER_FOLDER="${FLEET_REPO_DIR}/clusters/${CLUSTER_KUSTOMIZATION_NAME}" + local TARGET_PROFILE_PATH="$( + path_to_profile \ + "${PROFILE_NAME}" \ + "${PROFILE_TYPE}" \ + "${PROJECT_NAME}" + )" + + # Finds out which profiles still depend on it + local THEY_DEPEND_ON_THIS=$( + profiles_depend_on_this_one \ + "${CLUSTER_KUSTOMIZATION_NAME}" \ + "${PROFILE_TYPE}" \ + "${FLEET_REPO_DIR}" + ) + + # Parameters for the new Kustomization object to point to the profile + local KS_NAME="$(safe_name "${PROFILE_TYPE}-${PROFILE_NAME}")" + + # Finally, we update the folder with all the required changes: + # - Update pre-existing Kustomizations that should depend on the new profile (besides others). + # - Create a new Kustomization pointing to the profile. 
+ # - Update Kustomize's `kustomization.yaml` at the root of the cluster folder so that it no longer tries to gather the Kustomization pointing to the profile. + # - Update the cluster folder accordingly. + folder2list \ + "${CLUSTER_FOLDER}" | + remove_dependency_from_set_of_kustomizations_safely \ + "${KS_NAME}" \ + "${THEY_DEPEND_ON_THIS}" | \ + delete_object \ + "${KS_NAME}" \ + "Kustomization" \ + "kustomize.toolkit.fluxcd.io/v1" | \ + patch_delete_from_list \ + ".resources[] | select(. == \"${MANIFEST_FILENAME}\") " \ + "| select(.kind == \"Kustomization\") | select(.apiVersion == \"kustomize.config.k8s.io/v1beta1\") | select(.metadata.annotations.\"config.kubernetes.io/path\" == \"kustomization.yaml\")" | \ + list2folder_sync_replace \ + "${CLUSTER_FOLDER}" +} + + +# Low-level function to add a KSU into a profile +function create_ksu_into_profile() { + local KSU_NAME="$1" + local TARGET_PROFILE_FOLDER="$2" + local TEMPLATES_PATH="$3" + local SW_CATALOGS_REPO_DIR="$4" + local TRANSFORMER="${5:-noop_transformer}" + + # Gathers all optional parameters for transformer funcion (if any) and puts them into an array for further use + local ALL_PARAMS=( "${@}" ) + local TRANSFORMER_ARGS=( "${ALL_PARAMS[@]:5}" ) + + # Composes the route to the local templates folder + local TEMPLATES_FOLDER="${SW_CATALOGS_REPO_DIR}/${TEMPLATES_PATH}" + + folder2list \ + "${TEMPLATES_FOLDER}" | \ + "${TRANSFORMER}" \ + "${TRANSFORMER_ARGS[@]}" | \ + prepend_folder_path "${KSU_NAME}/" | \ + list2folder_cp_over \ + "${TARGET_PROFILE_FOLDER}" +} + + +# Function to render a KSU from a `ResourceList` into a profile +function render_ksu_into_profile() { + local KSU_NAME="$1" + local PROFILE_NAME="$2" + local PROFILE_TYPE="$3" + local PROJECT_NAME="${4:-"${MGMT_PROJECT_NAME}"}" + local FLEET_REPO_DIR="$5" + local SYNC="${6:-"false"}" + + local TARGET_PROFILE_PATH=$( + path_to_profile \ + "${PROFILE_NAME}" \ + "${PROFILE_TYPE}" \ + "${PROJECT_NAME}" + ) + + local TARGET_PROFILE_FOLDER="${FLEET_REPO_DIR}/${TARGET_PROFILE_PATH}" + + # Determines the appropriate function depending on rendering strategy + # - Sync (and potentially delete files in target folder) + # - Copy over (only overwrite changed files, keep the rest) + RENDERER="" + if [[ ${SYNC,,} == "true" ]]; + then + RENDERER="list2folder_sync_replace" + else + RENDERER="list2folder_cp_over" + fi + + # Render with the selected strategy + [[ "${DRY_RUN,,}" != "true" ]] && mkdir -p "${TARGET_PROFILE_FOLDER}/${KSU_NAME}" + "${RENDERER}" \ + "${TARGET_PROFILE_FOLDER}/${KSU_NAME}" + ## This is improves the behaviour of the following code, + ## since avoids unintented deletions in parent folder due to sync + # prepend_folder_path "${KSU_NAME}/" | \ + # "${RENDERER}" \ + # "${TARGET_PROFILE_FOLDER}" +} + + +# High-level function to add a KSU into a profile for the case where +# 1. It is originated from an OKA, and +# 2. It is based on a HelmRelease. 
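+# Illustrative invocation (all values are hypothetical; arguments are positional, in the
+# order of the `local` declarations below):
+#
+#   create_hr_ksu_into_profile \
+#       "/sw-catalogs/sw-catalogs-osm/apps/jenkins/templates" \
+#       "true" '${KSU_NAME}' "KSU_NAME=jenkins" \
+#       "jenkins" "jenkins" "controller: {numExecutors: 2}" \
+#       "false" "jenkins" "jenkins-values" "values.yaml" "${AGE_PUBLIC_KEY_NEW_CLUSTER}" "adminPassword: changeme" \
+#       "false" "" "" "" \
+#       "jenkins" "mycluster" "apps" "osm_admin" "/fleet/fleet-osm" "true"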
+function create_hr_ksu_into_profile() { + # Base KSU generation from template + ## `TEMPLATES_DIR` is the result of: + ## "{{inputs.parameters.sw_catalogs_mount_path}}/{{inputs.parameters.cloned_sw_catalogs_folder_name}}/{{inputs.parameters.templates_path}}" + local TEMPLATES_DIR="$1" + local SUBSTITUTE_ENVIRONMENT="${2:-"false"}" + local SUBSTITUTION_FILTER="${3:-""}" + local CUSTOM_ENV_VARS="${4:-""}" + # Patch HelmRelease in KSU with inline values + local KUSTOMIZATION_NAME="$5" + local HELMRELEASE_NAME="$6" + local INLINE_VALUES="${7:-""}" + # Secret reference and generation (if required) + local IS_PREEXISTING_SECRET="${8:-"false"}" + local TARGET_NS="$9" + local VALUES_SECRET_NAME="${10}" + local SECRET_KEY="${11:-"values.yaml"}" + local AGE_PUBLIC_KEY="${12}" + ## `SECRET_VALUES` will be obtained from the + ## secret named after the input parameter `reference_secret_for_values`, + ## and from the key named after the input parameter `reference_key_for_values` + local LOCAL_SECRET_VALUES="${13:-"${SECRET_VALUES}"}" + # ConfigMap reference and generation (if required) + local IS_PREEXISTING_CM="${14:-"false"}" + local VALUES_CM_NAME="${15:-""}" + local CM_KEY="${16:-""}" + local CM_VALUES="${17:-""}" + # KSU rendering + local KSU_NAME="${18}" + local PROFILE_NAME="${19}" + local PROFILE_TYPE="${20}" + local PROJECT_NAME="${21:-"osm_admin"}" + ## `FLEET_REPO_DIR` is the result of: + ## "{{inputs.parameters.fleet_mount_path}}/{{inputs.parameters.cloned_fleet_folder_name}}" + local FLEET_REPO_DIR="${22:-"/fleet/fleet-osm/"}" + local SYNC="${23:-"true"}" + + # Decides which steps may be skipped + HAS_INLINE_VALUES=$([[ -n "${INLINE_VALUES}" ]]; echo $?) + HAS_REFERENCES=$([[ ( -n "${VALUES_SECRET_NAME}" ) || ( -n "${VALUES_CM_NAME}" ) ]]; echo $?) + NEEDS_NEW_SECRET=$([[ ( -n "${VALUES_SECRET_NAME}" ) && ( "${IS_PREEXISTING_SECRET,,}" == "false" ) ]]; echo $?) + NEEDS_NEW_CM=$([[ ( -n "${VALUES_CM_NAME}" ) && ( "${IS_PREEXISTING_CM,,}" == "false" ) ]]; echo $?) + ECHO_RESOURCELIST=$([[ "${DEBUG,,}" == "true" ]]; echo $?) 
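+
+    # NOTE (assumed semantics, inferred from usage; `make_generator` is not defined in this
+    # file): `make_generator FILENAME CMD...` appends the manifest produced by CMD to the
+    # ResourceList under FILENAME. For the values Secret below, `kubectl_encrypt` SOPS-encrypts
+    # the generated manifest with the age public key before it is added.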
+ + # If applicable, loads additional environment variables + if [[ -n "${CUSTOM_ENV_VARS}" ]]; + then + set -a + source <(echo "${CUSTOM_ENV_VARS}") + set +a + fi + + # Runs workflow + folder2list_generator \ + "${TEMPLATES_DIR}" \ + "${SUBSTITUTE_ENVIRONMENT}" \ + "${SUBSTITUTION_FILTER}" | \ + transform_if \ + "${HAS_INLINE_VALUES}" \ + add_values_to_helmrelease_via_ks \ + "${KUSTOMIZATION_NAME}" \ + "${HELMRELEASE_NAME}" \ + "${INLINE_VALUES}" | \ + transform_if \ + "${HAS_REFERENCES}" \ + add_ref_values_to_hr_via_ks \ + "${KUSTOMIZATION_NAME}" \ + "${HELMRELEASE_NAME}" \ + "${VALUES_SECRET_NAME}" \ + "${VALUES_CM_NAME}" | \ + transform_if \ + "${NEEDS_NEW_SECRET}" \ + make_generator \ + "hr-values-secret.yaml" \ + kubectl_encrypt \ + "${AGE_PUBLIC_KEY}" \ + create \ + secret \ + generic \ + "${VALUES_SECRET_NAME}" \ + --namespace="${TARGET_NS}" \ + --from-file="${SECRET_KEY}"=<(echo "${LOCAL_SECRET_VALUES}") \ + -o=yaml \ + --dry-run=client | \ + transform_if \ + "${NEEDS_NEW_CM}" \ + make_generator \ + "hr-values-configmap.yaml" \ + kubectl \ + create \ + configmap \ + "${VALUES_CM_NAME}" \ + --namespace="${TARGET_NS}" \ + --from-file="${SECRET_KEY}"=<(echo "${CM_VALUES}") \ + -o=yaml \ + --dry-run=client | \ + transform_if \ + "${ECHO_RESOURCELIST}" \ + tee /dev/stderr | \ + render_ksu_into_profile \ + "${KSU_NAME}" \ + "${PROFILE_NAME}" \ + "${PROFILE_TYPE}" \ + "${PROJECT_NAME}" \ + "${FLEET_REPO_DIR}" \ + "${SYNC}" +} + + +# High-level function to update a KSU for the case where +# 1. It is originated from an OKA, and +# 2. It is based on a HelmRelease. +# NOTE: It is an alias of `create_hr_ksu_into_profile`, setting `sync` to true +function update_hr_ksu_into_profile() { + # Base KSU generation from template + ## `TEMPLATES_DIR` is the result of: + ## "{{inputs.parameters.sw_catalogs_mount_path}}/{{inputs.parameters.cloned_sw_catalogs_folder_name}}/{{inputs.parameters.templates_path}}" + local TEMPLATES_DIR="$1" + local SUBSTITUTE_ENVIRONMENT="${2:-"false"}" + local SUBSTITUTION_FILTER="${3:-""}" + local CUSTOM_ENV_VARS="${4:-""}" + # Patch HelmRelease in KSU with inline values + local KUSTOMIZATION_NAME="$5" + local HELMRELEASE_NAME="$6" + local INLINE_VALUES="${7:-""}" + # Secret reference and generation (if required) + local IS_PREEXISTING_SECRET="${8:-"false"}" + local TARGET_NS="$9" + local VALUES_SECRET_NAME="${10}" + local SECRET_KEY="${11:-"values.yaml"}" + local AGE_PUBLIC_KEY="${12}" + ## `SECRET_VALUES` will be obtained from the + ## secret named after the input parameter `reference_secret_for_values`, + ## and from the key named after the input parameter `reference_key_for_values` + local LOCAL_SECRET_VALUES="${13:-"${SECRET_VALUES}"}" + # ConfigMap reference and generation (if required) + local IS_PREEXISTING_CM="${14:-"false"}" + local VALUES_CM_NAME="${15:-""}" + local CM_KEY="${16:-""}" + local CM_VALUES="${17:-""}" + # KSU rendering + local KSU_NAME="${18}" + local PROFILE_NAME="${19}" + local PROFILE_TYPE="${20}" + local PROJECT_NAME="${21:-"osm_admin"}" + ## `FLEET_REPO_DIR` is the result of: + ## "{{inputs.parameters.fleet_mount_path}}/{{inputs.parameters.cloned_fleet_folder_name}}" + local FLEET_REPO_DIR="${22:-"/fleet/fleet-osm/"}" + # local SYNC="${23:-"true"}" + + + # This function is just an alias of `create_hr_ksu_into_profile` + # forcing synchronization over the KSU folder + create_hr_ksu_into_profile \ + "${TEMPLATES_DIR}" \ + "${SUBSTITUTE_ENVIRONMENT}" \ + "${SUBSTITUTION_FILTER}" \ + "${CUSTOM_ENV_VARS}" \ + "${KUSTOMIZATION_NAME}" \ + 
"${HELMRELEASE_NAME}" \ + "${INLINE_VALUES}" \ + "${IS_PREEXISTING_SECRET}" \ + "${TARGET_NS}" \ + "${VALUES_SECRET_NAME}" \ + "${SECRET_KEY}" \ + "${AGE_PUBLIC_KEY}" \ + "${LOCAL_SECRET_VALUES}" \ + "${IS_PREEXISTING_CM}" \ + "${VALUES_CM_NAME}" \ + "${CM_KEY}" \ + "${CM_VALUES}" \ + "${KSU_NAME}" \ + "${PROFILE_NAME}" \ + "${PROFILE_TYPE}" \ + "${PROJECT_NAME}" \ + "${FLEET_REPO_DIR}" \ + "true" +} + + +# High-level function to create a "generated" KSU into a profile when: +# 1. There is no template (OKA) available. +# 2. The SW is based on a Helm Chart that we want to deploy. +function create_generated_ksu_from_helm_into_profile() { + # HelmRelease generation + local HELMRELEASE_NAME="$1" + local CHART_NAME="$2" + local CHART_VERSION="$3" + local TARGET_NS="$4" + local CREATE_NS="${5:-"true"}" + # Repo source generation + local IS_PREEXISTING_REPO="${6:-"false"}" + local HELMREPO_NAME="$7" + local HELMREPO_URL="${8:-""}" + local HELMREPO_NS="${9:-"${TARGET_NS}"}" + local HELMREPO_SECRET_REF="${10:-""}" + # HelmRelease inline values (if any) + local INLINE_VALUES="${11:-""}" + # Secret reference and generation (if required) + local IS_PREEXISTING_SECRET="${12:-"false"}" + local VALUES_SECRET_NAME="${13}" + local SECRET_KEY="${14:-"values.yaml"}" + local AGE_PUBLIC_KEY="${15}" + ## `SECRET_VALUES` will be obtained from the + ## secret named after the input parameter `reference_secret_for_values`, + ## and from the key named after the input parameter `reference_key_for_values` + local LOCAL_SECRET_VALUES="${16:-"${SECRET_VALUES}"}" + # ConfigMap reference and generation (if required) + local IS_PREEXISTING_CM="${17:-"false"}" + local VALUES_CM_NAME="${18:-""}" + local CM_KEY="${19:-""}" + local CM_VALUES="${20:-""}" + # KSU rendering + local KSU_NAME="${21}" + local PROFILE_NAME="${22}" + local PROFILE_TYPE="${23}" + local PROJECT_NAME="${24:-"osm_admin"}" + ## `FLEET_REPO_DIR` is the result of: + ## "{{inputs.parameters.fleet_mount_path}}/{{inputs.parameters.cloned_fleet_folder_name}}" + local FLEET_REPO_DIR="${25:-"/fleet/fleet-osm/"}" + # By default, it will not syncronize, so that we can easily accumulate more than + # one Helm chart into the same KSU if desired + local SYNC="${26:-"false"}" + + # Decides which steps may be skipped + local NEEDS_NEW_NS=$([[ "${CREATE_NS,,}" == "true" ]]; echo $?) + local NEEDS_NEW_REPO_SOURCE=$([[ "${IS_PREEXISTING_REPO,,}" == "false" ]]; echo $?) + local NEEDS_NEW_SECRET=$([[ ( -n "${VALUES_SECRET_NAME}" ) && ( "${IS_PREEXISTING_SECRET,,}" == "false" ) ]]; echo $?) + local NEEDS_NEW_CM=$([[ ( -n "${VALUES_CM_NAME}" ) && ( "${IS_PREEXISTING_CM,,}" == "false" ) ]]; echo $?) + local ECHO_RESOURCELIST=$([[ "${DEBUG,,}" == "true" ]]; echo $?) 
+ + # Determine extra options for HelmRelease creation and define full command + OPTION_CHART_VERSION="" + [[ -n "${CHART_VERSION}" ]] && OPTION_CHART_VERSION='--chart-version=${CHART_VERSION}' + OPTION_INLINE_VALUES="" + [[ -n "${INLINE_VALUES}" ]] && OPTION_INLINE_VALUES='--values=<( + echo "${INLINE_VALUES}" + )' + OPTION_REFERENCE_SECRET="" + [[ -n "${VALUES_SECRET_NAME}" ]] && OPTION_REFERENCE_SECRET='--values-from=Secret/${VALUES_SECRET_NAME}' + OPTION_REFERENCE_CM="" + [[ -n "${VALUES_CM_NAME}" ]] && OPTION_REFERENCE_CM='--values-from=ConfigMap/${VALUES_CM_NAME}' + + export HR_COMMAND="\ + flux \ + -n "${TARGET_NS}" \ + create hr "${HELMRELEASE_NAME}" \ + --chart="${CHART_NAME}" \ + --source=HelmRepository/"${HELMREPO_NAME}.${HELMREPO_NS}" \ + "${OPTION_CHART_VERSION}" \ + "${OPTION_INLINE_VALUES}" \ + "${OPTION_REFERENCE_SECRET}" \ + "${OPTION_REFERENCE_CM}" \ + --export + " + + # Determine extra options for Helm source repo creation and define full command + OPTION_REPO_SECRET="" + [[ -n "${HELMREPO_SECRET_REF}" ]] && OPTION_REPO_SECRET='--secret-ref=${HELMREPO_SECRET_REF}' + + export REPO_COMMAND="\ + flux \ + -n "${HELMREPO_NS}" \ + create source helm "${HELMREPO_NAME}" \ + --url="${HELMREPO_URL}" \ + "${OPTION_REPO_SECRET}" \ + --export + " + + # Runs workflow + echo "" | \ + make_generator \ + "helm-release.yaml" \ + eval "${HR_COMMAND}" | \ + transform_if \ + "${NEEDS_NEW_NS}" \ + make_generator \ + "ns-for-hr.yaml" \ + kubectl \ + create \ + namespace \ + "${TARGET_NS}" \ + -o=yaml \ + --dry-run=client | \ + transform_if \ + "${NEEDS_NEW_REPO_SOURCE}" \ + make_generator \ + "helm-repo.yaml" \ + eval "${REPO_COMMAND}" | \ + transform_if \ + "${NEEDS_NEW_SECRET}" \ + make_generator \ + "hr-values-secret.yaml" \ + kubectl_encrypt \ + "${AGE_PUBLIC_KEY}" \ + create \ + secret \ + generic \ + "${VALUES_SECRET_NAME}" \ + --namespace="${TARGET_NS}" \ + --from-file="${SECRET_KEY}"=<(echo "${LOCAL_SECRET_VALUES}") \ + -o=yaml \ + --dry-run=client | \ + transform_if \ + "${NEEDS_NEW_CM}" \ + make_generator \ + "hr-values-configmap.yaml" \ + kubectl \ + create \ + configmap \ + "${VALUES_CM_NAME}" \ + --namespace="${TARGET_NS}" \ + --from-file="${SECRET_KEY}"=<(echo "${CM_VALUES}") \ + -o=yaml \ + --dry-run=client | \ + transform_if \ + "${ECHO_RESOURCELIST}" \ + tee /dev/stderr | \ + render_ksu_into_profile \ + "${KSU_NAME}" \ + "${PROFILE_NAME}" \ + "${PROFILE_TYPE}" \ + "${PROJECT_NAME}" \ + "${FLEET_REPO_DIR}" \ + "${SYNC}" +} + + +# High-level function to update a "generated" KSU: +# 1. There is no template (OKA) available. +# 2. The SW is based on a Helm Chart that we want to deploy. 
+# NOTE: It is an alias of `create_generated_ksu_from_helm_into_profile`, setting `sync` to true +function update_generated_ksu_from_helm_into_profile() { + # HelmRelease generation + local HELMRELEASE_NAME="$1" + local CHART_NAME="$2" + local CHART_VERSION="$3" + local TARGET_NS="$4" + local CREATE_NS="${5:-"true"}" + # Repo source generation + local IS_PREEXISTING_REPO="${6:-"false"}" + local HELMREPO_NAME="$7" + local HELMREPO_URL="${8:-""}" + local HELMREPO_NS="${9:-"${TARGET_NS}"}" + local HELMREPO_SECRET_REF="${10:-""}" + # HelmRelease inline values (if any) + local INLINE_VALUES="${11:-""}" + # Secret reference and generation (if required) + local IS_PREEXISTING_SECRET="${12:-"false"}" + local VALUES_SECRET_NAME="${13}" + local SECRET_KEY="${14:-"values.yaml"}" + local AGE_PUBLIC_KEY="${15}" + ## `SECRET_VALUES` will be obtained from the + ## secret named after the input parameter `reference_secret_for_values`, + ## and from the key named after the input parameter `reference_key_for_values` + local LOCAL_SECRET_VALUES="${16:-"${SECRET_VALUES}"}" + # ConfigMap reference and generation (if required) + local IS_PREEXISTING_CM="${17:-"false"}" + local VALUES_CM_NAME="${18:-""}" + local CM_KEY="${19:-""}" + local CM_VALUES="${20:-""}" + # KSU rendering + local KSU_NAME="${21}" + local PROFILE_NAME="${22}" + local PROFILE_TYPE="${23}" + local PROJECT_NAME="${24:-"osm_admin"}" + ## `FLEET_REPO_DIR` is the result of: + ## "{{inputs.parameters.fleet_mount_path}}/{{inputs.parameters.cloned_fleet_folder_name}}" + local FLEET_REPO_DIR="${25:-"/fleet/fleet-osm/"}" + # By default, it will not syncronize, so that we can easily accumulate more than + # one Helm chart into the same KSU if desired + # local SYNC="${26:-"false"}" + + # Decides which steps may be skipped + local NEEDS_NEW_NS=$([[ "${CREATE_NS,,}" == "true" ]]; echo $?) + local NEEDS_NEW_REPO_SOURCE=$([[ "${IS_PREEXISTING_REPO,,}" == "false" ]]; echo $?) + local NEEDS_NEW_SECRET=$([[ ( -n "${VALUES_SECRET_NAME}" ) && ( "${IS_PREEXISTING_SECRET,,}" == "false" ) ]]; echo $?) + local NEEDS_NEW_CM=$([[ ( -n "${VALUES_CM_NAME}" ) && ( "${IS_PREEXISTING_CM,,}" == "false" ) ]]; echo $?) + local ECHO_RESOURCELIST=$([[ "${DEBUG,,}" == "true" ]]; echo $?) 
+ + + # This function is just an alias of `create_generated_ksu_from_helm_into_profile` + # forcing synchronization over the KSU folder + create_generated_ksu_from_helm_into_profile \ + "${HELMRELEASE_NAME}" \ + "${CHART_NAME}" \ + "${CHART_VERSION}" \ + "${TARGET_NS}" \ + "${CREATE_NS}" \ + "${IS_PREEXISTING_REPO}" \ + "${HELMREPO_NAME}" \ + "${HELMREPO_URL}" \ + "${HELMREPO_NS}" \ + "${HELMREPO_SECRET_REF}" \ + "${INLINE_VALUES}" \ + "${IS_PREEXISTING_SECRET}" \ + "${VALUES_SECRET_NAME}" \ + "${SECRET_KEY}" \ + "${AGE_PUBLIC_KEY}" \ + "${LOCAL_SECRET_VALUES}" \ + "${IS_PREEXISTING_CM}" \ + "${VALUES_CM_NAME}" \ + "${CM_KEY}" \ + "${CM_VALUES}" \ + "${KSU_NAME}" \ + "${PROFILE_NAME}" \ + "${PROFILE_TYPE}" \ + "${PROJECT_NAME}" \ + "${FLEET_REPO_DIR}" \ + "true" +} + + +# Low-level function to delete a KSU from a profile +function delete_ksu_from_profile_path() { + local KSU_NAME="$1" + local TARGET_PROFILE_PATH="$2" + local FLEET_REPO_DIR="${3:-"${FLEET_REPO_DIR}"}" + + # Calculate profile folder + TARGET_PROFILE_FOLDER="${FLEET_REPO_DIR}/${TARGET_PROFILE_PATH}" + + # Delete the KSU folder + rm -rf "${TARGET_PROFILE_FOLDER}/${KSU_NAME}" +} + + +# High-level function to delete a KSU from a profile +function delete_ksu_from_profile() { + local KSU_NAME="$1" + local PROFILE_NAME="$2" + local PROFILE_TYPE="$3" + local PROJECT_NAME="${4:-"osm_admin"}" + ## `FLEET_REPO_DIR` is the result of: + ## "{{inputs.parameters.fleet_mount_path}}/{{inputs.parameters.cloned_fleet_folder_name}}" + local FLEET_REPO_DIR="$5" + + # Calculate profile folder + local TARGET_PROFILE_PATH=$( + path_to_profile \ + "${PROFILE_NAME}" \ + "${PROFILE_TYPE}" \ + "${PROJECT_NAME}" + ) + TARGET_PROFILE_FOLDER="${FLEET_REPO_DIR}/${TARGET_PROFILE_PATH}" + + # Delete the KSU folder + rm -rf "${TARGET_PROFILE_FOLDER}/${KSU_NAME}" +} + + +# High-level function to clone a KSU from a profile to another +function clone_ksu() { + local SOURCE_KSU_NAME="$1" + local SOURCE_PROFILE_NAME="$2" + local SOURCE_PROFILE_TYPE="$3" + local SOURCE_PROJECT_NAME="${4:-"osm_admin"}" + local DESTINATION_KSU_NAME="${5:-"${SOURCE_KSU_NAME}"}" + local DESTINATION_PROFILE_NAME="${6:-"${SOURCE_PROFILE_NAME}"}" + local DESTINATION_PROFILE_TYPE="${7:-"${SOURCE_PROFILE_TYPE}"}" + local DESTINATION_PROJECT_NAME="${8:-"${SOURCE_PROJECT_NAME}"}" + ## `FLEET_REPO_DIR` is the result of: + ## "{{inputs.parameters.fleet_mount_path}}/{{inputs.parameters.cloned_fleet_folder_name}}" + local FLEET_REPO_DIR="$9" + + + # If source and destination are identical, aborts + if [[ + ("${SOURCE_KSU_NAME}" == "${DESTINATION_KSU_NAME}") && \ + ("${SOURCE_PROFILE_NAME}" == "${DESTINATION_PROFILE_NAME}") && \ + ("${SOURCE_PROFILE_TYPE}" == "${DESTINATION_PROFILE_TYPE}") && \ + ("${SOURCE_PROJECT_NAME}" == "${DESTINATION_PROJECT_NAME}") \ + ]]; + then + return 1 + fi + + # Calculate profile folders + local SOURCE_PROFILE_PATH=$( + path_to_profile \ + "${SOURCE_PROFILE_NAME}" \ + "${SOURCE_PROFILE_TYPE}" \ + "${SOURCE_PROJECT_NAME}" + ) + local SOURCE_PROFILE_FOLDER="${FLEET_REPO_DIR}/${SOURCE_PROFILE_PATH}" + local DESTINATION_PROFILE_PATH=$( + path_to_profile \ + "${DESTINATION_PROFILE_NAME}" \ + "${DESTINATION_PROFILE_TYPE}" \ + "${DESTINATION_PROJECT_NAME}" + ) + local DESTINATION_PROFILE_FOLDER="${FLEET_REPO_DIR}/${DESTINATION_PROFILE_PATH}" + + # Clone KSU folder + cp -ar \ + "${SOURCE_PROFILE_FOLDER}/${SOURCE_KSU_NAME}" \ + "${DESTINATION_PROFILE_FOLDER}/${DESTINATION_KSU_NAME}" +} + + +# Create a `ProviderConfig` for a CrossPlane provider +function 
create_crossplane_providerconfig() { + local PROVIDERCONFIG_NAME="$1" + # As of today, one among `azure`, `aws` or `gcp`: + local PROVIDER_TYPE="$2" + local CRED_SECRET_NAME="$3" + local CRED_SECRET_KEY="${4:-"creds"}" + local CRED_SECRET_NS="${5:-"crossplane-system"}" + # If empty, it assumes the secret already exists + local CRED_SECRET_CONTENT="${6:-"${CRED_SECRET_CONTENT:-""}"}" + local AGE_PUBLIC_KEY_MGMT="$7" + ## `FLEET_REPO_DIR` is the result of: + ## "{{inputs.parameters.fleet_mount_path}}/{{inputs.parameters.cloned_fleet_folder_name}}" + local FLEET_REPO_DIR="${8:-"${FLEET_REPO_DIR}"}" + ## `SW_CATALOGS_REPO_DIR` is the result of: + ## "{{inputs.parameters.sw_catalogs_mount_path}}/{{inputs.parameters.cloned_sw_catalogs_folder_name}}" + local SW_CATALOGS_REPO_DIR="${9:-"${SW_CATALOGS_REPO_DIR}"}" + # Only when applicable + local TARGET_GCP_PROJECT="${10:-""}" + # Do not touch unless strictly needed + local BASE_TEMPLATES_PATH="${11:-"infra-configs/crossplane/providers"}" + local OSM_PROJECT_NAME="${12:-"osm_admin"}" + local MGMT_CLUSTER_NAME="${13:-"_management"}" + + + # Is the provider type supported? + local VALID_PROVIDERS=("aws" "azure" "gcp") + PROVIDER_TYPE="${PROVIDER_TYPE,,}" + [[ ! ($(echo ${VALID_PROVIDERS[@]} | grep -w "${PROVIDER_TYPE}")) ]] && return 1 + + # Determines the source dir for the templates and the target folder in Fleet + local TEMPLATES_DIR="${SW_CATALOGS_REPO_DIR}/${BASE_TEMPLATES_PATH}/${PROVIDER_TYPE}/templates" + local TARGET_FOLDER="${FLEET_REPO_DIR}/${OSM_PROJECT_NAME}/infra-config-profiles/${MGMT_CLUSTER_NAME}/crossplane-providerconfigs/${PROVIDER_TYPE}" + + # Determine which optional steps may be needed + local NEEDS_NEW_SECRET=$([[ -n "${CRED_SECRET_CONTENT}" ]]; echo $?) + local NEEDS_PROJECT_NAME=$([[ "${PROVIDER_TYPE}" == "gcp" ]]; echo $?) 
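+
+    # The pipeline below renders the result into (illustrative layout; the ProviderConfig
+    # manifest filename is taken from the provider template folder):
+    #   ${TARGET_FOLDER}/${PROVIDERCONFIG_NAME}/<providerconfig manifest>.yaml
+    #   ${TARGET_FOLDER}/${PROVIDERCONFIG_NAME}/credentials-secret.yaml   (only when a new secret is generated)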
+ + # Renders the `ProviderConfig` manifest and the encrypted secret (if applicable) + echo "" | \ + folder2list_generator \ + "${TEMPLATES_DIR}" | \ + patch_replace \ + ".metadata.name" \ + "${PROVIDERCONFIG_NAME}" \ + "| select(.kind == \"ProviderConfig\")" | \ + patch_replace \ + ".spec.credentials.secretRef.name" \ + "${CRED_SECRET_NAME}" \ + "| select(.kind == \"ProviderConfig\")" | \ + patch_replace \ + ".spec.credentials.secretRef.key" \ + "${CRED_SECRET_KEY}" \ + "| select(.kind == \"ProviderConfig\")" | \ + patch_replace \ + ".spec.credentials.secretRef.namespace" \ + "${CRED_SECRET_NS}" \ + "| select(.kind == \"ProviderConfig\")" | \ + transform_if \ + "${NEEDS_PROJECT_NAME}" \ + patch_replace \ + ".spec.projectID" \ + "${TARGET_GCP_PROJECT}" \ + "| select(.kind == \"ProviderConfig\")" | \ + transform_if \ + "${NEEDS_NEW_SECRET}" \ + make_generator \ + "credentials-secret.yaml" \ + kubectl_encrypt \ + "${AGE_PUBLIC_KEY_MGMT}" \ + create \ + secret \ + generic \ + "${CRED_SECRET_NAME}" \ + --namespace="${CRED_SECRET_NS}" \ + --from-file="${CRED_SECRET_KEY}"=<(echo "${CRED_SECRET_CONTENT}") \ + -o=yaml \ + --dry-run=client | \ + prepend_folder_path \ + "${PROVIDERCONFIG_NAME}/" | \ + list2folder_cp_over \ + "${TARGET_FOLDER}" +} + + +# Delete a `ProviderConfig` for a CrossPlane provider +function delete_crossplane_providerconfig() { + local PROVIDERCONFIG_NAME="$1" + # As of today, one among `azure`, `aws` or `gcp`: + local PROVIDER_TYPE="$2" + ## `FLEET_REPO_DIR` is the result of: + ## "{{inputs.parameters.fleet_mount_path}}/{{inputs.parameters.cloned_fleet_folder_name}}" + local FLEET_REPO_DIR="${3:-"${FLEET_REPO_DIR}"}" + # Do not touch unless strictly needed + local OSM_PROJECT_NAME="${4:-"osm_admin"}" + local MGMT_CLUSTER_NAME="${5:-"_management"}" + + + # Is the provider type supported? + local VALID_PROVIDERS=("aws" "azure" "gcp") + PROVIDER_TYPE="${PROVIDER_TYPE,,}" + [[ ! ($(echo ${VALID_PROVIDERS[@]} | grep -w "${PROVIDER_TYPE}")) ]] && return 1 + + # Determines the target folder in Fleet + local PROVIDERCONFIG_FOLDER="${FLEET_REPO_DIR}/${OSM_PROJECT_NAME}/infra-config-profiles/${MGMT_CLUSTER_NAME}/crossplane-providerconfigs/${PROVIDER_TYPE}/${PROVIDERCONFIG_NAME}" + + # Delete the folder + rm -rf "${PROVIDERCONFIG_FOLDER}" +} + + +# Update a `ProviderConfig` for a CrossPlane provider +function update_crossplane_providerconfig() { + local PROVIDERCONFIG_NAME="$1" + # As of today, one among `azure`, `aws` or `gcp`: + local PROVIDER_TYPE="$2" + local CRED_SECRET_NAME="$3" + local CRED_SECRET_KEY="${4:-"creds"}" + local CRED_SECRET_NS="${5:-"crossplane-system"}" + # If empty, it assumes the secret already exists + local CRED_SECRET_CONTENT="${6:-"${CRED_SECRET_CONTENT:-""}"}" + local AGE_PUBLIC_KEY_MGMT="$7" + ## `FLEET_REPO_DIR` is the result of: + ## "{{inputs.parameters.fleet_mount_path}}/{{inputs.parameters.cloned_fleet_folder_name}}" + local FLEET_REPO_DIR="${8:-"${FLEET_REPO_DIR}"}" + ## `SW_CATALOGS_REPO_DIR` is the result of: + ## "{{inputs.parameters.sw_catalogs_mount_path}}/{{inputs.parameters.cloned_sw_catalogs_folder_name}}" + local SW_CATALOGS_REPO_DIR="${9:-"${SW_CATALOGS_REPO_DIR}"}" + # Only when applicable + local TARGET_GCP_PROJECT="${10:-""}" + # Do not touch unless strictly needed + local BASE_TEMPLATES_PATH="${11:-"infra-configs/crossplane/providers"}" + local OSM_PROJECT_NAME="${12:-"osm_admin"}" + local MGMT_CLUSTER_NAME="${13:-"_management"}" + + + # Is the provider type supported? 
+    local VALID_PROVIDERS=("aws" "azure" "gcp")
+    PROVIDER_TYPE="${PROVIDER_TYPE,,}"
+    [[ ! ($(echo ${VALID_PROVIDERS[@]} | grep -w "${PROVIDER_TYPE}")) ]] && return 1
+
+    # First, delete; then, re-create
+    delete_crossplane_providerconfig \
+        "${PROVIDERCONFIG_NAME}" \
+        "${PROVIDER_TYPE}" \
+        "${FLEET_REPO_DIR}" \
+        "${OSM_PROJECT_NAME}" \
+        "${MGMT_CLUSTER_NAME}"
+
+    create_crossplane_providerconfig \
+        "${PROVIDERCONFIG_NAME}" \
+        "${PROVIDER_TYPE}" \
+        "${CRED_SECRET_NAME}" \
+        "${CRED_SECRET_KEY}" \
+        "${CRED_SECRET_NS}" \
+        "${CRED_SECRET_CONTENT}" \
+        "${AGE_PUBLIC_KEY_MGMT}" \
+        "${FLEET_REPO_DIR}" \
+        "${SW_CATALOGS_REPO_DIR}" \
+        "${TARGET_GCP_PROJECT}" \
+        "${BASE_TEMPLATES_PATH}" \
+        "${OSM_PROJECT_NAME}" \
+        "${MGMT_CLUSTER_NAME}"
+}
+
+
+# Helper function to return the relative path of a location in SW Catalogs for an OKA
+function path_to_catalog() {
+    local OKA_TYPE="$1"
+    local PROJECT_NAME="${2:-"osm_admin"}"
+
+    # Corrects the `osm_admin` project, since it uses the root folder
+    [[ "${PROJECT_NAME}" == "osm_admin" ]] && PROJECT_NAME="."
+
+    # Echoes the relative path from the SW-Catalogs root
+    case "${OKA_TYPE,,}" in
+
+        "controller" | "infra-controller" | "infra-controllers" | "infra_controller" | "infra_controllers")
+            echo -n "${PROJECT_NAME}/infra-controllers"
+            return 0
+            ;;
+
+        "config" | "infra-config" | "infra-configs" | "infra_config" | "infra_configs")
+            echo -n "${PROJECT_NAME}/infra-configs"
+            return 0
+            ;;
+
+        "managed" | "resources" | "managed-resources" | "managed_resources" | "cloud-resources" | "cloud_resources")
+            echo -n "${PROJECT_NAME}/cloud-resources"
+            return 0
+            ;;
+
+        "app" | "apps" | "applications" | "cnf" | "cnfs" | "nf" | "nfs")
+            echo -n "${PROJECT_NAME}/apps"
+            return 0
+            ;;
+
+        *)
+            echo -n "------------ ERROR ------------"
+            return 1
+            ;;
+    esac
+}
+
+
+# Create OKA of a specific kind
+function create_oka() {
+    local OKA_NAME="$1"
+    local OKA_TYPE="$2"
+    local PROJECT_NAME="${3:-"."}"
+    ## `SW_CATALOGS_REPO_DIR` is the result of:
+    ## "{{inputs.parameters.sw_catalogs_mount_path}}/{{inputs.parameters.cloned_sw_catalogs_folder_name}}"
+    local SW_CATALOGS_REPO_DIR="$4"
+    local OKA_LOCATION="${5:-"."}"
+    local TARBALL_FILE="${6:-"true"}"
+
+
+    # Finds the corresponding catalog path from the SW-Catalogs root
+    # and creates the destination
+    local CATALOG_PATH=$(\
+        path_to_catalog \
+            "${OKA_TYPE}" \
+            "${PROJECT_NAME}"
+    )
+    local DESTINATION="${SW_CATALOGS_REPO_DIR}/${CATALOG_PATH}/${OKA_NAME}"
+    mkdir -p "${DESTINATION}"
+
+    # When the OKA comes as a `tar.gz`
+    if [[ "${TARBALL_FILE,,}" == "true" ]];
+    then
+        tar xvfz "${OKA_LOCATION}/${OKA_NAME}.tar.gz" -C "${DESTINATION}"
+    else
+        # Otherwise it must be a folder structure
+        # (the glob is left outside the quotes so that it expands)
+        cp -var "${OKA_LOCATION}/${OKA_NAME}/"* "${DESTINATION}/"
+    fi
+}
+
+
+# Delete OKA of a specific kind
+function delete_oka() {
+    local OKA_NAME="$1"
+    local OKA_TYPE="$2"
+    local PROJECT_NAME="${3:-"."}"
+    ## `SW_CATALOGS_REPO_DIR` is the result of:
+    ## "{{inputs.parameters.sw_catalogs_mount_path}}/{{inputs.parameters.cloned_sw_catalogs_folder_name}}"
+    local SW_CATALOGS_REPO_DIR="$4"
+
+
+    # Finds the corresponding catalog path from the SW-Catalogs root
+    # and determines the destination
+    local CATALOG_PATH=$(\
+        path_to_catalog \
+            "${OKA_TYPE}" \
+            "${PROJECT_NAME}"
+    )
+    local DESTINATION="${SW_CATALOGS_REPO_DIR}/${CATALOG_PATH}/${OKA_NAME}"
+
+    # Remove the folder
+    rm -rf "${DESTINATION}"
+}
+
+
+# Update OKA of a specific kind
+function update_oka() {
+    local OKA_NAME="$1"
+    local OKA_TYPE="$2"
+    local PROJECT_NAME="${3:-"."}"
+    ## `SW_CATALOGS_REPO_DIR` is the result of:
+    ## "{{inputs.parameters.sw_catalogs_mount_path}}/{{inputs.parameters.cloned_sw_catalogs_folder_name}}"
+    local SW_CATALOGS_REPO_DIR="$4"
+    local OKA_LOCATION="${5:-"."}"
+    local TARBALL_FILE="${6:-"true"}"
+
+
+    # Finds the corresponding catalog path from the SW-Catalogs root
+    # and determines the destination
+    local CATALOG_PATH=$(\
+        path_to_catalog \
+            "${OKA_TYPE}" \
+            "${PROJECT_NAME}"
+    )
+    local DESTINATION="${SW_CATALOGS_REPO_DIR}/${CATALOG_PATH}/${OKA_NAME}"
+
+    # Remove and re-create
+    rm -rf "${DESTINATION}"
+    create_oka \
+        "${OKA_NAME}" \
+        "${OKA_TYPE}" \
+        "${PROJECT_NAME}" \
+        "${SW_CATALOGS_REPO_DIR}" \
+        "${OKA_LOCATION}" \
+        "${TARBALL_FILE}"
+}
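+
+
+# Example usage (illustrative sketch only: the repo paths, OKA name, secret name
+# and credentials file below are placeholders, not values shipped with OSM):
+#
+#   # Publish a packaged OKA (`my-app.tar.gz` found under `/tmp/okas`) into the
+#   # `apps` catalog of the SW-Catalogs repo
+#   create_oka \
+#       "my-app" \
+#       "apps" \
+#       "osm_admin" \
+#       "/fleet/sw-catalogs" \
+#       "/tmp/okas" \
+#       "true"
+#
+#   # Render an AWS `ProviderConfig`, together with its credentials secret
+#   # encrypted for the management cluster AGE public key, into the
+#   # management cluster folder of the Fleet repo
+#   create_crossplane_providerconfig \
+#       "aws-default" \
+#       "aws" \
+#       "aws-credentials" \
+#       "creds" \
+#       "crossplane-system" \
+#       "$(cat /tmp/aws-creds.txt)" \
+#       "${AGE_PUBLIC_KEY_MGMT}" \
+#       "/fleet/fleet-osm" \
+#       "/fleet/sw-catalogs"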