* under the License.
*/
-pipeline {
- agent { label 'pool' }
- parameters {
- string(defaultValue: env.BRANCH_NAME, description: '', name: 'GERRIT_BRANCH')
+import groovy.transform.Field
+
// Job configuration: every knob of the E2E pipeline is exposed as a build
// parameter. Parameters become effective on the run after they are first defined.
properties([
    parameters([
        // ----------------------------
        // Core: install / VM lifecycle
        // ----------------------------
        string(
            defaultValue: env.GERRIT_BRANCH ?: env.BRANCH_NAME ?: 'master',
            description: 'Branch used to name downstream resources',
            name: 'GERRIT_BRANCH'
        ),
        string(
            defaultValue: '',
            description: 'Prebuilt container tag to test (fallbacks to auto-generated name if empty)',
            name: 'CONTAINER_NAME'
        ),
        string(
            defaultValue: 'ubuntu22.04',
            description: 'Glance image to use for the remote VM',
            name: 'OPENSTACK_BASE_IMAGE'
        ),
        string(
            defaultValue: 'osm.sanity',
            description: 'OpenStack flavor for the remote VM',
            name: 'OPENSTACK_OSM_FLAVOR'
        ),
        booleanParam(
            defaultValue: true,
            description: 'Spawn the remote VM and perform installation steps',
            name: 'DO_INSTALL'
        ),
        booleanParam(
            defaultValue: false,
            description: 'Preserve VM on failure for further debugging',
            name: 'SAVE_CONTAINER_ON_FAIL'
        ),
        booleanParam(
            defaultValue: false,
            description: 'Preserve VM on success',
            name: 'SAVE_CONTAINER_ON_PASS'
        ),

        // ---------------------------------
        // Module under test / installation
        // ---------------------------------
        string(
            defaultValue: '',
            description: 'Name of the module under test',
            name: 'MODULE_NAME'
        ),
        string(
            name: 'GERRIT_REFSPEC',
            defaultValue: '',
            description: 'Gerrit refspec to checkout only for devops module (overrides COMMIT_ID if set)'
        ),

        // ----------------------------
        // Robot / system integration
        // ----------------------------
        booleanParam(
            defaultValue: false,
            description: 'Run Robot system integration tests after installation',
            name: 'DO_ROBOT'
        ),
        string(
            defaultValue: 'sanity',
            description: 'Robot tag selection (sanity/regression/daily are common options)',
            name: 'ROBOT_TAG_NAME'
        ),
        string(
            defaultValue: '/home/jenkins/hive/robot-systest.cfg',
            description: 'Robot environment file (ETSI VIM)',
            name: 'ROBOT_VIM'
        ),
        string(
            defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml',
            description: 'Port mapping file for SDN assist in ETSI VIM',
            name: 'ROBOT_PORT_MAPPING_VIM'
        ),
        string(
            defaultValue: '/home/jenkins/hive/etsi-vim-prometheus.json',
            description: 'Prometheus configuration file in ETSI VIM',
            name: 'PROMETHEUS_CONFIG_VIM'
        ),
        string(
            defaultValue: '/home/jenkins/hive/kubeconfig.yaml',
            description: 'Kubeconfig used by Robot for ETSI VIM cluster registration',
            name: 'KUBECONFIG'
        ),
        string(
            defaultValue: '/home/jenkins/hive/clouds.yaml',
            description: 'OpenStack clouds.yaml used by Robot',
            name: 'CLOUDS'
        ),
        string(
            defaultValue: 'oci://osm.etsi.org:5050/devops/test',
            description: 'OCI registry used by Robot system tests',
            name: 'OCI_REGISTRY_URL'
        ),
        string(
            defaultValue: '100.0',
            description: '% passed Robot tests to mark the build as passed',
            name: 'ROBOT_PASS_THRESHOLD'
        ),
        string(
            defaultValue: '80.0',
            description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)',
            name: 'ROBOT_UNSTABLE_THRESHOLD'
        )
    ])
])
+
// Shell one-liner that exports every OS_* line from the hive config (skipping
// OS_CLOUD) so openstack CLI calls executed by this job are authenticated.
@Field final String HIVE_ENV_EXPORT = 'for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export $line ; done'
// Release installer downloaded onto the freshly created VM.
@Field final String INSTALLER_URL = 'https://osm-download.etsi.org/ftp/osm-19.0-nineteen/install_osm.sh'
// External network the VM's NIC is attached to.
@Field final String OPENSTACK_NET_ID = 'osm-ext'
@Field final String VCLUSTER_NAMESPACE = 'vcluster'
@Field final String VCLUSTER_NAME = 'e2e'
// Path inside the tests container where Robot expects the vcluster kubeconfig.
@Field final String ROBOT_VCLUSTER_KUBECONFIG_CONTAINER_PATH = '/robot-systest/cluster-kubeconfig.yaml'
@Field final Integer PROMETHEUS_PORT_DEFAULT = 80
@Field final String INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
// Host part only (text before the first '/'), used for `docker login`.
@Field final String INTERNAL_DOCKER_REGISTRY_HOST = INTERNAL_DOCKER_REGISTRY.split('/')[0]
+
+// Main pipeline
// Main pipeline (scripted). Flow: spawn an OpenStack VM -> pre-install checks ->
// install OSM -> health check -> (optionally) fetch kubeconfig, create a vcluster
// and run Robot system tests -> always archive logs and clean up the VM.
node('pool') {
    // Use absolute path for the SSH key to avoid tilde-expansion issues with sshCommand
    final String SSH_KEY = "${env.HOME ?: '/home/jenkins'}/hive/cicd_rsa"
    final String INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
    String serverId = null    // OpenStack server UUID (null until the VM is created)
    String ipAddress = ''     // management IP assigned to the VM
    String kubeTmpDir = null  // temp dir on the agent holding fetched kubeconfigs
    Map remote = null         // ssh-steps descriptor targeting the VM
    boolean alive = false

    sh 'env'

    // Debug: list hive directory to verify SSH key presence
    sh 'ls -la ~/hive || true'

    stage('Checkout') {
        checkout scm
    }

    def containerName = params.CONTAINER_NAME?.trim()
    // FIX: CONTAINER_NAME is documented to fall back to an auto-generated name when
    // empty, but it was previously passed verbatim as the `openstack server create`
    // name, which breaks on an empty value. Compute the VM name with the fallback
    // once and use it everywhere the VM is named.
    def vmName = containerName ?: "osm-e2e-${BUILD_NUMBER}"

    // Tags for installer:
    //   -t : common tag for other OSM modules (stable merge build for the branch)
    //   -T : tag for the module under test
    //   -m : module name under test
    def branchTag = (params.GERRIT_BRANCH ?: 'master').trim().toLowerCase().replaceAll('[^a-z0-9._-]', '-')
    def commonModulesTag = "osm-${branchTag}-merge"
    def testedModuleName = params.MODULE_NAME?.trim()
    def testedModuleTag = containerName ?: commonModulesTag
    // The `opensourcemano/tests` image is produced by the `test` module; when testing any other module,
    // the tests image tag must come from the common merge build for the branch.
    def testsImageTag = (testedModuleName?.equalsIgnoreCase('test') || testedModuleName?.equalsIgnoreCase('tests')) ? testedModuleTag : commonModulesTag

    // Assembles the install_osm.sh argument list for the module under test.
    Closure<List<String>> buildInstallerArgs = { String registryUser, String registryPassword ->
        List<String> installArgs = ['-y']
        String installerRefspec = params.GERRIT_REFSPEC?.trim()
        if (testedModuleName?.equalsIgnoreCase('devops') && installerRefspec) {
            installArgs << "-S ${installerRefspec}"
        }
        installArgs << "-d ${registryUser}:${registryPassword}@${INTERNAL_DOCKER_REGISTRY}"
        installArgs << "-p ${INTERNAL_DOCKER_PROXY}"
        installArgs << "-t ${commonModulesTag}"
        installArgs << "-T ${testedModuleTag}"
        // FIX: only pass -m when a module name was actually provided; an empty
        // MODULE_NAME previously produced a dangling `-m` argument.
        if (testedModuleName) {
            installArgs << "-m ${testedModuleName}"
        }
        return installArgs
    }

    try {
        if (params.DO_INSTALL) {
            ///////////////////////////////////////////////////////////////////////////
            // Launch VM
            ///////////////////////////////////////////////////////////////////////////
            stage('Spawn Remote VM') {
                println('Launching new VM')
                def output = runHiveCommand("""
                    openstack server create --flavor ${params.OPENSTACK_OSM_FLAVOR} \
                        --image ${params.OPENSTACK_BASE_IMAGE} \
                        --key-name CICD \
                        --property build_url=\"${BUILD_URL}\" \
                        --nic net-id=${OPENSTACK_NET_ID} \
                        ${vmName}
                """)

                serverId = get_value('id', output)

                if (serverId == null) {
                    println('VM launch output:')
                    println(output)
                    throw new Exception('VM Launch failed')
                }
                println("Target VM is ${serverId}, waiting for IP address to be assigned")

                ipAddress = waitForServerIp(serverId)
                println("Waiting for VM at ${ipAddress} to be reachable")

                remote = [
                    name: vmName,
                    host: ipAddress,
                    user: 'ubuntu',
                    identityFile: SSH_KEY,
                    allowAnyHosts: true,
                    logLevel: 'INFO',
                    pty: true
                ]

                alive = false
                timeout(time: 1, unit: 'MINUTES') {
                    while (!alive) {
                        def sshStatus = sh(
                            returnStatus: true,
                            script: "ssh -T -i ${SSH_KEY} " +
                                "-o StrictHostKeyChecking=no " +
                                "-o UserKnownHostsFile=/dev/null " +
                                "-o ConnectTimeout=5 ubuntu@${ipAddress} 'echo Alive'")
                        alive = (sshStatus == 0)
                        // FIX: pace the probe loop; a refused connection returns
                        // immediately and previously produced a tight retry loop.
                        if (!alive) {
                            sleep 5
                        }
                    }
                }
                println('VM is ready and accepting ssh connections')

                // Workaround: Ubuntu 22.04 sshd disables ssh-rsa by default, which the
                // old jsch client used by the ssh-steps plugin still requires.
                println('Applying sshd config workaround for Ubuntu 22.04 and old jsch client in Jenkins (via native ssh)...')
                sh """ssh -T -i ${SSH_KEY} \
                    -o StrictHostKeyChecking=no \
                    -o UserKnownHostsFile=/dev/null \
                    ubuntu@${ipAddress} \"echo HostKeyAlgorithms +ssh-rsa | sudo tee -a /etc/ssh/sshd_config\"
                """
                sh """ssh -T -i ${SSH_KEY} \
                    -o StrictHostKeyChecking=no \
                    -o UserKnownHostsFile=/dev/null \
                    ubuntu@${ipAddress} \"echo PubkeyAcceptedKeyTypes +ssh-rsa | sudo tee -a /etc/ssh/sshd_config\"
                """
                sh """ssh -T -i ${SSH_KEY} \
                    -o StrictHostKeyChecking=no \
                    -o UserKnownHostsFile=/dev/null \
                    ubuntu@${ipAddress} \"sudo systemctl restart sshd\"
                """
            } // stage('Spawn Remote VM')

            ///////////////////////////////////////////////////////////////////////////
            // Checks before installation
            ///////////////////////////////////////////////////////////////////////////
            stage('Checks before installation') {
                if (!ipAddress?.trim()) {
                    error('Missing VM IP address, cannot run pre-installation checks')
                }

                // Wait for cloud-init to finish, then force a clock sync so later
                // TLS/registry operations do not fail on clock skew.
                sshCommand remote: remote, command: 'cloud-init status --wait'
                sshCommand remote: remote, command: 'sudo apt-get -y update'
                sshCommand remote: remote, command: 'sudo apt-get -y install chrony'
                sshCommand remote: remote, command: 'sudo service chrony stop'
                sshCommand remote: remote, command: 'sudo chronyd -vq'
                sshCommand remote: remote, command: 'sudo service chrony start'
            } // stage('Checks before installation')

            ///////////////////////////////////////////////////////////////////////////
            // Install
            ///////////////////////////////////////////////////////////////////////////
            stage('Install') {
                if (!ipAddress?.trim()) {
                    error('Missing VM IP address, cannot run installation steps')
                }

                sshCommand remote: remote, command: """
                    wget ${INSTALLER_URL}
                    chmod +x ./install_osm.sh
                    sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
                """

                Map gitlabCredentialsMap = [$class: 'UsernamePasswordMultiBinding',
                                            credentialsId: 'gitlab-registry',
                                            usernameVariable: 'USERNAME',
                                            passwordVariable: 'PASSWORD']
                withCredentials([gitlabCredentialsMap]) {
                    List<String> installArgs = buildInstallerArgs(USERNAME, PASSWORD)

                    String installCmd = "./install_osm.sh ${installArgs.join(' ')}"
                    sshCommand remote: remote, command: """
                        ${installCmd}
                    """
                }
            } // stage('Install')

            ///////////////////////////////////////////////////////////////////////////
            // Health check of installed OSM in remote VM
            ///////////////////////////////////////////////////////////////////////////
            stage('OSM Health') {
                if (!ipAddress?.trim()) {
                    error('Missing VM IP address, cannot run OSM health checks')
                }
                if (!remote) {
                    error('Missing remote target, cannot run OSM health checks')
                }

                timeout(time: 5, unit: 'MINUTES') {
                    String osmHostname = "nbi.${ipAddress}.nip.io"
                    // FIX: `\\$` in a GString produces a literal backslash followed by
                    // Groovy interpolation; use `\$` so the *remote* shell expands PATH
                    // and HOME instead of the Jenkins agent (same escaping as the sed
                    // line in the Install stage).
                    sshCommand remote: remote, command: """
                        export PATH="\$PATH:\$HOME/.local/bin"
                        OSM_HOSTNAME="${osmHostname}" osm vim-list
                    """
                }
            } // stage('OSM Health')

            ///////////////////////////////////////////////////////////////////////////
            // Robot-only steps: kubeconfig retrieval, vcluster, system tests
            ///////////////////////////////////////////////////////////////////////////
            if (params.DO_ROBOT) {
                stage('OSM Get kubeconfig') {
                    kubeTmpDir = pwd(tmp: true)
                    env.OSM_KUBECONFIG_PATH = "${kubeTmpDir}/osm_config"
                    env.VCLUSTER_KUBECONFIG_PATH = "${kubeTmpDir}/vcluster_config"

                    sshGet remote: remote,
                        from: '/home/ubuntu/.kube/config',
                        into: env.OSM_KUBECONFIG_PATH,
                        override: true
                    sh "chmod 600 ${env.OSM_KUBECONFIG_PATH}"
                    sh "test -s ${env.OSM_KUBECONFIG_PATH}"
                    // Debug: show the Kubernetes API endpoint used by the kubeconfig.
                    // (k3s defaults to 127.0.0.1:6443, which is not reachable from the Jenkins agent container)
                    sh "grep -nE '^\\s*server:' ${env.OSM_KUBECONFIG_PATH} || true"
                } // stage('OSM Get kubeconfig')

                stage('Create vCluster') {
                    println("Creating vcluster ${VCLUSTER_NAME} in namespace ${VCLUSTER_NAMESPACE}")
                    dockerLoginInternalRegistry()
                    create_vcluster(INTERNAL_DOCKER_REGISTRY, testsImageTag, env.OSM_KUBECONFIG_PATH, env.VCLUSTER_KUBECONFIG_PATH, VCLUSTER_NAME, VCLUSTER_NAMESPACE)
                    sh "chmod 600 ${env.VCLUSTER_KUBECONFIG_PATH}"
                    sh "test -s ${env.VCLUSTER_KUBECONFIG_PATH}"
                } // stage('Create vCluster')

                stage('System Integration Test') {
                    String prometheusHostname = "prometheus.${ipAddress}.nip.io"
                    Integer prometheusPort = PROMETHEUS_PORT_DEFAULT
                    String osmHostnameRobot = "nbi.${ipAddress}.nip.io:443"

                    register_etsi_vim_account(
                        INTERNAL_DOCKER_REGISTRY,
                        testsImageTag,
                        osmHostnameRobot,
                        params.ROBOT_VIM,
                        params.ROBOT_PORT_MAPPING_VIM,
                        params.KUBECONFIG,
                        params.CLOUDS,
                        params.PROMETHEUS_CONFIG_VIM
                    )
                    register_etsi_k8s_cluster(
                        INTERNAL_DOCKER_REGISTRY,
                        testsImageTag,
                        osmHostnameRobot,
                        params.ROBOT_VIM,
                        params.ROBOT_PORT_MAPPING_VIM,
                        params.KUBECONFIG,
                        params.CLOUDS,
                        params.PROMETHEUS_CONFIG_VIM
                    )

                    // IMPORTANT: tests expect the vcluster kubeconfig at this container path.
                    String robotVclusterKubeconfigPath = ROBOT_VCLUSTER_KUBECONFIG_CONTAINER_PATH
                    run_robot_systest(
                        INTERNAL_DOCKER_REGISTRY,
                        testsImageTag,
                        params.ROBOT_TAG_NAME,
                        osmHostnameRobot,
                        prometheusHostname,
                        prometheusPort,
                        params.OCI_REGISTRY_URL,
                        params.ROBOT_VIM,
                        params.ROBOT_PORT_MAPPING_VIM,
                        params.KUBECONFIG,
                        params.CLOUDS,
                        null,
                        SSH_KEY,
                        params.ROBOT_PASS_THRESHOLD,
                        params.ROBOT_UNSTABLE_THRESHOLD,
                        // extraEnvVars map of extra environment variables
                        ['CLUSTER_KUBECONFIG_CREDENTIALS': robotVclusterKubeconfigPath],
                        // extraVolMounts map of extra volume mounts
                        [(env.VCLUSTER_KUBECONFIG_PATH): robotVclusterKubeconfigPath]
                    )
                } // stage('System Integration Test')
            } else {
                println('Skipping kubeconfig/vcluster steps because DO_ROBOT is set to false')
            }
        } else {
            println('Skipping VM spawn because DO_INSTALL is set to false')
        }
    } finally {
        stage('Archive Logs') {
            if (params.DO_INSTALL && remote) {
                try {
                    archiveLogs(remote)
                } catch (Exception e) {
                    println("Archive logs failed: ${e.message}")
                }
            } else {
                println('No remote target to collect logs from')
            }
        }

        stage('Cleanup') {
            // Always attempt to cleanup temp kubeconfig directory if created.
            if (kubeTmpDir?.trim()) {
                sh "rm -rf ${kubeTmpDir} || true"
                kubeTmpDir = null
            }

            if (!params.DO_INSTALL || serverId == null) {
                println('No VM to cleanup')
                return
            }

            // Note: only a hard FAILURE counts as "failed" here; UNSTABLE (Robot
            // below pass threshold) goes through the "pass" preservation branch.
            String buildState = currentBuild.currentResult ?: 'SUCCESS'
            boolean buildFailed = buildState == 'FAILURE'

            boolean deleteVm = true
            if (buildFailed && params.SAVE_CONTAINER_ON_FAIL) {
                deleteVm = false
            }
            if (!buildFailed && params.SAVE_CONTAINER_ON_PASS) {
                deleteVm = false
            }

            if (deleteVm) {
                println("Deleting VM: ${serverId}")
                try {
                    runHiveCommand("""
                        openstack server delete ${serverId}
                    """)
                } catch (Exception e) {
                    // Avoid masking an earlier failure with cleanup failure.
                    println("VM delete failed: ${e.message}")
                }
            } else {
                println("Preserving VM ${serverId} (build state: ${buildState})")
            }
        }
    }
}
+
+////////////////////////////////////////////////////////////////////////////////////////
+// Helper Classes & Functions (ported from ci_stage_3.groovy)
+// Placed below the pipeline for readability.
+////////////////////////////////////////////////////////////////////////////////////////
+
+/** Usage:
+ * def dr = new DockerRunner(this)
+ * stdout = dr.run(
+ * image : "${INTERNAL_DOCKER_REGISTRY}opensourcemano/tests:${tag}",
+ * entry : "/usr/bin/osm", // optional
+ * envVars : [ "OSM_HOSTNAME=${host}" ],
+ * envFile : myEnv,
+ * mounts : [
+ * "${clouds}:/etc/openstack/clouds.yaml",
+ * "${kubeconfig}:/root/.kube/config"
+ * ],
+ * cmd : "vim-create --name osm …"
+ * )
+ */
class DockerRunner implements Serializable {
    // Pipeline script context ("this" from the Jenkinsfile); required so a plain
    // class can invoke pipeline steps such as sh/echo.
    def steps
    DockerRunner(def steps) { this.steps = steps }

    /** Returns stdout (trimmed) if returnStdout is true; throws Exception on non-zero exit.
     *  Supported args: image, cmd, entry (entrypoint), envVars (list of K=V),
     *  envFile (docker --env-file), mounts (list of host:container), returnStdout.
     *  NOTE: the assembled command is echoed in `finally` even on failure, so any
     *  secret values placed in envVars appear in the build log.
     */
    String run(Map args = [:]) {
        // remove() strips the flag from the map so it is not treated as a docker arg.
        def returnStdout = args.remove('returnStdout') ?: false
        def envFile = args.envFile ?: ''
        def entry = args.entry ? "--entrypoint ${args.entry}" : ''
        // Drop null/blank entries before rendering docker CLI flags.
        def mounts = (args.mounts ?: [])
            .findAll { it && it.trim() }
            .collect { "-v ${it}" }
            .join(' ')
        def envs = (args.envVars ?: [])
            .findAll { it && it.trim() }
            .collect { "--env ${it}" }
            .join(' ')
        def image = args.image ?: ''
        def cmd = args.cmd ?: ''
        // `cmd` is appended unquoted: shell operators inside it (pipes, `|| true`,
        // `>` redirections) are interpreted by the *agent* shell, not the container.
        def fullCmd = "docker run --rm ${entry} ${envs} ${envFile ? "--env-file ${envFile}" : ''} ${mounts} ${image} ${cmd}".trim()

        def result = null
        try {
            if (returnStdout) {
                result = steps.sh(returnStdout: true, script: fullCmd).trim()
            } else {
                steps.sh(script: fullCmd)
            }
        } catch (Exception ex) {
            throw new Exception("docker run failed -> ${ex.message}")
        } finally {
            steps.echo("Command executed: ${fullCmd}")
        }
        return result ?: ''
    }
}
+
/* -------------------------------------------------------------------
 * create_vcluster – spin up a vcluster in the target OSM cluster
 * @params:
 *   dockerRegistryUrl         - Registry prefix for the tests image
 *   tagName                   - The OSM test docker image tag to use
 *   kubeconfigPath            - The path of the OSM kubernetes master configuration file
 *   vclusterKubeconfigOutPath - Output path for the vcluster kubeconfig
 *   vclusterName              - Name of the vcluster
 *   vclusterNamespace         - Namespace for the vcluster
 * Throws when the vcluster does not reach 'Running' within 10 minutes.
 * ------------------------------------------------------------------- */
void create_vcluster(String dockerRegistryUrl, String tagName, String kubeconfigPath, String vclusterKubeconfigOutPath, String vclusterName, String vclusterNamespace) {
    def dr = new DockerRunner(this)
    def mounts = ["${kubeconfigPath}:/root/.kube/config"]
    def envs = ["KUBECONFIG=/root/.kube/config"]
    def image = "${dockerRegistryUrl}opensourcemano/tests:${tagName}"

    // 1) create vcluster namespace (the trailing `|| true` is evaluated by the
    //    agent shell running the docker command, making this step idempotent)
    println("vcluster: ensuring namespace '${vclusterNamespace}' exists")
    dr.run(
        image: image,
        entry: 'kubectl',
        envVars: envs,
        mounts: mounts,
        cmd: "create namespace ${vclusterNamespace} || true"
    )

    // 2) create vcluster (no connect); /etc/vcluster.yaml is read from inside
    //    the tests image — TODO confirm the image ships it
    println("vcluster: creating '${vclusterName}' (no connect)")
    dr.run(
        image: image,
        entry: 'vcluster',
        envVars: envs,
        mounts: mounts,
        cmd: "create ${vclusterName} -n ${vclusterNamespace} --connect=false -f /etc/vcluster.yaml"
    )

    // 3) poll until Status is Running
    int maxWaitMinutes = 10
    long deadline = System.currentTimeMillis() + (maxWaitMinutes * 60 * 1000)
    boolean running = false
    String lastOut = ''

    println("vcluster: waiting for '${vclusterName}' to reach status 'Running' (timeout: ${maxWaitMinutes} minutes)")
    while (System.currentTimeMillis() < deadline) {
        try {
            // FIX: the previous quoting closed the `sh -c` payload at the jq filter,
            // so the agent shell mis-parsed the pipe after `.[]`. Single-quote the
            // whole payload at the agent-shell level and use escaped double quotes
            // for the jq program and its string literal.
            lastOut = dr.run(
                returnStdout: true,
                image: image,
                entry: '/bin/sh',
                envVars: envs,
                mounts: mounts,
                cmd: "-c 'vcluster list --output json | jq -r \".[] | select(.Name==\\\"${vclusterName}\\\") | .Status\"'"
            ).trim()
        } catch (Exception e) {
            println("Polling command failed: ${e.message}. Will retry.")
            lastOut = "Error: ${e.message}"
        }

        println("Polling for vcluster status. Current status: '${lastOut}'")
        if (lastOut == 'Running') {
            running = true
            break
        }
        sleep 10
    }

    if (!running) {
        println("vcluster status after timeout: ${lastOut}")
        throw new Exception("vcluster '${vclusterName}' did not reach 'Running' state within ${maxWaitMinutes} minutes.")
    }

    // 4) get vcluster kubeconfig; the `>` redirection is handled by the agent
    //    shell, so the kubeconfig is written on the Jenkins agent at outPath.
    String outPath = vclusterKubeconfigOutPath ?: "${WORKSPACE}/kubeconfig/vcluster_config"
    // Ensure destination directory exists on the Jenkins agent before relying on shell redirection.
    String outDir = outPath.contains('/') ? outPath.substring(0, outPath.lastIndexOf('/')) : '.'
    sh "mkdir -p ${outDir}"
    println("vcluster: exporting kubeconfig to '${outPath}'")
    dr.run(
        image: image,
        entry: 'vcluster',
        envVars: envs,
        mounts: mounts,
        cmd: "connect ${vclusterName} -n ${vclusterNamespace} --server ${vclusterName}.${vclusterNamespace}.svc.cluster.local:443 --print > ${outPath}"
    )
}
+
/**
 * Run `action` until it returns a truthy value, up to (maxAttempts + 1) executions
 * (one initial attempt plus maxAttempts retries). Exceptions thrown by `action`
 * count as a failed attempt.
 *
 * @param action closure returning truthy on success
 * @throws Exception when every attempt fails
 */
void retryWithDocker(int maxAttempts, int delaySeconds, Closure action) {
    int attemptsLeft = maxAttempts
    while (true) {
        try {
            if (action()) {
                return
            }
        } catch (Exception e) {
            println("Attempt failed: ${e.message}")
        }
        // FIX: previously the loop slept and printed "Retrying..." even after the
        // final failed attempt, delaying the terminal failure for no reason.
        if (attemptsLeft <= 0) {
            break
        }
        println("Retrying... (${attemptsLeft} attempts left)")
        sleep delaySeconds
        attemptsLeft--
    }
    throw new Exception("Operation failed after ${maxAttempts} retries")
}
+
/**
 * Register the ETSI OpenStack VIM ("osm") in OSM and wait until it is ENABLED.
 * The osm CLI runs inside the tests container; the whole create/verify cycle is
 * retried up to 3 times (via retryWithDocker), deleting the half-registered VIM
 * between attempts.
 *
 * @param dockerRegistryUrl    registry prefix for the tests image
 * @param tagName              tests image tag
 * @param osmHostname          value exported as OSM_HOSTNAME inside the container
 * @param envfile              optional docker --env-file (an empty temp file is used if null)
 * @param portmappingfile      optional port-mapping file mounted into the container
 * @param kubeconfig           optional kubeconfig mounted into the container
 * @param clouds               optional clouds.yaml mounted into the container
 * @param prometheusconfigfile optional prometheus config mounted into the container
 */
void register_etsi_vim_account(
    String dockerRegistryUrl,
    String tagName,
    String osmHostname,
    String envfile = null,
    String portmappingfile = null,
    String kubeconfig = null,
    String clouds = null,
    String prometheusconfigfile = null
) {
    String VIM_TARGET = "osm"
    String VIM_MGMT_NET = "osm-ext"
    String OS_PROJECT_NAME = "osm_jenkins"
    String OS_AUTH_URL = "http://172.21.247.1:5000/v3"
    String entrypointCmd = "/usr/bin/osm"

    def tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = envfile ?: "${tempdir}/env"
    if (!envfile) {
        sh(script: "touch ${environmentFile}")
    }

    retryWithDocker(3, 10) {
        def dr = new DockerRunner(this)
        try {
            println("Attempting to register VIM account")
            // NOTE(review): OS_USERNAME/OS_PASSWORD are Groovy-interpolated into the
            // docker command line (which DockerRunner echoes); consider shell-level
            // env expansion to keep secrets out of the build log.
            withCredentials([usernamePassword(credentialsId: 'openstack-jenkins-credentials',
                passwordVariable: 'OS_PASSWORD', usernameVariable: 'OS_USERNAME')]) {
                // The trailing `|| true` and the single-quoted --config block are
                // parsed by the agent shell running `docker run` (DockerRunner
                // appends cmd unquoted), so a failed create does not abort here.
                String entrypointArgs = """vim-create --name ${VIM_TARGET} --user ${OS_USERNAME} \
                    --password ${OS_PASSWORD} --tenant ${OS_PROJECT_NAME} \
                    --auth_url ${OS_AUTH_URL} --account_type openstack --description vim \
                    --prometheus_config_file /root/etsi-vim-prometheus.json \
                    --config '{management_network_name: ${VIM_MGMT_NET}, dataplane_physical_net: physnet2}' || true"""
                dr.run(
                    image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
                    entry: entrypointCmd,
                    envVars: ["OSM_HOSTNAME=${osmHostname}"],
                    envFile: environmentFile,
                    mounts: [
                        clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                        kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                        portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                        prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
                    ].findAll { it != null },
                    cmd: entrypointArgs,
                    returnStdout: true
                )
            }

            // Check if the VIM is ENABLED (up to 5 polls, ~50 s total).
            int statusChecks = 5
            while (statusChecks > 0) {
                sleep 10
                String vimList = dr.run(
                    image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
                    entry: entrypointCmd,
                    envVars: ["OSM_HOSTNAME=${osmHostname}"],
                    envFile: environmentFile,
                    mounts: [
                        clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                        kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                        portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                        prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
                    ].findAll { it != null },
                    cmd: "vim-list --long | grep ${VIM_TARGET}",
                    returnStdout: true
                )
                if (vimList.contains("ENABLED")) {
                    println("VIM successfully registered and is ENABLED.")
                    // truthy closure result tells retryWithDocker to stop retrying
                    return true
                }
                statusChecks--
            }
        } catch (Exception e) {
            println("VIM registration check failed: ${e.message}")
        }

        // If we get here, VIM is not enabled or creation failed. cleanup and retry.
        println("VIM not enabled, deleting and retrying...")
        dr.run(
            image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
            entry: entrypointCmd,
            envVars: ["OSM_HOSTNAME=${osmHostname}"],
            envFile: environmentFile,
            mounts: [
                clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
            ].findAll { it != null },
            cmd: "vim-delete --force ${VIM_TARGET}",
            returnStdout: true
        )
        return false
    }
}
/**
 * Register the ETSI K8s cluster ("osm") in OSM and wait until it is ENABLED.
 * Mirrors register_etsi_vim_account: the osm CLI runs inside the tests container
 * and the create/verify cycle is retried up to 3 times (via retryWithDocker),
 * deleting the cluster between attempts.
 *
 * @param dockerRegistryUrl    registry prefix for the tests image
 * @param tagName              tests image tag
 * @param osmHostname          value exported as OSM_HOSTNAME inside the container
 * @param envfile              optional docker --env-file (an empty temp file is used if null)
 * @param portmappingfile      optional port-mapping file mounted into the container
 * @param kubeconfig           kubeconfig mounted into the container; also used as --creds
 * @param clouds               optional clouds.yaml mounted into the container
 * @param prometheusconfigfile optional prometheus config mounted into the container
 */
void register_etsi_k8s_cluster(
    String dockerRegistryUrl,
    String tagName,
    String osmHostname,
    String envfile = null,
    String portmappingfile = null,
    String kubeconfig = null,
    String clouds = null,
    String prometheusconfigfile = null
) {
    String K8S_CLUSTER_TARGET = "osm"
    String VIM_TARGET = "osm"
    String VIM_MGMT_NET = "osm-ext"
    // Path of the mounted kubeconfig inside the container, passed as --creds.
    String K8S_CREDENTIALS = "/root/.kube/config"
    String entrypointCmd = "/usr/bin/osm"

    def tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = envfile ?: "${tempdir}/env"
    if (!envfile) {
        sh(script: "touch ${environmentFile}")
    }

    retryWithDocker(3, 10) {
        def dr = new DockerRunner(this)
        try {
            println("Attempting to register K8s cluster")
            dr.run(
                image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
                entry: entrypointCmd,
                envVars: ["OSM_HOSTNAME=${osmHostname}"],
                envFile: environmentFile,
                mounts: [
                    clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                    kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                    portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                    prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
                ].findAll { it != null },
                cmd: """k8scluster-add ${K8S_CLUSTER_TARGET} --creds ${K8S_CREDENTIALS} --version \"v1\" \
                    --description \"Robot-cluster\" --skip-jujubundle --vim ${VIM_TARGET} \
                    --k8s-nets '{net1: ${VIM_MGMT_NET}}'""",
                returnStdout: true
            )

            // Check if the K8s cluster is ENABLED (up to 10 polls, ~100 s total).
            int statusChecks = 10
            while (statusChecks > 0) {
                sleep 10
                String clusterList = dr.run(
                    image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
                    entry: entrypointCmd,
                    envVars: ["OSM_HOSTNAME=${osmHostname}"],
                    envFile: environmentFile,
                    mounts: [
                        clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                        kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                        portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                        prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
                    ].findAll { it != null },
                    cmd: "k8scluster-list | grep ${K8S_CLUSTER_TARGET}",
                    returnStdout: true
                )
                if (clusterList.contains("ENABLED")) {
                    println("K8s cluster successfully registered and is ENABLED.")
                    // truthy closure result tells retryWithDocker to stop retrying
                    return true
                }
                statusChecks--
            }
        } catch (Exception e) {
            println("K8s cluster registration check failed: ${e.message}")
        }

        // If we get here, cluster is not enabled or creation failed. cleanup and retry.
        println("K8s cluster not enabled, deleting and retrying...")
        dr.run(
            image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
            entry: entrypointCmd,
            envVars: ["OSM_HOSTNAME=${osmHostname}"],
            envFile: environmentFile,
            mounts: [
                clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
            ].findAll { it != null },
            cmd: "k8scluster-delete ${K8S_CLUSTER_TARGET}"
        )
        return false
    }
}
+
/**
 * Run the Robot system-test container against the deployed OSM and publish the
 * results with RobotPublisher. Reports are collected from a temp dir mounted at
 * /robot-systest/reports inside the container and copied into the workspace.
 *
 * @param testName          Robot tag passed to the container as `-t <tag>`
 *                          (relies on the tests image's default entrypoint)
 * @param passThreshold     % passed to mark the build passed (String, from params)
 * @param unstableThreshold % passed to mark the build unstable (String, from params)
 * @param extraEnvVars      map rendered as extra KEY=VALUE --env entries
 * @param extraVolMounts    map rendered as extra host:container -v entries
 * Remaining parameters are optional files mounted into the container when set.
 */
void run_robot_systest(
    String dockerRegistryUrl,
    String tagName,
    String testName,
    String osmHostname,
    String prometheusHostname,
    Integer prometheusPort = null,
    String ociRegistryUrl = null,
    String envfile = null,
    String portmappingfile = null,
    String kubeconfig = null,
    String clouds = null,
    String hostfile = null,
    String osmRSAfile = null,
    String passThreshold = '0.0',
    String unstableThreshold = '0.0',
    Map extraEnvVars = null,
    Map extraVolMounts = null
) {
    def tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = envfile ?: "${tempdir}/env"
    if (!envfile) {
        sh(script: "touch ${environmentFile}")
    }

    def prometheusPortVar = prometheusPort != null ? "PROMETHEUS_PORT=${prometheusPort}" : null
    def hostfilemount = hostfile ? "${hostfile}:/etc/hosts" : null

    try {
        withCredentials([usernamePassword(credentialsId: 'gitlab-oci-test',
            passwordVariable: 'OCI_REGISTRY_PSW', usernameVariable: 'OCI_REGISTRY_USR')]) {
            // NOTE(review): the OCI credentials end up as --env values on the docker
            // command line, which DockerRunner echoes into the build log.
            def baseEnvVars = [
                "OSM_HOSTNAME=${osmHostname}",
                "PROMETHEUS_HOSTNAME=${prometheusHostname}",
                prometheusPortVar,
                ociRegistryUrl ? "OCI_REGISTRY_URL=${ociRegistryUrl}" : null,
                "OCI_REGISTRY_USER=${OCI_REGISTRY_USR}",
                "OCI_REGISTRY_PASSWORD=${OCI_REGISTRY_PSW}"
            ].findAll { it != null }

            def baseMounts = [
                clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                osmRSAfile ? "${osmRSAfile}:/root/osm_id_rsa" : null,
                kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                "${tempdir}:/robot-systest/reports",
                portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                hostfilemount
            ].findAll { it != null }

            def extraEnvVarsList = extraEnvVars?.collect { key, value -> "${key}=${value}" } ?: []
            def extraVolMountsList = extraVolMounts?.collect { hostPath, containerPath -> "${hostPath}:${containerPath}" } ?: []

            def dr = new DockerRunner(this)
            dr.run(
                image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
                envVars: baseEnvVars + extraEnvVarsList,
                envFile: "${environmentFile}",
                mounts: baseMounts + extraVolMountsList,
                cmd: "-t ${testName}"
            )
        }
    } finally {
        // Best-effort publish Robot results from tempdir into workspace
        sh("cp ${tempdir}/*.xml . 2>/dev/null || true")
        sh("cp ${tempdir}/*.html . 2>/dev/null || true")

        def outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
        sh("command -v tree >/dev/null 2>&1 && tree ${outputDirectory} || ls -la ${outputDirectory}")

        try {
            // NOTE(review): thresholds are forwarded as Strings from job params —
            // confirm RobotPublisher coerces them to percentages as expected.
            step([
                $class: 'RobotPublisher',
                outputPath: "${outputDirectory}",
                outputFileName: '*.xml',
                disableArchiveOutput: false,
                reportFileName: 'report.html',
                logFileName: 'log.html',
                passThreshold: passThreshold,
                unstableThreshold: unstableThreshold,
                otherFiles: '*.png'
            ])
        } catch (Exception e) {
            println("RobotPublisher failed: ${e.message}")
        }
    }
}
+
/**
 * Parse an openstack CLI ASCII table and return the value for `key`.
 *
 * Rows look like `| id | abc-123 |`; splitting on '|' yields
 * ["", " id ", " abc-123 "], so the key is column 1 and the value column 2.
 *
 * @param key    row label to look up (e.g. 'id', 'addresses')
 * @param output full table text as printed by the openstack client
 * @return trimmed value, or null when the key is absent
 */
String get_value(String key, String output) {
    for (String line : output.split('\n')) {
        def data = line.split('\\|')
        // FIX: require a value column as well (length > 2); the previous `> 1`
        // guard read data[2] and crashed with ArrayIndexOutOfBoundsException on
        // rows that match the key but carry no value.
        if (data.length > 2 && data[1].trim() == key) {
            return data[2].trim()
        }
    }
    return null
}
+
/**
 * Log the agent's docker daemon into the internal GitLab registry.
 *
 * FIX: the credentials were Groovy-interpolated into the sh script, putting the
 * secret on the command line and into the build log. Escaping the variables
 * (`\${...}`) defers expansion to the shell, which reads them from the
 * environment injected by withCredentials.
 */
void dockerLoginInternalRegistry() {
    withCredentials([usernamePassword(credentialsId: 'gitlab-registry',
        passwordVariable: 'REGISTRY_PASSWORD', usernameVariable: 'REGISTRY_USERNAME')]) {
        sh """
            set -e
            echo "\${REGISTRY_PASSWORD}" | docker login ${INTERNAL_DOCKER_REGISTRY_HOST} -u "\${REGISTRY_USERNAME}" --password-stdin
        """
    }
}
+
/**
 * Wrap a shell snippet with the hive environment preamble: a `#!/bin/sh -e`
 * shebang plus the OS_* export one-liner, so openstack CLI calls inside the
 * snippet are authenticated.
 *
 * @param commandBody shell snippet; leading indentation is stripped
 * @return the complete script text, newline-terminated
 */
String withHiveEnv(String commandBody) {
    def script = new StringBuilder()
    script << '#!/bin/sh -e\n'
    script << HIVE_ENV_EXPORT << '\n'
    script << commandBody.stripIndent() << '\n'
    return script.toString()
}
+
/**
 * Execute a shell snippet on the agent with the hive environment loaded and
 * return its trimmed stdout.
 */
String runHiveCommand(String commandBody) {
    return sh(returnStdout: true, script: withHiveEnv(commandBody)).trim()
}
+
/**
 * Poll `openstack server show` until the server reports an address.
 *
 * The 'addresses' table field looks like `net-name=IP[, IP2, ...]`; the segment
 * after the first '=' is returned. NOTE(review): with more than one address the
 * result still contains the comma-separated extras — assumes single-NIC,
 * single-IP servers; confirm against the flavors used.
 *
 * @param id OpenStack server UUID (as returned by `server create`)
 * @return the address portion of the 'addresses' field
 * Aborts the build via `timeout` if no address appears within 5 minutes.
 */
String waitForServerIp(String id) {
    String addr = ''
    timeout(time: 5, unit: 'MINUTES') {
        waitUntil {
            def showOutput = runHiveCommand("""
                openstack server show ${id}
            """)
            def rawAddress = get_value('addresses', showOutput)
            if (rawAddress) {
                addr = rawAddress.split('=')[1]
                return true
            }
            // extra pause on top of waitUntil's own backoff before the next poll
            sleep 5
            return false
        }
    }
    return addr
}
+
// Collect logs from the remote VM and archive them in Jenkins.
// Everything is first gathered under `logs/` on the VM, then pulled to the agent
// with sshGet and archived as build artifacts. Each kubectl call is best-effort
// (`|| true`) so a missing namespace/pod does not abort log collection.
void archiveLogs(Map remoteTarget) {
    sshCommand remote: remoteTarget, command: '''mkdir -p logs/dags logs/vcluster logs/flux-system logs/events logs/system'''

    // Collect Kubernetes events
    sshCommand remote: remoteTarget, command: '''
        echo "Extracting Kubernetes events"
        kubectl get events --all-namespaces --sort-by='.lastTimestamp' -o wide > logs/events/k8s-events.log 2>&1 || true
        kubectl get events -n osm --sort-by='.lastTimestamp' -o wide > logs/events/osm-events.log 2>&1 || true
        kubectl get events -n vcluster --sort-by='.lastTimestamp' -o wide > logs/events/vcluster-events.log 2>&1 || true
        kubectl get events -n flux-system --sort-by='.lastTimestamp' -o wide > logs/events/flux-system-events.log 2>&1 || true
    '''

    // Collect host logs and system info
    sshCommand remote: remoteTarget, command: '''
        echo "Collect system logs"
        if command -v journalctl >/dev/null; then
            journalctl > logs/system/system.log
        fi

        for entry in syslog messages; do
            [ -e "/var/log/${entry}" ] && cp -f /var/log/${entry} logs/system/"${entry}.log"
        done

        echo "Collect active services"
        case "$(cat /proc/1/comm)" in
            systemd)
                systemctl list-units > logs/system/services.txt 2>&1
                ;;
            *)
                service --status-all >> logs/system/services.txt 2>&1
                ;;
        esac

        top -b -n 1 > logs/system/top.txt 2>&1
        ps fauxwww > logs/system/ps.txt 2>&1
    '''

    // Collect OSM namespace workloads.
    // NOTE(review): `2>&1 > file` redirects stderr to the terminal, not the file —
    // likely intended `> file 2>&1`; same pattern in the loops below.
    sshCommand remote: remoteTarget, command: '''
        for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
            echo "Extracting log for $deployment"
            kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 > logs/$deployment.log || true
        done
    '''
    sshCommand remote: remoteTarget, command: '''
        for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
            echo "Extracting log for $statefulset"
            kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 > logs/$statefulset.log || true
        done
    '''
    sshCommand remote: remoteTarget, command: '''
        schedulerPod="$(kubectl get pods -n osm | grep osm-scheduler| awk '{print $1; exit}')"; \
        echo "Extracting logs from Airflow DAGs from pod ${schedulerPod}"; \
        kubectl -n osm cp ${schedulerPod}:/opt/airflow/logs/scheduler/latest/dags logs/dags -c scheduler 2>&1 || true
    '''

    // Collect vcluster and flux-system namespace logs
    sshCommand remote: remoteTarget, command: '''
        echo "Extracting logs from vcluster namespace"
        for pod in `kubectl get pods -n vcluster | grep -v NAME | awk '{print $1}'`; do
            echo "Extracting log for vcluster pod: $pod"
            kubectl logs -n vcluster $pod --timestamps=true --all-containers 2>&1 > logs/vcluster/$pod.log || true
        done
    '''
    sshCommand remote: remoteTarget, command: '''
        echo "Extracting logs from flux-system namespace"
        for pod in `kubectl get pods -n flux-system | grep -v NAME | awk '{print $1}'`; do
            echo "Extracting log for flux-system pod: $pod"
            kubectl logs -n flux-system $pod --timestamps=true --all-containers 2>&1 > logs/flux-system/$pod.log || true
        done
    '''

    // Remove any stale local copy on the agent before fetching from the VM.
    sh 'rm -rf logs'
    sshCommand remote: remoteTarget, command: '''ls -al logs logs/vcluster logs/events logs/flux-system logs/system'''
    sshGet remote: remoteTarget, from: 'logs', into: '.', override: true
    archiveArtifacts artifacts: 'logs/*.log, logs/dags/*.log, logs/vcluster/*.log, logs/events/*.log, logs/flux-system/*.log, logs/system/**'
}