/* Copyright ETSI Contributors and Others
 *
 * All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */

import groovy.transform.Field

// Declarative job configuration: every knob of this pipeline is exposed as a build parameter.
properties([
    parameters([
        // ----------------------------
        // Core: install / VM lifecycle
        // ----------------------------
        string(
            defaultValue: env.GERRIT_BRANCH ?: env.BRANCH_NAME ?: 'master',
            description: 'Branch used to name downstream resources',
            name: 'GERRIT_BRANCH'
        ),
        string(
            defaultValue: '',
            description: 'Prebuilt container tag to test (fallbacks to auto-generated name if empty)',
            name: 'CONTAINER_NAME'
        ),
        string(
            defaultValue: 'ubuntu24.04',
            description: 'Glance image to use for the remote VM',
            name: 'OPENSTACK_BASE_IMAGE'
        ),
        string(
            defaultValue: 'osm.sanity',
            description: 'OpenStack flavor for the remote VM',
            name: 'OPENSTACK_OSM_FLAVOR'
        ),
        booleanParam(
            defaultValue: true,
            description: 'Spawn the remote VM and perform installation steps',
            name: 'DO_INSTALL'
        ),
        booleanParam(
            defaultValue: false,
            description: 'Preserve VM on failure for further debugging',
            name: 'SAVE_CONTAINER_ON_FAIL'
        ),
        booleanParam(
            defaultValue: false,
            description: 'Preserve VM on success',
            name: 'SAVE_CONTAINER_ON_PASS'
        ),

        // ---------------------------------
        // Module under test / installation
        // ---------------------------------
        string(
            defaultValue: '',
            description: 'Name of the module under test',
            name: 'MODULE_NAME'
        ),
        string(
            name: 'GERRIT_REFSPEC',
            defaultValue: '',
            description: 'Gerrit refspec to checkout only for devops module (overrides COMMIT_ID if set)'
        ),

        // ----------------------------
        // Robot / system integration
        // ----------------------------
        booleanParam(
            defaultValue: false,
            description: 'Run Robot system integration tests after installation',
            name: 'DO_ROBOT'
        ),
        string(
            defaultValue: 'sanity',
            description: 'Robot tag selection (sanity/regression/daily are common options)',
            name: 'ROBOT_TAG_NAME'
        ),
        string(
            defaultValue: '/home/jenkins/hive/robot-systest.cfg',
            description: 'Robot environment file (ETSI VIM)',
            name: 'ROBOT_VIM'
        ),
        string(
            defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml',
            description: 'Port mapping file for SDN assist in ETSI VIM',
            name: 'ROBOT_PORT_MAPPING_VIM'
        ),
        string(
            defaultValue: '/home/jenkins/hive/etsi-vim-prometheus.json',
            description: 'Prometheus configuration file in ETSI VIM',
            name: 'PROMETHEUS_CONFIG_VIM'
        ),
        string(
            defaultValue: '/home/jenkins/hive/kubeconfig.yaml',
            description: 'Kubeconfig used by Robot for ETSI VIM cluster registration',
            name: 'KUBECONFIG'
        ),
        string(
            defaultValue: '/home/jenkins/hive/clouds.yaml',
            description: 'OpenStack clouds.yaml used by Robot',
            name: 'CLOUDS'
        ),
        string(
            defaultValue: 'oci://osm.etsi.org:5050/devops/test',
            description: 'OCI registry used by Robot system tests',
            name: 'OCI_REGISTRY_URL'
        ),
        // The two thresholds below are forwarded as-is to RobotPublisher by run_robot_systest().
        string(
            defaultValue: '100.0',
            description: '% passed Robot tests to mark the build as passed',
            name: 'ROBOT_PASS_THRESHOLD'
        ),
        string(
            defaultValue: '80.0',
            description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)',
            name: 'ROBOT_UNSTABLE_THRESHOLD'
        )
    ])
])

// Shell snippet that exports all OS_* variables (except OS_CLOUD) from the hive config file.
@Field final String HIVE_ENV_EXPORT = 'for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export $line ; done'
// OSM installer script downloaded onto and executed in the remote VM.
@Field final String INSTALLER_URL = 'https://osm-download.etsi.org/ftp/osm-19.0-nineteen/install_osm.sh'
// OpenStack network the VM NIC is attached to.
@Field final String OPENSTACK_NET_ID = 'osm-ext'
// vcluster used for GitOps/Robot test execution.
@Field final String VCLUSTER_NAMESPACE = 'vcluster'
@Field final String VCLUSTER_NAME = 'e2e'
// Path inside the tests container where Robot expects the vcluster kubeconfig.
@Field final String ROBOT_VCLUSTER_KUBECONFIG_CONTAINER_PATH = '/robot-systest/cluster-kubeconfig.yaml'
@Field final Integer PROMETHEUS_PORT_DEFAULT = 80
// Internal registry prefix for CICD images; the bare host part is derived for `docker login`.
@Field final String INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
@Field final String INTERNAL_DOCKER_REGISTRY_HOST = INTERNAL_DOCKER_REGISTRY.split('/')[0]

// Main pipeline: spawn an OpenStack VM, install OSM on it, optionally run Robot
// system tests (via a vcluster), then archive logs and clean the VM up.
node('pool') {
    // Use absolute path for the SSH key to avoid tilde-expansion issues with sshCommand
    final String SSH_KEY = "${env.HOME ?: '/home/jenkins'}/hive/cicd_rsa"
    final String INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
    String serverId = null      // OpenStack server id of the spawned VM (null until created)
    String ipAddress = ''       // IP address assigned to the VM
    String kubeTmpDir = null    // agent-side temp dir holding kubeconfigs fetched from the VM
    Map remote = null           // ssh-steps remote descriptor for the VM
    boolean alive = false       // becomes true once the VM answers ssh

    sh 'env'

    // Debug: list hive directory to verify SSH key presence
    sh 'ls -la ~/hive || true'

    stage('Checkout') {
        checkout scm
    }

    def containerName = params.CONTAINER_NAME?.trim()

    // Tags for installer:
    // -t : common tag for other OSM modules (stable merge build for the branch)
    // -T : tag for the module under test
    // -m : module name under test
    def branchTag = (params.GERRIT_BRANCH ?: 'master').trim().toLowerCase().replaceAll('[^a-z0-9._-]', '-')
    def commonModulesTag = "osm-${branchTag}-merge"
    def testedModuleName = params.MODULE_NAME?.trim()
    def testedModuleTag = containerName ?: commonModulesTag
    // The `opensourcemano/tests` image is produced by the `test` module; when testing any other module,
    // the tests image tag must come from the common merge build for the branch.
    def testsImageTag = (testedModuleName?.equalsIgnoreCase('test') || testedModuleName?.equalsIgnoreCase('tests')) ? testedModuleTag : commonModulesTag

    // Builds the install_osm.sh argument list; registry credentials are injected at call time.
    Closure<List<String>> buildInstallerArgs = { String registryUser, String registryPassword ->
        List<String> installArgs = ['-y']
        String installerRefspec = params.GERRIT_REFSPEC?.trim()
        // -S only applies when the module under test is devops itself
        if (testedModuleName?.equalsIgnoreCase('devops') && installerRefspec) {
            installArgs << "-S ${installerRefspec}"
        }
        installArgs << "-d ${registryUser}:${registryPassword}@${INTERNAL_DOCKER_REGISTRY}"
        installArgs << "-p ${INTERNAL_DOCKER_PROXY}"
        installArgs << "-t ${commonModulesTag}"
        installArgs << "-T ${testedModuleTag}"
        installArgs << "-m ${testedModuleName}"
        return installArgs
    }

    try {
        if (params.DO_INSTALL) {
///////////////////////////////////////////////////////////////////////////////////////
// Launch VM
///////////////////////////////////////////////////////////////////////////////////////
            stage('Spawn Remote VM') {
                println('Launching new VM')
                def output = runHiveCommand("""
                    openstack server create --flavor ${params.OPENSTACK_OSM_FLAVOR} \
                        --image ${params.OPENSTACK_BASE_IMAGE} \
                        --key-name CICD \
                        --property build_url=\"${BUILD_URL}\" \
                        --nic net-id=${OPENSTACK_NET_ID} \
                        ${containerName}
                """)

                // Parse the server id out of the openstack CLI table output
                serverId = get_value('id', output)

                if (serverId == null) {
                    println('VM launch output:')
                    println(output)
                    throw new Exception('VM Launch failed')
                }
                println("Target VM is ${serverId}, waiting for IP address to be assigned")

                ipAddress = waitForServerIp(serverId)
                println("Waiting for VM at ${ipAddress} to be reachable")

                remote = [
                    name: containerName ?: "osm-e2e-${BUILD_NUMBER}",
                    host: ipAddress,
                    user: 'ubuntu',
                    identityFile: SSH_KEY,
                    allowAnyHosts: true,
                    logLevel: 'INFO',
                    pty: true
                ]

                // Poll native ssh until the VM answers (1 minute budget, 5s connect timeout per try)
                alive = false
                timeout(time: 1, unit: 'MINUTES') {
                    while (!alive) {
                        def sshStatus = sh(
                            returnStatus: true,
                            script: "ssh -T -i ${SSH_KEY} " +
                                    "-o StrictHostKeyChecking=no " +
                                    "-o UserKnownHostsFile=/dev/null " +
                                    "-o ConnectTimeout=5 ubuntu@${ipAddress} 'echo Alive'")
                        alive = (sshStatus == 0)
                    }
                }
                println('VM is ready and accepting ssh connections')
                //////////////////////////////////////////////////////////////////////////////////////////////
                println('Applying sshd config workaround for Ubuntu 22.04 and old jsch client in Jenkins (via native ssh)...')
                sh """ssh -T -i ${SSH_KEY} \
                    -o StrictHostKeyChecking=no \
                    -o UserKnownHostsFile=/dev/null \
                    ubuntu@${ipAddress} \"echo HostKeyAlgorithms +ssh-rsa | sudo tee -a /etc/ssh/sshd_config\"
                """
                sh """ssh -T -i ${SSH_KEY} \
                    -o StrictHostKeyChecking=no \
                    -o UserKnownHostsFile=/dev/null \
                    ubuntu@${ipAddress} \"echo PubkeyAcceptedKeyTypes +ssh-rsa | sudo tee -a /etc/ssh/sshd_config\"
                """
                sh """ssh -T -i ${SSH_KEY} \
                    -o StrictHostKeyChecking=no \
                    -o UserKnownHostsFile=/dev/null \
                    ubuntu@${ipAddress} \"sudo systemctl restart ssh.service\"
                """
                //////////////////////////////////////////////////////////////////////////////////////////////
            } // stage("Spawn Remote VM")
///////////////////////////////////////////////////////////////////////////////////////
// Checks before installation
///////////////////////////////////////////////////////////////////////////////////////
            stage('Checks before installation') {
                if (!ipAddress?.trim()) {
                    error('Missing VM IP address, cannot run pre-installation checks')
                }

                // Wait for cloud-init to finish, then force a clock sync via chrony
                sshCommand remote: remote, command: 'cloud-init status --wait'
                sshCommand remote: remote, command: 'sudo apt-get -y update'
                sshCommand remote: remote, command: 'sudo apt-get -y install chrony'
                sshCommand remote: remote, command: 'sudo service chrony stop'
                sshCommand remote: remote, command: 'sudo chronyd -vq'
                sshCommand remote: remote, command: 'sudo service chrony start'
            } // stage("Checks before installation")
///////////////////////////////////////////////////////////////////////////////////////
// Install
///////////////////////////////////////////////////////////////////////////////////////
            stage('Install') {
                if (!ipAddress?.trim()) {
                    error('Missing VM IP address, cannot run installation steps')
                }

                sshCommand remote: remote, command: """
                    wget ${INSTALLER_URL}
                    chmod +x ./install_osm.sh
                    sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
                """

                Map gitlabCredentialsMap = [$class: 'UsernamePasswordMultiBinding',
                                            credentialsId: 'gitlab-registry',
                                            usernameVariable: 'USERNAME',
                                            passwordVariable: 'PASSWORD']
                withCredentials([gitlabCredentialsMap]) {
                    List<String> installArgs = buildInstallerArgs(USERNAME, PASSWORD)

                    String installCmd = "./install_osm.sh ${installArgs.join(' ')}"
                    sshCommand remote: remote, command: """
                        ${installCmd}
                    """
                }
            } // stage("Install")

///////////////////////////////////////////////////////////////////////////////////////
// Health check of installed OSM in remote VM
///////////////////////////////////////////////////////////////////////////////////////
            stage('OSM Health') {
                if (!ipAddress?.trim()) {
                    error('Missing VM IP address, cannot run OSM health checks')
                }
                if (!remote) {
                    error('Missing remote target, cannot run OSM health checks')
                }

                // A successful `osm vim-list` against the NBI proves the deployment answers
                timeout(time: 5, unit: 'MINUTES') {
                    String osmHostname = "nbi.${ipAddress}.nip.io"
                    sshCommand remote: remote, command: """
                        OSM_HOSTNAME=${osmHostname} ~/.local/bin/osm vim-list
                    """
                }
            } // stage('OSM Health')

///////////////////////////////////////////////////////////////////////////////////////
// Get OSM Kubeconfig and store it for future usage (Robot/vCluster)
///////////////////////////////////////////////////////////////////////////////////////
            if (params.DO_ROBOT) {
                stage('OSM Get kubeconfig') {
                    kubeTmpDir = pwd(tmp: true)
                    env.OSM_KUBECONFIG_PATH = "${kubeTmpDir}/osm_config"
                    env.VCLUSTER_KUBECONFIG_PATH = "${kubeTmpDir}/vcluster_config"

                    sshGet remote: remote,
                        from: "/home/ubuntu/.kube/config",
                        into: env.OSM_KUBECONFIG_PATH,
                        override: true
                    sh "chmod 600 ${env.OSM_KUBECONFIG_PATH}"
                    sh "test -s ${env.OSM_KUBECONFIG_PATH}"
                    // Debug: show the Kubernetes API endpoint used by the kubeconfig.
                    // (k3s defaults to 127.0.0.1:6443, which is not reachable from the Jenkins agent container)
                    sh "grep -nE '^\\s*server:' ${env.OSM_KUBECONFIG_PATH} || true"
                } // stage('OSM Get kubeconfig')

///////////////////////////////////////////////////////////////////////////////////////
// Create vCluster for GitOps/Robot test execution
///////////////////////////////////////////////////////////////////////////////////////
                stage('Create vCluster') {
                    println("Creating vcluster ${VCLUSTER_NAME} in namespace ${VCLUSTER_NAMESPACE}")
                    dockerLoginInternalRegistry()
                    create_vcluster(INTERNAL_DOCKER_REGISTRY, testsImageTag, env.OSM_KUBECONFIG_PATH, env.VCLUSTER_KUBECONFIG_PATH, VCLUSTER_NAME, VCLUSTER_NAMESPACE)
                    sh "chmod 600 ${env.VCLUSTER_KUBECONFIG_PATH}"
                    sh "test -s ${env.VCLUSTER_KUBECONFIG_PATH}"
                } // stage('Create vCluster')

///////////////////////////////////////////////////////////////////////////////////////
// Execute Robot tests
///////////////////////////////////////////////////////////////////////////////////////
                stage('System Integration Test') {
                    String prometheusHostname = "prometheus.${ipAddress}.nip.io"
                    Integer prometheusPort = PROMETHEUS_PORT_DEFAULT
                    String osmHostnameRobot = "nbi.${ipAddress}.nip.io:443"

                    register_etsi_vim_account(
                        INTERNAL_DOCKER_REGISTRY,
                        testsImageTag,
                        osmHostnameRobot,
                        params.ROBOT_VIM,
                        params.ROBOT_PORT_MAPPING_VIM,
                        params.KUBECONFIG,
                        params.CLOUDS,
                        params.PROMETHEUS_CONFIG_VIM
                    )
                    register_etsi_k8s_cluster(
                        INTERNAL_DOCKER_REGISTRY,
                        testsImageTag,
                        osmHostnameRobot,
                        params.ROBOT_VIM,
                        params.ROBOT_PORT_MAPPING_VIM,
                        params.KUBECONFIG,
                        params.CLOUDS,
                        params.PROMETHEUS_CONFIG_VIM
                    )

                    // IMPORTANT: tests expect the vcluster kubeconfig at this container path.
                    String robotVclusterKubeconfigPath = ROBOT_VCLUSTER_KUBECONFIG_CONTAINER_PATH
                    run_robot_systest(
                        INTERNAL_DOCKER_REGISTRY,
                        testsImageTag,
                        params.ROBOT_TAG_NAME,
                        osmHostnameRobot,
                        prometheusHostname,
                        prometheusPort,
                        params.OCI_REGISTRY_URL,
                        params.ROBOT_VIM,
                        params.ROBOT_PORT_MAPPING_VIM,
                        params.KUBECONFIG,
                        params.CLOUDS,
                        null,
                        SSH_KEY,
                        params.ROBOT_PASS_THRESHOLD,
                        params.ROBOT_UNSTABLE_THRESHOLD,
                        // extraEnvVars map of extra environment variables
                        ['CLUSTER_KUBECONFIG_CREDENTIALS': robotVclusterKubeconfigPath],
                        // extraVolMounts map of extra volume mounts
                        [(env.VCLUSTER_KUBECONFIG_PATH): robotVclusterKubeconfigPath]
                    )
                } // stage('System Integration Test')
            } else {
                println('Skipping kubeconfig/vcluster steps because DO_ROBOT is set to false')
            }
        } else {
            println('Skipping VM spawn because DO_INSTALL is set to false')
        }
    } finally {
        // Log archiving and VM cleanup run regardless of the pipeline outcome
        stage('Archive Logs') {
            if (params.DO_INSTALL && remote) {
                try {
                    archiveLogs(remote)
                } catch (Exception e) {
                    println("Archive logs failed: ${e.message}")
                }
            } else {
                println('No remote target to collect logs from')
            }
        }

        stage('Cleanup') {
            // Always attempt to cleanup temp kubeconfig directory if created.
            if (kubeTmpDir?.trim()) {
                sh "rm -rf ${kubeTmpDir} || true"
                kubeTmpDir = null
            }

            if (!params.DO_INSTALL || serverId == null) {
                println('No VM to cleanup')
                return
            }

            String buildState = currentBuild.currentResult ?: 'SUCCESS'
            boolean buildFailed = buildState == 'FAILURE'

            // SAVE_CONTAINER_ON_FAIL / SAVE_CONTAINER_ON_PASS can veto VM deletion
            boolean deleteVm = true
            if (buildFailed && params.SAVE_CONTAINER_ON_FAIL) {
                deleteVm = false
            }
            if (!buildFailed && params.SAVE_CONTAINER_ON_PASS) {
                deleteVm = false
            }

            if (deleteVm) {
                println("Deleting VM: ${serverId}")
                try {
                    runHiveCommand("""
                        openstack server delete ${serverId}
                    """)
                } catch (Exception e) {
                    // Avoid masking an earlier failure with cleanup failure.
                    println("VM delete failed: ${e.message}")
                }
            } else {
                println("Preserving VM ${serverId} (build state: ${buildState})")
            }
        }
    }
}
464
mesaj8f49a952025-11-10 22:05:22 +0100465////////////////////////////////////////////////////////////////////////////////////////
466// Helper Classes & Functions (ported from ci_stage_3.groovy)
467// Placed below the pipeline for readability.
468////////////////////////////////////////////////////////////////////////////////////////
469
/** Usage:
 *      def dr = new DockerRunner(this)
 *      stdout = dr.run(
 *          image   : "${INTERNAL_DOCKER_REGISTRY}opensourcemano/tests:${tag}",
 *          entry   : "osm",                      // optional
 *          envVars : [ "OSM_HOSTNAME=${host}" ],
 *          envFile : myEnv,
 *          mounts  : [
 *              "${clouds}:/etc/openstack/clouds.yaml",
 *              "${kubeconfig}:/root/.kube/config"
 *          ],
 *          cmd     : "vim-create --name osm ..."
 *      )
 */
class DockerRunner implements Serializable {
    // Pipeline script context providing the `sh` and `echo` steps.
    def steps
    DockerRunner(def steps) { this.steps = steps }

    /**
     * Executes `docker run --rm ...` assembled from the named args
     * (image, entry, envVars, envFile, mounts, cmd, returnStdout).
     * Returns trimmed stdout when returnStdout is true, '' otherwise.
     * Any failure is wrapped in a plain Exception; the executed command
     * is always echoed for traceability.
     */
    String run(Map args = [:]) {
        def wantStdout = args.remove('returnStdout') ?: false
        def envFile = args.envFile ?: ''
        def entryOpt = args.entry ? "--entrypoint ${args.entry}" : ''
        def mountOpts = (args.mounts ?: [])
            .findAll { it && it.trim() }
            .collect { "-v ${it}" }
            .join(' ')
        def envOpts = (args.envVars ?: [])
            .findAll { it && it.trim() }
            .collect { "--env ${it}" }
            .join(' ')
        // Joining with single spaces reproduces the exact command text of the template form.
        def fullCmd = ['docker run --rm', entryOpt, envOpts,
                       envFile ? "--env-file ${envFile}" : '',
                       mountOpts, args.image ?: '', args.cmd ?: ''].join(' ').trim()

        String captured = ''
        try {
            if (wantStdout) {
                captured = steps.sh(returnStdout: true, script: fullCmd).trim()
            } else {
                steps.sh(script: fullCmd)
            }
        } catch (Exception ex) {
            throw new Exception("docker run failed -> ${ex.message}")
        } finally {
            steps.echo("Command executed: ${fullCmd}")
        }
        return captured
    }
}

521/* -------------------------------------------------------------------
522 * create_vcluster – spin up a vcluster in the target OSM cluster
523 * @params:
524 * tagName - The OSM test docker image tag to use
525 * kubeconfigPath - The path of the OSM kubernetes master configuration file
526 * vclusterKubeconfigOutPath - Output path for the vcluster kubeconfig
527 * vclusterName - Name of the vcluster
528 * vclusterNamespace - Namespace for the vcluster
529 * ------------------------------------------------------------------- */
void create_vcluster(String dockerRegistryUrl, String tagName, String kubeconfigPath, String vclusterKubeconfigOutPath, String vclusterName, String vclusterNamespace) {
    def dr = new DockerRunner(this)
    // All kubectl/vcluster invocations run inside the tests image against the OSM cluster kubeconfig
    def mounts = ["${kubeconfigPath}:/root/.kube/config"]
    def envs = ["KUBECONFIG=/root/.kube/config"]
    def image = "${dockerRegistryUrl}opensourcemano/tests:${tagName}"

    // 1) create vcluster namespace ('|| true' tolerates an already-existing namespace)
    println("vcluster: ensuring namespace '${vclusterNamespace}' exists")
    dr.run(
        image: image,
        entry: 'kubectl',
        envVars: envs,
        mounts: mounts,
        cmd: "create namespace ${vclusterNamespace} || true"
    )

    // 2) create vcluster (no connect)
    println("vcluster: creating '${vclusterName}' (no connect)")
    dr.run(
        image: image,
        entry: 'vcluster',
        envVars: envs,
        mounts: mounts,
        cmd: "create ${vclusterName} -n ${vclusterNamespace} --connect=false -f /etc/vcluster.yaml"
    )

    // 3) poll until Status is Running
    int maxWaitMinutes = 10
    long deadline = System.currentTimeMillis() + (maxWaitMinutes * 60 * 1000)
    boolean running = false
    String lastOut = ''

    println("vcluster: waiting for '${vclusterName}' to reach status 'Running' (timeout: ${maxWaitMinutes} minutes)")
    while (System.currentTimeMillis() < deadline) {
        try {
            // Extract this vcluster's Status field from `vcluster list` JSON output via jq
            lastOut = dr.run(
                returnStdout: true,
                image: image,
                entry: '/bin/sh',
                envVars: envs,
                mounts: mounts,
                cmd: """-c 'vcluster list --output json | jq -r ".[] | select(.Name==\\"${vclusterName}\\") | .Status"'"""
            ).trim()
        } catch (Exception e) {
            // Polling failures are transient (e.g. API not up yet); keep retrying until the deadline
            println("Polling command failed: ${e.message}. Will retry.")
            lastOut = "Error: ${e.message}"
        }

        println("Polling for vcluster status. Current status: '${lastOut}'")
        if (lastOut == 'Running') {
            running = true
            break
        }
        sleep 10
    }

    if (!running) {
        println("vcluster status after timeout: ${lastOut}")
        throw new Exception("vcluster '${vclusterName}' did not reach 'Running' state within ${maxWaitMinutes} minutes.")
    }

    // 4) get vcluster kubeconfig
    String outPath = vclusterKubeconfigOutPath ?: "${WORKSPACE}/kubeconfig/vcluster_config"
    // Ensure destination directory exists on the Jenkins agent before relying on shell redirection.
    String outDir = outPath.contains('/') ? outPath.substring(0, outPath.lastIndexOf('/')) : '.'
    sh "mkdir -p ${outDir}"
    println("vcluster: exporting kubeconfig to '${outPath}'")
    dr.run(
        image: image,
        entry: 'vcluster',
        envVars: envs,
        mounts: mounts,
        cmd: "connect ${vclusterName} -n ${vclusterNamespace} --server ${vclusterName}.${vclusterNamespace}.svc.cluster.local:443 --print > ${outPath}"
    )
}

/**
 * Runs `action` until it returns a truthy value, retrying up to `maxAttempts`
 * additional times (i.e. at most maxAttempts + 1 invocations in total), waiting
 * `delaySeconds` between attempts. Exceptions thrown by `action` are logged and
 * treated as a failed attempt. Throws when all attempts are exhausted.
 *
 * Fix vs previous version: no misleading "Retrying... (0 attempts left)" log and
 * no pointless sleep after the final failed attempt — the total number of
 * invocations and the exception contract are unchanged.
 */
void retryWithDocker(int maxAttempts, int delaySeconds, Closure action) {
    int remaining = maxAttempts
    while (true) {
        try {
            if (action()) return
        } catch (Exception e) {
            println("Attempt failed: ${e.message}")
        }
        if (remaining <= 0) {
            break   // final attempt just failed: give up without sleeping
        }
        println("Retrying... (${remaining} attempts left)")
        sleep delaySeconds
        remaining--
    }
    throw new Exception("Operation failed after ${maxAttempts} retries")
}
/* Registers the ETSI OpenStack VIM account ("osm") in the target OSM instance and
 * waits until it is reported as ENABLED. The whole operation is retried up to 3
 * times (10s apart) via retryWithDocker; between attempts the half-registered VIM
 * is force-deleted. All osm CLI calls run inside the opensourcemano/tests image. */
void register_etsi_vim_account(
    String dockerRegistryUrl,
    String tagName,
    String osmHostname,
    String envfile = null,
    String portmappingfile = null,
    String kubeconfig = null,
    String clouds = null,
    String prometheusconfigfile = null
) {
    String VIM_TARGET = "osm"
    String VIM_MGMT_NET = "osm-ext"
    String OS_PROJECT_NAME = "osm_jenkins"
    String OS_AUTH_URL = "http://172.21.247.1:5000/v3"
    String entrypointCmd = "osm"

    // When no env file is given, create an empty temp file so --env-file is always valid
    def tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = envfile ?: "${tempdir}/env"
    if (!envfile) {
        sh(script: "touch ${environmentFile}")
    }

    retryWithDocker(3, 10) {
        def dr = new DockerRunner(this)
        try {
            println("Attempting to register VIM account")
            withCredentials([usernamePassword(credentialsId: 'openstack-jenkins-credentials',
                passwordVariable: 'OS_PASSWORD', usernameVariable: 'OS_USERNAME')]) {
                // '|| true': an already-existing VIM is tolerated; the ENABLED check below decides success
                String entrypointArgs = """vim-create --name ${VIM_TARGET} --user ${OS_USERNAME} \
                    --password ${OS_PASSWORD} --tenant ${OS_PROJECT_NAME} \
                    --auth_url ${OS_AUTH_URL} --account_type openstack --description vim \
                    --prometheus_config_file /root/etsi-vim-prometheus.json \
                    --config '{management_network_name: ${VIM_MGMT_NET}, dataplane_physical_net: physnet2}' || true"""
                dr.run(
                    image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
                    entry: entrypointCmd,
                    envVars: ["OSM_HOSTNAME=${osmHostname}"],
                    envFile: environmentFile,
                    mounts: [
                        clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                        kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                        portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                        prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
                    ].findAll { it != null },
                    cmd: entrypointArgs,
                    returnStdout: true
                )
            }

            // Check if the VIM is ENABLED (up to 5 checks, 10s apart)
            int statusChecks = 5
            while (statusChecks > 0) {
                sleep 10
                String vimList = dr.run(
                    image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
                    entry: entrypointCmd,
                    envVars: ["OSM_HOSTNAME=${osmHostname}"],
                    envFile: environmentFile,
                    mounts: [
                        clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                        kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                        portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                        prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
                    ].findAll { it != null },
                    cmd: "vim-list --long | grep ${VIM_TARGET}",
                    returnStdout: true
                )
                if (vimList.contains("ENABLED")) {
                    println("VIM successfully registered and is ENABLED.")
                    return true
                }
                statusChecks--
            }
        } catch (Exception e) {
            println("VIM registration check failed: ${e.message}")
        }

        // If we get here, VIM is not enabled or creation failed. cleanup and retry.
        println("VIM not enabled, deleting and retrying...")
        dr.run(
            image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
            entry: entrypointCmd,
            envVars: ["OSM_HOSTNAME=${osmHostname}"],
            envFile: environmentFile,
            mounts: [
                clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
            ].findAll { it != null },
            cmd: "vim-delete --force ${VIM_TARGET}",
            returnStdout: true
        )
        return false
    }
}

/* Registers the ETSI Kubernetes cluster ("osm") in the target OSM instance and
 * waits until it is reported as ENABLED. Mirrors register_etsi_vim_account: up to
 * 3 retries (10s apart) via retryWithDocker, deleting the cluster between attempts.
 * All osm CLI calls run inside the opensourcemano/tests image. */
void register_etsi_k8s_cluster(
    String dockerRegistryUrl,
    String tagName,
    String osmHostname,
    String envfile = null,
    String portmappingfile = null,
    String kubeconfig = null,
    String clouds = null,
    String prometheusconfigfile = null
) {
    String K8S_CLUSTER_TARGET = "osm"
    String VIM_TARGET = "osm"
    String VIM_MGMT_NET = "osm-ext"
    String K8S_CREDENTIALS = "/root/.kube/config"
    String entrypointCmd = "osm"

    // When no env file is given, create an empty temp file so --env-file is always valid
    def tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = envfile ?: "${tempdir}/env"
    if (!envfile) {
        sh(script: "touch ${environmentFile}")
    }

    retryWithDocker(3, 10) {
        def dr = new DockerRunner(this)
        try {
            println("Attempting to register K8s cluster")
            dr.run(
                image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
                entry: entrypointCmd,
                envVars: ["OSM_HOSTNAME=${osmHostname}"],
                envFile: environmentFile,
                mounts: [
                    clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                    kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                    portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                    prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
                ].findAll { it != null },
                cmd: """k8scluster-add ${K8S_CLUSTER_TARGET} --creds ${K8S_CREDENTIALS} --version \"v1\" \
                    --description \"Robot-cluster\" --skip-jujubundle --vim ${VIM_TARGET} \
                    --k8s-nets '{net1: ${VIM_MGMT_NET}}'""",
                returnStdout: true
            )

            // Check if the K8s cluster is ENABLED (up to 10 checks, 10s apart)
            int statusChecks = 10
            while (statusChecks > 0) {
                sleep 10
                String clusterList = dr.run(
                    image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
                    entry: entrypointCmd,
                    envVars: ["OSM_HOSTNAME=${osmHostname}"],
                    envFile: environmentFile,
                    mounts: [
                        clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                        kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                        portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                        prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
                    ].findAll { it != null },
                    cmd: "k8scluster-list | grep ${K8S_CLUSTER_TARGET}",
                    returnStdout: true
                )
                if (clusterList.contains("ENABLED")) {
                    println("K8s cluster successfully registered and is ENABLED.")
                    return true
                }
                statusChecks--
            }
        } catch (Exception e) {
            println("K8s cluster registration check failed: ${e.message}")
        }

        // If we get here, cluster is not enabled or creation failed. cleanup and retry.
        println("K8s cluster not enabled, deleting and retrying...")
        dr.run(
            image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
            entry: entrypointCmd,
            envVars: ["OSM_HOSTNAME=${osmHostname}"],
            envFile: environmentFile,
            mounts: [
                clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
            ].findAll { it != null },
            cmd: "k8scluster-delete ${K8S_CLUSTER_TARGET}"
        )
        return false
    }
}

/* Runs the Robot system tests from the opensourcemano/tests image against the
 * given OSM endpoint, then publishes results with RobotPublisher (always, via
 * `finally`, even when the test container fails).
 *   extraEnvVars   - map of extra NAME -> VALUE environment variables for the container
 *   extraVolMounts - map of extra hostPath -> containerPath volume mounts */
void run_robot_systest(
    String dockerRegistryUrl,
    String tagName,
    String testName,
    String osmHostname,
    String prometheusHostname,
    Integer prometheusPort = null,
    String ociRegistryUrl = null,
    String envfile = null,
    String portmappingfile = null,
    String kubeconfig = null,
    String clouds = null,
    String hostfile = null,
    String osmRSAfile = null,
    String passThreshold = '0.0',
    String unstableThreshold = '0.0',
    Map extraEnvVars = null,
    Map extraVolMounts = null
) {
    // tempdir doubles as: (a) empty env file when none is given, (b) the reports
    // directory mounted into the container at /robot-systest/reports.
    def tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = envfile ?: "${tempdir}/env"
    if (!envfile) {
        sh(script: "touch ${environmentFile}")
    }

    def prometheusPortVar = prometheusPort != null ? "PROMETHEUS_PORT=${prometheusPort}" : null
    def hostfilemount = hostfile ? "${hostfile}:/etc/hosts" : null

    try {
        withCredentials([usernamePassword(credentialsId: 'gitlab-oci-test',
            passwordVariable: 'OCI_REGISTRY_PSW', usernameVariable: 'OCI_REGISTRY_USR')]) {
            // Optional entries are built as null and filtered out below
            def baseEnvVars = [
                "OSM_HOSTNAME=${osmHostname}",
                "PROMETHEUS_HOSTNAME=${prometheusHostname}",
                prometheusPortVar,
                ociRegistryUrl ? "OCI_REGISTRY_URL=${ociRegistryUrl}" : null,
                "OCI_REGISTRY_USER=${OCI_REGISTRY_USR}",
                "OCI_REGISTRY_PASSWORD=${OCI_REGISTRY_PSW}"
            ].findAll { it != null }

            def baseMounts = [
                clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                osmRSAfile ? "${osmRSAfile}:/root/osm_id_rsa" : null,
                kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                "${tempdir}:/robot-systest/reports",
                portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                hostfilemount
            ].findAll { it != null }

            def extraEnvVarsList = extraEnvVars?.collect { key, value -> "${key}=${value}" } ?: []
            def extraVolMountsList = extraVolMounts?.collect { hostPath, containerPath -> "${hostPath}:${containerPath}" } ?: []

            def dr = new DockerRunner(this)
            dr.run(
                image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
                envVars: baseEnvVars + extraEnvVarsList,
                envFile: "${environmentFile}",
                mounts: baseMounts + extraVolMountsList,
                cmd: "-t ${testName}"
            )
        }
    } finally {
        // Best-effort publish Robot results from tempdir into workspace
        sh("cp ${tempdir}/*.xml . 2>/dev/null || true")
        sh("cp ${tempdir}/*.html . 2>/dev/null || true")

        def outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
        sh("command -v tree >/dev/null 2>&1 && tree ${outputDirectory} || ls -la ${outputDirectory}")

        try {
            // NOTE(review): thresholds arrive as Strings from params; RobotPublisher is
            // expected to coerce them — confirm against the plugin version in use.
            step([
                $class: 'RobotPublisher',
                outputPath: "${outputDirectory}",
                outputFileName: '*.xml',
                disableArchiveOutput: false,
                reportFileName: 'report.html',
                logFileName: 'log.html',
                passThreshold: passThreshold,
                unstableThreshold: unstableThreshold,
                otherFiles: '*.png'
            ])
        } catch (Exception e) {
            println("RobotPublisher failed: ${e.message}")
        }
    }
}

/**
 * Extract the value for a given key from an OpenStack CLI ASCII table.
 *
 * The CLI prints rows like `| key | value |`; splitting such a row on '|'
 * yields ["", " key ", " value "], so the key sits at index 1 and the value
 * at index 2. Border lines (`+----+----+`) produce a single cell and are
 * skipped by the length check.
 *
 * @param key    row label to look up (e.g. 'addresses')
 * @param output full table text as printed by the CLI
 * @return trimmed value of the first matching row, or null when the key is
 *         not found or its row has no value column
 */
String get_value(String key, String output) {
    for (String line : output.split('\n')) {
        def cells = line.split('\\|')
        // Require a value cell as well: a malformed row such as '| key |'
        // passed the old `length > 1` guard and then threw
        // ArrayIndexOutOfBoundsException on the value access.
        if (cells.length > 2 && cells[1].trim() == key) {
            return cells[2].trim()
        }
    }
    return null
}
904
/**
 * Log the agent's docker daemon into the internal GitLab registry.
 *
 * The password is piped via --password-stdin so it never appears on the
 * docker command line. The credential variables are referenced with escaped
 * `\$...` so the *shell* resolves them from the environment bound by
 * withCredentials; Groovy-interpolating secrets into the script text is
 * flagged as insecure by Jenkins (the secret would be embedded in the
 * generated script). Only the non-secret registry host is Groovy-interpolated.
 */
void dockerLoginInternalRegistry() {
    withCredentials([usernamePassword(credentialsId: 'gitlab-registry',
        passwordVariable: 'REGISTRY_PASSWORD', usernameVariable: 'REGISTRY_USERNAME')]) {
        sh """
            set -e
            echo "\$REGISTRY_PASSWORD" | docker login ${INTERNAL_DOCKER_REGISTRY_HOST} -u "\$REGISTRY_USERNAME" --password-stdin
        """
    }
}
914
/**
 * Wrap a shell snippet with the Hive environment.
 *
 * Builds a POSIX sh script: a `#!/bin/sh -e` shebang, then the file-level
 * HIVE_ENV_EXPORT lines, then the de-indented snippet, each followed by a
 * newline.
 *
 * @param commandBody shell snippet (may be an indented triple-quoted string)
 * @return complete script text ready to hand to the `sh` step
 */
String withHiveEnv(String commandBody) {
    String script = '#!/bin/sh -e\n'
    script += "${HIVE_ENV_EXPORT}\n"
    script += "${commandBody.stripIndent()}\n"
    return script
}
921
/**
 * Execute a shell snippet under the Hive environment and return its
 * trimmed stdout.
 *
 * @param commandBody shell snippet to run (wrapped by withHiveEnv)
 * @return stdout of the command with surrounding whitespace removed
 */
String runHiveCommand(String commandBody) {
    String fullScript = withHiveEnv(commandBody)
    String output = sh(returnStdout: true, script: fullScript)
    return output.trim()
}
925
/**
 * Poll OpenStack until the given server reports an address, then return
 * its IP.
 *
 * Retries `openstack server show` every 5 seconds inside waitUntil, bounded
 * by a 5-minute timeout. The 'addresses' field has the form `net=IP`, so the
 * IP is taken from the part after '='.
 *
 * @param id OpenStack server ID or name
 * @return the server's IP address
 */
String waitForServerIp(String id) {
    String serverIp = ''
    timeout(time: 5, unit: 'MINUTES') {
        waitUntil {
            String details = runHiveCommand("""
            openstack server show ${id}
            """)
            String addresses = get_value('addresses', details)
            if (!addresses) {
                // Not allocated yet — back off briefly before the next poll.
                sleep 5
                return false
            }
            serverIp = addresses.split('=')[1]
            return true
        }
    }
    return serverIp
}
944
// Collect logs from the remote VM and archive them in Jenkins
/**
 * Gather diagnostics from the remote OSM VM — Kubernetes events, host system
 * logs, OSM/vcluster/flux-system workload logs and Airflow DAG logs — then
 * fetch the `logs` tree into the workspace and archive it as build artifacts.
 *
 * All remote collection commands are best-effort (`|| true` / redirected
 * stderr) so a single failing probe does not abort log gathering.
 *
 * @param remoteTarget ssh-steps remote map (host, user, credentials, ...)
 */
void archiveLogs(Map remoteTarget) {
    // Create the remote directory layout filled in by the steps below.
    sshCommand remote: remoteTarget, command: '''mkdir -p logs/dags logs/vcluster logs/flux-system logs/events logs/system'''

    // Collect Kubernetes events
    sshCommand remote: remoteTarget, command: '''
        echo "Extracting Kubernetes events"
        kubectl get events --all-namespaces --sort-by='.lastTimestamp' -o wide > logs/events/k8s-events.log 2>&1 || true
        kubectl get events -n osm --sort-by='.lastTimestamp' -o wide > logs/events/osm-events.log 2>&1 || true
        kubectl get events -n vcluster --sort-by='.lastTimestamp' -o wide > logs/events/vcluster-events.log 2>&1 || true
        kubectl get events -n flux-system --sort-by='.lastTimestamp' -o wide > logs/events/flux-system-events.log 2>&1 || true
    '''

    // Collect host logs and system info (journal or /var/log fallback,
    // service list for systemd and non-systemd inits, process snapshots).
    sshCommand remote: remoteTarget, command: '''
        echo "Collect system logs"
        if command -v journalctl >/dev/null; then
            journalctl > logs/system/system.log
        fi

        for entry in syslog messages; do
            [ -e "/var/log/${entry}" ] && cp -f /var/log/${entry} logs/system/"${entry}.log"
        done

        echo "Collect active services"
        case "$(cat /proc/1/comm)" in
            systemd)
                systemctl list-units > logs/system/services.txt 2>&1
                ;;
            *)
                service --status-all >> logs/system/services.txt 2>&1
                ;;
        esac

        top -b -n 1 > logs/system/top.txt 2>&1
        ps fauxwww > logs/system/ps.txt 2>&1
    '''

    // Collect OSM namespace workloads
    sshCommand remote: remoteTarget, command: '''
        for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
            echo "Extracting log for $deployment"
            kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 > logs/$deployment.log || true
        done
    '''
    sshCommand remote: remoteTarget, command: '''
        for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
            echo "Extracting log for $statefulset"
            kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 > logs/$statefulset.log || true
        done
    '''
    // Copy the Airflow scheduler's per-DAG logs out of the scheduler pod.
    sshCommand remote: remoteTarget, command: '''
        schedulerPod="$(kubectl get pods -n osm | grep osm-scheduler| awk '{print $1; exit}')"; \
        echo "Extracting logs from Airflow DAGs from pod ${schedulerPod}"; \
        kubectl -n osm cp ${schedulerPod}:/opt/airflow/logs/scheduler/latest/dags logs/dags -c scheduler 2>&1 || true
    '''

    // Collect vcluster and flux-system namespace logs
    sshCommand remote: remoteTarget, command: '''
        echo "Extracting logs from vcluster namespace"
        for pod in `kubectl get pods -n vcluster | grep -v NAME | awk '{print $1}'`; do
            echo "Extracting log for vcluster pod: $pod"
            kubectl logs -n vcluster $pod --timestamps=true --all-containers 2>&1 > logs/vcluster/$pod.log || true
        done
    '''
    sshCommand remote: remoteTarget, command: '''
        echo "Extracting logs from flux-system namespace"
        for pod in `kubectl get pods -n flux-system | grep -v NAME | awk '{print $1}'`; do
            echo "Extracting log for flux-system pod: $pod"
            kubectl logs -n flux-system $pod --timestamps=true --all-containers 2>&1 > logs/flux-system/$pod.log || true
        done
    '''

    // Remove any stale local copy before fetching the fresh logs from the VM,
    // then list the remote tree for traceability and pull it into the workspace.
    sh 'rm -rf logs'
    sshCommand remote: remoteTarget, command: '''ls -al logs logs/vcluster logs/events logs/flux-system logs/system'''
    sshGet remote: remoteTarget, from: 'logs', into: '.', override: true
    archiveArtifacts artifacts: 'logs/*.log, logs/dags/*.log, logs/vcluster/*.log, logs/events/*.log, logs/flux-system/*.log, logs/system/**'
}
1022}