blob: 0e8d68d9d7fcdd76abb4e77784069ba2e985d62b [file] [log] [blame]
garciadeblas640e9182025-11-07 14:52:57 +01001/* Copyright ETSI Contributors and Others
2 *
3 * All Rights Reserved.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License"); you may
6 * not use this file except in compliance with the License. You may obtain
7 * a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14 * License for the specific language governing permissions and limitations
15 * under the License.
16 */
17
mesaj8f49a952025-11-10 22:05:22 +010018import groovy.transform.Field
19
// Jenkins job parameters. Every parameter declared here is exposed in the job UI
// and consumed below via `params.<NAME>`. Grouped by concern (install/VM lifecycle,
// module under test, Robot system tests).
properties([
    parameters([
        // ----------------------------
        // Core: install / VM lifecycle
        // ----------------------------
        string(
            defaultValue: env.GERRIT_BRANCH ?: env.BRANCH_NAME ?: 'master',
            description: 'Branch used to name downstream resources',
            name: 'GERRIT_BRANCH'
        ),
        string(
            defaultValue: '',
            description: 'Prebuilt container tag to test (fallbacks to auto-generated name if empty)',
            name: 'CONTAINER_NAME'
        ),
        string(
            defaultValue: 'ubuntu24.04',
            description: 'Glance image to use for the remote VM',
            name: 'OPENSTACK_BASE_IMAGE'
        ),
        string(
            defaultValue: 'osm.sanity',
            description: 'OpenStack flavor for the remote VM',
            name: 'OPENSTACK_OSM_FLAVOR'
        ),
        booleanParam(
            defaultValue: true,
            description: 'Spawn the remote VM and perform installation steps',
            name: 'DO_INSTALL'
        ),
        booleanParam(
            defaultValue: false,
            description: 'Preserve VM on failure for further debugging',
            name: 'SAVE_CONTAINER_ON_FAIL'
        ),
        booleanParam(
            defaultValue: false,
            description: 'Preserve VM on success',
            name: 'SAVE_CONTAINER_ON_PASS'
        ),

        // ---------------------------------
        // Module under test / installation
        // ---------------------------------
        string(
            defaultValue: '',
            description: 'Name of the module under test',
            name: 'MODULE_NAME'
        ),
        string(
            name: 'GERRIT_REFSPEC',
            defaultValue: '',
            description: 'Gerrit refspec to checkout only for devops module (overrides COMMIT_ID if set)'
        ),

        // ----------------------------
        // Robot / system integration
        // ----------------------------
        booleanParam(
            defaultValue: false,
            description: 'Run Robot system integration tests after installation',
            name: 'DO_ROBOT'
        ),
        string(
            defaultValue: 'sanity',
            description: 'Robot tag selection (sanity/regression/daily are common options)',
            name: 'ROBOT_TAG_NAME'
        ),
        string(
            defaultValue: '/home/jenkins/hive/robot-systest.cfg',
            description: 'Robot environment file (ETSI VIM)',
            name: 'ROBOT_VIM'
        ),
        string(
            defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml',
            description: 'Port mapping file for SDN assist in ETSI VIM',
            name: 'ROBOT_PORT_MAPPING_VIM'
        ),
        string(
            defaultValue: '/home/jenkins/hive/etsi-vim-prometheus.json',
            description: 'Prometheus configuration file in ETSI VIM',
            name: 'PROMETHEUS_CONFIG_VIM'
        ),
        string(
            defaultValue: '/home/jenkins/hive/kubeconfig.yaml',
            description: 'Kubeconfig used by Robot for ETSI VIM cluster registration',
            name: 'KUBECONFIG'
        ),
        string(
            defaultValue: '/home/jenkins/hive/clouds.yaml',
            description: 'OpenStack clouds.yaml used by Robot',
            name: 'CLOUDS'
        ),
        string(
            defaultValue: 'oci://osm.etsi.org:5050/devops/test',
            description: 'OCI registry used by Robot system tests',
            name: 'OCI_REGISTRY_URL'
        ),
        string(
            defaultValue: '100.0',
            description: '% passed Robot tests to mark the build as passed',
            name: 'ROBOT_PASS_THRESHOLD'
        ),
        string(
            defaultValue: '80.0',
            description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)',
            name: 'ROBOT_UNSTABLE_THRESHOLD'
        )
    ])
])
130
// Shell one-liner that exports every OS_* variable (except OS_CLOUD) from the hive
// Robot config into the current shell session.
// NOTE(review): not referenced anywhere in this chunk of the file — presumably used
// by helpers defined further down (e.g. withHiveEnv); confirm before removing.
@Field final String HIVE_ENV_EXPORT = 'for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export $line ; done'
// URL of the OSM installer script downloaded onto the remote VM in the Install stage.
@Field final String INSTALLER_URL = 'https://osm-download.etsi.org/ftp/osm-19.0-nineteen/install_osm.sh'
// OpenStack network the VM's NIC is attached to at `openstack server create` time.
@Field final String OPENSTACK_NET_ID = 'osm-ext'
// Namespace and name used when spinning up the vcluster for GitOps/Robot tests.
@Field final String VCLUSTER_NAMESPACE = 'vcluster'
@Field final String VCLUSTER_NAME = 'e2e'
// Path inside the tests container where Robot expects the vcluster kubeconfig.
@Field final String ROBOT_VCLUSTER_KUBECONFIG_CONTAINER_PATH = '/robot-systest/cluster-kubeconfig.yaml'
// Default Prometheus port passed to the Robot run.
@Field final Integer PROMETHEUS_PORT_DEFAULT = 80
// Internal registry prefix for CICD images; HOST is the registry host portion only
// (everything before the first '/'), used for `docker login`.
@Field final String INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
@Field final String INTERNAL_DOCKER_REGISTRY_HOST = INTERNAL_DOCKER_REGISTRY.split('/')[0]
140
141// Main pipeline
142node('pool') {
143 // Use absolute path for the SSH key to avoid tilde-expansion issues with sshCommand
144 final String SSH_KEY = "${env.HOME ?: '/home/jenkins'}/hive/cicd_rsa"
145 final String INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
146 String serverId = null
147 String ipAddress = ''
148 String kubeTmpDir = null
149 Map remote = null
150 boolean alive = false
151
152 sh 'env'
153
154 // Debug: list hive directory to verify SSH key presence
155 sh 'ls -la ~/hive || true'
156
157 stage('Checkout') {
158 checkout scm
garciadeblas640e9182025-11-07 14:52:57 +0100159 }
mesaj8f49a952025-11-10 22:05:22 +0100160
161 def containerName = params.CONTAINER_NAME?.trim()
162
163 // Tags for installer:
164 // -t : common tag for other OSM modules (stable merge build for the branch)
165 // -T : tag for the module under test
166 // -m : module name under test
167 def branchTag = (params.GERRIT_BRANCH ?: 'master').trim().toLowerCase().replaceAll('[^a-z0-9._-]', '-')
168 def commonModulesTag = "osm-${branchTag}-merge"
169 def testedModuleName = params.MODULE_NAME?.trim()
170 def testedModuleTag = containerName ?: commonModulesTag
171 // The `opensourcemano/tests` image is produced by the `test` module; when testing any other module,
172 // the tests image tag must come from the common merge build for the branch.
173 def testsImageTag = (testedModuleName?.equalsIgnoreCase('test') || testedModuleName?.equalsIgnoreCase('tests')) ? testedModuleTag : commonModulesTag
174
175 Closure<List<String>> buildInstallerArgs = { String registryUser, String registryPassword ->
176 List<String> installArgs = ['-y']
177 String installerRefspec = params.GERRIT_REFSPEC?.trim()
178 if (testedModuleName?.equalsIgnoreCase('devops') && installerRefspec) {
179 installArgs << "-S ${installerRefspec}"
180 }
181 installArgs << "-d ${registryUser}:${registryPassword}@${INTERNAL_DOCKER_REGISTRY}"
182 installArgs << "-p ${INTERNAL_DOCKER_PROXY}"
183 installArgs << "-t ${commonModulesTag}"
184 installArgs << "-T ${testedModuleTag}"
185 installArgs << "-m ${testedModuleName}"
186 return installArgs
187 }
188
189 try {
190 if (params.DO_INSTALL) {
191///////////////////////////////////////////////////////////////////////////////////////
192// Launch VM
193///////////////////////////////////////////////////////////////////////////////////////
194 stage('Spawn Remote VM') {
195 println('Launching new VM')
196 def output = runHiveCommand("""
197 openstack server create --flavor ${params.OPENSTACK_OSM_FLAVOR} \
198 --image ${params.OPENSTACK_BASE_IMAGE} \
199 --key-name CICD \
200 --property build_url=\"${BUILD_URL}\" \
201 --nic net-id=${OPENSTACK_NET_ID} \
202 ${containerName}
203 """)
204
205 serverId = get_value('id', output)
206
207 if (serverId == null) {
208 println('VM launch output:')
209 println(output)
210 throw new Exception('VM Launch failed')
211 }
212 println("Target VM is ${serverId}, waiting for IP address to be assigned")
213
214 ipAddress = waitForServerIp(serverId)
215 println("Waiting for VM at ${ipAddress} to be reachable")
216
217 remote = [
218 name: containerName ?: "osm-e2e-${BUILD_NUMBER}",
219 host: ipAddress,
220 user: 'ubuntu',
221 identityFile: SSH_KEY,
222 allowAnyHosts: true,
223 logLevel: 'INFO',
224 pty: true
225 ]
226
227 alive = false
228 timeout(time: 1, unit: 'MINUTES') {
229 while (!alive) {
230 def sshStatus = sh(
231 returnStatus: true,
232 script: "ssh -T -i ${SSH_KEY} " +
233 "-o StrictHostKeyChecking=no " +
234 "-o UserKnownHostsFile=/dev/null " +
235 "-o ConnectTimeout=5 ubuntu@${ipAddress} 'echo Alive'")
236 alive = (sshStatus == 0)
237 }
238 }
239 println('VM is ready and accepting ssh connections')
240 //////////////////////////////////////////////////////////////////////////////////////////////
241 println('Applying sshd config workaround for Ubuntu 22.04 and old jsch client in Jenkins (via native ssh)...')
242 sh """ssh -T -i ${SSH_KEY} \
243 -o StrictHostKeyChecking=no \
244 -o UserKnownHostsFile=/dev/null \
245 ubuntu@${ipAddress} \"echo HostKeyAlgorithms +ssh-rsa | sudo tee -a /etc/ssh/sshd_config\"
246 """
247 sh """ssh -T -i ${SSH_KEY} \
248 -o StrictHostKeyChecking=no \
249 -o UserKnownHostsFile=/dev/null \
250 ubuntu@${ipAddress} \"echo PubkeyAcceptedKeyTypes +ssh-rsa | sudo tee -a /etc/ssh/sshd_config\"
251 """
252 sh """ssh -T -i ${SSH_KEY} \
253 -o StrictHostKeyChecking=no \
254 -o UserKnownHostsFile=/dev/null \
garciadeblasa1a46492026-01-21 17:51:14 +0100255 ubuntu@${ipAddress} \"sudo systemctl restart ssh.service\"
mesaj8f49a952025-11-10 22:05:22 +0100256 """
257 //////////////////////////////////////////////////////////////////////////////////////////////
258 } // stage("Spawn Remote VM")
259///////////////////////////////////////////////////////////////////////////////////////
260// Checks before installation
261///////////////////////////////////////////////////////////////////////////////////////
262 stage('Checks before installation') {
263 if (!ipAddress?.trim()) {
264 error('Missing VM IP address, cannot run pre-installation checks')
265 }
266
267 sshCommand remote: remote, command: 'cloud-init status --wait'
268 sshCommand remote: remote, command: 'sudo apt-get -y update'
269 sshCommand remote: remote, command: 'sudo apt-get -y install chrony'
270 sshCommand remote: remote, command: 'sudo service chrony stop'
271 sshCommand remote: remote, command: 'sudo chronyd -vq'
272 sshCommand remote: remote, command: 'sudo service chrony start'
273 } // stage("Checks before installation")
274///////////////////////////////////////////////////////////////////////////////////////
275// Install
276///////////////////////////////////////////////////////////////////////////////////////
277 stage('Install') {
278 if (!ipAddress?.trim()) {
279 error('Missing VM IP address, cannot run installation steps')
280 }
281
282 sshCommand remote: remote, command: """
283 wget ${INSTALLER_URL}
284 chmod +x ./install_osm.sh
285 sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
286 """
287
288
289 Map gitlabCredentialsMap = [$class: 'UsernamePasswordMultiBinding',
290 credentialsId: 'gitlab-registry',
291 usernameVariable: 'USERNAME',
292 passwordVariable: 'PASSWORD']
293 withCredentials([gitlabCredentialsMap]) {
294 List<String> installArgs = buildInstallerArgs(USERNAME, PASSWORD)
295
296 String installCmd = "./install_osm.sh ${installArgs.join(' ')}"
297 sshCommand remote: remote, command: """
298 ${installCmd}
299 """
300 }
301 } // stage("Install")
302
303///////////////////////////////////////////////////////////////////////////////////////
304// Health check of installed OSM in remote VM
305///////////////////////////////////////////////////////////////////////////////////////
306 stage('OSM Health') {
307 if (!ipAddress?.trim()) {
308 error('Missing VM IP address, cannot run OSM health checks')
309 }
310 if (!remote) {
311 error('Missing remote target, cannot run OSM health checks')
312 }
313
314 timeout(time: 5, unit: 'MINUTES') {
315 String osmHostname = "nbi.${ipAddress}.nip.io"
316 sshCommand remote: remote, command: """
garciadeblasa1046ed2026-01-14 16:32:45 +0100317 OSM_HOSTNAME=${osmHostname} ~/.local/bin/osm vim-list
mesaj8f49a952025-11-10 22:05:22 +0100318 """
319 }
320 } // stage('OSM Health')
321
322///////////////////////////////////////////////////////////////////////////////////////
323// Get OSM Kubeconfig and store it for future usage (Robot/vCluster)
324///////////////////////////////////////////////////////////////////////////////////////
325 if (params.DO_ROBOT) {
326 stage('OSM Get kubeconfig') {
327 kubeTmpDir = pwd(tmp: true)
328 env.OSM_KUBECONFIG_PATH = "${kubeTmpDir}/osm_config"
329 env.VCLUSTER_KUBECONFIG_PATH = "${kubeTmpDir}/vcluster_config"
330
331 sshGet remote: remote,
332 from: "/home/ubuntu/.kube/config",
333 into: env.OSM_KUBECONFIG_PATH,
334 override: true
335 sh "chmod 600 ${env.OSM_KUBECONFIG_PATH}"
336 sh "test -s ${env.OSM_KUBECONFIG_PATH}"
337 // Debug: show the Kubernetes API endpoint used by the kubeconfig.
338 // (k3s defaults to 127.0.0.1:6443, which is not reachable from the Jenkins agent container)
339 sh "grep -nE '^\\s*server:' ${env.OSM_KUBECONFIG_PATH} || true"
340 } // stage('OSM Get kubeconfig')
341
342///////////////////////////////////////////////////////////////////////////////////////
343// Create vCluster for GitOps/Robot test execution
344///////////////////////////////////////////////////////////////////////////////////////
345 stage('Create vCluster') {
346 println("Creating vcluster ${VCLUSTER_NAME} in namespace ${VCLUSTER_NAMESPACE}")
347 dockerLoginInternalRegistry()
348 create_vcluster(INTERNAL_DOCKER_REGISTRY, testsImageTag, env.OSM_KUBECONFIG_PATH, env.VCLUSTER_KUBECONFIG_PATH, VCLUSTER_NAME, VCLUSTER_NAMESPACE)
349 sh "chmod 600 ${env.VCLUSTER_KUBECONFIG_PATH}"
350 sh "test -s ${env.VCLUSTER_KUBECONFIG_PATH}"
351 } // stage('Create vCluster')
352
353///////////////////////////////////////////////////////////////////////////////////////
354// Execute Robot tests
355///////////////////////////////////////////////////////////////////////////////////////
356 stage('System Integration Test') {
357 String prometheusHostname = "prometheus.${ipAddress}.nip.io"
358 Integer prometheusPort = PROMETHEUS_PORT_DEFAULT
359 String osmHostnameRobot = "nbi.${ipAddress}.nip.io:443"
360
361 register_etsi_vim_account(
362 INTERNAL_DOCKER_REGISTRY,
363 testsImageTag,
364 osmHostnameRobot,
365 params.ROBOT_VIM,
366 params.ROBOT_PORT_MAPPING_VIM,
367 params.KUBECONFIG,
368 params.CLOUDS,
369 params.PROMETHEUS_CONFIG_VIM
370 )
371 register_etsi_k8s_cluster(
372 INTERNAL_DOCKER_REGISTRY,
373 testsImageTag,
374 osmHostnameRobot,
375 params.ROBOT_VIM,
376 params.ROBOT_PORT_MAPPING_VIM,
377 params.KUBECONFIG,
378 params.CLOUDS,
379 params.PROMETHEUS_CONFIG_VIM
380 )
381
382 // IMPORTANT: tests expect the vcluster kubeconfig at this container path.
383 String robotVclusterKubeconfigPath = ROBOT_VCLUSTER_KUBECONFIG_CONTAINER_PATH
384 run_robot_systest(
385 INTERNAL_DOCKER_REGISTRY,
386 testsImageTag,
387 params.ROBOT_TAG_NAME,
388 osmHostnameRobot,
389 prometheusHostname,
390 prometheusPort,
391 params.OCI_REGISTRY_URL,
392 params.ROBOT_VIM,
393 params.ROBOT_PORT_MAPPING_VIM,
394 params.KUBECONFIG,
395 params.CLOUDS,
396 null,
397 SSH_KEY,
398 params.ROBOT_PASS_THRESHOLD,
399 params.ROBOT_UNSTABLE_THRESHOLD,
400 // extraEnvVars map of extra environment variables
401 ['CLUSTER_KUBECONFIG_CREDENTIALS': robotVclusterKubeconfigPath],
402 // extraVolMounts map of extra volume mounts
403 [(env.VCLUSTER_KUBECONFIG_PATH): robotVclusterKubeconfigPath]
404 )
405 } // stage('System Integration Test')
406 } else {
407 println('Skipping kubeconfig/vcluster steps because DO_ROBOT is set to false')
408 }
409 } else {
410 println('Skipping VM spawn because DO_INSTALL is set to false')
411 }
412 } finally {
413 stage('Archive Logs') {
414 if (params.DO_INSTALL && remote) {
415 try {
416 archiveLogs(remote)
417 } catch (Exception e) {
418 println("Archive logs failed: ${e.message}")
419 }
420 } else {
421 println('No remote target to collect logs from')
422 }
423 }
424
425 stage('Cleanup') {
426 // Always attempt to cleanup temp kubeconfig directory if created.
427 if (kubeTmpDir?.trim()) {
caviedesj6beda1d2026-02-11 16:53:20 +0100428 println("Cleaning up temporary Kubernetes directory at ${kubeTmpDir}")
429 sh(returnStatus: true, script: "rm -rf '${kubeTmpDir}'")
mesaj8f49a952025-11-10 22:05:22 +0100430 kubeTmpDir = null
431 }
432
433 if (!params.DO_INSTALL || serverId == null) {
434 println('No VM to cleanup')
435 return
436 }
437
438 String buildState = currentBuild.currentResult ?: 'SUCCESS'
439 boolean buildFailed = buildState == 'FAILURE'
440
441 boolean deleteVm = true
442 if (buildFailed && params.SAVE_CONTAINER_ON_FAIL) {
443 deleteVm = false
444 }
445 if (!buildFailed && params.SAVE_CONTAINER_ON_PASS) {
446 deleteVm = false
447 }
448
449 if (deleteVm) {
450 println("Deleting VM: ${serverId}")
451 try {
452 runHiveCommand("""
453 openstack server delete ${serverId}
454 """)
caviedesj6beda1d2026-02-11 16:53:20 +0100455 println("VM: ${serverId} deletion command issued successfully")
mesaj8f49a952025-11-10 22:05:22 +0100456 } catch (Exception e) {
457 // Avoid masking an earlier failure with cleanup failure.
458 println("VM delete failed: ${e.message}")
459 }
460 } else {
461 println("Preserving VM ${serverId} (build state: ${buildState})")
garciadeblas640e9182025-11-07 14:52:57 +0100462 }
463 }
464 }
465}
466
mesaj8f49a952025-11-10 22:05:22 +0100467////////////////////////////////////////////////////////////////////////////////////////
468// Helper Classes & Functions (ported from ci_stage_3.groovy)
469// Placed below the pipeline for readability.
470////////////////////////////////////////////////////////////////////////////////////////
471
472/** Usage:
473 * def dr = new DockerRunner(this)
474 * stdout = dr.run(
475 * image : "${INTERNAL_DOCKER_REGISTRY}opensourcemano/tests:${tag}",
 *         entry   : "osm",                                   // optional
mesaj8f49a952025-11-10 22:05:22 +0100477 * envVars : [ "OSM_HOSTNAME=${host}" ],
478 * envFile : myEnv,
479 * mounts : [
480 * "${clouds}:/etc/openstack/clouds.yaml",
481 * "${kubeconfig}:/root/.kube/config"
482 * ],
483 * cmd : "vim-create --name osm …"
484 * )
485 */
/**
 * Thin wrapper around `docker run --rm` executed through the pipeline's `sh` step.
 * Assembles the command line from named arguments (entry point, env vars, env file,
 * volume mounts, image, command), runs it, and always echoes the executed command.
 */
class DockerRunner implements Serializable {
    def steps
    DockerRunner(def steps) { this.steps = steps }

    /** Returns stdout (trimmed) if returnStdout is true; throws Exception on non-zero exit */
    String run(Map args = [:]) {
        // Pop returnStdout first so it never leaks into the option handling below.
        boolean wantStdout = args.remove('returnStdout') ?: false

        String envFileOpt     = args.envFile ?: ''
        String entrypointOpt  = args.entry ? "--entrypoint ${args.entry}" : ''
        String image          = args.image ?: ''
        String containerCmd   = args.cmd ?: ''

        // Blank/empty entries are dropped before rendering -v / --env flags.
        String volumeOpts = (args.mounts ?: [])
            .findAll { it?.trim() }
            .collect { "-v ${it}" }
            .join(' ')
        String envOpts = (args.envVars ?: [])
            .findAll { it?.trim() }
            .collect { "--env ${it}" }
            .join(' ')

        String dockerCmd = "docker run --rm ${entrypointOpt} ${envOpts} ${envFileOpt ? "--env-file ${envFileOpt}" : ''} ${volumeOpts} ${image} ${containerCmd}".trim()

        String captured = null
        try {
            if (wantStdout) {
                captured = steps.sh(returnStdout: true, script: dockerCmd).trim()
            } else {
                steps.sh(script: dockerCmd)
            }
        } catch (Exception err) {
            // Re-wrap so callers see a docker-specific failure message.
            throw new Exception("docker run failed -> ${err.message}")
        } finally {
            // Always log the exact command, pass or fail, for debuggability.
            steps.echo("Command executed: ${dockerCmd}")
        }
        return captured ?: ''
    }
}
522
523/* -------------------------------------------------------------------
524 * create_vcluster – spin up a vcluster in the target OSM cluster
 * @params:
 *   dockerRegistryUrl - Docker registry prefix where the tests image is pulled from
 *   tagName - The OSM test docker image tag to use
 *   kubeconfigPath - The path of the OSM kubernetes master configuration file
 *   vclusterKubeconfigOutPath - Output path for the vcluster kubeconfig
 *   vclusterName - Name of the vcluster
 *   vclusterNamespace - Namespace for the vcluster
531 * ------------------------------------------------------------------- */
// Creates a vcluster inside the freshly installed OSM Kubernetes cluster and exports
// its kubeconfig to the Jenkins agent. All kubectl/vcluster commands run inside the
// opensourcemano/tests image so the agent itself needs no local tooling.
// Throws if the vcluster does not reach 'Running' within the polling window.
void create_vcluster(String dockerRegistryUrl, String tagName, String kubeconfigPath, String vclusterKubeconfigOutPath, String vclusterName, String vclusterNamespace) {
    def dr = new DockerRunner(this)
    def mounts = ["${kubeconfigPath}:/root/.kube/config"]
    def envs = ["KUBECONFIG=/root/.kube/config"]
    def image = "${dockerRegistryUrl}opensourcemano/tests:${tagName}"

    // 1) create vcluster namespace (idempotent thanks to the trailing `|| true`)
    println("vcluster: ensuring namespace '${vclusterNamespace}' exists")
    dr.run(
        image: image,
        entry: 'kubectl',
        envVars: envs,
        mounts: mounts,
        cmd: "create namespace ${vclusterNamespace} || true"
    )

    // 2) create vcluster (no connect)
    // NOTE(review): /etc/vcluster.yaml is expected to be baked into the tests image
    // — confirm it exists there.
    println("vcluster: creating '${vclusterName}' (no connect)")
    dr.run(
        image: image,
        entry: 'vcluster',
        envVars: envs,
        mounts: mounts,
        cmd: "create ${vclusterName} -n ${vclusterNamespace} --connect=false -f /etc/vcluster.yaml"
    )

    // 3) poll until Status is Running (10 s between polls, 10 min deadline)
    int maxWaitMinutes = 10
    long deadline = System.currentTimeMillis() + (maxWaitMinutes * 60 * 1000)
    boolean running = false
    String lastOut = ''

    println("vcluster: waiting for '${vclusterName}' to reach status 'Running' (timeout: ${maxWaitMinutes} minutes)")
    while (System.currentTimeMillis() < deadline) {
        try {
            // jq extracts the Status field of this vcluster from the JSON listing.
            lastOut = dr.run(
                returnStdout: true,
                image: image,
                entry: '/bin/sh',
                envVars: envs,
                mounts: mounts,
                cmd: """-c 'vcluster list --output json | jq -r ".[] | select(.Name==\\"${vclusterName}\\") | .Status"'"""
            ).trim()
        } catch (Exception e) {
            // A transient polling failure should not abort the wait loop.
            println("Polling command failed: ${e.message}. Will retry.")
            lastOut = "Error: ${e.message}"
        }

        println("Polling for vcluster status. Current status: '${lastOut}'")
        if (lastOut == 'Running') {
            running = true
            break
        }
        sleep 10
    }

    if (!running) {
        println("vcluster status after timeout: ${lastOut}")
        throw new Exception("vcluster '${vclusterName}' did not reach 'Running' state within ${maxWaitMinutes} minutes.")
    }

    // 4) get vcluster kubeconfig
    String outPath = vclusterKubeconfigOutPath ?: "${WORKSPACE}/kubeconfig/vcluster_config"
    // Ensure destination directory exists on the Jenkins agent before relying on shell redirection.
    String outDir = outPath.contains('/') ? outPath.substring(0, outPath.lastIndexOf('/')) : '.'
    sh "mkdir -p ${outDir}"
    println("vcluster: exporting kubeconfig to '${outPath}'")
    // The `> ${outPath}` redirection is evaluated by the agent shell running
    // `docker run`, so the kubeconfig lands on the Jenkins agent, not in the container.
    dr.run(
        image: image,
        entry: 'vcluster',
        envVars: envs,
        mounts: mounts,
        cmd: "connect ${vclusterName} -n ${vclusterNamespace} --server ${vclusterName}.${vclusterNamespace}.svc.cluster.local:443 --print > ${outPath}"
    )
}
607
/**
 * Runs `action` until it returns a truthy value, retrying on falsy results or
 * thrown exceptions.
 *
 * @param maxAttempts  total number of attempts (fix: the previous version ran
 *                     maxAttempts + 1 times due to a `>= 0` loop condition)
 * @param delaySeconds delay between attempts; not slept after the final attempt
 *                     (the old code slept once more before throwing)
 * @param action       closure returning truthy on success
 * @throws Exception when every attempt fails
 */
void retryWithDocker(int maxAttempts, int delaySeconds, Closure action) {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
            if (action()) {
                return
            }
        } catch (Exception e) {
            println("Attempt ${attempt}/${maxAttempts} failed: ${e.message}")
        }
        // Only wait when another attempt is coming.
        if (attempt < maxAttempts) {
            println("Retrying... (${maxAttempts - attempt} attempts left)")
            sleep delaySeconds
        }
    }
    throw new Exception("Operation failed after ${maxAttempts} attempts")
}
622
/**
 * Registers the ETSI OpenStack VIM account in OSM and waits until it is ENABLED,
 * retrying (delete + re-create) up to three times via retryWithDocker.
 *
 * Fix: the image/env/mount plumbing for the osm-client container was copy-pasted
 * three times (create / status poll / delete); it is now built once and shared via
 * the local `osmCli` closure, removing the drift risk between the three call sites.
 *
 * @param dockerRegistryUrl     registry prefix for the opensourcemano/tests image
 * @param tagName               tests image tag
 * @param osmHostname           OSM NBI endpoint (host[:port]) for the osm client
 * @param envfile               optional --env-file for the container (empty file if null)
 * @param portmappingfile       optional SDN port-mapping file mounted into the container
 * @param kubeconfig            optional kubeconfig mounted at /root/.kube/config
 * @param clouds                optional clouds.yaml mounted at /etc/openstack/clouds.yaml
 * @param prometheusconfigfile  optional Prometheus config mounted into the container
 */
void register_etsi_vim_account(
    String dockerRegistryUrl,
    String tagName,
    String osmHostname,
    String envfile = null,
    String portmappingfile = null,
    String kubeconfig = null,
    String clouds = null,
    String prometheusconfigfile = null
) {
    String VIM_TARGET = "osm"
    String VIM_MGMT_NET = "osm-ext"
    String OS_PROJECT_NAME = "osm_jenkins"
    String OS_AUTH_URL = "http://172.21.247.1:5000/v3"
    String entrypointCmd = "osm"

    def tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = envfile ?: "${tempdir}/env"
    if (!envfile) {
        sh(script: "touch ${environmentFile}")
    }

    // Every osm-client invocation uses the same image and mount set; build once.
    String testsImage = "${dockerRegistryUrl}opensourcemano/tests:${tagName}"
    List<String> commonMounts = [
        clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
        kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
        portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
        prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
    ].findAll { it != null }

    retryWithDocker(3, 10) {
        def dr = new DockerRunner(this)
        // Runs one osm-client command inside the tests container and returns stdout.
        Closure<String> osmCli = { String cliArgs ->
            dr.run(
                image: testsImage,
                entry: entrypointCmd,
                envVars: ["OSM_HOSTNAME=${osmHostname}"],
                envFile: environmentFile,
                mounts: commonMounts,
                cmd: cliArgs,
                returnStdout: true
            )
        }
        try {
            println("Attempting to register VIM account")
            withCredentials([usernamePassword(credentialsId: 'openstack-jenkins-credentials',
                             passwordVariable: 'OS_PASSWORD', usernameVariable: 'OS_USERNAME')]) {
                // `|| true` keeps a duplicate-VIM error from failing the attempt.
                osmCli("""vim-create --name ${VIM_TARGET} --user ${OS_USERNAME} \
                    --password ${OS_PASSWORD} --tenant ${OS_PROJECT_NAME} \
                    --auth_url ${OS_AUTH_URL} --account_type openstack --description vim \
                    --prometheus_config_file /root/etsi-vim-prometheus.json \
                    --config '{management_network_name: ${VIM_MGMT_NET}, dataplane_physical_net: physnet2}' || true""")
            }

            // Check if the VIM is ENABLED (5 polls, 10 s apart).
            int statusChecks = 5
            while (statusChecks > 0) {
                sleep 10
                String vimList = osmCli("vim-list --long | grep ${VIM_TARGET}")
                if (vimList.contains("ENABLED")) {
                    println("VIM successfully registered and is ENABLED.")
                    return true
                }
                statusChecks--
            }
        } catch (Exception e) {
            println("VIM registration check failed: ${e.message}")
        }

        // If we get here, VIM is not enabled or creation failed. cleanup and retry.
        println("VIM not enabled, deleting and retrying...")
        osmCli("vim-delete --force ${VIM_TARGET}")
        return false
    }
}
719
/**
 * Registers the ETSI K8s cluster in OSM and waits until it is ENABLED, retrying
 * (delete + re-create) up to three times via retryWithDocker.
 *
 * Fix: the image/env/mount plumbing for the osm-client container was copy-pasted
 * three times (add / status poll / delete); it is now built once and shared via the
 * local `osmCli` closure, matching register_etsi_vim_account. The delete call keeps
 * the original behavior of streaming its output to the build log.
 *
 * @param dockerRegistryUrl     registry prefix for the opensourcemano/tests image
 * @param tagName               tests image tag
 * @param osmHostname           OSM NBI endpoint (host[:port]) for the osm client
 * @param envfile               optional --env-file for the container (empty file if null)
 * @param portmappingfile       optional SDN port-mapping file mounted into the container
 * @param kubeconfig            optional kubeconfig mounted at /root/.kube/config
 * @param clouds                optional clouds.yaml mounted at /etc/openstack/clouds.yaml
 * @param prometheusconfigfile  optional Prometheus config mounted into the container
 */
void register_etsi_k8s_cluster(
    String dockerRegistryUrl,
    String tagName,
    String osmHostname,
    String envfile = null,
    String portmappingfile = null,
    String kubeconfig = null,
    String clouds = null,
    String prometheusconfigfile = null
) {
    String K8S_CLUSTER_TARGET = "osm"
    String VIM_TARGET = "osm"
    String VIM_MGMT_NET = "osm-ext"
    String K8S_CREDENTIALS = "/root/.kube/config"
    String entrypointCmd = "osm"

    def tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = envfile ?: "${tempdir}/env"
    if (!envfile) {
        sh(script: "touch ${environmentFile}")
    }

    // Every osm-client invocation uses the same image and mount set; build once.
    String testsImage = "${dockerRegistryUrl}opensourcemano/tests:${tagName}"
    List<String> commonMounts = [
        clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
        kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
        portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
        prometheusconfigfile ? "${prometheusconfigfile}:/root/etsi-vim-prometheus.json" : null
    ].findAll { it != null }

    retryWithDocker(3, 10) {
        def dr = new DockerRunner(this)
        // Runs one osm-client command inside the tests container; captureOutput=false
        // streams the command output to the build log instead of returning it.
        Closure<String> osmCli = { String cliArgs, boolean captureOutput = true ->
            dr.run(
                image: testsImage,
                entry: entrypointCmd,
                envVars: ["OSM_HOSTNAME=${osmHostname}"],
                envFile: environmentFile,
                mounts: commonMounts,
                cmd: cliArgs,
                returnStdout: captureOutput
            )
        }
        try {
            println("Attempting to register K8s cluster")
            osmCli("""k8scluster-add ${K8S_CLUSTER_TARGET} --creds ${K8S_CREDENTIALS} --version \"v1\" \
                --description \"Robot-cluster\" --skip-jujubundle --vim ${VIM_TARGET} \
                --k8s-nets '{net1: ${VIM_MGMT_NET}}'""")

            // Check if the K8s cluster is ENABLED (10 polls, 10 s apart).
            int statusChecks = 10
            while (statusChecks > 0) {
                sleep 10
                String clusterList = osmCli("k8scluster-list | grep ${K8S_CLUSTER_TARGET}")
                if (clusterList.contains("ENABLED")) {
                    println("K8s cluster successfully registered and is ENABLED.")
                    return true
                }
                statusChecks--
            }
        } catch (Exception e) {
            println("K8s cluster registration check failed: ${e.message}")
        }

        // If we get here, cluster is not enabled or creation failed. cleanup and retry.
        println("K8s cluster not enabled, deleting and retrying...")
        osmCli("k8scluster-delete ${K8S_CLUSTER_TARGET}", false)
        return false
    }
}
809
/* -------------------------------------------------------------------
 * run_robot_systest – run the Robot Framework system tests inside the
 * opensourcemano/tests container and publish the results with RobotPublisher.
 *
 * @param dockerRegistryUrl   registry prefix for the tests image
 * @param tagName             tests image tag
 * @param testName            Robot tag passed as `-t` to the container entrypoint
 * @param osmHostname         OSM NBI endpoint for the tests
 * @param prometheusHostname  Prometheus endpoint hostname
 * @param prometheusPort      optional Prometheus port (env var only set when non-null)
 * @param ociRegistryUrl      optional OCI registry URL for test artifacts
 * @param envfile             optional --env-file; an empty temp file is used if null
 * @param portmappingfile     optional port-mapping file mounted into the container
 * @param kubeconfig          optional kubeconfig mounted at /root/.kube/config
 * @param clouds              optional clouds.yaml mounted into the container
 * @param hostfile            optional /etc/hosts override mounted into the container
 * @param osmRSAfile          optional SSH key mounted at /root/osm_id_rsa
 * @param passThreshold       RobotPublisher pass threshold (percentage string)
 * @param unstableThreshold   RobotPublisher unstable threshold (percentage string)
 * @param extraEnvVars        map of extra KEY=VALUE env vars for the container
 * @param extraVolMounts      map of extra hostPath -> containerPath volume mounts
 * ------------------------------------------------------------------- */
void run_robot_systest(
    String dockerRegistryUrl,
    String tagName,
    String testName,
    String osmHostname,
    String prometheusHostname,
    Integer prometheusPort = null,
    String ociRegistryUrl = null,
    String envfile = null,
    String portmappingfile = null,
    String kubeconfig = null,
    String clouds = null,
    String hostfile = null,
    String osmRSAfile = null,
    String passThreshold = '0.0',
    String unstableThreshold = '0.0',
    Map extraEnvVars = null,
    Map extraVolMounts = null
) {
    // Reports are written by the container into this temp dir via the
    // /robot-systest/reports mount below.
    def tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = envfile ?: "${tempdir}/env"
    if (!envfile) {
        sh(script: "touch ${environmentFile}")
    }

    def prometheusPortVar = prometheusPort != null ? "PROMETHEUS_PORT=${prometheusPort}" : null
    def hostfilemount = hostfile ? "${hostfile}:/etc/hosts" : null

    try {
        withCredentials([usernamePassword(credentialsId: 'gitlab-oci-test',
                         passwordVariable: 'OCI_REGISTRY_PSW', usernameVariable: 'OCI_REGISTRY_USR')]) {
            // Optional entries are null-filtered out of both lists.
            def baseEnvVars = [
                "OSM_HOSTNAME=${osmHostname}",
                "PROMETHEUS_HOSTNAME=${prometheusHostname}",
                prometheusPortVar,
                ociRegistryUrl ? "OCI_REGISTRY_URL=${ociRegistryUrl}" : null,
                "OCI_REGISTRY_USER=${OCI_REGISTRY_USR}",
                "OCI_REGISTRY_PASSWORD=${OCI_REGISTRY_PSW}"
            ].findAll { it != null }

            def baseMounts = [
                clouds ? "${clouds}:/etc/openstack/clouds.yaml" : null,
                osmRSAfile ? "${osmRSAfile}:/root/osm_id_rsa" : null,
                kubeconfig ? "${kubeconfig}:/root/.kube/config" : null,
                "${tempdir}:/robot-systest/reports",
                portmappingfile ? "${portmappingfile}:/root/port-mapping.yaml" : null,
                hostfilemount
            ].findAll { it != null }

            def extraEnvVarsList = extraEnvVars?.collect { key, value -> "${key}=${value}" } ?: []
            def extraVolMountsList = extraVolMounts?.collect { hostPath, containerPath -> "${hostPath}:${containerPath}" } ?: []

            def dr = new DockerRunner(this)
            dr.run(
                image: "${dockerRegistryUrl}opensourcemano/tests:${tagName}",
                envVars: baseEnvVars + extraEnvVarsList,
                envFile: "${environmentFile}",
                mounts: baseMounts + extraVolMountsList,
                cmd: "-t ${testName}"
            )
        }
    } finally {
        // Best-effort publish Robot results from tempdir into workspace
        sh("cp ${tempdir}/*.xml . 2>/dev/null || true")
        sh("cp ${tempdir}/*.html . 2>/dev/null || true")

        def outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
        sh("command -v tree >/dev/null 2>&1 && tree ${outputDirectory} || ls -la ${outputDirectory}")

        // Publish even when the test run threw, so partial results are visible;
        // publisher failures are logged but never override the build result here.
        try {
            step([
                $class: 'RobotPublisher',
                outputPath: "${outputDirectory}",
                outputFileName: '*.xml',
                disableArchiveOutput: false,
                reportFileName: 'report.html',
                logFileName: 'log.html',
                passThreshold: passThreshold,
                unstableThreshold: unstableThreshold,
                otherFiles: '*.png'
            ])
        } catch (Exception e) {
            println("RobotPublisher failed: ${e.message}")
        }
    }
}
896
// Extract a field from an OpenStack CLI ASCII table.
//
// Each table row looks like "| key | value |"; splitting on '|' yields
// ["", " key ", " value "]. Returns the trimmed value for the first row
// whose key column matches `key`, or null when no such row exists.
String get_value(String key, String output) {
    for (String line : output.split('\n')) {
        def data = line.split('\\|')
        // Require BOTH the key and the value columns to be present; rows with
        // an empty value cell (e.g. "| key |") previously raised
        // ArrayIndexOutOfBoundsException on data[2].
        if (data.length > 2 && data[1].trim() == key) {
            return data[2].trim()
        }
    }
    return null
}
906
// Log the agent's docker daemon in to the internal registry using the
// 'gitlab-registry' Jenkins credentials.
//
// The secret is referenced with shell-level variables (escaped \$) instead of
// Groovy interpolation: withCredentials exports the variables into the shell
// environment, so the password never becomes part of the generated script
// text (Jenkins warns that Groovy-interpolating credentials leaks them into
// process arguments and is injection-prone).
void dockerLoginInternalRegistry() {
    withCredentials([usernamePassword(credentialsId: 'gitlab-registry',
        passwordVariable: 'REGISTRY_PASSWORD', usernameVariable: 'REGISTRY_USERNAME')]) {
        sh """
        set -e
        echo "\${REGISTRY_PASSWORD}" | docker login ${INTERNAL_DOCKER_REGISTRY_HOST} -u "\${REGISTRY_USERNAME}" --password-stdin
        """
    }
}
916
// Build a POSIX sh script that first exports the hive environment
// (HIVE_ENV_EXPORT, defined at script level) and then runs the given
// command body with its common leading indentation stripped.
String withHiveEnv(String commandBody) {
    String script = """#!/bin/sh -e
${HIVE_ENV_EXPORT}
${commandBody.stripIndent()}
"""
    return script
}
923
// Execute a command body inside the hive environment and return its
// stdout with surrounding whitespace trimmed.
String runHiveCommand(String commandBody) {
    String script = withHiveEnv(commandBody)
    String output = sh(returnStdout: true, script: script)
    return output.trim()
}
927
// Poll `openstack server show` until the server exposes an address, for at
// most 5 minutes, and return the IP taken from the first "network=ip" pair.
// NOTE(review): with several networks/IPs everything after the first '=' is
// returned verbatim — confirm target servers only ever have one address.
String waitForServerIp(String id) {
    String resolvedIp = ''
    timeout(time: 5, unit: 'MINUTES') {
        waitUntil {
            String showOutput = runHiveCommand("""
            openstack server show ${id}
            """)
            String rawAddress = get_value('addresses', showOutput)
            if (!rawAddress) {
                // Not allocated yet — back off briefly before waitUntil retries.
                sleep 5
                return false
            }
            resolvedIp = rawAddress.split('=')[1]
            return true
        }
    }
    return resolvedIp
}
946
// Collect logs from the remote VM and archive them in Jenkins.
//
// On the remote host (reached via ssh-steps `sshCommand`) this gathers:
// Kubernetes events, host/system logs, workloads in the `osm` namespace
// (deployments, statefulsets, Airflow scheduler DAG logs), and pod logs from
// the `vcluster` and `flux-system` namespaces. The resulting `logs/` tree is
// then pulled to the workspace with `sshGet` and archived.
// Note: most remote commands end in `|| true` — collection is best-effort and
// a missing namespace/pod must not fail the build.
void archiveLogs(Map remoteTarget) {
    // Create the full directory layout up front; later commands assume it.
    sshCommand remote: remoteTarget, command: '''mkdir -p logs/dags logs/vcluster logs/flux-system logs/events logs/system'''

    // Collect Kubernetes events
    sshCommand remote: remoteTarget, command: '''
        echo "Extracting Kubernetes events"
        kubectl get events --all-namespaces --sort-by='.lastTimestamp' -o wide > logs/events/k8s-events.log 2>&1 || true
        kubectl get events -n osm --sort-by='.lastTimestamp' -o wide > logs/events/osm-events.log 2>&1 || true
        kubectl get events -n vcluster --sort-by='.lastTimestamp' -o wide > logs/events/vcluster-events.log 2>&1 || true
        kubectl get events -n flux-system --sort-by='.lastTimestamp' -o wide > logs/events/flux-system-events.log 2>&1 || true
    '''

    // Collect host logs and system info (journal or /var/log fallback,
    // service list for systemd and non-systemd inits, process snapshots).
    sshCommand remote: remoteTarget, command: '''
        echo "Collect system logs"
        if command -v journalctl >/dev/null; then
            journalctl > logs/system/system.log
        fi

        for entry in syslog messages; do
            [ -e "/var/log/${entry}" ] && cp -f /var/log/${entry} logs/system/"${entry}.log"
        done

        echo "Collect active services"
        case "$(cat /proc/1/comm)" in
            systemd)
                systemctl list-units > logs/system/services.txt 2>&1
                ;;
            *)
                service --status-all >> logs/system/services.txt 2>&1
                ;;
        esac

        top -b -n 1 > logs/system/top.txt 2>&1
        ps fauxwww > logs/system/ps.txt 2>&1
    '''

    // Collect OSM namespace workloads (operator resources are skipped by the
    // `grep -v operator` filter).
    sshCommand remote: remoteTarget, command: '''
        for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
            echo "Extracting log for $deployment"
            kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 > logs/$deployment.log || true
        done
    '''
    sshCommand remote: remoteTarget, command: '''
        for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
            echo "Extracting log for $statefulset"
            kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 > logs/$statefulset.log || true
        done
    '''
    // Copy Airflow DAG logs out of the scheduler pod's filesystem.
    sshCommand remote: remoteTarget, command: '''
        schedulerPod="$(kubectl get pods -n osm | grep osm-scheduler| awk '{print $1; exit}')"; \
        echo "Extracting logs from Airflow DAGs from pod ${schedulerPod}"; \
        kubectl -n osm cp ${schedulerPod}:/opt/airflow/logs/scheduler/latest/dags logs/dags -c scheduler 2>&1 || true
    '''

    // Collect vcluster and flux-system namespace logs
    sshCommand remote: remoteTarget, command: '''
        echo "Extracting logs from vcluster namespace"
        for pod in `kubectl get pods -n vcluster | grep -v NAME | awk '{print $1}'`; do
            echo "Extracting log for vcluster pod: $pod"
            kubectl logs -n vcluster $pod --timestamps=true --all-containers 2>&1 > logs/vcluster/$pod.log || true
        done
    '''
    sshCommand remote: remoteTarget, command: '''
        echo "Extracting logs from flux-system namespace"
        for pod in `kubectl get pods -n flux-system | grep -v NAME | awk '{print $1}'`; do
            echo "Extracting log for flux-system pod: $pod"
            kubectl logs -n flux-system $pod --timestamps=true --all-containers 2>&1 > logs/flux-system/$pod.log || true
        done
    '''

    // Remove any stale local copy BEFORE sshGet so the download is clean,
    // then fetch the remote tree and archive it.
    sh 'rm -rf logs'
    sshCommand remote: remoteTarget, command: '''ls -al logs logs/vcluster logs/events logs/flux-system logs/system'''
    sshGet remote: remoteTarget, from: 'logs', into: '.', override: true
    archiveArtifacts artifacts: 'logs/*.log, logs/dags/*.log, logs/vcluster/*.log, logs/events/*.log, logs/flux-system/*.log, logs/system/**'
}
1024}