1 /* Copyright ETSI Contributors and Others
5 * Licensed under the Apache License, Version 2.0 (the "License"); you may
6 * not use this file except in compliance with the License. You may obtain
7 * a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14 * License for the specific language governing permissions and limitations
// Gerrit / build-source parameters
string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
string(defaultValue: 'system', description: '', name: 'NODE'),
string(defaultValue: '', description: '', name: 'BUILD_FROM_SOURCE'),
string(defaultValue: 'unstable', description: '', name: 'REPO_DISTRO'),
string(defaultValue: '', description: '', name: 'COMMIT_ID'),
string(defaultValue: '-stage_2', description: '', name: 'UPSTREAM_SUFFIX'),
string(defaultValue: 'pubkey.asc', description: '', name: 'REPO_KEY_NAME'),
string(defaultValue: 'release', description: '', name: 'RELEASE'),
string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
// NOTE(review): UPSTREAM_JOB_NUMBER was declared twice here (copy-paste
// duplicate); the redundant second declaration has been removed.
string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
// Signing / artifact-repository configuration
string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
// Debug / artifact-retention toggles
booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
// Pipeline stage switches
booleanParam(defaultValue: true, description: '', name: 'DO_BUILD'),
booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
// Robot system-test configuration
booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options', name: 'ROBOT_TAG_NAME'),
string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml', description: 'Port mapping file for SDN assist in ETSI VIM', name: 'ROBOT_PORT_MAPPING_VIM'),
string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed', name: 'ROBOT_PASS_THRESHOLD'),
string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
57 ////////////////////////////////////////////////////////////////////////////////////////
59 ////////////////////////////////////////////////////////////////////////////////////////
// Runs the Robot Framework system tests from the opensourcemano/tests docker
// image against a deployed OSM instance, copies the generated reports back
// into the workspace, and publishes them via the Jenkins RobotPublisher step.
// NOTE(review): part of the parameter list and several statements of this
// function are not visible in this view of the file; only what is shown here
// is documented.
void run_robot_systest(String tagName,            // docker tag of the tests container image
                       String prometheusHostname, // Prometheus endpoint of the target OSM
                       Integer prometheusPort=null,   // optional Prometheus port (env var omitted when null)
                       String portmappingfile=null,   // SDN-assist port-mapping file, mounted into the container
                       String kubeconfig=null,        // kubeconfig mounted as /root/.kube/config
                       String jujuPassword=null,      // forwarded as JUJU_PASSWORD when set (charmed installs)
                       String osmRSAfile=null,        // ssh key mounted as /root/osm_id_rsa
                       String unstable_th='0.0') {    // RobotPublisher unstable threshold (percentage)
    // Scratch dir: holds the env file passed to the container and receives the reports
    tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    sh(script: "touch ${tempdir}/env")
    envfile="${tempdir}/env"
    // Only pass PROMETHEUS_PORT into the container when a port was supplied
    PROMETHEUS_PORT_VAR = ""
    if ( prometheusPort != null) {
        PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT="+prometheusPort
    // NOTE(review): 'hostfile' is assigned on lines not visible in this view
    hostfilemount="-v "+hostfile+":/etc/hosts"
    // Forward the Juju password (charmed installer) only when one was provided
    JUJU_PASSWORD_VAR = ""
    if ( jujuPassword != null) {
        JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD="+jujuPassword
    // Run the tests container: mounts clouds.yaml, the OSM ssh key, kubeconfig,
    // the report dir and the port-mapping file; '-t' selects the robot tag to run
    sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}"
    // Copy the reports into the workspace so RobotPublisher can pick them up
    sh "cp ${tempdir}/* ."
    outputDirectory = sh(returnStdout: true, script: "pwd").trim()
    println ("Present Directory is : ${outputDirectory}")
    // Publish robot results; the build is marked passed/unstable/failed
    // according to the pass_th / unstable_th percentage thresholds
        $class : 'RobotPublisher',
        outputPath : "${outputDirectory}",
        outputFileName : "*.xml",
        disableArchiveOutput : false,
        reportFileName : "report.html",
        logFileName : "log.html",
        passThreshold : pass_th,
        unstableThreshold: unstable_th,
        otherFiles : "*.png",
// Pulls component logs from the remote OSM VM over ssh into ./logs and
// archives them as Jenkins build artifacts. The collection strategy depends
// on the script-global 'useCharmedInstaller' flag set in the main node block.
// NOTE(review): several lines of this function (loop terminators, heredoc
// closers) are not visible in this view of the file.
def archive_logs(remote) {
    sshCommand remote: remote, command: '''mkdir -p logs'''
    if (useCharmedInstaller) {
        // Charmed install: one log file per pod, named by the pod-name prefix
        sshCommand remote: remote, command: '''
            for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                logfile=`echo $container | cut -d- -f1`
                echo "Extracting log for $logfile"
                kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
        // Kubernetes install: one log file per deployment (all containers)
        sshCommand remote: remote, command: '''
            for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $deployment"
                kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 > logs/$deployment.log
        // ...and one log file per statefulset (all containers)
        sshCommand remote: remote, command: '''
            for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $statefulset"
                kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 > logs/$statefulset.log
    // List what was collected, fetch it into the workspace, and archive it
    sshCommand remote: remote, command: '''ls -al logs'''
    sshGet remote: remote, from: 'logs', into: '.', override: true
    archiveArtifacts artifacts: '*.log'
// Parses the table output of an OpenStack CLI command (rows formatted as
// "| key | value |") and returns the value column for the given key.
//
// key:    row label to look up (matched against the trimmed first column)
// output: raw multi-line CLI output to scan
//
// Returns the trimmed value string, or null when the key is not found.
def get_value(key, output) {
    for (String line : output.split('\n')) {
        // Columns are pipe-separated; a row starting with '|' yields a
        // leading empty element, so the key sits at index 1, value at 2.
        def data = line.split('\\|')
        // Require at least 3 elements so data[2] exists: the original guard
        // (length > 1) could throw ArrayIndexOutOfBoundsException on a
        // malformed two-column row such as "| key |".
        if (data.length > 2 && data[1].trim() == key) {
            return data[2].trim()
        }
    }
    // Explicit miss result instead of falling off the end of the function
    return null
}
157 ////////////////////////////////////////////////////////////////////////////////////////
159 ////////////////////////////////////////////////////////////////////////////////////////
160 node("${params.NODE}") {
162 INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
163 INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
164 SSH_KEY = '~/hive/cicd_rsa'
167 tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./,"")
173 ci_helper = load "jenkins/ci-pipelines/ci_helper.groovy"
175 def upstream_main_job = params.UPSTREAM_SUFFIX
177 // upstream jobs always use merged artifacts
178 upstream_main_job += '-merge'
179 container_name_prefix = "osm-${tag_or_branch}"
180 container_name = "${container_name_prefix}"
182 keep_artifacts = false
183 if ( JOB_NAME.contains('merge') ) {
184 container_name += "-merge"
186 // On a merge job, we keep artifacts on smoke success
187 keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
189 container_name += "-${BUILD_NUMBER}"
192 http_server_name = null
194 useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")
198 ///////////////////////////////////////////////////////////////////////////////////////
199 // Fetch stage 2 .deb artifacts
200 ///////////////////////////////////////////////////////////////////////////////////////
201 stage("Copy Artifacts") {
202 // cleanup any previous repo
207 RELEASE_DIR = sh(returnStdout:true, script: 'pwd').trim()
209 // check if an upstream artifact based on specific build number has been requested
210 // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed
211 // a successful build yet). The upstream job is calling this downstream job (with the its build artifiact)
212 def upstreamComponent=""
213 if ( params.UPSTREAM_JOB_NAME ) {
214 println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
215 lock('Artifactory') {
216 step ([$class: 'CopyArtifact',
217 projectName: "${params.UPSTREAM_JOB_NAME}",
218 selector: [$class: 'SpecificBuildSelector',
219 buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
222 upstreamComponent = ci_helper.get_mdg_from_project(
223 ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
224 def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
225 dir("$upstreamComponent") {
226 // the upstream job name contains suffix with the project. Need this stripped off
227 def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
228 def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
231 "${project_without_branch} :: ${GERRIT_BRANCH}",
234 packageList.addAll(packages)
235 println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
237 } // lock artifactory
241 def list = ["RO", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "NG-UI", "PLA", "tests"]
242 if (upstreamComponent.length()>0) {
243 println("Skipping upstream fetch of "+upstreamComponent)
244 list.remove(upstreamComponent)
246 for (buildStep in list) {
247 def component = buildStep
248 parallelSteps[component] = {
250 println("Fetching artifact for ${component}")
251 step ([$class: 'CopyArtifact',
252 projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
254 // grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge)
255 def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
258 "${component}${upstream_main_job} :: ${GERRIT_BRANCH}",
259 ci_helper.get_env_value('build.env','BUILD_NUMBER'))
260 packageList.addAll(packages)
261 println("Fetched ${component}: ${packages}")
266 lock('Artifactory') {
267 parallel parallelSteps
270 ///////////////////////////////////////////////////////////////////////////////////////
271 // Create Devops APT repository
272 ///////////////////////////////////////////////////////////////////////////////////////
274 for (component in [ "devops", "IM", "osmclient" ]) {
275 sh "ls -al ${component}/pool/"
276 sh "cp -r ${component}/pool/* pool/"
277 sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
278 sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
279 sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
280 sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
283 // create and sign the release file
284 sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
285 sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"
287 // copy the public key into the release folder
288 // this pulls the key from the home dir of the current user (jenkins)
289 sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
290 sh "cp ~/${REPO_KEY_NAME} ."
293 // start an apache server to serve up the packages
294 http_server_name = "${container_name}-apache"
296 pwd = sh(returnStdout:true, script: 'pwd').trim()
297 repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim()
298 repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port)
299 NODE_IP_ADDRESS=sh(returnStdout: true, script:
300 "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
303 // Unpack devops package into temporary location so that we use it from upstream if it was part of a patch
304 osm_devops_dpkg = sh(returnStdout: true, script: "find ./repo/release/pool/ -name osm-devops*.deb").trim()
305 devopstempdir = sh(returnStdout: true, script: "mktemp -d").trim()
306 println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
307 sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
308 OSM_DEVOPS="${devopstempdir}/usr/share/osm-devops"
309 // Convert URLs from stage 2 packages to arguments that can be passed to docker build
310 for (remotePath in packageList) {
311 packageName=remotePath.substring(remotePath.lastIndexOf('/')+1)
312 packageName=packageName.substring(0,packageName.indexOf('_'))
313 builtModules[packageName]=remotePath
317 ///////////////////////////////////////////////////////////////////////////////////////
318 // Build docker containers
319 ///////////////////////////////////////////////////////////////////////////////////////
323 if ( params.DO_BUILD ) {
324 withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
325 usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
326 sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
329 for (packageName in builtModules.keySet()) {
330 envName=packageName.replaceAll("-","_").toUpperCase()+"_URL"
331 moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
335 containerList = sh(returnStdout: true, script:
336 "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
337 containerList=Arrays.asList(containerList.split("\n"))
340 for (buildStep in containerList) {
341 def module = buildStep
342 def moduleName = buildStep.toLowerCase()
343 def moduleTag = container_name
344 parallelSteps[module] = {
346 sh "docker build -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} ."
347 println("Tagging ${moduleName}:${moduleTag}")
348 sh "docker tag opensourcemano/${moduleName}:${moduleTag} ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
349 sh "docker push ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
353 parallel parallelSteps
356 } // if ( params.DO_BUILD )
358 if ( params.DO_INSTALL ) {
359 ///////////////////////////////////////////////////////////////////////////////////////
361 ///////////////////////////////////////////////////////////////////////////////////////
362 stage("Spawn Remote VM") {
363 println("Launching new VM")
364 output=sh(returnStdout: true, script: """#!/bin/sh -e
365 for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
366 openstack server create --flavor osm.sanity \
367 --image ${OPENSTACK_BASE_IMAGE} \
369 --property build_url="${BUILD_URL}" \
370 --nic net-id=osm-ext \
374 server_id = get_value('id', output)
376 if (server_id == null) {
377 println("VM launch output: ")
379 throw new Exception("VM Launch failed")
381 println("Target VM is ${server_id}, waiting for IP address to be assigned")
385 while (IP_ADDRESS == "") {
386 output=sh(returnStdout: true, script: """#!/bin/sh -e
387 for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
388 openstack server show ${server_id}
390 IP_ADDRESS = get_value('addresses', output)
392 IP_ADDRESS = IP_ADDRESS.split('=')[1]
393 println("Waiting for VM at ${IP_ADDRESS} to be reachable")
397 output=sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim()
398 println("output is [$output]")
399 alive = output.contains("succeeded")
401 println("VM is ready and accepting ssh connections")
402 } // stage("Spawn Remote VM")
404 ///////////////////////////////////////////////////////////////////////////////////////
406 ///////////////////////////////////////////////////////////////////////////////////////
413 if ( params.COMMIT_ID )
415 commit_id = "-b ${params.COMMIT_ID}"
418 if ( params.REPO_DISTRO )
420 repo_distro = "-r ${params.REPO_DISTRO}"
423 if ( params.REPO_KEY_NAME )
425 repo_key_name = "-k ${params.REPO_KEY_NAME}"
428 if ( params.RELEASE )
430 release = "-R ${params.RELEASE}"
433 if ( params.REPOSITORY_BASE )
435 repo_base_url = "-u ${params.REPOSITORY_BASE}"
439 repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
442 remote.name = container_name
443 remote.host = IP_ADDRESS
444 remote.user = 'ubuntu'
445 remote.identityFile = SSH_KEY
446 remote.allowAnyHosts = true
447 remote.logLevel = 'INFO'
450 // Force time sync to avoid clock drift and invalid certificates
451 sshCommand remote: remote, command: """
453 sudo apt install -y chrony
454 sudo service chrony stop
456 sudo service chrony start
459 sshCommand remote: remote, command: """
460 wget https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh
461 chmod +x ./install_osm.sh
462 sed -i '1 i\\export PATH=/snap/bin:\${PATH}' ~/.bashrc
465 if ( useCharmedInstaller ) {
466 // Use local proxy for docker hub
467 sshCommand remote: remote, command: '''
468 sudo snap install microk8s --classic --channel=1.19/stable
469 sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" /var/snap/microk8s/current/args/containerd-template.toml
470 sudo systemctl restart snap.microk8s.daemon-containerd.service
471 sudo snap alias microk8s.kubectl kubectl
474 withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
475 usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
476 sshCommand remote: remote, command: """
477 ./install_osm.sh -y \
480 ${release} -r unstable \
482 --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
483 --tag ${container_name}
486 prometheusHostname = "prometheus."+IP_ADDRESS+".nip.io"
488 osmHostname = "nbi."+IP_ADDRESS+".nip.io:443"
490 // Run -k8s installer here specifying internal docker registry and docker proxy
491 withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
492 usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
493 sshCommand remote: remote, command: """
494 ./install_osm.sh -y \
497 ${release} -r unstable \
498 -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
499 -p ${INTERNAL_DOCKER_PROXY} \
500 -t ${container_name} \
504 prometheusHostname = IP_ADDRESS
505 prometheusPort = 9091
506 osmHostname = IP_ADDRESS
508 } // stage("Install")
509 ///////////////////////////////////////////////////////////////////////////////////////
510 // Health check of installed OSM in remote vm
511 ///////////////////////////////////////////////////////////////////////////////////////
512 stage("OSM Health") {
514 sshCommand remote: remote, command: """
515 /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
517 } // stage("OSM Health")
518 } // if ( params.DO_INSTALL )
521 ///////////////////////////////////////////////////////////////////////////////////////
522 // Execute Robot tests
523 ///////////////////////////////////////////////////////////////////////////////////////
524 stage_archive = false
525 if ( params.DO_ROBOT ) {
527 stage("System Integration Test") {
528 if ( useCharmedInstaller ) {
529 tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
530 sh(script: "touch ${tempdir}/hosts")
531 hostfile="${tempdir}/hosts"
532 sh """cat << EOF > ${hostfile}
534 ${remote.host} prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
540 jujuPassword=sshCommand remote: remote, command: """
541 echo `juju gui 2>&1 | grep password | cut -d: -f2`
546 params.ROBOT_TAG_NAME,
551 params.ROBOT_PORT_MAPPING_VIM,
557 params.ROBOT_PASS_THRESHOLD,
558 params.ROBOT_UNSTABLE_THRESHOLD
560 } // stage("System Integration Test")
562 stage("Archive Container Logs") {
563 // Archive logs to containers_logs.txt
565 if ( ! currentBuild.result.equals('FAILURE') ) {
566 stage_archive = keep_artifacts
568 println ("Systest test failed, throwing error")
569 error = new Exception("Systest test failed")
570 currentBuild.result = 'FAILURE'
575 } // if ( params.DO_ROBOT )
577 if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) {
579 sh "echo ${container_name} > build_version.txt"
580 archiveArtifacts artifacts: "build_version.txt", fingerprint: true
582 // Archive the tested repo
583 dir("${RELEASE_DIR}") {
584 ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested')
586 if ( params.DO_DOCKERPUSH ) {
587 stage("Publish to Dockerhub") {
589 for (buildStep in containerList) {
590 def module = buildStep
591 def moduleName = buildStep.toLowerCase()
592 def dockerTag = params.DOCKER_TAG
593 def moduleTag = container_name
595 parallelSteps[module] = {
597 sh "docker tag opensourcemano/${moduleName}:${moduleTag} opensourcemano/${moduleName}:${dockerTag}"
598 sh "docker push opensourcemano/${moduleName}:${dockerTag}"
602 parallel parallelSteps
605 stage("Snap promotion") {
606 def snaps = ["osmclient"]
607 sh "snapcraft login --with ~/.snapcraft/config"
608 for (snap in snaps) {
610 if (BRANCH_NAME.startsWith("v")) {
611 channel=BRANCH_NAME.substring(1)+"/"
612 } else if (BRANCH_NAME!="master") {
613 channel+="/"+BRANCH_NAME.replaceAll('/','-')
615 track=channel+"edge\\*"
616 edge_rev=sh(returnStdout: true,
617 script: "snapcraft revisions $snap | " +
618 "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
619 print "edge rev is $edge_rev"
620 track=channel+"beta\\*"
621 beta_rev=sh(returnStdout: true,
622 script: "snapcraft revisions $snap | " +
623 "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
624 print "beta rev is $beta_rev"
626 if ( edge_rev != beta_rev ) {
627 print "Promoting $edge_rev to beta in place of $beta_rev"
628 beta_track=channel+"beta"
629 sh "snapcraft release $snap $edge_rev $beta_track"
632 } // stage("Snap promotion")
633 } // if ( params.DO_DOCKERPUSH )
634 } // stage("Archive")
635 } // if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive )
638 if ( params.DO_INSTALL && server_id != null) {
640 if (error && params.SAVE_CONTAINER_ON_FAIL ) {
643 if (!error && params.SAVE_CONTAINER_ON_PASS ) {
648 if (server_id != null) {
649 println("Deleting VM: $server_id")
651 for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
652 openstack server delete ${server_id}
655 println("Saved VM $server_id in ETSI VIM")
659 if ( http_server_name != null ) {
660 sh "docker stop ${http_server_name} || true"
661 sh "docker rm ${http_server_name} || true"
664 if ( devopstempdir != null ) {
665 sh "rm -rf ${devopstempdir}"