Fixup path
[osm/devops.git] / jenkins / ci-pipelines / ci_stage_3.groovy
index 18cc6b3..6368381 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2017 Sandvine
+/* Copyright ETSI Contributors and Others
  *
  * All Rights Reserved.
  *
@@ -32,7 +32,7 @@ properties([
         string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
         string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
         string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
-        booleanParam(defaultValue: true, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
+        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
         booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
         booleanParam(defaultValue: true, description: '', name: 'DO_STAGE_4'),
@@ -51,26 +51,10 @@ properties([
     ])
 ])
 
-def uninstall_osm(stackName) {
-    sh """
-         export OSM_USE_LOCAL_DEVOPS=true
-         export PATH=$PATH:/snap/bin
-         installers/full_install_osm.sh -y -c swarm -w /tmp/osm -t ${stackName} -s ${stackName} --test --nolxd --nodocker --nojuju --nohostports --nohostclient --uninstall
-       """
-}
-
-def run_systest(stackName,tagName,testName,envfile=null) {
-    tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
-    if ( !envfile )
-    {
-        sh(script: "touch ${tempdir}/env")
-        envfile="${tempdir}/env"
-    }
-    sh "docker run --network net${stackName} --env-file ${envfile} -v ${tempdir}:/usr/share/osm-devops/systest/reports opensourcemano/osmclient:${tagName} make -C /usr/share/osm-devops/systest ${testName}"
-    sh "cp ${tempdir}/* ."
-    junit  '*.xml'
-}
 
+////////////////////////////////////////////////////////////////////////////////////////
+// Helper Functions
+////////////////////////////////////////////////////////////////////////////////////////
 def run_robot_systest(tagName,testName,osmHostname,prometheusHostname,prometheus_port=null,envfile=null,kubeconfig=null,clouds=null,hostfile=null,jujuPassword=null) {
     tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
     if ( !envfile )
@@ -120,20 +104,20 @@ def archive_logs(remote) {
             for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                 logfile=`echo $container | cut -d- -f1`
                 echo "Extracting log for $logfile"
-                kubectl logs -n osm $container 2>&1 > logs/$logfile.log
+                kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
             done
         '''
     } else {
         sshCommand remote: remote, command: '''
             for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                 echo "Extracting log for $deployment"
-                kubectl -n osm logs deployments/$deployment --all-containers 2>&1 > logs/$deployment.log
+                kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 > logs/$deployment.log
             done
         '''
         sshCommand remote: remote, command: '''
             for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                 echo "Extracting log for $statefulset"
-                kubectl -n osm logs statefulsets/$statefulset --all-containers 2>&1 > logs/$statefulset.log
+                kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 > logs/$statefulset.log
             done
         '''
     }
@@ -156,6 +140,9 @@ def get_value(key, output) {
     }
 }
 
+////////////////////////////////////////////////////////////////////////////////////////
+// Main Script
+////////////////////////////////////////////////////////////////////////////////////////
 node("${params.NODE}") {
 
     INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
@@ -187,130 +174,178 @@ node("${params.NODE}") {
     }
     container_name += "-${BUILD_NUMBER}"
 
-    // Copy the artifacts from the upstream jobs
-    stage("Copy Artifacts") {
-        // cleanup any previous repo
-        sh 'rm -rf repo'
-        dir("repo") {
-            // grab all stable upstream builds based on the
-
-            dir("${RELEASE}") {
-                def list = ["RO", "openvim", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "LW-UI", "NG-UI", "PLA", "tests"]
-                for (component in list) {
-                    step ([$class: 'CopyArtifact',
-                           projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
-
-                    // grab the build name/number
-                    build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER')
-
-                    // grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge)
-                    ci_helper.get_archive(params.ARTIFACTORY_SERVER,component,GERRIT_BRANCH, "${component}${upstream_main_job} :: ${GERRIT_BRANCH}", build_num)
-
-                    // cleanup any prevously defined dists
-                    sh "rm -rf dists"
-                }
-
-                // check if an upstream artifact based on specific build number has been requested
-                // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed
-                // a successful build yet). The upstream job is calling this downstream job (with the its build artifiact)
-                if ( params.UPSTREAM_JOB_NAME ) {
-                    step ([$class: 'CopyArtifact',
-                           projectName: "${params.UPSTREAM_JOB_NAME}",
-                           selector: [$class: 'SpecificBuildSelector', buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
-                          ])
-
-                    build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER')
-                    component = ci_helper.get_mdg_from_project(ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
-
-                    // the upstream job name contains suffix with the project. Need this stripped off
-                    def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
-
-                    // Remove the previous artifact for this component. Use the new upstream artifact
-                    sh "rm -rf pool/${component}"
-
-                    ci_helper.get_archive(params.ARTIFACTORY_SERVER,component,GERRIT_BRANCH, "${project_without_branch} :: ${GERRIT_BRANCH}", build_num)
+    server_id = null
+    http_server_name = null
+    devopstempdir = null
+    useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")
 
-                    sh "rm -rf dists"
-                }
+    try {
+        builtModules = [:]
+///////////////////////////////////////////////////////////////////////////////////////
+// Fetch stage 2 .deb artifacts
+///////////////////////////////////////////////////////////////////////////////////////
+        stage("Copy Artifacts") {
+            // cleanup any previous repo
+            sh 'rm -rf repo'
+            dir("repo") {
+                packageList = []
+                dir("${RELEASE}") {
+                    RELEASE_DIR = sh(returnStdout:true,  script: 'pwd').trim()
+
+                    // check if an upstream artifact based on specific build number has been requested
+                    // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed
+                        // a successful build yet). The upstream job is calling this downstream job (with its build artifact)
+                    def upstreamComponent=""
+                    if ( params.UPSTREAM_JOB_NAME ) {
+                        println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
+
+                        step ([$class: 'CopyArtifact',
+                               projectName: "${params.UPSTREAM_JOB_NAME}",
+                               selector: [$class: 'SpecificBuildSelector',
+                               buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
+                              ])
+
+                        upstreamComponent = ci_helper.get_mdg_from_project(
+                            ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
+                        def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
+                        dir("$upstreamComponent") {
+                            // the upstream job name contains a suffix with the project name; this needs to be stripped off
+                            def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
+                            def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
+                                upstreamComponent,
+                                GERRIT_BRANCH,
+                                "${project_without_branch} :: ${GERRIT_BRANCH}",
+                                buildNumber)
+
+                            packageList.addAll(packages)
+                            println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
+                        }
+                    }
 
-                // sign all the components
-                for (component in list) {
-                    sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
+                    parallelSteps = [:]
+                    def list = ["RO", "openvim", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "LW-UI", "NG-UI", "PLA", "tests"]
+                    if (upstreamComponent.length()>0) {
+                        println("Skipping upstream fetch of "+upstreamComponent)
+                        list.remove(upstreamComponent)
+                    }
+                    for (buildStep in list) {
+                        def component = buildStep
+                        parallelSteps[component] = {
+                            dir("$component") {
+                                println("Fetching artifact for ${component}")
+                                step ([$class: 'CopyArtifact',
+                                       projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
+
+                                // grab the archives from the stage_2 builds (i.e., these will be the artifacts stored based on a merge)
+                                def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
+                                    component,
+                                    GERRIT_BRANCH,
+                                    "${component}${upstream_main_job} :: ${GERRIT_BRANCH}",
+                                    ci_helper.get_env_value('build.env','BUILD_NUMBER'))
+                                packageList.addAll(packages)
+                                println("Fetched ${component}: ${packages}")
+                                sh "rm -rf dists"
+                            }
+                        }
+                    }
+                    parallel parallelSteps
+
+///////////////////////////////////////////////////////////////////////////////////////
+// Create Devops APT repository
+///////////////////////////////////////////////////////////////////////////////////////
+                    sh "mv devops/pool/ pool"
+                    sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/devops/*"
+                    sh "mkdir -p dists/${params.REPO_DISTRO}/devops/binary-amd64/"
+                    sh "apt-ftparchive packages pool/devops > dists/${params.REPO_DISTRO}/devops/binary-amd64/Packages"
+                    sh "gzip -9fk dists/${params.REPO_DISTRO}/devops/binary-amd64/Packages"
+
+                    // create and sign the release file
+                    sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
+                    sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"
+
+                    // copy the public key into the release folder
+                    // this pulls the key from the home dir of the current user (jenkins)
+                    sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
+                    sh "cp ~/${REPO_KEY_NAME} ."
                 }
 
-                // now create the distro
-                for (component in list) {
-                    sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
-                    sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
-                    sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
-                }
+                // start an apache server to serve up the packages
+                http_server_name = "${container_name}-apache"
 
-                // create and sign the release file
-                sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
-                sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"
-
-                // copy the public key into the release folder
-                // this pulls the key from the home dir of the current user (jenkins)
-                sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
-                sh "cp ~/${REPO_KEY_NAME} ."
-
-                // merge the change logs
-                sh """
-                   rm -f changelog/changelog-osm.html
-                   [ ! -d changelog ] || for mdgchange in \$(ls changelog); do cat changelog/\$mdgchange >> changelog/changelog-osm.html; done
-                   """
-                RELEASE_DIR = sh(returnStdout:true,  script: 'pwd').trim()
+                pwd = sh(returnStdout:true,  script: 'pwd').trim()
+                repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim()
+                repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port)
+                NODE_IP_ADDRESS=sh(returnStdout: true, script:
+                    "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
             }
-            // start an apache server to serve up the images
-            http_server_name = "${container_name}-apache"
-
-            pwd = sh(returnStdout:true,  script: 'pwd').trim()
-            repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim()
-            repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port)
-            NODE_IP_ADDRESS=sh(returnStdout: true, script:
-                "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
-        }
-
-        // now pull the devops package and install in temporary location
-        tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
-        osm_devops_dpkg = sh(returnStdout: true, script: "find . -name osm-devops*.deb").trim()
-        sh "dpkg -x ${osm_devops_dpkg} ${tempdir}"
-        OSM_DEVOPS="${tempdir}/usr/share/osm-devops"
-        println("Repo base URL=${repo_base_url}")
-    }
 
-    dir(OSM_DEVOPS) {
-        def remote = [:]
-        error = null
-
-        if ( params.DO_BUILD ) {
-            stage("Build") {
-                sh "make -C docker clean"
-                sh "make -C docker -j `nproc` Q= CMD_DOCKER_ARGS= TAG=${container_name} RELEASE=${params.RELEASE} REPOSITORY_BASE=${repo_base_url} REPOSITORY_KEY=${params.REPO_KEY_NAME} REPOSITORY=${params.REPO_DISTRO}"
+            // Unpack devops package into temporary location so that we use it from upstream if it was part of a patch
+            osm_devops_dpkg = sh(returnStdout: true, script: "find ./repo/release/pool/ -name osm-devops*.deb").trim()
+            devopstempdir = sh(returnStdout: true, script: "mktemp -d").trim()
+            println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
+            sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
+            OSM_DEVOPS="${devopstempdir}/usr/share/osm-devops"
+            // Convert URLs from stage 2 packages to arguments that can be passed to docker build
+            for (remotePath in packageList) {
+                packageName=remotePath.substring(remotePath.lastIndexOf('/')+1)
+                packageName=packageName.substring(0,packageName.indexOf('_'))
+                builtModules[packageName]=remotePath
             }
+        }
 
-            stage("Push to internal registry") {
+///////////////////////////////////////////////////////////////////////////////////////
+// Build docker containers
+///////////////////////////////////////////////////////////////////////////////////////
+        dir(OSM_DEVOPS) {
+            def remote = [:]
+            error = null
+            if ( params.DO_BUILD ) {
                 withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
                                 usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
                     sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
                 }
-                sh "make -C docker push INPUT_TAG=${container_name} TAG=${container_name} DOCKER_REGISTRY=${INTERNAL_DOCKER_REGISTRY}"
-            }
-
-        }
-
-        try {
-            useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")
+                moduleBuildArgs = ""
+                for (packageName in builtModules.keySet()) {
+                    envName=packageName.replaceAll("-","_").toUpperCase()+"_URL"
+                    moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
+                }
+                dir ("docker") {
+                    stage("Build") {
+                        containerList = sh(returnStdout: true, script:
+                            "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
+                        containerList=Arrays.asList(containerList.split("\n"))
+                        print(containerList)
+                        parallelSteps = [:]
+                        for (buildStep in containerList) {
+                            def module = buildStep
+                            def moduleName = buildStep.toLowerCase()
+                            def moduleTag = container_name
+                            parallelSteps[module] = {
+                                dir("$module") {
+                                    sh "docker build -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} ."
+                                    println("Tagging ${moduleName}:${moduleTag}")
+                                    sh "docker tag opensourcemano/${moduleName}:${moduleTag} ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
+                                    sh "docker push ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
+                                }
+                            }
+                        }
+                        parallel parallelSteps
+                    }
+                }
+            } // if ( params.DO_BUILD )
 
             if ( params.DO_INSTALL ) {
-
+///////////////////////////////////////////////////////////////////////////////////////
+// Launch VM
+///////////////////////////////////////////////////////////////////////////////////////
                 stage("Spawn Remote VM") {
                     println("Launching new VM")
                     output=sh(returnStdout: true, script: """#!/bin/sh -e
                         for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
-                        openstack server create --flavor m1.xlarge \
+                        openstack server create --flavor osm.sanity \
                                                 --image ubuntu18.04 \
                                                 --key-name CICD \
+                                                --property build_url="${BUILD_URL}" \
                                                 --nic net-id=osm-ext \
                                                 ${container_name}
                     """).trim()
@@ -343,8 +378,11 @@ node("${params.NODE}") {
                         alive = output.contains("succeeded")
                     }
                     println("VM is ready and accepting ssh connections")
-                }
+                } // stage("Spawn Remote VM")
 
+///////////////////////////////////////////////////////////////////////////////////////
+// Installation
+///////////////////////////////////////////////////////////////////////////////////////
                 stage("Install") {
                     commit_id = ''
                     repo_distro = ''
@@ -395,7 +433,6 @@ node("${params.NODE}") {
                     """
 
                     if ( useCharmedInstaller ) {
-
                         // Use local proxy for docker hub
                         sshCommand remote: remote, command: '''
                             sudo snap install microk8s --classic --channel=1.19/stable
@@ -437,26 +474,26 @@ node("${params.NODE}") {
                         prometheusPort = 9091
                         osmHostname = IP_ADDRESS
                     }
-                }
-            }
+                } // stage("Install")
+            } // if ( params.DO_INSTALL )
 
-            stage_archive = false
+///////////////////////////////////////////////////////////////////////////////////////
+// Health check of installed OSM in remote vm
+///////////////////////////////////////////////////////////////////////////////////////
             if ( params.DO_SMOKE ) {
                 stage("OSM Health") {
-                    if ( useCharmedInstaller ) {
-                        stackName = "osm"
-                    } else {
-                        stackName = container_name
-                    }
+                    stackName = "osm"
                     sshCommand remote: remote, command: """
                         /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
                     """
                 }
             }
 
+///////////////////////////////////////////////////////////////////////////////////////
+// Execute Robot tests
+///////////////////////////////////////////////////////////////////////////////////////
+            stage_archive = false
             if ( params.DO_STAGE_4 ) {
-                // override stage_archive to only archive on stable
-                stage_archive = false
                 try {
                     stage("System Integration Test") {
                         if ( params.DO_ROBOT ) {
@@ -488,9 +525,9 @@ EOF"""
                                 hostfile,
                                 jujuPassword)
                         }
-                    }
+                    } // stage("System Integration Test")
                 } finally {
-                    stage("Archive Contailer Logs") {
+                    stage("Archive Container Logs") {
                         // Archive logs to containers_logs.txt
                         archive_logs(remote)
                         if ( ! currentBuild.result.equals('UNSTABLE') && ! currentBuild.result.equals('FAILURE')) {
@@ -503,9 +540,8 @@ EOF"""
                         }
                     }
                 }
-            }
+            } // if ( params.DO_STAGE_4 )
 
-            // override to save the artifacts
             if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) {
                 stage("Archive") {
                     sh "echo ${container_name} > build_version.txt"
@@ -516,14 +552,27 @@ EOF"""
                         ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested')
                     }
                     if ( params.DO_DOCKERPUSH ) {
-                        stage("Docker Push") {
-                            sh "make -C docker push INPUT_TAG=${container_name} TAG=${params.DOCKER_TAG}"
+                        stage("Publish to Dockerhub") {
+                            parallelSteps = [:]
+                            for (buildStep in containerList) {
+                                def module = buildStep
+                                def moduleName = buildStep.toLowerCase()
+                                def moduleTag = params.DOCKER_TAG
+                                parallelSteps[module] = {
+                                    dir("$module") {
+                                        sh "docker tag opensourcemano/${moduleName}:${moduleTag} opensourcemano/${moduleName}:${moduleTag}"
+                                        sh "docker push opensourcemano/${moduleName}:${moduleTag}"
+                                    }
+                                }
+                            }
+                            parallel parallelSteps
                         }
 
                         stage("Snap promotion") {
                             def snaps = ["osmclient"]
+                            sh "snapcraft login --with ~/.snapcraft/config"
                             for (snap in snaps) {
-                                channel=""
+                                channel="latest/"
                                 if (BRANCH_NAME.startsWith("v")) {
                                     channel=BRANCH_NAME.substring(1)+"/"
                                 } else if (BRANCH_NAME!="master") {
@@ -531,63 +580,55 @@ EOF"""
                                 }
                                 track=channel+"edge\\*"
                                 edge_rev=sh(returnStdout: true,
-                                    script: "sudo docker run -v ~/.snapcraft:/snapcraft -v ${WORKSPACE}:/build " +
-                                    "-w /build snapcore/snapcraft:stable /bin/bash -c " +
-                                    "\"snapcraft login --with /snapcraft/config &>/dev/null && " +
-                                    "snapcraft revisions $snap\" | " +
-                                    "grep \" $track\" | tail -1 | awk '{print \$1}'").trim()
+                                    script: "snapcraft revisions $snap | " +
+                                    "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
+                                print "edge rev is $edge_rev"
                                 track=channel+"beta\\*"
                                 beta_rev=sh(returnStdout: true,
-                                    script: "sudo docker run -v ~/.snapcraft:/snapcraft -v ${WORKSPACE}:/build " +
-                                    "-w /build snapcore/snapcraft:stable /bin/bash -c " +
-                                    "\"snapcraft login --with /snapcraft/config &>/dev/null && " +
-                                    "snapcraft revisions $snap\" | " +
-                                    "grep \" $track\" | tail -1 | awk '{print \$1}'").trim()
+                                    script: "snapcraft revisions $snap | " +
+                                    "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
+                                print "beta rev is $beta_rev"
 
                                 if ( edge_rev != beta_rev ) {
                                     print "Promoting $edge_rev to beta in place of $beta_rev"
                                     beta_track=channel+"beta"
-                                    sh("sudo docker run -v ~/.snapcraft:/snapcraft -v ${WORKSPACE}:/build " +
-                                        "-w /build snapcore/snapcraft:stable /bin/bash -c " +
-                                        "\"snapcraft login --with /snapcraft/config &>/dev/null && " +
-                                        "snapcraft release $snap $edge_rev $beta_track\"")
+                                    sh "snapcraft release $snap $edge_rev $beta_track"
                                 }
                             }
-                        }
-                    }
-                }
+                        } // stage("Snap promotion")
+                    } // if ( params.DO_DOCKERPUSH )
+                } // stage("Archive")
+            } // if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive )
+        } // dir(OSM_DEVOPS)
+    } finally {
+        if ( params.DO_INSTALL && server_id != null) {
+            delete_vm = true
+            if (error && params.SAVE_CONTAINER_ON_FAIL ) {
+                delete_vm = false
+            }
+            if (!error && params.SAVE_CONTAINER_ON_PASS ) {
+                delete_vm = false
             }
-        }
-        catch(Exception ex) {
-            error = ex
-            currentBuild.result = 'FAILURE'
-            println("Caught error: "+ex)
-        }
-        finally {
-            println("Entered finally block")
-            if ( params.DO_INSTALL && server_id != null) {
-                delete_vm = true
-                if (error && params.SAVE_CONTAINER_ON_FAIL ) {
-                    delete_vm = false
-                }
-                if (!error && params.SAVE_CONTAINER_ON_PASS ) {
-                    delete_vm = false
-                }
 
-                if ( delete_vm ) {
-                    if (server_id != null) {
-                        println("Deleting VM: $server_id")
-                        sh """#!/bin/sh -e
-                            for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
-                            openstack server delete ${server_id}
-                        """
-                    } else {
-                        println("Saved VM $server_id in ETSI VIM")
-                    }
+            if ( delete_vm ) {
+                if (server_id != null) {
+                    println("Deleting VM: $server_id")
+                    sh """#!/bin/sh -e
+                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+                        openstack server delete ${server_id}
+                    """
+                } else {
+                    println("Saved VM $server_id in ETSI VIM")
                 }
             }
+        }
+        if ( http_server_name != null ) {
             sh "docker stop ${http_server_name} || true"
             sh "docker rm ${http_server_name} || true"
         }
+
+        if ( devopstempdir != null ) {
+            sh "rm -rf ${devopstempdir}"
+        }
     }
 }