Bug 1561 correction
[osm/devops.git] / jenkins / ci-pipelines / ci_stage_3.groovy
index 0dfea86..e8e6d25 100644
@@ -1,7 +1,7 @@
-/* Copyright 2017 Sandvine
+/* Copyright ETSI Contributors and Others
  *
  * All Rights Reserved.
- * 
+ *
  *   Licensed under the Apache License, Version 2.0 (the "License"); you may
  *   not use this file except in compliance with the License. You may obtain
  *   a copy of the License at
  *   License for the specific language governing permissions and limitations
  *   under the License.
  */
-/* Change log:
- * 1. Bug 745 : Jayant Madavi, Mrityunjay Yadav : JM00553988@techmahindra.com : 23-july-2019 : Improvement to the code, typically we have 2 *    or more branches whose build gets triggered, ex master & release branch, the previous code was removing any/all docker. 
- *       Now removing previous docker of the same branch, so that the other branch failed docker should not be removed. It also 
- *    acts as clean-up for previous docker remove failure.
- * 2. Feature 7829 : Mrityunjay Yadav, Jayant Madavi: MY00514913@techmahindra.com : 19-Aug-2019 : Added a parameters & function to invoke Robot test.
- */
 
 properties([
     parameters([
@@ -34,338 +27,661 @@ properties([
         string(defaultValue: 'release', description: '', name: 'RELEASE'),
         string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
         string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
-        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
         string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
         string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
         string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
-        string(defaultValue: 'releaseseven-daily', description: '', name: 'DOCKER_TAG'),
-        booleanParam(defaultValue: true, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
+        string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
+        string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
+        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
         booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
-        booleanParam(defaultValue: true, description: '', name: 'DO_STAGE_4'),
         booleanParam(defaultValue: true, description: '',  name: 'DO_BUILD'),
         booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
-        booleanParam(defaultValue: true, description: '', name: 'DO_SMOKE'),
         booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
         booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
         string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
-        booleanParam(defaultValue: false, description: '', name: 'DO_ROBOT'),
-        string(defaultValue: 'sanity', description: 'smoke/vim/sanity/comprehensive are the options', name: 'TEST_NAME'),
+        booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
+        string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options',
+               name: 'ROBOT_TAG_NAME'),
         string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
+        string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml',
+               description: 'Port mapping file for SDN assist in ETSI VIM',
+               name: 'ROBOT_PORT_MAPPING_VIM'),
+        string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
+        string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
+        string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
+        string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed',
+               name: 'ROBOT_PASS_THRESHOLD'),
+        string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable ' +
+               '(if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
     ])
 ])
 
-def uninstall_osm(stackName) {
-    sh """
-         export OSM_USE_LOCAL_DEVOPS=true
-         export PATH=$PATH:/snap/bin
-         installers/full_install_osm.sh -y -w /tmp/osm -t ${stackName} -s ${stackName} --test --nolxd --nodocker --nojuju --nohostports --nohostclient --uninstall
-       """
+////////////////////////////////////////////////////////////////////////////////////////
+// Helper Functions
+////////////////////////////////////////////////////////////////////////////////////////
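+// run_robot_systest: runs the Robot Framework system tests from the
+// opensourcemano/tests container against a deployed OSM and publishes the
+// results with RobotPublisher. Only the OSM and Prometheus endpoints are
+// required; each optional file argument is bind-mounted into the container
+// only when provided. Illustrative call (values are examples only):
+//   run_robot_systest(containerName, 'sanity', '172.21.248.10', '172.21.248.10', 9091)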
+void run_robot_systest(String tagName,
+                       String testName,
+                       String osmHostname,
+                       String prometheusHostname,
+                       Integer prometheusPort=null,
+                       String envfile=null,
+                       String portmappingfile=null,
+                       String kubeconfig=null,
+                       String clouds=null,
+                       String hostfile=null,
+                       String jujuPassword=null,
+                       String osmRSAfile=null,
+                       String passThreshold='0.0',
+                       String unstableThreshold='0.0') {
+    tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
+    String environmentFile = ''
+    if (envfile) {
+        environmentFile = envfile
+    } else {
+        sh(script: "touch ${tempdir}/env")
+        environmentFile = "${tempdir}/env"
+    }
+    PROMETHEUS_PORT_VAR = ''
+    if (prometheusPort != null) {
+        PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT=${prometheusPort}"
+    }
+    hostfilemount = ''
+    if (hostfile) {
+        hostfilemount = "-v ${hostfile}:/etc/hosts"
+    }
+
+    JUJU_PASSWORD_VAR = ''
+    if (jujuPassword != null) {
+        JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD=${jujuPassword}"
+    }
+
+    try {
+        sh("""docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} \
+           ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${environmentFile} \
+           -v ${clouds}:/etc/openstack/clouds.yaml \
+           -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports \
+           -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} \
+           -c -t ${testName}""")
+    } finally {
+        sh("cp ${tempdir}/* .")
+        outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
+        println("Present Directory is : ${outputDirectory}")
+        step([
+            $class : 'RobotPublisher',
+            outputPath : "${outputDirectory}",
+            outputFileName : '*.xml',
+            disableArchiveOutput : false,
+            reportFileName : 'report.html',
+            logFileName : 'log.html',
+            passThreshold : passThreshold,
+            unstableThreshold: unstableThreshold,
+            otherFiles : '*.png',
+        ])
+    }
 }
 
-def run_systest(stackName,tagName,testName,envfile=null) {
-    tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
-    if ( !envfile )
-    {
-        sh(script: "touch ${tempdir}/env")
-        envfile="${tempdir}/env"
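+// archive_logs: collects the OSM container logs from the remote VM over ssh
+// (per pod for the charmed installer, per deployment/statefulset otherwise)
+// and archives every fetched *.log file as a build artifact.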
+void archive_logs(Map remote) {
+    sshCommand remote: remote, command: '''mkdir -p logs'''
+    if (useCharmedInstaller) {
+        sshCommand remote: remote, command: '''
+            for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME | awk '{print $1}'`; do
+                logfile=`echo $container | cut -d- -f1`
+                echo "Extracting log for $logfile"
+                kubectl logs -n osm $container --timestamps=true > logs/$logfile.log 2>&1
+            done
+        '''
+    } else {
+        sshCommand remote: remote, command: '''
+            for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME | awk '{print $1}'`; do
+                echo "Extracting log for $deployment"
+                kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers \
+                > logs/$deployment.log 2>&1
+            done
+        '''
+        sshCommand remote: remote, command: '''
+            for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME | awk '{print $1}'`; do
+                echo "Extracting log for $statefulset"
+                kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers \
+                > logs/$statefulset.log 2>&1
+            done
+        '''
     }
-    sh "docker run --network net${stackName} --env-file ${envfile} -v ${tempdir}:/usr/share/osm-devops/systest/reports opensourcemano/osmclient:${tagName} make -C /usr/share/osm-devops/systest ${testName}"
-    sh "cp ${tempdir}/* ."
-    junit  '*.xml'
+
+    sh 'rm -rf logs'
+    sshCommand remote: remote, command: '''ls -al logs'''
+    sshGet remote: remote, from: 'logs', into: '.', override: true
+    sh 'cp logs/* .'
+    archiveArtifacts artifacts: '*.log'
 }
 
-def run_robot_systest(stackName,tagName,testName,envfile=null) {
-    tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
-    if ( !envfile )
-    {
-        sh(script: "touch ${tempdir}/env")
-        envfile="${tempdir}/env"
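+// get_value: parses the '| key | value |' table printed by the openstack CLI
+// and returns the value for the given key (null when the key is absent).
+// Illustrative example: for a row '| id | 1a2b3c4d |',
+// get_value('id', output) returns '1a2b3c4d'.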
+String get_value(String key, String output) {
+    for (String line : output.split('\n')) {
+        data = line.split('\\|')
+        if (data.length > 1) {
+            if (data[1].trim() == key) {
+                return data[2].trim()
+            }
+        }
     }
-    sh "docker run --network net${stackName} --env-file ${envfile} -v ${tempdir}:/usr/share/osm-devops/robot-systest/reports opensourcemano/osmclient:${tagName} bash -C /usr/share/osm-devops/robot-systest/run_test.sh --do_install -t ${testName}"
-    sh "cp ${tempdir}/* ."
-    outputDirectory = sh(returnStdout: true, script: "pwd").trim()
-    println ("Present Directory is : ${outputDirectory}")
-    step([
-        $class : 'RobotPublisher',
-        outputPath : "${outputDirectory}",
-        outputFileName : "*.xml",
-        disableArchiveOutput : false,
-        reportFileName : "report.html",
-        logFileName : "log.html",
-        passThreshold : 0,
-        unstableThreshold: 0,
-        otherFiles : "*.png",
-    ])
 }
 
+////////////////////////////////////////////////////////////////////////////////////////
+// Main Script
+////////////////////////////////////////////////////////////////////////////////////////
 node("${params.NODE}") {
 
+    INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
+    INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
+    APT_PROXY = 'http://172.21.1.1:3142'
+    SSH_KEY = '~/hive/cicd_rsa'
     sh 'env'
 
-    tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./,"")
+    tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')
 
-    stage("Checkout") {
+    stage('Checkout') {
         checkout scm
     }
 
-    ci_helper = load "jenkins/ci-pipelines/ci_helper.groovy"
+    ci_helper = load 'jenkins/ci-pipelines/ci_helper.groovy'
 
-    def upstream_main_job = params.UPSTREAM_SUFFIX
+    def upstreamMainJob = params.UPSTREAM_SUFFIX
 
     // upstream jobs always use merged artifacts
-    upstream_main_job += '-merge'
-    container_name_prefix = "osm-${tag_or_branch}"
-    container_name = "${container_name_prefix}"
+    upstreamMainJob += '-merge'
+    containerNamePrefix = "osm-${tag_or_branch}"
+    containerName = "${containerNamePrefix}"
 
     keep_artifacts = false
     if ( JOB_NAME.contains('merge') ) {
-        container_name += "-merge"
+        containerName += '-merge'
 
         // On a merge job, we keep artifacts on smoke success
         keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
     }
-    container_name += "-${BUILD_NUMBER}"
-
-    // Copy the artifacts from the upstream jobs
-    stage("Copy Artifacts") {
-        // cleanup any previous repo
-        sh 'rm -rf repo'
-        dir("repo") {
-            // grab all stable upstream builds based on the
-
-            dir("${RELEASE}") {
-                def list = ["RO", "openvim", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "LW-UI","NG-UI"]
-                for (component in list) {
-                    step ([$class: 'CopyArtifact',
-                           projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
-
-                    // grab the build name/number
-                    //options = get_env_from_build('build.env')
-                    build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER')
-
-                    // grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge)
-                    ci_helper.get_archive(params.ARTIFACTORY_SERVER,component,GERRIT_BRANCH, "${component}${upstream_main_job} :: ${GERRIT_BRANCH}", build_num)
-
-                    // cleanup any prevously defined dists
-                    sh "rm -rf dists"
-                }
-
-                // check if an upstream artifact based on specific build number has been requested
-                // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed
-                // a successful build yet). The upstream job is calling this downstream job (with the its build artifiact)
-                if ( params.UPSTREAM_JOB_NAME ) {
-                    step ([$class: 'CopyArtifact',
-                           projectName: "${params.UPSTREAM_JOB_NAME}",
-                           selector: [$class: 'SpecificBuildSelector', buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
-                          ])
-
-                    //options = get_env_from_build('build.env')
-                    // grab the build name/number
-                    //build_num = sh(returnStdout:true,  script: "cat build.env | awk -F= '/BUILD_NUMBER/{print \$2}'").trim()
-                    build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER')
-                    component = ci_helper.get_mdg_from_project(ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
-
-                    // the upstream job name contains suffix with the project. Need this stripped off
-                    def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
+    containerName += "-${BUILD_NUMBER}"
+
+    server_id = null
+    http_server_name = null
+    devopstempdir = null
+    useCharmedInstaller = params.INSTALLER.equalsIgnoreCase('charmed')
+
+    try {
+        builtModules = [:]
+///////////////////////////////////////////////////////////////////////////////////////
+// Fetch stage 2 .deb artifacts
+///////////////////////////////////////////////////////////////////////////////////////
+        stage('Copy Artifacts') {
+            // cleanup any previous repo
+            sh 'rm -rf repo'
+            dir('repo') {
+                packageList = []
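+                // Accumulates the Artifactory URLs of every stage_2 .deb fetched below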
+                dir("${RELEASE}") {
+                    RELEASE_DIR = sh(returnStdout:true,  script: 'pwd').trim()
+
+                    // check if an upstream artifact based on a specific build number has been requested
+                    // This is the case of a merge build where the upstream merge build is not yet complete
+                    // (it is not deemed a successful build yet). The upstream job calls this downstream
+                    // job with its build artifact
+                    def upstreamComponent = ''
+                    if (params.UPSTREAM_JOB_NAME) {
+                        println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
+                        lock('Artifactory') {
+                            step ([$class: 'CopyArtifact',
+                                projectName: "${params.UPSTREAM_JOB_NAME}",
+                                selector: [$class: 'SpecificBuildSelector',
+                                buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
+                                ])
+
+                            upstreamComponent = ci_helper.get_mdg_from_project(
+                                ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
+                            def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
+                            dir("$upstreamComponent") {
+                                // the upstream job name contains a suffix with the project, which needs to be stripped off
+                                project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
+                                packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
+                                    upstreamComponent,
+                                    GERRIT_BRANCH,
+                                    "${project_without_branch} :: ${GERRIT_BRANCH}",
+                                    buildNumber)
+
+                                packageList.addAll(packages)
+                                println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
+                            }
+                        } // lock artifactory
+                    }
 
-                    // Remove the previous artifact for this component. Use the new upstream artifact
-                    sh "rm -rf pool/${component}"
+                    parallelSteps = [:]
+                    list = ['RO', 'osmclient', 'IM', 'devops', 'MON', 'N2VC', 'NBI',
+                            'common', 'LCM', 'POL', 'NG-UI', 'PLA', 'tests']
+                    if (upstreamComponent.length() > 0) {
+                        println("Skipping upstream fetch of ${upstreamComponent}")
+                        list.remove(upstreamComponent)
+                    }
+                    for (buildStep in list) {
+                        def component = buildStep
+                        parallelSteps[component] = {
+                            dir("$component") {
+                                println("Fetching artifact for ${component}")
+                                step([$class: 'CopyArtifact',
+                                       projectName: "${component}${upstreamMainJob}/${GERRIT_BRANCH}"])
+
+                                // grab the archives from the stage_2 builds
+                                // (i.e. these will be the artifacts stored on a merge)
+                                packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
+                                    component,
+                                    GERRIT_BRANCH,
+                                    "${component}${upstreamMainJob} :: ${GERRIT_BRANCH}",
+                                    ci_helper.get_env_value('build.env', 'BUILD_NUMBER'))
+                                packageList.addAll(packages)
+                                println("Fetched ${component}: ${packages}")
+                                sh 'rm -rf dists'
+                            }
+                        }
+                    }
+                    lock('Artifactory') {
+                        parallel parallelSteps
+                    }
 
-                    ci_helper.get_archive(params.ARTIFACTORY_SERVER,component,GERRIT_BRANCH, "${project_without_branch} :: ${GERRIT_BRANCH}", build_num)
+///////////////////////////////////////////////////////////////////////////////////////
+// Create Devops APT repository
+///////////////////////////////////////////////////////////////////////////////////////
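+                    // Only the packages consumed directly by the installer on the
+                    // target VM (devops, IM, osmclient) are published in this local
+                    // APT repository; the other modules are delivered as the docker
+                    // images built below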
+                    sh 'mkdir -p pool'
+                    for (component in [ 'devops', 'IM', 'osmclient' ]) {
+                        sh "ls -al ${component}/pool/"
+                        sh "cp -r ${component}/pool/* pool/"
+                        sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
+                        sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
+                        sh("""apt-ftparchive packages pool/${component} \
+                           > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages""")
+                        sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
+                    }
 
-                    sh "rm -rf dists"
-                }
-                
-                // sign all the components
-                for (component in list) {
-                    sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
-                }
+                    // create and sign the release file
+                    sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
+                    sh("""gpg --yes -abs -u ${GPG_KEY_NAME} \
+                       -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release""")
 
-                // now create the distro
-                for (component in list) {
-                    sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
-                    sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
-                    sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
+                    // copy the public key into the release folder
+                    // this pulls the key from the home dir of the current user (jenkins)
+                    sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
+                    sh "cp ~/${REPO_KEY_NAME} ."
                 }
 
-                // create and sign the release file
-                sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
-                sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"
-
-                // copy the public key into the release folder
-                // this pulls the key from the home dir of the current user (jenkins)
-                sh "cp ~/${REPO_KEY_NAME} ."
+                // start an apache server to serve up the packages
+                http_server_name = "${containerName}-apache"
 
-                // merge the change logs
-                sh """
-                   rm -f changelog/changelog-osm.html
-                   [ ! -d changelog ] || for mdgchange in \$(ls changelog); do cat changelog/\$mdgchange >> changelog/changelog-osm.html; done
-                   """
-                RELEASE_DIR = sh(returnStdout:true,  script: 'pwd').trim()
+                pwd = sh(returnStdout:true,  script: 'pwd').trim()
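+                // Ask the kernel for a free ephemeral port (bind to port 0) to serve the repo on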
+                repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
+                               'print(s.getsockname()[1]); s.close()\');',
+                               returnStdout: true).trim()
+                repo_base_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
+                NODE_IP_ADDRESS = sh(returnStdout: true, script:
+                    "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
             }
-            // start an apache server to serve up the images
-            http_server_name = "${container_name}-apache"
 
-            pwd = sh(returnStdout:true,  script: 'pwd').trim()
-            repo_base_url = ci_helper.start_http_server(pwd,http_server_name)
-        }
-
-        // now pull the devops package and install in temporary location
-        tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
-        osm_devops_dpkg = sh(returnStdout: true, script: "find . -name osm-devops*.deb").trim()
-        sh "dpkg -x ${osm_devops_dpkg} ${tempdir}"
-        OSM_DEVOPS="${tempdir}/usr/share/osm-devops"
-    }
-
-    dir(OSM_DEVOPS) {
-        error = null
-        if ( params.DO_BUILD ) {
-            stage("Build") {
-                sh "make -C docker clean"
-                sh "make -C docker Q= CMD_DOCKER_ARGS= TAG=${container_name} RELEASE=${params.RELEASE} REPOSITORY_BASE=${repo_base_url} REPOSITORY_KEY=${params.REPO_KEY_NAME} REPOSITORY=${params.REPO_DISTRO}"
+            // Unpack the devops package into a temporary location so that the upstream version is used if it was part of a patch
+            osm_devops_dpkg = sh(returnStdout: true, script: "find ./repo/${RELEASE}/pool/ -name 'osm-devops*.deb'").trim()
+            devopstempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
+            println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
+            sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
+            OSM_DEVOPS = "${devopstempdir}/usr/share/osm-devops"
+            // Convert URLs from stage 2 packages to arguments that can be passed to docker build
+            for (remotePath in packageList) {
+                packageName = remotePath[remotePath.lastIndexOf('/') + 1 .. -1]
+                packageName = packageName[0 .. packageName.indexOf('_') - 1]
+                builtModules[packageName] = remotePath
             }
         }
 
-        try {
-            if ( params.DO_INSTALL ) {
-                stage("Install") {
+///////////////////////////////////////////////////////////////////////////////////////
+// Build docker containers
+///////////////////////////////////////////////////////////////////////////////////////
+        dir(OSM_DEVOPS) {
+            Map remote = [:]
+            error = null
+            if ( params.DO_BUILD ) {
+                withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
+                                usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
+                    sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
+                }
+                datetime = sh(returnStdout: true, script: 'date +%Y-%m-%d:%H:%M:%S').trim()
+                moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}"
+                for (packageName in builtModules.keySet()) {
+                    envName = packageName.replaceAll('-', '_').toUpperCase() + '_URL'
+                    moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
+                }
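+                // e.g. a fetched 'osm-common_*.deb' package yields
+                // '--build-arg OSM_COMMON_URL=<artifactory url>' in moduleBuildArgs above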
+                dir('docker') {
+                    stage('Build') {
+                        containerList = sh(returnStdout: true, script:
+                            "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
+                        containerList = Arrays.asList(containerList.split('\n'))
+                        print(containerList)
+                        parallelSteps = [:]
+                        for (buildStep in containerList) {
+                            def module = buildStep
+                            def moduleName = buildStep.toLowerCase()
+                            def moduleTag = containerName
+                            parallelSteps[module] = {
+                                dir("$module") {
+                                    sh("""docker build --build-arg APT_PROXY=${APT_PROXY} \
+                                    -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} .""")
+                                    println("Tagging ${moduleName}:${moduleTag}")
+                                    sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
+                                    ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
+                                    sh("""docker push \
+                                    ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
+                                }
+                            }
+                        }
+                        parallel parallelSteps
+                    }
+                }
+            } // if (params.DO_BUILD)
+
+            if (params.DO_INSTALL) {
+///////////////////////////////////////////////////////////////////////////////////////
+// Launch VM
+///////////////////////////////////////////////////////////////////////////////////////
+                stage('Spawn Remote VM') {
+                    println('Launching new VM')
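+                    // Export the OS_* credentials from the hive config, then boot the
+                    // test VM (flavor, image, key and network follow ETSI CI conventions)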
+                    output = sh(returnStdout: true, script: """#!/bin/sh -e
+                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+                        openstack server create --flavor osm.sanity \
+                                                --image ${OPENSTACK_BASE_IMAGE} \
+                                                --key-name CICD \
+                                                --property build_url="${BUILD_URL}" \
+                                                --nic net-id=osm-ext \
+                                                ${containerName}
+                    """).trim()
+
+                    server_id = get_value('id', output)
+
+                    if (server_id == null) {
+                        println('VM launch output: ')
+                        println(output)
+                        throw new Exception('VM Launch failed')
+                    }
+                    println("Target VM is ${server_id}, waiting for IP address to be assigned")
+
+                    IP_ADDRESS = ''
 
-                    //will by default always delete containers on complete
-                    //sh "jenkins/system/delete_old_containers.sh ${container_name_prefix}"
+                    while (IP_ADDRESS == '') {
+                        output = sh(returnStdout: true, script: """#!/bin/sh -e
+                            for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+                            openstack server show ${server_id}
+                        """).trim()
+                        IP_ADDRESS = get_value('addresses', output)
+                    }
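+                    // 'addresses' is reported as '<network>=<ip>[, ...]'; keep only the IP part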
+                    IP_ADDRESS = IP_ADDRESS.split('=')[1]
+                    println("Waiting for VM at ${IP_ADDRESS} to be reachable")
+
+                    alive = false
+                    while (!alive) {
+                        output = sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim()
+                        println("output is [$output]")
+                        alive = output.contains('succeeded')
+                    }
+                    println('VM is ready and accepting ssh connections')
+                } // stage("Spawn Remote VM")
 
+///////////////////////////////////////////////////////////////////////////////////////
+// Installation
+///////////////////////////////////////////////////////////////////////////////////////
+                stage('Install') {
                     commit_id = ''
                     repo_distro = ''
                     repo_key_name = ''
                     release = ''
 
-                    if ( params.COMMIT_ID )
-                    {
+                    if (params.COMMIT_ID) {
                         commit_id = "-b ${params.COMMIT_ID}"
                     }
-
-                    if ( params.REPO_DISTRO )
-                    {
+                    if (params.REPO_DISTRO) {
                         repo_distro = "-r ${params.REPO_DISTRO}"
                     }
-
-                    if ( params.REPO_KEY_NAME )
-                    {
+                    if (params.REPO_KEY_NAME) {
                         repo_key_name = "-k ${params.REPO_KEY_NAME}"
                     }
-
-                    if ( params.RELEASE )
-                    {
+                    if (params.RELEASE) {
                         release = "-R ${params.RELEASE}"
                     }
-             
-                    if ( params.REPOSITORY_BASE )
-                    {
+                    if (params.REPOSITORY_BASE) {
                         repo_base_url = "-u ${params.REPOSITORY_BASE}"
+                    } else {
+                        repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
                     }
-                                       if ( params.DO_STAGE_4 ) {
-                                           try {
-                        sh "docker stack list |grep \"${container_name_prefix}\"|  awk '{ print \$1 }'| xargs docker stack rm"
-                                               }
-                                               catch (caughtError) {
-                                                 println("Caught error: docker stack rm failed!")
-                                               }
-                                       }
-                    sh """
-                        export PATH=$PATH:/snap/bin
-                        installers/full_install_osm.sh -y -s ${container_name} --test --nolxd --nodocker --nojuju --nohostports --nohostclient \
-                                                        --nodockerbuild -t ${container_name} \
-                                                        -w /tmp/osm \
-                                                        ${commit_id} \
-                                                        ${repo_distro} \
-                                                        ${repo_base_url} \
-                                                        ${repo_key_name} \
-                                                        ${release} \
-                                                        ${params.BUILD_FROM_SOURCE}
-                       """
-                }
-            }
-
-            stage_archive = false
-            if ( params.DO_SMOKE ) {
-                stage("OSM Health") {
-                    sh "installers/osm_health.sh -s ${container_name}"
-                }
-                stage("Smoke") {
-                    run_systest(container_name,container_name,"smoke")
-                    // archive smoke success until stage_4 is ready
 
-                    if ( ! currentBuild.result.equals('UNSTABLE') ) {
-                        stage_archive = keep_artifacts
+                    remote = [
+                        name: containerName,
+                        host: IP_ADDRESS,
+                        user: 'ubuntu',
+                        identityFile: SSH_KEY,
+                        allowAnyHosts: true,
+                        logLevel: 'INFO',
+                        pty: true
+                    ]
+
+                    // Force time sync to avoid clock drift and invalid certificates
+                    sshCommand remote: remote, command: '''
+                        sudo apt update
+                        sudo apt install -y ntp
+                        sudo service ntp stop
+                        sudo ntpd -gq
+                        sudo service ntp start
+                    '''
+
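+                    // Fetch the Release ELEVEN installer and make the snap binaries
+                    // visible to later non-interactive ssh sessions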
+                    sshCommand remote: remote, command: '''
+                        wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh
+                        chmod +x ./install_osm.sh
+                        sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
+                    '''
+
+                    Map gitlabCredentialsMap = [$class: 'UsernamePasswordMultiBinding',
+                                                credentialsId: 'gitlab-registry',
+                                                usernameVariable: 'USERNAME',
+                                                passwordVariable: 'PASSWORD']
+                    if (useCharmedInstaller) {
+                        // Use local proxy for docker hub
+                        sshCommand remote: remote, command: '''
+                            sudo snap install microk8s --classic --channel=1.19/stable
+                            sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" \
+                            /var/snap/microk8s/current/args/containerd-template.toml
+                            sudo systemctl restart snap.microk8s.daemon-containerd.service
+                            sudo snap alias microk8s.kubectl kubectl
+                        '''
+
+                        withCredentials([gitlabCredentialsMap]) {
+                            sshCommand remote: remote, command: """
+                                ./install_osm.sh -y \
+                                    ${repo_base_url} \
+                                    ${repo_key_name} \
+                                    ${release} -r unstable \
+                                    --charmed  \
+                                    --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
+                                    --tag ${containerName}
+                            """
+                        }
+                        prometheusHostname = "prometheus.${IP_ADDRESS}.nip.io"
+                        prometheusPort = 80
+                        osmHostname = "nbi.${IP_ADDRESS}.nip.io:443"
                     } else {
-                                          error = new Exception("Smoke test failed")
-                                          currentBuild.result = 'FAILURE'
-                                       }
-                }
-            }
+                        // Run the -k8s installer here, specifying the internal docker registry and docker proxy
+                        withCredentials([gitlabCredentialsMap]) {
+                            sshCommand remote: remote, command: """
+                                ./install_osm.sh -y \
+                                    ${repo_base_url} \
+                                    ${repo_key_name} \
+                                    ${release} -r unstable \
+                                    -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
+                                    -p ${INTERNAL_DOCKER_PROXY} \
+                                    -t ${containerName}
+                            """
+                        }
+                        prometheusHostname = IP_ADDRESS
+                        prometheusPort = 9091
+                        osmHostname = IP_ADDRESS
+                    }
+                } // stage("Install")
+///////////////////////////////////////////////////////////////////////////////////////
+// Health check of the installed OSM in the remote VM
+///////////////////////////////////////////////////////////////////////////////////////
+                stage('OSM Health') {
+                    stackName = 'osm'
+                    sshCommand remote: remote, command: """
+                        /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
+                    """
+                } // stage("OSM Health")
+            } // if ( params.DO_INSTALL )
+
+///////////////////////////////////////////////////////////////////////////////////////
+// Execute Robot tests
+///////////////////////////////////////////////////////////////////////////////////////
+            stage_archive = false
+            if ( params.DO_ROBOT ) {
+                try {
+                    stage('System Integration Test') {
+                        if (useCharmedInstaller) {
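+                            // The charmed installer exposes NBI and Prometheus behind
+                            // nip.io hostnames; craft a hosts file so that the tests
+                            // container can resolve them to the VM address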
+                            tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
+                            sh(script: "touch ${tempdir}/hosts")
+                            hostfile = "${tempdir}/hosts"
+                            sh """cat << EOF > ${hostfile}
+127.0.0.1           localhost
+${remote.host}      prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
+EOF"""
+                        } else {
+                            hostfile = null
+                        }
 
-            if ( params.DO_STAGE_4 ) {
-                // override stage_archive to only archive on stable
-                stage_archive = false
-                stage("System Integration Test") {
-                    if ( params.DO_ROBOT ) {
-                        run_robot_systest(container_name,container_name,params.TEST_NAME,params.ROBOT_VIM)
-                    } //else {
-                    run_systest(container_name,container_name,"openstack_stage_4",params.HIVE_VIM_1)
-                    //}
-
-                    if ( ! currentBuild.result.equals('UNSTABLE') && ! currentBuild.result.equals('FAILURE')) {
-                        stage_archive = keep_artifacts
-                    } else {
-                       println ("Systest test failed, throwing error")
-                                          error = new Exception("Systest test failed")
-                                          currentBuild.result = 'FAILURE'
-                                          throw error
-                                       }
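+                        // Recover the juju admin password ('juju gui' prints it); it is
+                        // passed to the tests container as JUJU_PASSWORD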
+                        jujuPassword = sshCommand remote: remote, command: '''
+                            echo `juju gui 2>&1 | grep password | cut -d: -f2`
+                        '''
+
+                        run_robot_systest(
+                            containerName,
+                            params.ROBOT_TAG_NAME,
+                            osmHostname,
+                            prometheusHostname,
+                            prometheusPort,
+                            params.ROBOT_VIM,
+                            params.ROBOT_PORT_MAPPING_VIM,
+                            params.KUBECONFIG,
+                            params.CLOUDS,
+                            hostfile,
+                            jujuPassword,
+                            SSH_KEY,
+                            params.ROBOT_PASS_THRESHOLD,
+                            params.ROBOT_UNSTABLE_THRESHOLD
+                        )
+                    } // stage("System Integration Test")
+                } finally {
+                    stage('Archive Container Logs') {
+                        // Fetch and archive the OSM container logs from the remote VM
+                        archive_logs(remote)
+                        if (currentBuild.result != 'FAILURE') {
+                            stage_archive = keep_artifacts
+                        } else {
+                            println('Systest test failed, throwing error')
+                            error = new Exception('Systest test failed')
+                            currentBuild.result = 'FAILURE'
+                            throw error
+                        }
+                    }
                 }
-            }
+            } // if ( params.DO_ROBOT )
 
-            // override to save the artifacts
-            if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) {
-                stage("Archive") {
-                    sh "echo ${container_name} > build_version.txt"
-                    archiveArtifacts artifacts: "build_version.txt", fingerprint: true
+            if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive) {
+                stage('Archive') {
+                    sh "echo ${containerName} > build_version.txt"
+                    archiveArtifacts artifacts: 'build_version.txt', fingerprint: true
 
                     // Archive the tested repo
                     dir("${RELEASE_DIR}") {
-                        ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested')
+                        ci_helper.archive(params.ARTIFACTORY_SERVER, RELEASE, GERRIT_BRANCH, 'tested')
                     }
-                    if ( params.DO_DOCKERPUSH ) {
-                        stage("Docker Push") {
-                            sh "make -C docker push INPUT_TAG=${container_name} TAG=${params.DOCKER_TAG}"
+                    if (params.DO_DOCKERPUSH) {
+                        stage('Publish to Dockerhub') {
+                            parallelSteps = [:]
+                            for (buildStep in containerList) {
+                                module = buildStep
+                                moduleName = buildStep.toLowerCase()
+                                dockerTag = params.DOCKER_TAG
+                                moduleTag = containerName
+
+                                parallelSteps[module] = {
+                                    dir("$module") {
+                                        sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
+                                           opensourcemano/${moduleName}:${dockerTag}""")
+                                        sh "docker push opensourcemano/${moduleName}:${dockerTag}"
+                                    }
+                                }
+                            }
+                            parallel parallelSteps
                         }
-                    }
+
+                        stage('Snap promotion') {
+                            snaps = ['osmclient']
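+                            // Promote the newest edge revision of each snap to beta
+                            // whenever it differs from the revision already in beta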
+                            sh 'snapcraft login --with ~/.snapcraft/config'
+                            for (snap in snaps) {
+                                channel = 'latest/'
+                                if (BRANCH_NAME.startsWith('v')) {
+                                    channel = BRANCH_NAME.substring(1) + '/'
+                                } else if (BRANCH_NAME != 'master') {
+                                    channel += BRANCH_NAME.replaceAll('/', '-') + '/'
+                                }
+                                track = channel + 'edge\\*'
+                                edge_rev = sh(returnStdout: true,
+                                    script: "snapcraft revisions $snap | " +
+                                    "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
+                                print "edge rev is $edge_rev"
+                                track = channel + 'beta\\*'
+                                beta_rev = sh(returnStdout: true,
+                                    script: "snapcraft revisions $snap | " +
+                                    "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
+                                print "beta rev is $beta_rev"
+
+                                if (edge_rev != beta_rev) {
+                                    print "Promoting $edge_rev to beta in place of $beta_rev"
+                                    beta_track = channel + 'beta'
+                                    sh "snapcraft release $snap $edge_rev $beta_track"
+                                }
+                            }
+                        } // stage('Snap promotion')
+                    } // if (params.DO_DOCKERPUSH)
+                } // stage('Archive')
+            } // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
+        } // dir(OSM_DEVOPS)
+    } finally {
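+        // Always clean up: delete the test VM (unless a SAVE_CONTAINER_* flag asks to
+        // keep it), stop the local apt http server and remove the devops unpack dir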
+        if (params.DO_INSTALL) {
+            delete_vm = true
+            if (error && params.SAVE_CONTAINER_ON_FAIL) {
+                delete_vm = false
+            }
+            if (!error && params.SAVE_CONTAINER_ON_PASS) {
+                delete_vm = false
+            }
+
+            if (server_id != null) {
+                if (delete_vm) {
+                    println("Deleting VM: $server_id")
+                    sh """#!/bin/sh -e
+                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+                        openstack server delete ${server_id}
+                    """
+                } else {
+                    println("Saved VM $server_id in ETSI VIM")
                 }
             }
         }
-        catch(Exception ex) {
-            error = ex
-            currentBuild.result = 'FAILURE'
-            println("Caught error")
-            println(ex.getMessage())
+        if ( http_server_name != null ) {
+            sh "docker stop ${http_server_name} || true"
+            sh "docker rm ${http_server_name} || true"
         }
-        finally {
-            if ( params.DO_INSTALL ) {
-                if (error) {
-                    if ( !params.SAVE_CONTAINER_ON_FAIL ) {
-                        uninstall_osm container_name
-                        sh "docker stop ${http_server_name}"
-                        sh "docker rm ${http_server_name}"
-                    }
-                }
-                else {
-                    if ( !params.SAVE_CONTAINER_ON_PASS ) {
-                        uninstall_osm container_name
-                        sh "docker stop ${http_server_name}"
-                        sh "docker rm ${http_server_name}"
-                    }
-                }
-            }
+
+        if ( devopstempdir != null ) {
+            sh "rm -rf ${devopstempdir}"
         }
     }
 }