X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=jenkins%2Fci-pipelines%2Fci_stage_3.groovy;h=7a920762928b3b3c0626365bae1ec15ccb81a1ad;hb=f83c20ab3304291aca8224ebe2dd88b58b5dc1b4;hp=6e23ba38c6894fd495592e5b053f1f6dd6516fb3;hpb=a1cf037db12ace2fd334de49a3ca2d59809209ba;p=osm%2Fdevops.git diff --git a/jenkins/ci-pipelines/ci_stage_3.groovy b/jenkins/ci-pipelines/ci_stage_3.groovy index 6e23ba38..7a920762 100644 --- a/jenkins/ci-pipelines/ci_stage_3.groovy +++ b/jenkins/ci-pipelines/ci_stage_3.groovy @@ -1,4 +1,4 @@ -/* Copyright 2017 Sandvine +/* Copyright ETSI Contributors and Others * * All Rights Reserved. * @@ -32,49 +32,47 @@ properties([ string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'), string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'), string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'), + string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'), booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'), booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'), booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'), - booleanParam(defaultValue: true, description: '', name: 'DO_STAGE_4'), booleanParam(defaultValue: true, description: '', name: 'DO_BUILD'), booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'), - booleanParam(defaultValue: true, description: '', name: 'DO_SMOKE'), booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'), booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'), string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'), - booleanParam(defaultValue: false, description: '', name: 'DO_ROBOT'), - string(defaultValue: 'sanity', description: 'sanity/regression are the options', name: 'TEST_NAME'), + booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'), + string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options', name: 'ROBOT_TAG_NAME'), string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'), + string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml', description: 'Port mapping file for SDN assist in ETSI VIM', name: 'ROBOT_PORT_MAPPING_VIM'), string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'), string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'), string(defaultValue: 'Default', description: '', name: 'INSTALLER'), + string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed', name: 'ROBOT_PASS_THRESHOLD'), + string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'), ]) ]) -def uninstall_osm(stackName) { - sh """ - export OSM_USE_LOCAL_DEVOPS=true - export PATH=$PATH:/snap/bin - installers/full_install_osm.sh -y -c swarm -w /tmp/osm -t ${stackName} -s ${stackName} --test --nolxd --nodocker --nojuju --nohostports --nohostclient --uninstall - """ -} - -def run_systest(stackName,tagName,testName,envfile=null) { - tempdir = sh(returnStdout: true, script: "mktemp -d").trim() - if ( !envfile ) - { - sh(script: "touch ${tempdir}/env") - envfile="${tempdir}/env" - } - sh "docker run --network net${stackName} --env-file ${envfile} -v 
${tempdir}:/usr/share/osm-devops/systest/reports opensourcemano/osmclient:${tagName} make -C /usr/share/osm-devops/systest ${testName}" - sh "cp ${tempdir}/* ." - junit '*.xml' -} -def run_robot_systest(tagName,testName,osmHostname,prometheusHostname,prometheus_port=null,envfile=null,kubeconfig=null,clouds=null,hostfile=null,jujuPassword=null) { - tempdir = sh(returnStdout: true, script: "mktemp -d").trim() - if ( !envfile ) - { +//////////////////////////////////////////////////////////////////////////////////////// +// Helper Functions +//////////////////////////////////////////////////////////////////////////////////////// +void run_robot_systest(String tagName, + String testName, + String osmHostname, + String prometheusHostname, + Integer prometheusPort=null, + String envfile=null, + String portmappingfile=null, + String kubeconfig=null, + String clouds=null, + String hostfile=null, + String jujuPassword=null, + String osmRSAfile=null, + String pass_th='0.0', + String unstable_th='0.0') { + tempdir = sh(returnStdout: true, script: 'mktemp -d').trim() + if ( !envfile ) { sh(script: "touch ${tempdir}/env") envfile="${tempdir}/env" } @@ -93,7 +91,7 @@ def run_robot_systest(tagName,testName,osmHostname,prometheusHostname,prometheus } try { - sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}" + sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}" } finally { sh "cp ${tempdir}/* ." 
outputDirectory = sh(returnStdout: true, script: "pwd").trim() @@ -105,8 +103,8 @@ def run_robot_systest(tagName,testName,osmHostname,prometheusHostname,prometheus disableArchiveOutput : false, reportFileName : "report.html", logFileName : "log.html", - passThreshold : 0, - unstableThreshold: 0, + passThreshold : pass_th, + unstableThreshold: unstable_th, otherFiles : "*.png", ]) } @@ -156,10 +154,14 @@ def get_value(key, output) { } } +//////////////////////////////////////////////////////////////////////////////////////// +// Main Script +//////////////////////////////////////////////////////////////////////////////////////// node("${params.NODE}") { INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/' INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000' + APT_PROXY="http://172.21.1.1:3142" SSH_KEY = '~/hive/cicd_rsa' sh 'env' @@ -187,130 +189,186 @@ node("${params.NODE}") { } container_name += "-${BUILD_NUMBER}" - // Copy the artifacts from the upstream jobs - stage("Copy Artifacts") { - // cleanup any previous repo - sh 'rm -rf repo' - dir("repo") { - // grab all stable upstream builds based on the - - dir("${RELEASE}") { - def list = ["RO", "openvim", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "LW-UI", "NG-UI", "PLA", "tests"] - for (component in list) { - step ([$class: 'CopyArtifact', - projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"]) - - // grab the build name/number - build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER') - - // grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge) - ci_helper.get_archive(params.ARTIFACTORY_SERVER,component,GERRIT_BRANCH, "${component}${upstream_main_job} :: ${GERRIT_BRANCH}", build_num) - - // cleanup any prevously defined dists - sh "rm -rf dists" - } + server_id = null + http_server_name = null + devopstempdir = null + useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed") - // check if an upstream artifact based on specific build number has been requested - // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed - // a successful build yet). The upstream job is calling this downstream job (with the its build artifiact) - if ( params.UPSTREAM_JOB_NAME ) { - step ([$class: 'CopyArtifact', - projectName: "${params.UPSTREAM_JOB_NAME}", - selector: [$class: 'SpecificBuildSelector', buildNumber: "${params.UPSTREAM_JOB_NUMBER}"] - ]) - - build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER') - component = ci_helper.get_mdg_from_project(ci_helper.get_env_value('build.env','GERRIT_PROJECT')) - - // the upstream job name contains suffix with the project. Need this stripped off - def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0] + try { + builtModules = [:] +/////////////////////////////////////////////////////////////////////////////////////// +// Fetch stage 2 .deb artifacts +/////////////////////////////////////////////////////////////////////////////////////// + stage("Copy Artifacts") { + // cleanup any previous repo + sh 'rm -rf repo' + dir("repo") { + packageList = [] + dir("${RELEASE}") { + RELEASE_DIR = sh(returnStdout:true, script: 'pwd').trim() + + // check if an upstream artifact based on specific build number has been requested + // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed + // a successful build yet). 
The upstream job is calling this downstream job (with the its build artifiact) + def upstreamComponent="" + if ( params.UPSTREAM_JOB_NAME ) { + println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}") + lock('Artifactory') { + step ([$class: 'CopyArtifact', + projectName: "${params.UPSTREAM_JOB_NAME}", + selector: [$class: 'SpecificBuildSelector', + buildNumber: "${params.UPSTREAM_JOB_NUMBER}"] + ]) + + upstreamComponent = ci_helper.get_mdg_from_project( + ci_helper.get_env_value('build.env','GERRIT_PROJECT')) + def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER') + dir("$upstreamComponent") { + // the upstream job name contains suffix with the project. Need this stripped off + def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0] + def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER, + upstreamComponent, + GERRIT_BRANCH, + "${project_without_branch} :: ${GERRIT_BRANCH}", + buildNumber) + + packageList.addAll(packages) + println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}") + } + } // lock artifactory + } - // Remove the previous artifact for this component. Use the new upstream artifact - sh "rm -rf pool/${component}" + parallelSteps = [:] + def list = ["RO", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "NG-UI", "PLA", "tests"] + if (upstreamComponent.length()>0) { + println("Skipping upstream fetch of "+upstreamComponent) + list.remove(upstreamComponent) + } + for (buildStep in list) { + def component = buildStep + parallelSteps[component] = { + dir("$component") { + println("Fetching artifact for ${component}") + step ([$class: 'CopyArtifact', + projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"]) + + // grab the archives from the stage_2 builds (ie. 
this will be the artifacts stored based on a merge) + def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER, + component, + GERRIT_BRANCH, + "${component}${upstream_main_job} :: ${GERRIT_BRANCH}", + ci_helper.get_env_value('build.env','BUILD_NUMBER')) + packageList.addAll(packages) + println("Fetched ${component}: ${packages}") + sh "rm -rf dists" + } + } + } + lock('Artifactory') { + parallel parallelSteps + } - ci_helper.get_archive(params.ARTIFACTORY_SERVER,component,GERRIT_BRANCH, "${project_without_branch} :: ${GERRIT_BRANCH}", build_num) +/////////////////////////////////////////////////////////////////////////////////////// +// Create Devops APT repository +/////////////////////////////////////////////////////////////////////////////////////// + sh "mkdir -p pool" + for (component in [ "devops", "IM", "osmclient" ]) { + sh "ls -al ${component}/pool/" + sh "cp -r ${component}/pool/* pool/" + sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*" + sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/" + sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages" + sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages" + } - sh "rm -rf dists" - } + // create and sign the release file + sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release" + sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release" - // sign all the components - for (component in list) { - sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*" + // copy the public key into the release folder + // this pulls the key from the home dir of the current user (jenkins) + sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'" + sh "cp ~/${REPO_KEY_NAME} ." } - // now create the distro - for (component in list) { - sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/" - sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages" - sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages" - } + // start an apache server to serve up the packages + http_server_name = "${container_name}-apache" - // create and sign the release file - sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release" - sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release" - - // copy the public key into the release folder - // this pulls the key from the home dir of the current user (jenkins) - sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'" - sh "cp ~/${REPO_KEY_NAME} ." - - // merge the change logs - sh """ - rm -f changelog/changelog-osm.html - [ ! 
-d changelog ] || for mdgchange in \$(ls changelog); do cat changelog/\$mdgchange >> changelog/changelog-osm.html; done - """ - RELEASE_DIR = sh(returnStdout:true, script: 'pwd').trim() + pwd = sh(returnStdout:true, script: 'pwd').trim() + repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim() + repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port) + NODE_IP_ADDRESS=sh(returnStdout: true, script: + "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim() } - // start an apache server to serve up the images - http_server_name = "${container_name}-apache" - - pwd = sh(returnStdout:true, script: 'pwd').trim() - repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim() - repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port) - NODE_IP_ADDRESS=sh(returnStdout: true, script: - "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim() - } - - // now pull the devops package and install in temporary location - tempdir = sh(returnStdout: true, script: "mktemp -d").trim() - osm_devops_dpkg = sh(returnStdout: true, script: "find . -name osm-devops*.deb").trim() - sh "dpkg -x ${osm_devops_dpkg} ${tempdir}" - OSM_DEVOPS="${tempdir}/usr/share/osm-devops" - println("Repo base URL=${repo_base_url}") - } - - dir(OSM_DEVOPS) { - def remote = [:] - error = null - if ( params.DO_BUILD ) { - stage("Build") { - sh "make -C docker clean" - sh "make -C docker -j `nproc` Q= CMD_DOCKER_ARGS= TAG=${container_name} RELEASE=${params.RELEASE} REPOSITORY_BASE=${repo_base_url} REPOSITORY_KEY=${params.REPO_KEY_NAME} REPOSITORY=${params.REPO_DISTRO}" + // Unpack devops package into temporary location so that we use it from upstream if it was part of a patch + osm_devops_dpkg = sh(returnStdout: true, script: "find ./repo/release/pool/ -name osm-devops*.deb").trim() + devopstempdir = sh(returnStdout: true, script: "mktemp -d").trim() + println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step") + sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}" + OSM_DEVOPS="${devopstempdir}/usr/share/osm-devops" + // Convert URLs from stage 2 packages to arguments that can be passed to docker build + for (remotePath in packageList) { + packageName=remotePath.substring(remotePath.lastIndexOf('/')+1) + packageName=packageName.substring(0,packageName.indexOf('_')) + builtModules[packageName]=remotePath } + } - stage("Push to internal registry") { +/////////////////////////////////////////////////////////////////////////////////////// +// Build docker containers +/////////////////////////////////////////////////////////////////////////////////////// + dir(OSM_DEVOPS) { + def remote = [:] + error = null + if ( params.DO_BUILD ) { withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry', usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) { sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}" } - sh "make -C docker push INPUT_TAG=${container_name} TAG=${container_name} DOCKER_REGISTRY=${INTERNAL_DOCKER_REGISTRY}" - } - - } - - try { - useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed") + datetime = sh(returnStdout: true, script: "date +%Y-%m-%d:%H:%M:%S").trim() + moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}" + for (packageName in builtModules.keySet()) { + 
envName=packageName.replaceAll("-","_").toUpperCase()+"_URL" + moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName] + } + dir ("docker") { + stage("Build") { + containerList = sh(returnStdout: true, script: + "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'") + containerList=Arrays.asList(containerList.split("\n")) + print(containerList) + parallelSteps = [:] + for (buildStep in containerList) { + def module = buildStep + def moduleName = buildStep.toLowerCase() + def moduleTag = container_name + parallelSteps[module] = { + dir("$module") { + sh "docker build --build-arg APT_PROXY=${APT_PROXY} -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} ." + println("Tagging ${moduleName}:${moduleTag}") + sh "docker tag opensourcemano/${moduleName}:${moduleTag} ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}" + sh "docker push ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}" + } + } + } + parallel parallelSteps + } + } + } // if ( params.DO_BUILD ) if ( params.DO_INSTALL ) { - +/////////////////////////////////////////////////////////////////////////////////////// +// Launch VM +/////////////////////////////////////////////////////////////////////////////////////// stage("Spawn Remote VM") { println("Launching new VM") output=sh(returnStdout: true, script: """#!/bin/sh -e for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done - openstack server create --flavor m1.xlarge \ - --image ubuntu18.04 \ + openstack server create --flavor osm.sanity \ + --image ${OPENSTACK_BASE_IMAGE} \ --key-name CICD \ + --property build_url="${BUILD_URL}" \ --nic net-id=osm-ext \ ${container_name} """).trim() @@ -343,8 +401,11 @@ node("${params.NODE}") { alive = output.contains("succeeded") } println("VM is ready and accepting ssh connections") - } + } // stage("Spawn Remote VM") +/////////////////////////////////////////////////////////////////////////////////////// +// Installation +/////////////////////////////////////////////////////////////////////////////////////// stage("Install") { commit_id = '' repo_distro = '' @@ -388,14 +449,22 @@ node("${params.NODE}") { remote.logLevel = 'INFO' remote.pty = true + // Force time sync to avoid clock drift and invalid certificates sshCommand remote: remote, command: """ - wget https://osm-download.etsi.org/ftp/osm-9.0-nine/install_osm.sh + sudo apt update + sudo apt install -y ntp + sudo service ntp stop + sudo ntpd -gq + sudo service ntp start + """ + + sshCommand remote: remote, command: """ + wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh chmod +x ./install_osm.sh sed -i '1 i\\export PATH=/snap/bin:\${PATH}' ~/.bashrc """ if ( useCharmedInstaller ) { - // Use local proxy for docker hub sshCommand remote: remote, command: ''' sudo snap install microk8s --classic --channel=1.19/stable @@ -416,9 +485,9 @@ node("${params.NODE}") { --tag ${container_name} """ } - prometheusHostname = "prometheus."+IP_ADDRESS+".xip.io" + prometheusHostname = "prometheus."+IP_ADDRESS+".nip.io" prometheusPort = 80 - osmHostname = "nbi."+IP_ADDRESS+".xip.io:443" + osmHostname = "nbi."+IP_ADDRESS+".nip.io:443" } else { // Run -k8s installer here specifying internal docker registry and docker proxy withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry', @@ -437,59 +506,64 @@ node("${params.NODE}") { prometheusPort = 9091 osmHostname = IP_ADDRESS } - } - } - - stage_archive = false - if ( params.DO_SMOKE ) { + } // 
stage("Install") +/////////////////////////////////////////////////////////////////////////////////////// +// Health check of installed OSM in remote vm +/////////////////////////////////////////////////////////////////////////////////////// stage("OSM Health") { stackName = "osm" sshCommand remote: remote, command: """ /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName} """ - } - } + } // stage("OSM Health") + } // if ( params.DO_INSTALL ) - if ( params.DO_STAGE_4 ) { - // override stage_archive to only archive on stable - stage_archive = false + +/////////////////////////////////////////////////////////////////////////////////////// +// Execute Robot tests +/////////////////////////////////////////////////////////////////////////////////////// + stage_archive = false + if ( params.DO_ROBOT ) { try { stage("System Integration Test") { - if ( params.DO_ROBOT ) { - if( useCharmedInstaller ) { - tempdir = sh(returnStdout: true, script: "mktemp -d").trim() - sh(script: "touch ${tempdir}/hosts") - hostfile="${tempdir}/hosts" - sh """cat << EOF > ${hostfile} + if ( useCharmedInstaller ) { + tempdir = sh(returnStdout: true, script: "mktemp -d").trim() + sh(script: "touch ${tempdir}/hosts") + hostfile="${tempdir}/hosts" + sh """cat << EOF > ${hostfile} 127.0.0.1 localhost -${remote.host} prometheus.${remote.host}.xip.io nbi.${remote.host}.xip.io +${remote.host} prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io EOF""" - } else { - hostfile=null - } + } else { + hostfile=null + } - jujuPassword=sshCommand remote: remote, command: """ - echo `juju gui 2>&1 | grep password | cut -d: -f2` - """ + jujuPassword=sshCommand remote: remote, command: """ + echo `juju gui 2>&1 | grep password | cut -d: -f2` + """ - run_robot_systest( - container_name, - params.TEST_NAME, - osmHostname, - prometheusHostname, - prometheusPort, - params.ROBOT_VIM, - params.KUBECONFIG, - params.CLOUDS, - hostfile, - jujuPassword) - } - } + run_robot_systest( + container_name, + params.ROBOT_TAG_NAME, + osmHostname, + prometheusHostname, + prometheusPort, + params.ROBOT_VIM, + params.ROBOT_PORT_MAPPING_VIM, + params.KUBECONFIG, + params.CLOUDS, + hostfile, + jujuPassword, + SSH_KEY, + params.ROBOT_PASS_THRESHOLD, + params.ROBOT_UNSTABLE_THRESHOLD + ) + } // stage("System Integration Test") } finally { stage("Archive Container Logs") { // Archive logs to containers_logs.txt archive_logs(remote) - if ( ! currentBuild.result.equals('UNSTABLE') && ! currentBuild.result.equals('FAILURE')) { + if ( ! 
currentBuild.result.equals('FAILURE') ) { stage_archive = keep_artifacts } else { println ("Systest test failed, throwing error") @@ -499,9 +573,8 @@ EOF""" } } } - } + } // if ( params.DO_ROBOT ) - // override to save the artifacts if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) { stage("Archive") { sh "echo ${container_name} > build_version.txt" @@ -512,8 +585,22 @@ EOF""" ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested') } if ( params.DO_DOCKERPUSH ) { - stage("Docker Push") { - sh "make -C docker push INPUT_TAG=${container_name} TAG=${params.DOCKER_TAG}" + stage("Publish to Dockerhub") { + parallelSteps = [:] + for (buildStep in containerList) { + def module = buildStep + def moduleName = buildStep.toLowerCase() + def dockerTag = params.DOCKER_TAG + def moduleTag = container_name + + parallelSteps[module] = { + dir("$module") { + sh "docker tag opensourcemano/${moduleName}:${moduleTag} opensourcemano/${moduleName}:${dockerTag}" + sh "docker push opensourcemano/${moduleName}:${dockerTag}" + } + } + } + parallel parallelSteps } stage("Snap promotion") { @@ -543,41 +630,40 @@ EOF""" sh "snapcraft release $snap $edge_rev $beta_track" } } - } - } - } + } // stage("Snap promotion") + } // if ( params.DO_DOCKERPUSH ) + } // stage("Archive") + } // if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) + } // dir(OSM_DEVOPS) + } finally { + if ( params.DO_INSTALL && server_id != null) { + delete_vm = true + if (error && params.SAVE_CONTAINER_ON_FAIL ) { + delete_vm = false + } + if (!error && params.SAVE_CONTAINER_ON_PASS ) { + delete_vm = false } - } - catch(Exception ex) { - error = ex - currentBuild.result = 'FAILURE' - println("Caught error: "+ex) - } - finally { - println("Entered finally block") - if ( params.DO_INSTALL && server_id != null) { - delete_vm = true - if (error && params.SAVE_CONTAINER_ON_FAIL ) { - delete_vm = false - } - if (!error && params.SAVE_CONTAINER_ON_PASS ) { - delete_vm = false - } - if ( delete_vm ) { - if (server_id != null) { - println("Deleting VM: $server_id") - sh """#!/bin/sh -e - for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done - openstack server delete ${server_id} - """ - } else { - println("Saved VM $server_id in ETSI VIM") - } + if ( delete_vm ) { + if (server_id != null) { + println("Deleting VM: $server_id") + sh """#!/bin/sh -e + for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done + openstack server delete ${server_id} + """ + } else { + println("Saved VM $server_id in ETSI VIM") } } + } + if ( http_server_name != null ) { sh "docker stop ${http_server_name} || true" sh "docker rm ${http_server_name} || true" } + + if ( devopstempdir != null ) { + sh "rm -rf ${devopstempdir}" + } } }
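
Editor's note (not part of the patch above): the new ROBOT_PASS_THRESHOLD and ROBOT_UNSTABLE_THRESHOLD parameters flow into the passThreshold/unstableThreshold fields of the Robot Framework result publisher inside run_robot_systest (the hunk that replaces the previous hard-coded zeros). A minimal sketch of the intended behaviour follows, assuming the Jenkins Robot Framework plugin's RobotPublisher step; the outputPath/outputFileName values are illustrative assumptions, not taken from the patch. A pass rate at or above passThreshold yields SUCCESS, a rate at or above unstableThreshold but below passThreshold yields UNSTABLE, and anything lower fails the build.

    // Illustrative sketch only -- not part of the patch above.
    // Assumes the Jenkins Robot Framework plugin (RobotPublisher); paths are hypothetical.
    node {
        // Robot tests are assumed to have left output.xml/report.html/log.html in ./reports
        step([
            $class              : 'RobotPublisher',
            outputPath          : 'reports',      // hypothetical reports directory
            outputFileName      : '*.xml',
            reportFileName      : 'report.html',
            logFileName         : 'log.html',
            disableArchiveOutput: false,
            otherFiles          : '*.png',
            passThreshold       : 100.0,           // default of params.ROBOT_PASS_THRESHOLD
            unstableThreshold   : 80.0,            // default of params.ROBOT_UNSTABLE_THRESHOLD
        ])
    }

With the defaults introduced by this patch, only a 100% pass rate keeps the build green, a pass rate from 80% up to (but not including) 100% marks it UNSTABLE, and anything below 80% fails it; this is why run_robot_systest now accepts pass_th/unstable_th arguments instead of the previous fixed thresholds of 0.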