-/* Copyright 2017 Sandvine
+/* Copyright ETSI Contributors and Others
*
* All Rights Reserved.
*
booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
- booleanParam(defaultValue: true, description: '', name: 'DO_STAGE_4'),
booleanParam(defaultValue: true, description: '', name: 'DO_BUILD'),
booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
- booleanParam(defaultValue: true, description: '', name: 'DO_SMOKE'),
booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
- booleanParam(defaultValue: false, description: '', name: 'DO_ROBOT'),
- string(defaultValue: 'sanity', description: 'sanity/regression are the options', name: 'TEST_NAME'),
+ booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
+ string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options', name: 'ROBOT_TAG_NAME'),
string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
+ string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml', description: 'Port mapping file for SDN assist in ETSI VIM', name: 'ROBOT_PORT_MAPPING_VIM'),
string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
+ string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed', name: 'ROBOT_PASS_THRESHOLD'),
+ string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
])
])
-def uninstall_osm(stackName) {
- sh """
- export OSM_USE_LOCAL_DEVOPS=true
- export PATH=$PATH:/snap/bin
- installers/full_install_osm.sh -y -c swarm -w /tmp/osm -t ${stackName} -s ${stackName} --test --nolxd --nodocker --nojuju --nohostports --nohostclient --uninstall
- """
-}
-
-def run_systest(stackName,tagName,testName,envfile=null) {
- tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
- if ( !envfile )
- {
- sh(script: "touch ${tempdir}/env")
- envfile="${tempdir}/env"
- }
- sh "docker run --network net${stackName} --env-file ${envfile} -v ${tempdir}:/usr/share/osm-devops/systest/reports opensourcemano/osmclient:${tagName} make -C /usr/share/osm-devops/systest ${testName}"
- sh "cp ${tempdir}/* ."
- junit '*.xml'
-}
-def run_robot_systest(tagName,testName,osmHostname,prometheusHostname,prometheus_port=null,envfile=null,kubeconfig=null,clouds=null,hostfile=null,jujuPassword=null) {
- tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
- if ( !envfile )
- {
+////////////////////////////////////////////////////////////////////////////////////////
+// Helper Functions
+////////////////////////////////////////////////////////////////////////////////////////
+void run_robot_systest(String tagName,
+ String testName,
+ String osmHostname,
+ String prometheusHostname,
+ Integer prometheusPort=null,
+ String envfile=null,
+ String portmappingfile=null,
+ String jujudata=null,
+ String kubeconfig=null,
+ String clouds=null,
+ String hostfile=null,
+ String jujuPassword=null,
+ String osmRSAfile=null,
+ String pass_th='0.0',
+ String unstable_th='0.0') {
+ tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
+ if ( !envfile ) {
sh(script: "touch ${tempdir}/env")
envfile="${tempdir}/env"
}
}
try {
- sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}"
+ sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${jujudata}:/root/.local/share/juju -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}"
} finally {
sh "cp ${tempdir}/* ."
outputDirectory = sh(returnStdout: true, script: "pwd").trim()
disableArchiveOutput : false,
reportFileName : "report.html",
logFileName : "log.html",
- passThreshold : 0,
- unstableThreshold: 0,
+ passThreshold : pass_th,
+ unstableThreshold: unstable_th,
otherFiles : "*.png",
])
}
}
}
+////////////////////////////////////////////////////////////////////////////////////////
+// Main Script
+////////////////////////////////////////////////////////////////////////////////////////
node("${params.NODE}") {
INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
}
container_name += "-${BUILD_NUMBER}"
- // Copy the artifacts from the upstream jobs
- stage("Copy Artifacts") {
- // cleanup any previous repo
- sh 'rm -rf repo'
- dir("repo") {
- // grab all stable upstream builds based on the
-
- dir("${RELEASE}") {
- def list = ["RO", "openvim", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "LW-UI", "NG-UI", "PLA", "tests"]
- for (component in list) {
- step ([$class: 'CopyArtifact',
- projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
-
- // grab the build name/number
- build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER')
-
- // grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge)
- ci_helper.get_archive(params.ARTIFACTORY_SERVER,component,GERRIT_BRANCH, "${component}${upstream_main_job} :: ${GERRIT_BRANCH}", build_num)
-
- // cleanup any prevously defined dists
- sh "rm -rf dists"
- }
-
- // check if an upstream artifact based on specific build number has been requested
- // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed
- // a successful build yet). The upstream job is calling this downstream job (with the its build artifiact)
- if ( params.UPSTREAM_JOB_NAME ) {
- step ([$class: 'CopyArtifact',
- projectName: "${params.UPSTREAM_JOB_NAME}",
- selector: [$class: 'SpecificBuildSelector', buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
- ])
-
- build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER')
- component = ci_helper.get_mdg_from_project(ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
+ server_id = null
+ http_server_name = null
+ devopstempdir = null
+ jujutempdir = null
+ useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")
- // the upstream job name contains suffix with the project. Need this stripped off
- def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
-
- // Remove the previous artifact for this component. Use the new upstream artifact
- sh "rm -rf pool/${component}"
+ try {
+ builtModules = [:]
+///////////////////////////////////////////////////////////////////////////////////////
+// Fetch stage 2 .deb artifacts
+///////////////////////////////////////////////////////////////////////////////////////
+ stage("Copy Artifacts") {
+ // cleanup any previous repo
+ sh 'rm -rf repo'
+ dir("repo") {
+ packageList = []
+ dir("${RELEASE}") {
+ RELEASE_DIR = sh(returnStdout:true, script: 'pwd').trim()
+
+ // check if an upstream artifact based on specific build number has been requested
+ // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed
+ // a successful build yet). The upstream job is calling this downstream job (with its build artifact)
+ def upstreamComponent=""
+ if ( params.UPSTREAM_JOB_NAME ) {
+ println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
+
+ step ([$class: 'CopyArtifact',
+ projectName: "${params.UPSTREAM_JOB_NAME}",
+ selector: [$class: 'SpecificBuildSelector',
+ buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
+ ])
+
+ upstreamComponent = ci_helper.get_mdg_from_project(
+ ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
+ def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
+ dir("$upstreamComponent") {
+ // the upstream job name contains a suffix with the project name, which needs to be stripped off
+ def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
+ def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
+ upstreamComponent,
+ GERRIT_BRANCH,
+ "${project_without_branch} :: ${GERRIT_BRANCH}",
+ buildNumber)
+
+ packageList.addAll(packages)
+ println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
+ }
+ }
- ci_helper.get_archive(params.ARTIFACTORY_SERVER,component,GERRIT_BRANCH, "${project_without_branch} :: ${GERRIT_BRANCH}", build_num)
+ parallelSteps = [:]
+ def list = ["RO", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "NG-UI", "PLA", "tests"]
+ if (upstreamComponent.length()>0) {
+ println("Skipping upstream fetch of "+upstreamComponent)
+ list.remove(upstreamComponent)
+ }
+ for (buildStep in list) {
+ def component = buildStep
+ parallelSteps[component] = {
+ dir("$component") {
+ println("Fetching artifact for ${component}")
+ step ([$class: 'CopyArtifact',
+ projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
+
+ // grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge)
+ def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
+ component,
+ GERRIT_BRANCH,
+ "${component}${upstream_main_job} :: ${GERRIT_BRANCH}",
+ ci_helper.get_env_value('build.env','BUILD_NUMBER'))
+ packageList.addAll(packages)
+ println("Fetched ${component}: ${packages}")
+ sh "rm -rf dists"
+ }
+ }
+ }
+ parallel parallelSteps
+
+///////////////////////////////////////////////////////////////////////////////////////
+// Create Devops APT repository
+///////////////////////////////////////////////////////////////////////////////////////
+ sh "mkdir -p pool"
+ for (component in [ "devops", "IM", "osmclient" ]) {
+ sh "ls -al ${component}/pool/"
+ sh "cp -r ${component}/pool/* pool/"
+ sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
+ sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
+ sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
+ sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
+ }
- sh "rm -rf dists"
- }
+ // create and sign the release file
+ sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
+ sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"
- // sign all the components
- for (component in list) {
- sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
+ // copy the public key into the release folder
+ // this pulls the key from the home dir of the current user (jenkins)
+ sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
+ sh "cp ~/${REPO_KEY_NAME} ."
}
- // now create the distro
- for (component in list) {
- sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
- sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
- sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
- }
+ // start an apache server to serve up the packages
+ http_server_name = "${container_name}-apache"
- // create and sign the release file
- sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
- sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"
-
- // copy the public key into the release folder
- // this pulls the key from the home dir of the current user (jenkins)
- sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
- sh "cp ~/${REPO_KEY_NAME} ."
-
- // merge the change logs
- sh """
- rm -f changelog/changelog-osm.html
- [ ! -d changelog ] || for mdgchange in \$(ls changelog); do cat changelog/\$mdgchange >> changelog/changelog-osm.html; done
- """
- RELEASE_DIR = sh(returnStdout:true, script: 'pwd').trim()
+ pwd = sh(returnStdout:true, script: 'pwd').trim()
+ repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim()
+ repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port)
+ NODE_IP_ADDRESS=sh(returnStdout: true, script:
+ "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
}
- // start an apache server to serve up the images
- http_server_name = "${container_name}-apache"
-
- pwd = sh(returnStdout:true, script: 'pwd').trim()
- repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim()
- repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port)
- NODE_IP_ADDRESS=sh(returnStdout: true, script:
- "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
- }
- // now pull the devops package and install in temporary location
- tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
- osm_devops_dpkg = sh(returnStdout: true, script: "find . -name osm-devops*.deb").trim()
- sh "dpkg -x ${osm_devops_dpkg} ${tempdir}"
- OSM_DEVOPS="${tempdir}/usr/share/osm-devops"
- println("Repo base URL=${repo_base_url}")
- }
-
- dir(OSM_DEVOPS) {
- def remote = [:]
- error = null
-
- if ( params.DO_BUILD ) {
- stage("Build") {
- sh "make -C docker clean"
- sh "make -C docker -j `nproc` Q= CMD_DOCKER_ARGS= TAG=${container_name} RELEASE=${params.RELEASE} REPOSITORY_BASE=${repo_base_url} REPOSITORY_KEY=${params.REPO_KEY_NAME} REPOSITORY=${params.REPO_DISTRO}"
+ // Unpack devops package into temporary location so that we use it from upstream if it was part of a patch
+ osm_devops_dpkg = sh(returnStdout: true, script: "find ./repo/release/pool/ -name osm-devops*.deb").trim()
+ devopstempdir = sh(returnStdout: true, script: "mktemp -d").trim()
+ println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
+ sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
+ OSM_DEVOPS="${devopstempdir}/usr/share/osm-devops"
+ // Convert URLs from stage 2 packages to arguments that can be passed to docker build
+ for (remotePath in packageList) {
+ packageName=remotePath.substring(remotePath.lastIndexOf('/')+1)
+ packageName=packageName.substring(0,packageName.indexOf('_'))
+ builtModules[packageName]=remotePath
}
+ }
- stage("Push to internal registry") {
+///////////////////////////////////////////////////////////////////////////////////////
+// Build docker containers
+///////////////////////////////////////////////////////////////////////////////////////
+ dir(OSM_DEVOPS) {
+ def remote = [:]
+ error = null
+ if ( params.DO_BUILD ) {
withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
}
- sh "make -C docker push INPUT_TAG=${container_name} TAG=${container_name} DOCKER_REGISTRY=${INTERNAL_DOCKER_REGISTRY}"
- }
-
- }
-
- try {
- useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")
+ moduleBuildArgs = ""
+ for (packageName in builtModules.keySet()) {
+ envName=packageName.replaceAll("-","_").toUpperCase()+"_URL"
+ moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
+ }
+ dir ("docker") {
+ stage("Build") {
+ containerList = sh(returnStdout: true, script:
+ "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
+ containerList=Arrays.asList(containerList.split("\n"))
+ print(containerList)
+ parallelSteps = [:]
+ for (buildStep in containerList) {
+ def module = buildStep
+ def moduleName = buildStep.toLowerCase()
+ def moduleTag = container_name
+ parallelSteps[module] = {
+ dir("$module") {
+ sh "docker build -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} ."
+ println("Tagging ${moduleName}:${moduleTag}")
+ sh "docker tag opensourcemano/${moduleName}:${moduleTag} ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
+ sh "docker push ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
+ }
+ }
+ }
+ parallel parallelSteps
+ }
+ }
+ } // if ( params.DO_BUILD )
if ( params.DO_INSTALL ) {
-
+///////////////////////////////////////////////////////////////////////////////////////
+// Launch VM
+///////////////////////////////////////////////////////////////////////////////////////
stage("Spawn Remote VM") {
println("Launching new VM")
output=sh(returnStdout: true, script: """#!/bin/sh -e
alive = output.contains("succeeded")
}
println("VM is ready and accepting ssh connections")
- }
+ } // stage("Spawn Remote VM")
+///////////////////////////////////////////////////////////////////////////////////////
+// Installation
+///////////////////////////////////////////////////////////////////////////////////////
stage("Install") {
commit_id = ''
repo_distro = ''
remote.pty = true
sshCommand remote: remote, command: """
- wget https://osm-download.etsi.org/ftp/osm-9.0-nine/install_osm.sh
+ wget https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh
chmod +x ./install_osm.sh
sed -i '1 i\\export PATH=/snap/bin:\${PATH}' ~/.bashrc
"""
if ( useCharmedInstaller ) {
-
// Use local proxy for docker hub
sshCommand remote: remote, command: '''
sudo snap install microk8s --classic --channel=1.19/stable
--tag ${container_name}
"""
}
- prometheusHostname = "prometheus."+IP_ADDRESS+".xip.io"
+ prometheusHostname = "prometheus."+IP_ADDRESS+".nip.io"
prometheusPort = 80
- osmHostname = "nbi."+IP_ADDRESS+".xip.io:443"
+ osmHostname = "nbi."+IP_ADDRESS+".nip.io:443"
} else {
// Run -k8s installer here specifying internal docker registry and docker proxy
withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
${release} -r unstable \
-d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
-p ${INTERNAL_DOCKER_PROXY} \
- -t ${container_name}
+ -t ${container_name} \
+ --nocachelxdimages
"""
}
prometheusHostname = IP_ADDRESS
prometheusPort = 9091
osmHostname = IP_ADDRESS
}
- }
- }
-
- stage_archive = false
- if ( params.DO_SMOKE ) {
+ } // stage("Install")
+///////////////////////////////////////////////////////////////////////////////////////
+// Health check of installed OSM in remote vm
+///////////////////////////////////////////////////////////////////////////////////////
stage("OSM Health") {
stackName = "osm"
sshCommand remote: remote, command: """
/usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
"""
- }
- }
-
- if ( params.DO_STAGE_4 ) {
- // override stage_archive to only archive on stable
- stage_archive = false
+ } // stage("OSM Health")
+///////////////////////////////////////////////////////////////////////////////////////
+// Get juju data from installed OSM in remote vm
+///////////////////////////////////////////////////////////////////////////////////////
+ jujutempdir = sh(returnStdout: true, script: "mktemp -d").trim()
+ jujudatafolder = jujutempdir + '/juju'
+ homefolder = sshCommand remote: remote, command: 'echo ${HOME}'
+ sshGet remote: remote, from: homefolder + '/.local/share/juju', into: jujutempdir, override: true
+ } // if ( params.DO_INSTALL )
+
+
+///////////////////////////////////////////////////////////////////////////////////////
+// Execute Robot tests
+///////////////////////////////////////////////////////////////////////////////////////
+ stage_archive = false
+ if ( params.DO_ROBOT ) {
try {
stage("System Integration Test") {
- if ( params.DO_ROBOT ) {
- if( useCharmedInstaller ) {
- tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
- sh(script: "touch ${tempdir}/hosts")
- hostfile="${tempdir}/hosts"
- sh """cat << EOF > ${hostfile}
+ if ( useCharmedInstaller ) {
+ tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
+ sh(script: "touch ${tempdir}/hosts")
+ hostfile="${tempdir}/hosts"
+ sh """cat << EOF > ${hostfile}
127.0.0.1 localhost
-${remote.host} prometheus.${remote.host}.xip.io nbi.${remote.host}.xip.io
+${remote.host} prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
EOF"""
- } else {
- hostfile=null
- }
+ } else {
+ hostfile=null
+ }
- jujuPassword=sshCommand remote: remote, command: """
- echo `juju gui 2>&1 | grep password | cut -d: -f2`
- """
+ jujuPassword=sshCommand remote: remote, command: """
+ echo `juju gui 2>&1 | grep password | cut -d: -f2`
+ """
- run_robot_systest(
- container_name,
- params.TEST_NAME,
- osmHostname,
- prometheusHostname,
- prometheusPort,
- params.ROBOT_VIM,
- params.KUBECONFIG,
- params.CLOUDS,
- hostfile,
- jujuPassword)
- }
- }
+ run_robot_systest(
+ container_name,
+ params.ROBOT_TAG_NAME,
+ osmHostname,
+ prometheusHostname,
+ prometheusPort,
+ params.ROBOT_VIM,
+ params.ROBOT_PORT_MAPPING_VIM,
+ jujudatafolder,
+ params.KUBECONFIG,
+ params.CLOUDS,
+ hostfile,
+ jujuPassword,
+ SSH_KEY,
+ params.ROBOT_PASS_THRESHOLD,
+ params.ROBOT_UNSTABLE_THRESHOLD
+ )
+ } // stage("System Integration Test")
} finally {
stage("Archive Container Logs") {
// Archive logs to containers_logs.txt
archive_logs(remote)
- if ( ! currentBuild.result.equals('UNSTABLE') && ! currentBuild.result.equals('FAILURE')) {
+ if ( ! currentBuild.result.equals('FAILURE') ) {
stage_archive = keep_artifacts
} else {
println ("Systest test failed, throwing error")
}
}
}
- }
+ } // if ( params.DO_ROBOT )
- // override to save the artifacts
if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) {
stage("Archive") {
sh "echo ${container_name} > build_version.txt"
ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested')
}
if ( params.DO_DOCKERPUSH ) {
- stage("Docker Push") {
- sh "make -C docker push INPUT_TAG=${container_name} TAG=${params.DOCKER_TAG}"
+ stage("Publish to Dockerhub") {
+ parallelSteps = [:]
+ for (buildStep in containerList) {
+ def module = buildStep
+ def moduleName = buildStep.toLowerCase()
+ def dockerTag = params.DOCKER_TAG
+ def moduleTag = container_name
+
+ parallelSteps[module] = {
+ dir("$module") {
+ sh "docker tag opensourcemano/${moduleName}:${moduleTag} opensourcemano/${moduleName}:${dockerTag}"
+ sh "docker push opensourcemano/${moduleName}:${dockerTag}"
+ }
+ }
+ }
+ parallel parallelSteps
}
stage("Snap promotion") {
sh "snapcraft release $snap $edge_rev $beta_track"
}
}
- }
- }
- }
+ } // stage("Snap promotion")
+ } // if ( params.DO_DOCKERPUSH )
+ } // stage("Archive")
+ } // if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive )
+ } // dir(OSM_DEVOPS)
+ } finally {
+ if ( params.DO_INSTALL && server_id != null) {
+ delete_vm = true
+ if (error && params.SAVE_CONTAINER_ON_FAIL ) {
+ delete_vm = false
+ }
+ if (!error && params.SAVE_CONTAINER_ON_PASS ) {
+ delete_vm = false
}
- }
- catch(Exception ex) {
- error = ex
- currentBuild.result = 'FAILURE'
- println("Caught error: "+ex)
- }
- finally {
- println("Entered finally block")
- if ( params.DO_INSTALL && server_id != null) {
- delete_vm = true
- if (error && params.SAVE_CONTAINER_ON_FAIL ) {
- delete_vm = false
- }
- if (!error && params.SAVE_CONTAINER_ON_PASS ) {
- delete_vm = false
- }
- if ( delete_vm ) {
- if (server_id != null) {
- println("Deleting VM: $server_id")
- sh """#!/bin/sh -e
- for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
- openstack server delete ${server_id}
- """
- } else {
- println("Saved VM $server_id in ETSI VIM")
- }
+ if ( delete_vm ) {
+ if (server_id != null) {
+ println("Deleting VM: $server_id")
+ sh """#!/bin/sh -e
+ for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+ openstack server delete ${server_id}
+ """
+ } else {
+ println("Saved VM $server_id in ETSI VIM")
}
}
+ }
+ if ( http_server_name != null ) {
sh "docker stop ${http_server_name} || true"
sh "docker rm ${http_server_name} || true"
}
+
+ if ( devopstempdir != null ) {
+ sh "rm -rf ${devopstempdir}"
+ }
+
+ if ( jujutempdir != null ) {
+ sh "rm -rf ${jujutempdir}"
+ }
}
}