X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=jenkins%2Fci-pipelines%2Fci_stage_3.groovy;h=bc23e441b8468d42a8b05294ae4bfdc755ca40ce;hb=0062727f3c596aa3c4e87b4ea497bdf69b3dedd9;hp=153abcbf5be26572926efc7650046623d28328bf;hpb=2afad6d1c0bc3d7160192c9e71690a01838271d7;p=osm%2Fdevops.git diff --git a/jenkins/ci-pipelines/ci_stage_3.groovy b/jenkins/ci-pipelines/ci_stage_3.groovy index 153abcbf..bc23e441 100644 --- a/jenkins/ci-pipelines/ci_stage_3.groovy +++ b/jenkins/ci-pipelines/ci_stage_3.groovy @@ -1,7 +1,7 @@ -/* Copyright 2017 Sandvine +/* Copyright ETSI Contributors and Others * * All Rights Reserved. - * + * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at @@ -14,13 +14,6 @@ * License for the specific language governing permissions and limitations * under the License. */ - -/* Change log: - * 1. Bug 745 : Jayant Madavi, Mrityunjay Yadav : JM00553988@techmahindra.com : 23-july-2019 : Improvement to the code, typically we have 2 * or more branches whose build gets triggered, ex master & release branch, the previous code was removing any/all docker. - * Now removing previous docker of the same branch, so that the other branch failed docker should not be removed. It also - * acts as clean-up for previous docker remove failure. - * 2. Feature 7829 : Mrityunjay Yadav, Jayant Madavi: MY00514913@techmahindra.com : 19-Aug-2019 : Added a parameters & function to invoke Robot test. - */ properties([ parameters([ @@ -38,69 +31,124 @@ properties([ string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'), string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'), string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'), - string(defaultValue: 'releaseseven-daily', description: '', name: 'DOCKER_TAG'), - booleanParam(defaultValue: true, description: '', name: 'SAVE_CONTAINER_ON_FAIL'), + string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'), + booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'), booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'), booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'), - booleanParam(defaultValue: true, description: '', name: 'DO_STAGE_4'), booleanParam(defaultValue: true, description: '', name: 'DO_BUILD'), booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'), - booleanParam(defaultValue: true, description: '', name: 'DO_SMOKE'), booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'), booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'), string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'), - booleanParam(defaultValue: false, description: '', name: 'DO_ROBOT'), - string(defaultValue: 'sanity', description: 'smoke/vim/sanity/comprehensive are the options', name: 'TEST_NAME'), + booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'), + string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options', name: 'ROBOT_TAG_NAME'), string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'), + string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml', description: 'Port mapping file for SDN assist in ETSI VIM', name: 'ROBOT_PORT_MAPPING_VIM'), + 
string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'), + string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'), + string(defaultValue: 'Default', description: '', name: 'INSTALLER'), + string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed', name: 'ROBOT_PASS_THRESHOLD'), + string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'), ]) ]) -def uninstall_osm(stackName) { - sh """ - export OSM_USE_LOCAL_DEVOPS=true - export PATH=$PATH:/snap/bin - installers/full_install_osm.sh -y -w /tmp/osm -t ${stackName} -s ${stackName} --test --nolxd --nodocker --nojuju --nohostports --nohostclient --uninstall - """ -} -def run_systest(stackName,tagName,testName,envfile=null) { +//////////////////////////////////////////////////////////////////////////////////////// +// Helper Functions +//////////////////////////////////////////////////////////////////////////////////////// +def run_robot_systest(tagName,testName,osmHostname,prometheusHostname,prometheus_port=null,envfile=null,portmappingfile=null,kubeconfig=null,clouds=null,hostfile=null,jujuPassword=null,pass_th='0.0',unstable_th='0.0') { tempdir = sh(returnStdout: true, script: "mktemp -d").trim() if ( !envfile ) { sh(script: "touch ${tempdir}/env") envfile="${tempdir}/env" } - sh "docker run --network net${stackName} --env-file ${envfile} -v ${tempdir}:/usr/share/osm-devops/systest/reports opensourcemano/osmclient:${tagName} make -C /usr/share/osm-devops/systest ${testName}" - sh "cp ${tempdir}/* ." - junit '*.xml' + PROMETHEUS_PORT_VAR = "" + if ( prometheusPort != null) { + PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT="+prometheusPort + } + hostfilemount="" + if ( hostfile ) { + hostfilemount="-v "+hostfile+":/etc/hosts" + } + + JUJU_PASSWORD_VAR = "" + if ( jujuPassword != null) { + JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD="+jujuPassword + } + + try { + sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}" + } finally { + sh "cp ${tempdir}/* ." 
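+        // Everything the tests write to /robot-systest/reports inside the container lands in
+        // ${tempdir} on the host (see the -v mount in the docker run above) and is copied here,
+        // so the RobotPublisher step below can pick up the *.xml output, log.html, report.html
+        // and any *.png screenshots; pass_th and unstable_th are fed from the
+        // ROBOT_PASS_THRESHOLD and ROBOT_UNSTABLE_THRESHOLD job parameters at the call site.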
+ outputDirectory = sh(returnStdout: true, script: "pwd").trim() + println ("Present Directory is : ${outputDirectory}") + step([ + $class : 'RobotPublisher', + outputPath : "${outputDirectory}", + outputFileName : "*.xml", + disableArchiveOutput : false, + reportFileName : "report.html", + logFileName : "log.html", + passThreshold : pass_th, + unstableThreshold: unstable_th, + otherFiles : "*.png", + ]) + } } -def run_robot_systest(stackName,tagName,testName,envfile=null) { - tempdir = sh(returnStdout: true, script: "mktemp -d").trim() - if ( !envfile ) - { - sh(script: "touch ${tempdir}/env") - envfile="${tempdir}/env" +def archive_logs(remote) { + + sshCommand remote: remote, command: '''mkdir -p logs''' + if (useCharmedInstaller) { + sshCommand remote: remote, command: ''' + for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do + logfile=`echo $container | cut -d- -f1` + echo "Extracting log for $logfile" + kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log + done + ''' + } else { + sshCommand remote: remote, command: ''' + for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do + echo "Extracting log for $deployment" + kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 > logs/$deployment.log + done + ''' + sshCommand remote: remote, command: ''' + for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do + echo "Extracting log for $statefulset" + kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 > logs/$statefulset.log + done + ''' + } + + sh "rm -rf logs" + sshCommand remote: remote, command: '''ls -al logs''' + sshGet remote: remote, from: 'logs', into: '.', override: true + sh "cp logs/* ." + archiveArtifacts artifacts: '*.log' +} + +def get_value(key, output) { + for (String line : output.split( '\n' )) { + data = line.split( '\\|' ) + if (data.length > 1) { + if ( data[1].trim() == key ) { + return data[2].trim() + } + } } - sh "docker run --network net${stackName} --env-file ${envfile} -v ${tempdir}:/usr/share/osm-devops/robot-systest/reports opensourcemano/osmclient:${tagName} bash -C /usr/share/osm-devops/robot-systest/run_test.sh --do_install -t ${testName}" - sh "cp ${tempdir}/* ." 
- outputDirectory = sh(returnStdout: true, script: "pwd").trim() - println ("Present Directory is : ${outputDirectory}") - step([ - $class : 'RobotPublisher', - outputPath : "${outputDirectory}", - outputFileName : "*.xml", - disableArchiveOutput : false, - reportFileName : "report.html", - logFileName : "log.html", - passThreshold : 0, - unstableThreshold: 0, - otherFiles : "*.png", - ]) } +//////////////////////////////////////////////////////////////////////////////////////// +// Main Script +//////////////////////////////////////////////////////////////////////////////////////// node("${params.NODE}") { + INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/' + INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000' + SSH_KEY = '~/hive/cicd_rsa' sh 'env' tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./,"") @@ -127,113 +175,220 @@ node("${params.NODE}") { } container_name += "-${BUILD_NUMBER}" - // Copy the artifacts from the upstream jobs - stage("Copy Artifacts") { - // cleanup any previous repo - sh 'rm -rf repo' - dir("repo") { - // grab all stable upstream builds based on the - - dir("${RELEASE}") { - def list = ["RO", "openvim", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "LW-UI"] - for (component in list) { - step ([$class: 'CopyArtifact', - projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"]) + server_id = null + http_server_name = null + devopstempdir = null + useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed") + + try { + builtModules = [:] +/////////////////////////////////////////////////////////////////////////////////////// +// Fetch stage 2 .deb artifacts +/////////////////////////////////////////////////////////////////////////////////////// + stage("Copy Artifacts") { + // cleanup any previous repo + sh 'rm -rf repo' + dir("repo") { + packageList = [] + dir("${RELEASE}") { + RELEASE_DIR = sh(returnStdout:true, script: 'pwd').trim() + + // check if an upstream artifact based on specific build number has been requested + // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed + // a successful build yet). The upstream job is calling this downstream job (with the its build artifiact) + def upstreamComponent="" + if ( params.UPSTREAM_JOB_NAME ) { + println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}") + + step ([$class: 'CopyArtifact', + projectName: "${params.UPSTREAM_JOB_NAME}", + selector: [$class: 'SpecificBuildSelector', + buildNumber: "${params.UPSTREAM_JOB_NUMBER}"] + ]) + + upstreamComponent = ci_helper.get_mdg_from_project( + ci_helper.get_env_value('build.env','GERRIT_PROJECT')) + def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER') + dir("$upstreamComponent") { + // the upstream job name contains suffix with the project. 
Need this stripped off + def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0] + def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER, + upstreamComponent, + GERRIT_BRANCH, + "${project_without_branch} :: ${GERRIT_BRANCH}", + buildNumber) + + packageList.addAll(packages) + println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}") + } + } - // grab the build name/number - //options = get_env_from_build('build.env') - build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER') + parallelSteps = [:] + def list = ["RO", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "NG-UI", "PLA", "tests"] + if (upstreamComponent.length()>0) { + println("Skipping upstream fetch of "+upstreamComponent) + list.remove(upstreamComponent) + } + for (buildStep in list) { + def component = buildStep + parallelSteps[component] = { + dir("$component") { + println("Fetching artifact for ${component}") + step ([$class: 'CopyArtifact', + projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"]) + + // grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge) + def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER, + component, + GERRIT_BRANCH, + "${component}${upstream_main_job} :: ${GERRIT_BRANCH}", + ci_helper.get_env_value('build.env','BUILD_NUMBER')) + packageList.addAll(packages) + println("Fetched ${component}: ${packages}") + sh "rm -rf dists" + } + } + } + parallel parallelSteps + +/////////////////////////////////////////////////////////////////////////////////////// +// Create Devops APT repository +/////////////////////////////////////////////////////////////////////////////////////// + sh "mkdir -p pool" + for (component in [ "devops", "IM", "osmclient" ]) { + sh "ls -al ${component}/pool/" + sh "cp -r ${component}/pool/* pool/" + sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*" + sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/" + sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages" + sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages" + } - // grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge) - ci_helper.get_archive(params.ARTIFACTORY_SERVER,component,GERRIT_BRANCH, "${component}${upstream_main_job} :: ${GERRIT_BRANCH}", build_num) + // create and sign the release file + sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release" + sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release" - // cleanup any prevously defined dists - sh "rm -rf dists" + // copy the public key into the release folder + // this pulls the key from the home dir of the current user (jenkins) + sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'" + sh "cp ~/${REPO_KEY_NAME} ." } - // check if an upstream artifact based on specific build number has been requested - // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed - // a successful build yet). 
The upstream job is calling this downstream job (with the its build artifiact) - if ( params.UPSTREAM_JOB_NAME ) { - step ([$class: 'CopyArtifact', - projectName: "${params.UPSTREAM_JOB_NAME}", - selector: [$class: 'SpecificBuildSelector', buildNumber: "${params.UPSTREAM_JOB_NUMBER}"] - ]) - - //options = get_env_from_build('build.env') - // grab the build name/number - //build_num = sh(returnStdout:true, script: "cat build.env | awk -F= '/BUILD_NUMBER/{print \$2}'").trim() - build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER') - component = ci_helper.get_mdg_from_project(ci_helper.get_env_value('build.env','GERRIT_PROJECT')) - - // the upstream job name contains suffix with the project. Need this stripped off - def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0] + // start an apache server to serve up the packages + http_server_name = "${container_name}-apache" - // Remove the previous artifact for this component. Use the new upstream artifact - sh "rm -rf pool/${component}" + pwd = sh(returnStdout:true, script: 'pwd').trim() + repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim() + repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port) + NODE_IP_ADDRESS=sh(returnStdout: true, script: + "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim() + } - ci_helper.get_archive(params.ARTIFACTORY_SERVER,component,GERRIT_BRANCH, "${project_without_branch} :: ${GERRIT_BRANCH}", build_num) + // Unpack devops package into temporary location so that we use it from upstream if it was part of a patch + osm_devops_dpkg = sh(returnStdout: true, script: "find ./repo/release/pool/ -name osm-devops*.deb").trim() + devopstempdir = sh(returnStdout: true, script: "mktemp -d").trim() + println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step") + sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}" + OSM_DEVOPS="${devopstempdir}/usr/share/osm-devops" + // Convert URLs from stage 2 packages to arguments that can be passed to docker build + for (remotePath in packageList) { + packageName=remotePath.substring(remotePath.lastIndexOf('/')+1) + packageName=packageName.substring(0,packageName.indexOf('_')) + builtModules[packageName]=remotePath + } + } - sh "rm -rf dists" +/////////////////////////////////////////////////////////////////////////////////////// +// Build docker containers +/////////////////////////////////////////////////////////////////////////////////////// + dir(OSM_DEVOPS) { + def remote = [:] + error = null + if ( params.DO_BUILD ) { + withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry', + usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) { + sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}" } - - // sign all the components - for (component in list) { - sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*" + moduleBuildArgs = "" + for (packageName in builtModules.keySet()) { + envName=packageName.replaceAll("-","_").toUpperCase()+"_URL" + moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName] } - - // now create the distro - for (component in list) { - sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/" - sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages" - sh "gzip -9fk 
dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages" + dir ("docker") { + stage("Build") { + containerList = sh(returnStdout: true, script: + "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'") + containerList=Arrays.asList(containerList.split("\n")) + print(containerList) + parallelSteps = [:] + for (buildStep in containerList) { + def module = buildStep + def moduleName = buildStep.toLowerCase() + def moduleTag = container_name + parallelSteps[module] = { + dir("$module") { + sh "docker build -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} ." + println("Tagging ${moduleName}:${moduleTag}") + sh "docker tag opensourcemano/${moduleName}:${moduleTag} ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}" + sh "docker push ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}" + } + } + } + parallel parallelSteps + } } + } // if ( params.DO_BUILD ) - // create and sign the release file - sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release" - sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release" - - // copy the public key into the release folder - // this pulls the key from the home dir of the current user (jenkins) - sh "cp ~/${REPO_KEY_NAME} ." - - // merge the change logs - sh """ - rm -f changelog/changelog-osm.html - [ ! -d changelog ] || for mdgchange in \$(ls changelog); do cat changelog/\$mdgchange >> changelog/changelog-osm.html; done - """ - RELEASE_DIR = sh(returnStdout:true, script: 'pwd').trim() - } - // start an apache server to serve up the images - http_server_name = "${container_name}-apache" - - pwd = sh(returnStdout:true, script: 'pwd').trim() - repo_base_url = ci_helper.start_http_server(pwd,http_server_name) - } + if ( params.DO_INSTALL ) { +/////////////////////////////////////////////////////////////////////////////////////// +// Launch VM +/////////////////////////////////////////////////////////////////////////////////////// + stage("Spawn Remote VM") { + println("Launching new VM") + output=sh(returnStdout: true, script: """#!/bin/sh -e + for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done + openstack server create --flavor osm.sanity \ + --image ubuntu18.04 \ + --key-name CICD \ + --property build_url="${BUILD_URL}" \ + --nic net-id=osm-ext \ + ${container_name} + """).trim() + + server_id = get_value('id', output) + + if (server_id == null) { + println("VM launch output: ") + println(output) + throw new Exception("VM Launch failed") + } + println("Target VM is ${server_id}, waiting for IP address to be assigned") - // now pull the devops package and install in temporary location - tempdir = sh(returnStdout: true, script: "mktemp -d").trim() - osm_devops_dpkg = sh(returnStdout: true, script: "find . 
-name osm-devops*.deb").trim() - sh "dpkg -x ${osm_devops_dpkg} ${tempdir}" - OSM_DEVOPS="${tempdir}/usr/share/osm-devops" - } + IP_ADDRESS = "" - dir(OSM_DEVOPS) { - error = null - if ( params.DO_BUILD ) { - stage("Build") { - sh "make -C docker clean" - sh "make -C docker Q= CMD_DOCKER_ARGS= TAG=${container_name} RELEASE=${params.RELEASE} REPOSITORY_BASE=${repo_base_url} REPOSITORY_KEY=${params.REPO_KEY_NAME} REPOSITORY=${params.REPO_DISTRO}" - } - } + while (IP_ADDRESS == "") { + output=sh(returnStdout: true, script: """#!/bin/sh -e + for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done + openstack server show ${server_id} + """).trim() + IP_ADDRESS = get_value('addresses', output) + } + IP_ADDRESS = IP_ADDRESS.split('=')[1] + println("Waiting for VM at ${IP_ADDRESS} to be reachable") + + alive = false + while (! alive) { + output=sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim() + println("output is [$output]") + alive = output.contains("succeeded") + } + println("VM is ready and accepting ssh connections") + } // stage("Spawn Remote VM") - try { - if ( params.DO_INSTALL ) { +/////////////////////////////////////////////////////////////////////////////////////// +// Installation +/////////////////////////////////////////////////////////////////////////////////////// stage("Install") { - - //will by default always delete containers on complete - //sh "jenkins/system/delete_old_containers.sh ${container_name_prefix}" - commit_id = '' repo_distro = '' repo_key_name = '' @@ -258,74 +413,141 @@ node("${params.NODE}") { { release = "-R ${params.RELEASE}" } - + if ( params.REPOSITORY_BASE ) { repo_base_url = "-u ${params.REPOSITORY_BASE}" } - if ( params.DO_STAGE_4 ) { - try { - sh "docker stack list |grep \"${container_name_prefix}\"| awk '{ print \$1 }'| xargs docker stack rm" - } - catch (caughtError) { - println("Caught error: docker stack rm failed!") - } - } - sh """ - export PATH=$PATH:/snap/bin - installers/full_install_osm.sh -y -s ${container_name} --test --nolxd --nodocker --nojuju --nohostports --nohostclient \ - --nodockerbuild -t ${container_name} \ - -w /tmp/osm \ - ${commit_id} \ - ${repo_distro} \ - ${repo_base_url} \ - ${repo_key_name} \ - ${release} \ - ${params.BUILD_FROM_SOURCE} - """ - } - } + else + { + repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}" + } - stage_archive = false - if ( params.DO_SMOKE ) { + remote.name = container_name + remote.host = IP_ADDRESS + remote.user = 'ubuntu' + remote.identityFile = SSH_KEY + remote.allowAnyHosts = true + remote.logLevel = 'INFO' + remote.pty = true + + sshCommand remote: remote, command: """ + wget https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh + chmod +x ./install_osm.sh + sed -i '1 i\\export PATH=/snap/bin:\${PATH}' ~/.bashrc + """ + + if ( useCharmedInstaller ) { + // Use local proxy for docker hub + sshCommand remote: remote, command: ''' + sudo snap install microk8s --classic --channel=1.19/stable + sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" /var/snap/microk8s/current/args/containerd-template.toml + sudo systemctl restart snap.microk8s.daemon-containerd.service + sudo snap alias microk8s.kubectl kubectl + ''' + + withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry', + usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) { + sshCommand remote: remote, command: """ + ./install_osm.sh -y \ + ${repo_base_url} \ + ${repo_key_name} \ + ${release} -r 
unstable \ + --charmed \ + --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \ + --tag ${container_name} + """ + } + prometheusHostname = "prometheus."+IP_ADDRESS+".nip.io" + prometheusPort = 80 + osmHostname = "nbi."+IP_ADDRESS+".nip.io:443" + } else { + // Run -k8s installer here specifying internal docker registry and docker proxy + withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry', + usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) { + sshCommand remote: remote, command: """ + ./install_osm.sh -y \ + ${repo_base_url} \ + ${repo_key_name} \ + ${release} -r unstable \ + -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \ + -p ${INTERNAL_DOCKER_PROXY} \ + -t ${container_name} \ + --nocachelxdimages + """ + } + prometheusHostname = IP_ADDRESS + prometheusPort = 9091 + osmHostname = IP_ADDRESS + } + } // stage("Install") +/////////////////////////////////////////////////////////////////////////////////////// +// Health check of installed OSM in remote vm +/////////////////////////////////////////////////////////////////////////////////////// stage("OSM Health") { - sh "installers/osm_health.sh -s ${container_name}" - } - stage("Smoke") { - run_systest(container_name,container_name,"smoke") - // archive smoke success until stage_4 is ready + stackName = "osm" + sshCommand remote: remote, command: """ + /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName} + """ + } // stage("OSM Health") + } // if ( params.DO_INSTALL ) - if ( ! currentBuild.result.equals('UNSTABLE') ) { - stage_archive = keep_artifacts - } else { - error = new Exception("Smoke test failed") - currentBuild.result = 'FAILURE' - } - } - } - if ( params.DO_STAGE_4 ) { - // override stage_archive to only archive on stable - stage_archive = false - stage("System Integration Test") { - if ( params.DO_ROBOT ) { - run_robot_systest(container_name,container_name,params.TEST_NAME,params.ROBOT_VIM) - } //else { - run_systest(container_name,container_name,"openstack_stage_4",params.HIVE_VIM_1) - //} - - if ( ! currentBuild.result.equals('UNSTABLE') && ! 
currentBuild.result.equals('FAILURE')) { - stage_archive = keep_artifacts - } else { - println ("Systest test failed, throwing error") - error = new Exception("Systest test failed") - currentBuild.result = 'FAILURE' - throw error - } +/////////////////////////////////////////////////////////////////////////////////////// +// Execute Robot tests +/////////////////////////////////////////////////////////////////////////////////////// + stage_archive = false + if ( params.DO_ROBOT ) { + try { + stage("System Integration Test") { + if ( useCharmedInstaller ) { + tempdir = sh(returnStdout: true, script: "mktemp -d").trim() + sh(script: "touch ${tempdir}/hosts") + hostfile="${tempdir}/hosts" + sh """cat << EOF > ${hostfile} +127.0.0.1 localhost +${remote.host} prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io +EOF""" + } else { + hostfile=null + } + + jujuPassword=sshCommand remote: remote, command: """ + echo `juju gui 2>&1 | grep password | cut -d: -f2` + """ + + run_robot_systest( + container_name, + params.ROBOT_TAG_NAME, + osmHostname, + prometheusHostname, + prometheusPort, + params.ROBOT_VIM, + params.ROBOT_PORT_MAPPING_VIM, + params.KUBECONFIG, + params.CLOUDS, + hostfile, + jujuPassword, + params.ROBOT_PASS_THRESHOLD, + params.ROBOT_UNSTABLE_THRESHOLD + ) + } // stage("System Integration Test") + } finally { + stage("Archive Container Logs") { + // Archive logs to containers_logs.txt + archive_logs(remote) + if ( ! currentBuild.result.equals('FAILURE') ) { + stage_archive = keep_artifacts + } else { + println ("Systest test failed, throwing error") + error = new Exception("Systest test failed") + currentBuild.result = 'FAILURE' + throw error + } + } } - } + } // if ( params.DO_ROBOT ) - // override to save the artifacts if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) { stage("Archive") { sh "echo ${container_name} > build_version.txt" @@ -336,36 +558,85 @@ node("${params.NODE}") { ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested') } if ( params.DO_DOCKERPUSH ) { - stage("Docker Push") { - sh "make -C docker push INPUT_TAG=${container_name} TAG=${params.DOCKER_TAG}" + stage("Publish to Dockerhub") { + parallelSteps = [:] + for (buildStep in containerList) { + def module = buildStep + def moduleName = buildStep.toLowerCase() + def dockerTag = params.DOCKER_TAG + def moduleTag = container_name + + parallelSteps[module] = { + dir("$module") { + sh "docker tag opensourcemano/${moduleName}:${moduleTag} opensourcemano/${moduleName}:${dockerTag}" + sh "docker push opensourcemano/${moduleName}:${dockerTag}" + } + } + } + parallel parallelSteps } - } + + stage("Snap promotion") { + def snaps = ["osmclient"] + sh "snapcraft login --with ~/.snapcraft/config" + for (snap in snaps) { + channel="latest/" + if (BRANCH_NAME.startsWith("v")) { + channel=BRANCH_NAME.substring(1)+"/" + } else if (BRANCH_NAME!="master") { + channel+="/"+BRANCH_NAME.replaceAll('/','-') + } + track=channel+"edge\\*" + edge_rev=sh(returnStdout: true, + script: "snapcraft revisions $snap | " + + "grep \"$track\" | tail -1 | awk '{print \$1}'").trim() + print "edge rev is $edge_rev" + track=channel+"beta\\*" + beta_rev=sh(returnStdout: true, + script: "snapcraft revisions $snap | " + + "grep \"$track\" | tail -1 | awk '{print \$1}'").trim() + print "beta rev is $beta_rev" + + if ( edge_rev != beta_rev ) { + print "Promoting $edge_rev to beta in place of $beta_rev" + beta_track=channel+"beta" + sh "snapcraft release $snap $edge_rev $beta_track" + } + } + } // stage("Snap promotion") + } // 
if ( params.DO_DOCKERPUSH ) + } // stage("Archive") + } // if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) + } // dir(OSM_DEVOPS) + } finally { + if ( params.DO_INSTALL && server_id != null) { + delete_vm = true + if (error && params.SAVE_CONTAINER_ON_FAIL ) { + delete_vm = false + } + if (!error && params.SAVE_CONTAINER_ON_PASS ) { + delete_vm = false + } + + if ( delete_vm ) { + if (server_id != null) { + println("Deleting VM: $server_id") + sh """#!/bin/sh -e + for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done + openstack server delete ${server_id} + """ + } else { + println("Saved VM $server_id in ETSI VIM") } } } - catch(Exception ex) { - error = ex - currentBuild.result = 'FAILURE' - println("Caught error") - println(ex.getMessage()) + if ( http_server_name != null ) { + sh "docker stop ${http_server_name} || true" + sh "docker rm ${http_server_name} || true" } - finally { - if ( params.DO_INSTALL ) { - if (error) { - if ( !params.SAVE_CONTAINER_ON_FAIL ) { - uninstall_osm container_name - sh "docker stop ${http_server_name}" - sh "docker rm ${http_server_name}" - } - } - else { - if ( !params.SAVE_CONTAINER_ON_PASS ) { - uninstall_osm container_name - sh "docker stop ${http_server_name}" - sh "docker rm ${http_server_name}" - } - } - } + + if ( devopstempdir != null ) { + sh "rm -rf ${devopstempdir}" } } }
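
For readers following the new "Spawn Remote VM" stage, the snippet below is a minimal, self-contained Groovy sketch, illustrative only and not part of the change above. It restates the parsing logic of the pipeline's get_value() helper to show how fields such as 'id' and 'addresses' are pulled out of the ASCII table printed by 'openstack server create' / 'openstack server show'; the stage then splits the addresses value on '=' to obtain the IP address it polls and later reaches over ssh. The sample table and its values are invented for illustration.

// Minimal sketch, independent of the commit: the same table parsing used by get_value(),
// exercised against a made-up 'openstack server show' style output.
def get_value(key, output) {
    for (String line : output.split('\n')) {
        def data = line.split('\\|')
        if (data.length > 1 && data[1].trim() == key) {
            return data[2].trim()
        }
    }
    return null
}

// Hypothetical output; a real 'openstack server show' table has many more rows.
def sampleOutput = '''
+-----------+--------------------------------------+
| Field     | Value                                |
+-----------+--------------------------------------+
| id        | 0f37a806-aaaa-bbbb-cccc-000000000000 |
| addresses | osm-ext=172.21.248.10                |
+-----------+--------------------------------------+
'''

assert get_value('id', sampleOutput) == '0f37a806-aaaa-bbbb-cccc-000000000000'
assert get_value('addresses', sampleOutput).split('=')[1] == '172.21.248.10'

The helper deliberately returns the raw column text and leaves any further splitting to the caller, which is why the pipeline can reuse it both for the server id (used later by 'openstack server delete' in the cleanup block) and for the addresses field it keeps polling until an IP address has been assigned.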