-/* Copyright 2017 Sandvine
+/* Copyright ETSI Contributors and Others
*
* All Rights Reserved.
- *
+ *
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
string(defaultValue: 'release', description: '', name: 'RELEASE'),
string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
- string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
- string(defaultValue: 'dpkg1', description: '', name: 'GPG_KEY_NAME'),
+ string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
+ string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
+ string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
- booleanParam(defaultValue: false, description: '', name: 'DO_STAGE_4'),
booleanParam(defaultValue: true, description: '', name: 'DO_BUILD'),
booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
- booleanParam(defaultValue: true, description: '', name: 'DO_SMOKE'),
+ booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
+ string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
+ booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
+ string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options',
+ name: 'ROBOT_TAG_NAME'),
+ string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
+ string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml',
+ description: 'Port mapping file for SDN assist in ETSI VIM',
+ name: 'ROBOT_PORT_MAPPING_VIM'),
+ string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
+ string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
+ string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
+ string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed',
+ name: 'ROBOT_PASS_THRESHOLD'),
+ string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable ' +
+ '(if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
])
])
-def uninstall_osm(stackName) {
- sh """
- export OSM_USE_LOCAL_DEVOPS=true
- export PATH=$PATH:/snap/bin
- installers/full_install_osm.sh -y -w /tmp/osm -t ${stackName} -s ${stackName} --test --nolxd --nodocker --nojuju --nohostports --nohostclient --uninstall
- """
+////////////////////////////////////////////////////////////////////////////////////////
+// Helper Functions
+////////////////////////////////////////////////////////////////////////////////////////
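+// Runs the Robot system test suite from the opensourcemano/tests container against a deployed
+// OSM instance and publishes the results with RobotPublisher. passThreshold/unstableThreshold
+// are percentages of passed tests; e.g. a value of '80.0' (illustrative) marks the build
+// unstable when fewer than 80% of the tests pass.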
+void run_robot_systest(String tagName,
+ String testName,
+ String osmHostname,
+ String prometheusHostname,
+ Integer prometheusPort=null,
+ String envfile=null,
+ String portmappingfile=null,
+ String kubeconfig=null,
+ String clouds=null,
+ String hostfile=null,
+ String jujuPassword=null,
+ String osmRSAfile=null,
+ String passThreshold='0.0',
+ String unstableThreshold='0.0') {
+ tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
+ String environmentFile = ''
+ if (envfile) {
+ environmentFile = envfile
+ } else {
+ sh(script: "touch ${tempdir}/env")
+ environmentFile = "${tempdir}/env"
+ }
+ PROMETHEUS_PORT_VAR = ''
+ if (prometheusPort != null) {
+ PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT=${prometheusPort}"
+ }
+ hostfilemount = ''
+ if (hostfile) {
+ hostfilemount = "-v ${hostfile}:/etc/hosts"
+ }
+
+ JUJU_PASSWORD_VAR = ''
+ if (jujuPassword != null) {
+ JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD=${jujuPassword}"
+ }
+
+ try {
+ sh("""docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} \
+ ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${environmentFile} \
+ -v ${clouds}:/etc/openstack/clouds.yaml \
+ -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports \
+ -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} \
+ -c -t ${testName}""")
+ } finally {
+ sh("cp ${tempdir}/*.xml .")
+ sh("cp ${tempdir}/*.html .")
+ outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
+ println("Present Directory is : ${outputDirectory}")
+ step([
+ $class : 'RobotPublisher',
+ outputPath : "${outputDirectory}",
+ outputFileName : '*.xml',
+ disableArchiveOutput : false,
+ reportFileName : 'report.html',
+ logFileName : 'log.html',
+ passThreshold : passThreshold,
+ unstableThreshold: unstableThreshold,
+ otherFiles : '*.png',
+ ])
+ }
}
-def run_systest(stackName,tagName,testName) {
- tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
- sh "docker run --network net${stackName} -v ${tempdir}:/usr/share/osm-devops/systest/reports osm/osmclient:${tagName} make -C /usr/share/osm-devops/systest ${testName}"
- sh "cp ${tempdir}/* ."
- junit '*.xml'
+void archive_logs(Map remote) {
+
+ sshCommand remote: remote, command: '''mkdir -p logs'''
+ if (useCharmedInstaller) {
+ sshCommand remote: remote, command: '''
+ for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
+ logfile=`echo $container | cut -d- -f1`
+ echo "Extracting log for $logfile"
+ kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
+ done
+ '''
+ } else {
+ sshCommand remote: remote, command: '''
+ for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
+ echo "Extracting log for $deployment"
+ kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 \
+ > logs/$deployment.log
+ done
+ '''
+ sshCommand remote: remote, command: '''
+ for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
+ echo "Extracting log for $statefulset"
+ kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 \
+ > logs/$statefulset.log
+ done
+ '''
+ }
+
+ sh 'rm -rf logs'
+ sshCommand remote: remote, command: '''ls -al logs'''
+ sshGet remote: remote, from: 'logs', into: '.', override: true
+ sh 'cp logs/* .'
+ archiveArtifacts artifacts: '*.log'
+}
+
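+// Parses the table output of an OpenStack CLI command and returns the value of the row whose
+// first column matches 'key'. Illustrative sketch of the expected input (values hypothetical):
+//   | id        | 4a6f...                |
+//   | addresses | osm-ext=172.21.248.xxx |
+// e.g. get_value('id', output) would return '4a6f...'.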
+String get_value(String key, String output) {
+ for (String line : output.split( '\n' )) {
+ data = line.split( '\\|' )
+ if (data.length > 1) {
+ if ( data[1].trim() == key ) {
+ return data[2].trim()
+ }
+ }
+ }
}
+////////////////////////////////////////////////////////////////////////////////////////
+// Main Script
+////////////////////////////////////////////////////////////////////////////////////////
node("${params.NODE}") {
+ INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
+ INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
+ APT_PROXY = 'http://172.21.1.1:3142'
+ SSH_KEY = '~/hive/cicd_rsa'
+ ARCHIVE_LOGS_FLAG = false
sh 'env'
- tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./,"")
+ tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')
- stage("Checkout") {
+ stage('Checkout') {
checkout scm
}
- ci_helper = load "jenkins/ci-pipelines/ci_helper.groovy"
+ ci_helper = load 'jenkins/ci-pipelines/ci_helper.groovy'
- def upstream_main_job = params.UPSTREAM_SUFFIX
+ def upstreamMainJob = params.UPSTREAM_SUFFIX
// upstream jobs always use merged artifacts
- upstream_main_job += '-merge'
- container_name_prefix = "osm-${tag_or_branch}"
- container_name = "${container_name_prefix}"
- if ( JOB_NAME.contains('merge') ) {
- container_name += "-merge"
- }
- container_name += "-${BUILD_NUMBER}"
-
- // Copy the artifacts from the upstream jobs
- stage("Copy Artifacts") {
- // cleanup any previous repo
- sh 'rm -rf repo'
- dir("repo") {
- // grab all stable upstream builds based on the
-
- dir("${RELEASE}") {
- def list = ["RO", "openvim", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL"]
- for (component in list) {
- step ([$class: 'CopyArtifact',
- projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
-
- // grab the build name/number
- //options = get_env_from_build('build.env')
- build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER')
-
- // grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge)
- ci_helper.get_archive(params.ARTIFACTORY_SERVER,component,GERRIT_BRANCH, "${component}${upstream_main_job} :: ${GERRIT_BRANCH}", build_num)
-
- // cleanup any prevously defined dists
- sh "rm -rf dists"
- }
-
- // check if an upstream artifact based on specific build number has been requested
- // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed
- // a successful build yet). The upstream job is calling this downstream job (with the its build artifiact)
- if ( params.UPSTREAM_JOB_NAME ) {
- step ([$class: 'CopyArtifact',
- projectName: "${params.UPSTREAM_JOB_NAME}",
- selector: [$class: 'SpecificBuildSelector', buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
- ])
+ upstreamMainJob += '-merge'
+ containerNamePrefix = "osm-${tag_or_branch}"
+ containerName = "${containerNamePrefix}"
- //options = get_env_from_build('build.env')
- // grab the build name/number
- //build_num = sh(returnStdout:true, script: "cat build.env | awk -F= '/BUILD_NUMBER/{print \$2}'").trim()
- build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER')
- component = ci_helper.get_mdg_from_project(ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
+ keep_artifacts = false
+ if ( JOB_NAME.contains('merge') ) {
+ containerName += '-merge'
- // the upstream job name contains suffix with the project. Need this stripped off
- def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
+ // On a merge job, we keep artifacts on smoke success
+ keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
+ }
+ containerName += "-${BUILD_NUMBER}"
+
+ server_id = null
+ http_server_name = null
+ devopstempdir = null
+ useCharmedInstaller = params.INSTALLER.equalsIgnoreCase('charmed')
+
+ try {
+ builtModules = [:]
+///////////////////////////////////////////////////////////////////////////////////////
+// Fetch stage 2 .deb artifacts
+///////////////////////////////////////////////////////////////////////////////////////
+ stage('Copy Artifacts') {
+ // cleanup any previous repo
+ sh "tree -fD repo || exit 0"
+ sh 'rm -rvf repo'
+ sh "tree -fD repo && lsof repo || exit 0"
+ dir('repo') {
+ packageList = []
+ dir("${RELEASE}") {
+ RELEASE_DIR = sh(returnStdout:true, script: 'pwd').trim()
+
+                    // Check if an upstream artifact based on a specific build number has been requested.
+                    // This is the case of a merge build whose upstream merge build is not yet complete
+                    // (it is not yet deemed a successful build). The upstream job calls this downstream
+                    // job with its build artifact.
+ def upstreamComponent = ''
+ if (params.UPSTREAM_JOB_NAME) {
+ println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
+ lock('Artifactory') {
+ step ([$class: 'CopyArtifact',
+ projectName: "${params.UPSTREAM_JOB_NAME}",
+ selector: [$class: 'SpecificBuildSelector',
+ buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
+ ])
+
+ upstreamComponent = ci_helper.get_mdg_from_project(
+ ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
+ def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
+ dir("$upstreamComponent") {
+                            // the upstream job name contains a suffix with the project name; strip it off
+ project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
+ packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
+ upstreamComponent,
+ GERRIT_BRANCH,
+ "${project_without_branch} :: ${GERRIT_BRANCH}",
+ buildNumber)
+
+ packageList.addAll(packages)
+ println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
+ }
+ } // lock artifactory
+ }
- // Remove the previous artifact for this component. Use the new upstream artifact
- sh "rm -rf pool/${component}"
+ parallelSteps = [:]
+ list = ['RO', 'osmclient', 'IM', 'devops', 'MON', 'N2VC', 'NBI',
+ 'common', 'LCM', 'POL', 'NG-UI', 'NG-SA', 'PLA', 'tests']
+ if (upstreamComponent.length() > 0) {
+ println("Skipping upstream fetch of ${upstreamComponent}")
+ list.remove(upstreamComponent)
+ }
+ for (buildStep in list) {
+ def component = buildStep
+ parallelSteps[component] = {
+ dir("$component") {
+ println("Fetching artifact for ${component}")
+ step([$class: 'CopyArtifact',
+ projectName: "${component}${upstreamMainJob}/${GERRIT_BRANCH}"])
+
+                        // grab the archives from the stage_2 builds
+                        // (i.e. the artifacts stored as a result of a merge)
+ packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
+ component,
+ GERRIT_BRANCH,
+ "${component}${upstreamMainJob} :: ${GERRIT_BRANCH}",
+ ci_helper.get_env_value('build.env', 'BUILD_NUMBER'))
+ packageList.addAll(packages)
+ println("Fetched ${component}: ${packages}")
+ sh 'rm -rf dists'
+ }
+ }
+ }
+ lock('Artifactory') {
+ parallel parallelSteps
+ }
- ci_helper.get_archive(params.ARTIFACTORY_SERVER,component,GERRIT_BRANCH, "${project_without_branch} :: ${GERRIT_BRANCH}", build_num)
+///////////////////////////////////////////////////////////////////////////////////////
+// Create Devops APT repository
+///////////////////////////////////////////////////////////////////////////////////////
+ sh 'mkdir -p pool'
+ for (component in [ 'devops', 'IM', 'osmclient' ]) {
+ sh "ls -al ${component}/pool/"
+ sh "cp -r ${component}/pool/* pool/"
+ sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
+ sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
+ sh("""apt-ftparchive packages pool/${component} \
+ > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages""")
+ sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
+ }
- sh "rm -rf dists"
- }
-
- // sign all the components
- for (component in list) {
- sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
- }
+ // create and sign the release file
+ sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
+ sh("""gpg --yes -abs -u ${GPG_KEY_NAME} \
+ -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release""")
- // now create the distro
- for (component in list) {
- sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
- sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
- sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
+ // copy the public key into the release folder
+ // this pulls the key from the home dir of the current user (jenkins)
+ sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
+ sh "cp ~/${REPO_KEY_NAME} ."
}
- // create and sign the release file
- sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
- sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"
-
- // copy the public key into the release folder
- // this pulls the key from the home dir of the current user (jenkins)
- sh "cp ~/${REPO_KEY_NAME} ."
-
- // merge the change logs
- sh """
- rm -f changelog/changelog-osm.html
- [ ! -d changelog ] || for mdgchange in \$(ls changelog); do cat changelog/\$mdgchange >> changelog/changelog-osm.html; done
- """
- RELEASE_DIR = sh(returnStdout:true, script: 'pwd').trim()
+ // start an apache server to serve up the packages
+ http_server_name = "${containerName}-apache"
+
+ pwd = sh(returnStdout:true, script: 'pwd').trim()
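+            // Ask the kernel for a free TCP port (bind to port 0 and read back the assigned port),
+            // presumably so that concurrent builds on the node do not collide on a fixed port.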
+ repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
+ 'print(s.getsockname()[1]); s.close()\');',
+ returnStdout: true).trim()
+ internal_docker_http_server_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
+ NODE_IP_ADDRESS = sh(returnStdout: true, script:
+ "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
+ ci_helper.check_status_http_server(NODE_IP_ADDRESS, repo_port)
}
- // start an apache server to serve up the images
- http_server_name = "${container_name}-apache"
-
- pwd = sh(returnStdout:true, script: 'pwd').trim()
- repo_base_url = ci_helper.start_http_server(pwd,http_server_name)
- }
-
- // now pull the devops package and install in temporary location
- tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
- osm_devops_dpkg = sh(returnStdout: true, script: "find . -name osm-devops*.deb").trim()
- sh "dpkg -x ${osm_devops_dpkg} ${tempdir}"
- OSM_DEVOPS="${tempdir}/usr/share/osm-devops"
- }
- dir(OSM_DEVOPS) {
- error = null
- if ( params.DO_BUILD ) {
- stage("Build") {
- sh "make -C docker clean"
- sh "make -j4 -C docker CMD_DOCKER_ARGS= TAG=${container_name} RELEASE=${params.RELEASE} REPOSITORY_BASE=${repo_base_url} REPOSITORY_KEY=${params.REPO_KEY_NAME} REPOSITORY=${params.REPO_DISTRO}"
+ sh "tree -fD repo"
+
+            // Unpack the devops package into a temporary location so that the upstream
+            // version is used for the docker build step if devops was part of the patch
+ osm_devops_dpkg = sh(returnStdout: true, script: 'find ./repo/release/pool/ -name osm-devops*.deb').trim()
+ devopstempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
+ println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
+ sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
+ OSM_DEVOPS = "${devopstempdir}/usr/share/osm-devops"
+ // Convert URLs from stage 2 packages to arguments that can be passed to docker build
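+            // For example (hypothetical name), a remote path ending in 'osm-lcm_13.0.0_amd64.deb'
+            // yields builtModules['osm-lcm'] = <its URL>, which is later passed to docker build
+            // as '--build-arg OSM_LCM_URL=<its URL>'.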
+ for (remotePath in packageList) {
+ packageName = remotePath[remotePath.lastIndexOf('/') + 1 .. -1]
+ packageName = packageName[0 .. packageName.indexOf('_') - 1]
+ builtModules[packageName] = remotePath
}
}
- try {
- if ( params.DO_INSTALL ) {
- stage("Install") {
+///////////////////////////////////////////////////////////////////////////////////////
+// Build docker containers
+///////////////////////////////////////////////////////////////////////////////////////
+ dir(OSM_DEVOPS) {
+ Map remote = [:]
+ error = null
+ if ( params.DO_BUILD ) {
+ withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
+ usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
+ sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
+ }
+ datetime = sh(returnStdout: true, script: 'date +%Y-%m-%d:%H:%M:%S').trim()
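+            // A fresh CACHE_DATE value on every run presumably invalidates the docker layer
+            // cache from that build argument onwards, so newly published packages are re-fetched.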
+ moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}"
+ for (packageName in builtModules.keySet()) {
+ envName = packageName.replaceAll('-', '_').toUpperCase() + '_URL'
+ moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
+ }
+ dir('docker') {
+ stage('Build') {
+ containerList = sh(returnStdout: true, script:
+ "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
+ containerList = Arrays.asList(containerList.split('\n'))
+ print(containerList)
+ parallelSteps = [:]
+ for (buildStep in containerList) {
+ def module = buildStep
+ def moduleName = buildStep.toLowerCase()
+ def moduleTag = containerName
+ parallelSteps[module] = {
+ dir("$module") {
+ sh("""docker build --build-arg APT_PROXY=${APT_PROXY} \
+ -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} .""")
+ println("Tagging ${moduleName}:${moduleTag}")
+ sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
+ ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
+ sh("""docker push \
+ ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
+ }
+ }
+ }
+ parallel parallelSteps
+ }
+ }
+ } // if (params.DO_BUILD)
+
+ if (params.DO_INSTALL) {
+///////////////////////////////////////////////////////////////////////////////////////
+// Launch VM
+///////////////////////////////////////////////////////////////////////////////////////
+ stage('Spawn Remote VM') {
+ println('Launching new VM')
+ output = sh(returnStdout: true, script: """#!/bin/sh -e
+ for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+ openstack server create --flavor osm.sanity \
+ --image ${OPENSTACK_BASE_IMAGE} \
+ --key-name CICD \
+ --property build_url="${BUILD_URL}" \
+ --nic net-id=osm-ext \
+ ${containerName}
+ """).trim()
+
+ server_id = get_value('id', output)
+
+ if (server_id == null) {
+ println('VM launch output: ')
+ println(output)
+ throw new Exception('VM Launch failed')
+ }
+ println("Target VM is ${server_id}, waiting for IP address to be assigned")
- //will by default always delete containers on complete
- //sh "jenkins/system/delete_old_containers.sh ${container_name_prefix}"
+ IP_ADDRESS = ''
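+                    // 'openstack server show' reports addresses as '<network>=<ip>' (the osm-ext
+                    // network here); poll until the field is populated, then keep only the IP.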
+ while (IP_ADDRESS == '') {
+ output = sh(returnStdout: true, script: """#!/bin/sh -e
+ for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+ openstack server show ${server_id}
+ """).trim()
+ IP_ADDRESS = get_value('addresses', output)
+ }
+ IP_ADDRESS = IP_ADDRESS.split('=')[1]
+ println("Waiting for VM at ${IP_ADDRESS} to be reachable")
+
+ alive = false
+ timeout(time: 1, unit: 'MINUTES') {
+ while (!alive) {
+ output = sh(
+ returnStatus: true,
+ script: "ssh -T -i ${SSH_KEY} " +
+ "-o StrictHostKeyChecking=no " +
+ "-o UserKnownHostsFile=/dev/null " +
+ "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
+ alive = (output == 0)
+ }
+ }
+ println('VM is ready and accepting ssh connections')
+ } // stage("Spawn Remote VM")
+
+///////////////////////////////////////////////////////////////////////////////////////
+// Checks before installation
+///////////////////////////////////////////////////////////////////////////////////////
+ stage('Checks before installation') {
+ remote = [
+ name: containerName,
+ host: IP_ADDRESS,
+ user: 'ubuntu',
+ identityFile: SSH_KEY,
+ allowAnyHosts: true,
+ logLevel: 'INFO',
+ pty: true
+ ]
+
+ // Ensure the VM is ready
+ sshCommand remote: remote, command: 'cloud-init status --wait'
+ // Force time sync to avoid clock drift and invalid certificates
+ sshCommand remote: remote, command: 'sudo apt-get update'
+ sshCommand remote: remote, command: 'sudo apt-get install -y chrony'
+ sshCommand remote: remote, command: 'sudo service chrony stop'
+ sshCommand remote: remote, command: 'sudo chronyd -vq'
+ sshCommand remote: remote, command: 'sudo service chrony start'
+
+ } // stage("Checks before installation")
+///////////////////////////////////////////////////////////////////////////////////////
+// Installation
+///////////////////////////////////////////////////////////////////////////////////////
+ stage('Install') {
commit_id = ''
repo_distro = ''
repo_key_name = ''
release = ''
- if ( params.COMMIT_ID )
- {
+ if (params.COMMIT_ID) {
commit_id = "-b ${params.COMMIT_ID}"
}
-
- if ( params.REPO_DISTRO )
- {
+ if (params.REPO_DISTRO) {
repo_distro = "-r ${params.REPO_DISTRO}"
}
-
- if ( params.REPO_KEY_NAME )
- {
+ if (params.REPO_KEY_NAME) {
repo_key_name = "-k ${params.REPO_KEY_NAME}"
}
-
- if ( params.RELEASE )
- {
+ if (params.RELEASE) {
release = "-R ${params.RELEASE}"
}
-
- sh """
- export PATH=$PATH:/snap/bin
- installers/full_install_osm.sh -y -s ${container_name} --test --nolxd --nodocker --nojuju --nohostports --nohostclient \
- --nodockerbuild -t ${container_name} \
- -w /tmp/osm \
- ${commit_id} \
- ${repo_distro} \
- ${repo_base_url} \
- ${repo_key_name} \
- ${release} \
- ${params.BUILD_FROM_SOURCE}
- """
- }
- }
-
- stage_archive = false
- if ( params.DO_SMOKE ) {
- stage("OSM Health") {
- sh "installers/osm_health.sh -s ${container_name}"
- }
- stage("Smoke") {
- run_systest(container_name,container_name,"smoke")
- // archive smoke success until stage_4 is ready
-
- if ( ! currentBuild.result.equals('UNSTABLE') ) {
- stage_archive = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
+ if (params.REPOSITORY_BASE) {
+ repo_base_url = "-u ${params.REPOSITORY_BASE}"
+ } else {
+ repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
}
- }
- }
- if ( params.DO_STAGE_4 ) {
- stage("stage_4") {
- def downstream_params = [
- string(name: 'CONTAINER_NAME', value: container_name),
- string(name: 'NODE', value: NODE_NAME.split()[0]),
+ remote = [
+ name: containerName,
+ host: IP_ADDRESS,
+ user: 'ubuntu',
+ identityFile: SSH_KEY,
+ allowAnyHosts: true,
+ logLevel: 'INFO',
+ pty: true
]
- stage_4_result = build job: "${params.DOWNSTREAM_STAGE_NAME}/${GERRIT_BRANCH}", parameters: downstream_params, propagate: false
- currentBuild.result = stage_4_result.result
- if ( stage_4_result.getResult().equals('SUCCESS') ) {
- stage_archive = true;
+ sshCommand remote: remote, command: '''
+ wget https://osm-download.etsi.org/ftp/osm-13.0-thirteen/install_osm.sh
+ chmod +x ./install_osm.sh
+ sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
+ '''
+
+ Map gitlabCredentialsMap = [$class: 'UsernamePasswordMultiBinding',
+ credentialsId: 'gitlab-registry',
+ usernameVariable: 'USERNAME',
+ passwordVariable: 'PASSWORD']
+ if (useCharmedInstaller) {
+ // Use local proxy for docker hub
+ sshCommand remote: remote, command: '''
+ sudo snap install microk8s --classic --channel=1.19/stable
+ sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" \
+ /var/snap/microk8s/current/args/containerd-template.toml
+ sudo systemctl restart snap.microk8s.daemon-containerd.service
+ sudo snap alias microk8s.kubectl kubectl
+ '''
+
+ withCredentials([gitlabCredentialsMap]) {
+ sshCommand remote: remote, command: """
+ ./install_osm.sh -y \
+ ${repo_base_url} \
+ ${repo_key_name} \
+ ${release} -r unstable \
+ --charmed \
+ --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
+ --tag ${containerName}
+ """
+ }
+ prometheusHostname = "prometheus.${IP_ADDRESS}.nip.io"
+ prometheusPort = 80
+ osmHostname = "nbi.${IP_ADDRESS}.nip.io:443"
+ } else {
+ // Run -k8s installer here specifying internal docker registry and docker proxy
+ withCredentials([gitlabCredentialsMap]) {
+ sshCommand remote: remote, command: """
+ ./install_osm.sh -y \
+ ${repo_base_url} \
+ ${repo_key_name} \
+ ${release} -r unstable \
+ -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
+ -p ${INTERNAL_DOCKER_PROXY} \
+ -t ${containerName}
+ """
+ }
+ prometheusHostname = IP_ADDRESS
+ prometheusPort = 9091
+ osmHostname = IP_ADDRESS
+ }
+ } // stage("Install")
+///////////////////////////////////////////////////////////////////////////////////////
+// Health check of installed OSM in remote vm
+///////////////////////////////////////////////////////////////////////////////////////
+ stage('OSM Health') {
+ // if this point is reached, logs should be archived
+ ARCHIVE_LOGS_FLAG = true
+ stackName = 'osm'
+ sshCommand remote: remote, command: """
+ /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
+ """
+ } // stage("OSM Health")
+ } // if ( params.DO_INSTALL )
+
+///////////////////////////////////////////////////////////////////////////////////////
+// Execute Robot tests
+///////////////////////////////////////////////////////////////////////////////////////
+ stage_archive = false
+ if ( params.DO_ROBOT ) {
+ try {
+ stage('System Integration Test') {
+ if (useCharmedInstaller) {
+ tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
+ sh(script: "touch ${tempdir}/hosts")
+ hostfile = "${tempdir}/hosts"
+ sh """cat << EOF > ${hostfile}
+127.0.0.1 localhost
+${remote.host} prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
+EOF"""
+ } else {
+ hostfile = null
+ }
+
+ jujuPassword = sshCommand remote: remote, command: '''
+ echo `juju gui 2>&1 | grep password | cut -d: -f2`
+ '''
+
+ run_robot_systest(
+ containerName,
+ params.ROBOT_TAG_NAME,
+ osmHostname,
+ prometheusHostname,
+ prometheusPort,
+ params.ROBOT_VIM,
+ params.ROBOT_PORT_MAPPING_VIM,
+ params.KUBECONFIG,
+ params.CLOUDS,
+ hostfile,
+ jujuPassword,
+ SSH_KEY,
+ params.ROBOT_PASS_THRESHOLD,
+ params.ROBOT_UNSTABLE_THRESHOLD
+ )
+ } // stage("System Integration Test")
+ } finally {
+ stage('After System Integration test') {
+ if (currentBuild.result != 'FAILURE') {
+ stage_archive = keep_artifacts
+ } else {
+                        println('System test failed, throwing error')
+                        error = new Exception('System test failed')
+ currentBuild.result = 'FAILURE'
+ throw error
+ }
}
}
- }
-
- // override to save the artifacts
- if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) {
- stage("Archive") {
- sh "echo ${container_name} > build_version.txt"
- archiveArtifacts artifacts: "build_version.txt", fingerprint: true
+ } // if ( params.DO_ROBOT )
+ if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive) {
+ stage('Archive') {
// Archive the tested repo
dir("${RELEASE_DIR}") {
- ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested')
+ ci_helper.archive(params.ARTIFACTORY_SERVER, RELEASE, GERRIT_BRANCH, 'tested')
}
+ if (params.DO_DOCKERPUSH) {
+ stage('Publish to Dockerhub') {
+ parallelSteps = [:]
+ for (buildStep in containerList) {
+ def module = buildStep
+ def moduleName = buildStep.toLowerCase()
+ def dockerTag = params.DOCKER_TAG
+ def moduleTag = containerName
+
+ parallelSteps[module] = {
+ dir("$module") {
+ sh("docker pull ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}")
+ sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
+ opensourcemano/${moduleName}:${dockerTag}""")
+ sh "docker push opensourcemano/${moduleName}:${dockerTag}"
+ }
+ }
+ }
+ parallel parallelSteps
+ }
+ stage('Snap promotion') {
+ withCredentials([string(credentialsId: 'Snapstore', variable: 'SNAPCRAFT_STORE_CREDENTIALS')]) {
+ snaps = ['osmclient']
+ for (snap in snaps) {
+ channel = 'latest/'
+ if (BRANCH_NAME.startsWith('v')) {
+ channel = BRANCH_NAME.substring(1) + '/'
+ } else if (BRANCH_NAME != 'master') {
+ channel += '/' + BRANCH_NAME.replaceAll('/', '-')
+ }
+ track = channel + 'edge\\*'
+ edge_rev = sh(returnStdout: true,
+ script: "snapcraft revisions $snap | " +
+ "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
+ track = channel + 'beta\\*'
+ beta_rev = sh(returnStdout: true,
+ script: "snapcraft revisions $snap | " +
+ "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
+
+ print "Edge: $edge_rev, Beta: $beta_rev"
+
+ if (edge_rev != beta_rev) {
+ print "Promoting $edge_rev to beta in place of $beta_rev"
+ beta_track = channel + 'beta'
+ sh "snapcraft release $snap $edge_rev $beta_track"
+ }
+ }
+ }
+ } // stage('Snap promotion')
+ stage('Charm promotion') {
+ charms = [
+ 'osm', // bundle
+ 'osm-ha', // bundle
+ 'osm-grafana',
+ 'osm-mariadb',
+ 'mongodb-exporter-k8s',
+ 'mysqld-exporter-k8s',
+ 'osm-lcm',
+ 'osm-mon',
+ 'osm-nbi',
+ 'osm-ng-ui',
+ 'osm-pol',
+ 'osm-ro',
+ 'osm-prometheus',
+ 'osm-vca-integrator',
+ ]
+ for (charm in charms) {
+
+ channel = 'latest'
+ if (BRANCH_NAME.startsWith('v')) {
+ channel = BRANCH_NAME.substring(1)
+ } else if (BRANCH_NAME != 'master') {
+ channel += '/' + BRANCH_NAME.replaceAll('/', '-')
+ }
+
+ withCredentials([string(credentialsId: 'Charmstore', variable: 'CHARMCRAFT_AUTH')]) {
+ sh "charmcraft status $charm --format json > ${charm}.json"
+ isCharm = sh(returnStdout: true, script: "grep architecture ${charm}.json | wc -l").trim() as int
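+                    // Bundles (osm, osm-ha) presumably have no 'architecture' entries in the
+                    // charmcraft status output, so isCharm is 0/false for them and the simpler
+                    // jq filters in the else branch are used.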
+ resourceArgument = ""
+ if (isCharm) {
+                        jqScriptEdge = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") " +
+                            "| .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") " +
+                            "| .releases[] | select(.channel==\"$channel/edge/merged\") | .version' | head -1"
+                        jqScriptBeta = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") " +
+                            "| .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") " +
+                            "| .releases[] | select(.channel==\"$channel/beta\") | .version' | head -1"
+ index=0
+ while (index < 5) {
+                            resourceNameScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") " +
+                                "| .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") " +
+                                "| .releases[] | select(.channel==\"$channel/edge/merged\") | .resources[$index].name' | head -1"
+                            resourceRevsScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") " +
+                                "| .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") " +
+                                "| .releases[] | select(.channel==\"$channel/edge/merged\") | .resources[$index].revision' | head -1"
+ resourceName = sh(returnStdout: true, script: resourceNameScript).trim()
+ resourceRevs = sh(returnStdout: true, script: resourceRevsScript).trim()
+ if (resourceName != "null") {
+ resourceArgument += " --resource ${resourceName}:${resourceRevs}"
+ } else {
+ break
+ }
+ index ++
+ }
+ } else {
+                        jqScriptEdge = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") " +
+                            "| .mappings[].releases[] | select(.channel==\"$channel/edge/merged\") | .version' | head -1"
+                        jqScriptBeta = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") " +
+                            "| .mappings[].releases[] | select(.channel==\"$channel/beta\") | .version' | head -1"
+ }
+ // edge/merged is used in place of /edge as 10.1.0 LTS uses latest/edge
+ edge_rev = sh(returnStdout: true, script: jqScriptEdge).trim()
+ beta_rev = sh(returnStdout: true, script: jqScriptBeta).trim()
+ try { edge_rev = edge_rev as int } catch (NumberFormatException nfe) {edge_rev = 0}
+ try { beta_rev = beta_rev as int } catch (NumberFormatException nfe) {beta_rev = 0}
+
+ print "Edge: $edge_rev, Beta: $beta_rev $resourceArgument"
+
+ if (edge_rev > beta_rev) {
+ print "Promoting $edge_rev to beta in place of $beta_rev"
+ beta_track = channel + 'beta'
+ sh "charmcraft release ${charm} --revision=${edge_rev} ${resourceArgument} --channel=${channel}/beta"
+ }
+
+ }
+ }
+ } // stage('Charm promotion')
+ } // if (params.DO_DOCKERPUSH)
+ } // stage('Archive')
+ } // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
+ } // dir(OSM_DEVOPS)
+ } finally {
+ stage('Archive Container Logs') {
+ if ( ARCHIVE_LOGS_FLAG ) {
+ try {
+ // Archive logs
+ remote = [
+ name: containerName,
+ host: IP_ADDRESS,
+ user: 'ubuntu',
+ identityFile: SSH_KEY,
+ allowAnyHosts: true,
+ logLevel: 'INFO',
+ pty: true
+ ]
+ println('Archiving container logs')
+ archive_logs(remote)
+ } catch (Exception e) {
+ println('Error fetching logs: '+ e.getMessage())
}
- }
- }
- catch(caughtError) {
- println("Caught error!")
- error = caughtError
- currentBuild.result = 'FAILURE'
+ } // end if ( ARCHIVE_LOGS_FLAG )
}
- finally {
- sh "docker stop ${http_server_name}"
- sh "docker rm ${http_server_name}"
-
- if ( params.DO_INSTALL ) {
- if (error) {
- if ( !params.SAVE_CONTAINER_ON_FAIL ) {
- uninstall_osm container_name
- }
- throw error
+ stage('Cleanup') {
+ if ( params.DO_INSTALL && server_id != null) {
+ delete_vm = true
+ if (error && params.SAVE_CONTAINER_ON_FAIL ) {
+ delete_vm = false
+ }
+ if (!error && params.SAVE_CONTAINER_ON_PASS ) {
+ delete_vm = false
}
- else {
- if ( !params.SAVE_CONTAINER_ON_PASS ) {
- uninstall_osm container_name
+
+            if (delete_vm) {
+                println("Deleting VM: $server_id")
+                sh """#!/bin/sh -e
+                    for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+                    openstack server delete ${server_id}
+                """
+            } else {
+                println("Saved VM $server_id in ETSI VIM")
+            }
}
+ if ( http_server_name != null ) {
+ sh "docker stop ${http_server_name} || true"
+ sh "docker rm ${http_server_name} || true"
+ }
+
+ if ( devopstempdir != null ) {
+ sh "rm -rf ${devopstempdir}"
+ }
}
}
}