+///////////////////////////////////////////////////////////////////////////////////////
+// Build docker containers
+///////////////////////////////////////////////////////////////////////////////////////
+ dir(OSM_DEVOPS) {
+ def remote = [:]
+ error = null
+ if ( params.DO_BUILD ) {
+ // Log in to the internal registry so freshly built images can be pushed.
+ // The secret is expanded by the shell from the environment (withCredentials
+ // binds USERNAME/PASSWORD as env vars), not by Groovy string interpolation,
+ // and --password-stdin keeps the password out of the process argument list.
+ withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
+ usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
+ sh 'echo "$PASSWORD" | docker login ' + "${INTERNAL_DOCKER_REGISTRY}" + ' -u "$USERNAME" --password-stdin'
+ }
+ // CACHE_DATE changes every build, deliberately busting the docker layer
+ // cache for any Dockerfile layer that consumes it.
+ datetime = sh(returnStdout: true, script: "date +%Y-%m-%d:%H:%M:%S").trim()
+ moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}"
+ // Expose each previously built package as a <PACKAGE>_URL build-arg,
+ // e.g. "osm-common" -> "--build-arg OSM_COMMON_URL=<artifact url>".
+ for (packageName in builtModules.keySet()) {
+ envName=packageName.replaceAll("-","_").toUpperCase()+"_URL"
+ moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
+ }
+ dir ("docker") {
+ stage("Build") {
+ // Discover every directory under docker/ that contains a Dockerfile;
+ // sed strips the leading "./" so entries are bare module dir names.
+ containerList = sh(returnStdout: true, script:
+ "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
+ containerList=Arrays.asList(containerList.split("\n"))
+ print(containerList)
+ parallelSteps = [:]
+ for (buildStep in containerList) {
+ // `def` creates per-iteration locals so each closure below captures
+ // its own module name/tag rather than the loop's last value.
+ def module = buildStep
+ def moduleName = buildStep.toLowerCase()
+ def moduleTag = container_name
+ parallelSteps[module] = {
+ dir("$module") {
+ // Build, then tag and push the image into the internal registry.
+ // NOTE(review): INTERNAL_DOCKER_REGISTRY is concatenated with no
+ // separator — presumably it already ends in "/"; confirm upstream.
+ sh "docker build --build-arg APT_PROXY=${APT_PROXY} -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} ."
+ println("Tagging ${moduleName}:${moduleTag}")
+ sh "docker tag opensourcemano/${moduleName}:${moduleTag} ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
+ sh "docker push ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
+ }
+ }
+ }
+ // Run all module builds concurrently; fails the stage if any step fails.
+ parallel parallelSteps
+ }
+ }
+ } // if ( params.DO_BUILD )
+
+ if ( params.DO_INSTALL ) {
+///////////////////////////////////////////////////////////////////////////////////////
+// Launch VM
+///////////////////////////////////////////////////////////////////////////////////////
+ stage("Spawn Remote VM") {
+ println("Launching new VM")
+ // Source the OS_* OpenStack credentials from the hive config (OS_CLOUD is
+ // excluded) and create a throwaway test VM named after this build.
+ output=sh(returnStdout: true, script: """#!/bin/sh -e
+ for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+ openstack server create --flavor osm.sanity \
+ --image ${OPENSTACK_BASE_IMAGE} \
+ --key-name CICD \
+ --property build_url="${BUILD_URL}" \
+ --nic net-id=osm-ext \
+ ${container_name}
+ """).trim()
+
+ // get_value is a helper defined elsewhere in this pipeline; presumably it
+ // extracts a field from the openstack table output — verify at definition.
+ server_id = get_value('id', output)
+
+ if (server_id == null) {
+ println("VM launch output: ")
+ println(output)
+ throw new Exception("VM Launch failed")
+ }
+ println("Target VM is ${server_id}, waiting for IP address to be assigned")
+
+ IP_ADDRESS = ""
+
+ // Poll `openstack server show` until an address is assigned.
+ // NOTE(review): no timeout or retry cap — hangs forever if the VM never
+ // gets an address; relies on the job-level timeout to abort.
+ while (IP_ADDRESS == "") {
+ output=sh(returnStdout: true, script: """#!/bin/sh -e
+ for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+ openstack server show ${server_id}
+ """).trim()
+ IP_ADDRESS = get_value('addresses', output)
+ }
+ // addresses field looks like "<network>=<ip>"; keep only the ip part.
+ IP_ADDRESS = IP_ADDRESS.split('=')[1]
+ println("Waiting for VM at ${IP_ADDRESS} to be reachable")
+
+ // Probe TCP/22 once a second until netcat reports success ("succeeded").
+ // NOTE(review): also unbounded — same timeout caveat as above.
+ alive = false
+ while (! alive) {
+ output=sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim()
+ println("output is [$output]")
+ alive = output.contains("succeeded")
+ }
+ println("VM is ready and accepting ssh connections")
+ } // stage("Spawn Remote VM")
+
+///////////////////////////////////////////////////////////////////////////////////////
+// Installation
+///////////////////////////////////////////////////////////////////////////////////////
+ stage("Install") {
+ // Translate optional job parameters into install_osm.sh command-line flags;
+ // empty string means "flag not passed".
+ commit_id = ''
+ repo_distro = ''
+ repo_key_name = ''
+ release = ''
+
+ if ( params.COMMIT_ID )
+ {
+ commit_id = "-b ${params.COMMIT_ID}"
+ }
+
+ if ( params.REPO_DISTRO )
+ {
+ repo_distro = "-r ${params.REPO_DISTRO}"
+ }
+
+ if ( params.REPO_KEY_NAME )
+ {
+ repo_key_name = "-k ${params.REPO_KEY_NAME}"
+ }
+
+ if ( params.RELEASE )
+ {
+ release = "-R ${params.RELEASE}"
+ }
+
+ // Default to the repo served by this pipeline's own node when no external
+ // repository base was given (NODE_IP_ADDRESS/repo_port are set elsewhere).
+ if ( params.REPOSITORY_BASE )
+ {
+ repo_base_url = "-u ${params.REPOSITORY_BASE}"
+ }
+ else
+ {
+ repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
+ }
+
+ // Connection settings for the ssh-steps plugin, targeting the VM spawned
+ // in the previous stage.
+ remote.name = container_name
+ remote.host = IP_ADDRESS
+ remote.user = 'ubuntu'
+ remote.identityFile = SSH_KEY
+ remote.allowAnyHosts = true
+ remote.logLevel = 'INFO'
+ remote.pty = true
+
+ // Force time sync to avoid clock drift and invalid certificates
+ sshCommand remote: remote, command: """
+ sudo apt update
+ sudo apt install -y ntp
+ sudo service ntp stop
+ sudo ntpd -gq
+ sudo service ntp start
+ """
+
+ // Fetch the installer and make sure snap binaries are on PATH for later
+ // non-interactive ssh sessions.
+ sshCommand remote: remote, command: """
+ wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh
+ chmod +x ./install_osm.sh
+ sed -i '1 i\\export PATH=/snap/bin:\${PATH}' ~/.bashrc
+ """
+
+ if ( useCharmedInstaller ) {
+ // Use local proxy for docker hub
+ sshCommand remote: remote, command: '''
+ sudo snap install microk8s --classic --channel=1.19/stable
+ sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" /var/snap/microk8s/current/args/containerd-template.toml
+ sudo systemctl restart snap.microk8s.daemon-containerd.service
+ sudo snap alias microk8s.kubectl kubectl
+ '''
+
+ // Charmed install pulls the images built above from the internal registry.
+ withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
+ usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
+ sshCommand remote: remote, command: """
+ ./install_osm.sh -y \
+ ${repo_base_url} \
+ ${repo_key_name} \
+ ${release} -r unstable \
+ --charmed \
+ --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
+ --tag ${container_name}
+ """
+ }
+ // Charmed deployments expose services through nip.io wildcard DNS.
+ prometheusHostname = "prometheus."+IP_ADDRESS+".nip.io"
+ prometheusPort = 80
+ osmHostname = "nbi."+IP_ADDRESS+".nip.io:443"
+ } else {
+ // Run -k8s installer here specifying internal docker registry and docker proxy
+ withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
+ usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
+ sshCommand remote: remote, command: """
+ ./install_osm.sh -y \
+ ${repo_base_url} \
+ ${repo_key_name} \
+ ${release} -r unstable \
+ -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
+ -p ${INTERNAL_DOCKER_PROXY} \
+ -t ${container_name}
+ """
+ }
+ // Default installer exposes services directly on the VM address.
+ prometheusHostname = IP_ADDRESS
+ prometheusPort = 9091
+ osmHostname = IP_ADDRESS
+ }
+ } // stage("Install")
+///////////////////////////////////////////////////////////////////////////////////////
+// Health check of installed OSM in remote vm
+///////////////////////////////////////////////////////////////////////////////////////
+ stage("OSM Health") {
+ // Run the devops health script on the remote VM; it exits non-zero (failing
+ // this stage) if the OSM stack is not healthy. -k selects kubernetes mode.
+ stackName = "osm"
+ sshCommand remote: remote, command: """
+ /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
+ """
+ } // stage("OSM Health")
+ } // if ( params.DO_INSTALL )
+
+
+///////////////////////////////////////////////////////////////////////////////////////
+// Execute Robot tests
+///////////////////////////////////////////////////////////////////////////////////////
+ stage_archive = false
+ if ( params.DO_ROBOT ) {
+ try {
+ stage("System Integration Test") {
+ if ( useCharmedInstaller ) {
+ // Charmed installs are reached via nip.io names; build a hosts file
+ // mapping those names to the VM so the robot container can resolve them.
+ tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
+ sh(script: "touch ${tempdir}/hosts")
+ hostfile="${tempdir}/hosts"
+ sh """cat << EOF > ${hostfile}
+127.0.0.1 localhost
+${remote.host} prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
+EOF"""
+ } else {
+ hostfile=null
+ }
+
+ // Retrieve the juju dashboard password from the remote VM; some robot
+ // tests need it to talk to the controller.
+ jujuPassword=sshCommand remote: remote, command: """
+ echo `juju gui 2>&1 | grep password | cut -d: -f2`
+ """
+
+ run_robot_systest(
+ container_name,
+ params.ROBOT_TAG_NAME,
+ osmHostname,
+ prometheusHostname,
+ prometheusPort,
+ params.ROBOT_VIM,
+ params.ROBOT_PORT_MAPPING_VIM,
+ params.KUBECONFIG,
+ params.CLOUDS,
+ hostfile,
+ jujuPassword,
+ SSH_KEY,
+ params.ROBOT_PASS_THRESHOLD,
+ params.ROBOT_UNSTABLE_THRESHOLD
+ )
+ } // stage("System Integration Test")
+ } finally {
+ stage("Archive Container Logs") {
+ // Archive logs to containers_logs.txt
+ archive_logs(remote)
+ // currentBuild.result is null until something sets it, so the constant
+ // must be the receiver of equals() — calling
+ // currentBuild.result.equals(...) would NPE on a passing build.
+ if ( ! 'FAILURE'.equals(currentBuild.result) ) {
+ stage_archive = keep_artifacts
+ } else {
+ println ("Systest test failed, throwing error")
+ error = new Exception("Systest test failed")
+ currentBuild.result = 'FAILURE'
+ throw error
+ }
+ }
+ }
+ } // if ( params.DO_ROBOT )
+
+ if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) {
+ stage("Archive") {
+ // Record which container/tag was tested so downstream jobs can find it.
+ sh "echo ${container_name} > build_version.txt"
+ archiveArtifacts artifacts: "build_version.txt", fingerprint: true
+
+ // Archive the tested repo
+ dir("${RELEASE_DIR}") {
+ ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested')
+ }
+ if ( params.DO_DOCKERPUSH ) {
+ stage("Publish to Dockerhub") {
+ // Re-tag every tested image with the public DOCKER_TAG and push to
+ // Dockerhub; containerList was populated in the Build stage above.
+ parallelSteps = [:]
+ for (buildStep in containerList) {
+ // Per-iteration locals so each closure captures its own values.
+ def module = buildStep
+ def moduleName = buildStep.toLowerCase()
+ def dockerTag = params.DOCKER_TAG
+ def moduleTag = container_name
+
+ parallelSteps[module] = {
+ dir("$module") {
+ sh "docker tag opensourcemano/${moduleName}:${moduleTag} opensourcemano/${moduleName}:${dockerTag}"
+ sh "docker push opensourcemano/${moduleName}:${dockerTag}"
+ }
+ }
+ }
+ parallel parallelSteps
+ }
+
+ stage("Snap promotion") {
+ // Promote the newest edge revision of each snap to the beta channel
+ // of the track matching this branch.
+ def snaps = ["osmclient"]
+ sh "snapcraft login --with ~/.snapcraft/config"
+ for (snap in snaps) {
+ channel="latest/"
+ if (BRANCH_NAME.startsWith("v")) {
+ channel=BRANCH_NAME.substring(1)+"/"
+ } else if (BRANCH_NAME!="master") {
+ // NOTE(review): this yields "latest//<branch>" (double slash) and
+ // below "…<branch>edge\*" with no separator before "edge" — the
+ // grep pattern looks malformed for feature branches; verify
+ // against `snapcraft revisions` output before relying on it.
+ channel+="/"+BRANCH_NAME.replaceAll('/','-')
+ }
+ // Find the latest revision published to <track>/edge…
+ track=channel+"edge\\*"
+ edge_rev=sh(returnStdout: true,
+ script: "snapcraft revisions $snap | " +
+ "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
+ print "edge rev is $edge_rev"
+ // …and the latest revision on <track>/beta.
+ track=channel+"beta\\*"
+ beta_rev=sh(returnStdout: true,
+ script: "snapcraft revisions $snap | " +
+ "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
+ print "beta rev is $beta_rev"
+
+ // Only release when edge is ahead of beta.
+ if ( edge_rev != beta_rev ) {
+ print "Promoting $edge_rev to beta in place of $beta_rev"
+ beta_track=channel+"beta"
+ sh "snapcraft release $snap $edge_rev $beta_track"
+ }
+ }
+ } // stage("Snap promotion")
+ } // if ( params.DO_DOCKERPUSH )
+ } // stage("Archive")
+ } // if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive )
+ } // dir(OSM_DEVOPS)
+ } finally {
+ if ( params.DO_INSTALL && server_id != null) {
+ delete_vm = true
+ if (error && params.SAVE_CONTAINER_ON_FAIL ) {
+ delete_vm = false