INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
APT_PROXY = 'http://172.21.1.1:3142'
SSH_KEY = '~/hive/cicd_rsa'
+ ARCHIVE_LOGS_FLAG = false
sh 'env'
tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')
println("Waiting for VM at ${IP_ADDRESS} to be reachable")
alive = false
- while (!alive) {
- output = sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim()
- println("output is [$output]")
- alive = output.contains('succeeded')
+ timeout(time: 1, unit: 'MINUTES') {
+ while (!alive) {
+ // Pause 1s per attempt (as the removed nc-based probe did) so an
+ // instant "connection refused" does not spin this loop at full speed
+ // for the whole timeout window.
+ output = sh(
+ returnStatus: true,
+ script: "sleep 1 ; ssh -T -i ${SSH_KEY} " +
+ "-o StrictHostKeyChecking=no " +
+ "-o UserKnownHostsFile=/dev/null " +
+ "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
+ // returnStatus yields the shell exit code: 0 means ssh succeeded.
+ alive = (output == 0)
+ }
}
println('VM is ready and accepting ssh connections')
} // stage("Spawn Remote VM")
///////////////////////////////////////////////////////////////////////////////////////
+// Checks before installation
+///////////////////////////////////////////////////////////////////////////////////////
+ stage('Checks before installation') {
+ remote = [
+ name: containerName,
+ host: IP_ADDRESS,
+ user: 'ubuntu',
+ identityFile: SSH_KEY,
+ allowAnyHosts: true,
+ logLevel: 'INFO',
+ pty: true
+ ]
+
+ // Force time sync to avoid clock drift and invalid certificates:
+ // install chrony, stop the daemon, run a one-shot clock step, restart.
+ [
+ 'sudo apt-get update',
+ 'sudo apt-get install -y chrony',
+ 'sudo service chrony stop',
+ 'sudo chronyd -vq',
+ 'sudo service chrony start'
+ ].each { timeSyncCommand ->
+ sshCommand remote: remote, command: timeSyncCommand
+ }
+
+ } // stage("Checks before installation")
+///////////////////////////////////////////////////////////////////////////////////////
// Installation
///////////////////////////////////////////////////////////////////////////////////////
stage('Install') {
repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
}
- remote.with {
- name = containerName
- host = IP_ADDRESS
- user = 'ubuntu'
- identityFile = SSH_KEY
- allowAnyHosts = true
- logLevel = 'INFO'
- pty = true
- }
-
- // Force time sync to avoid clock drift and invalid certificates
- sshCommand remote: remote, command: '''
- sudo apt update
- sudo apt install -y ntp
- sudo service ntp stop
- sudo ntpd -gq
- sudo service ntp start
- '''
+ // NOTE(review): identical connection map to the one built in
+ // 'Checks before installation' — consider extracting a shared helper
+ // so the two copies cannot drift apart.
+ remote = [
+ name: containerName,
+ host: IP_ADDRESS,
+ user: 'ubuntu',
+ identityFile: SSH_KEY,
+ allowAnyHosts: true,
+ logLevel: 'INFO',
+ pty: true
+ ]
sshCommand remote: remote, command: '''
wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh
// Health check of installed OSM in remote vm
///////////////////////////////////////////////////////////////////////////////////////
stage('OSM Health') {
+ // if this point is reached, logs should be archived
+ ARCHIVE_LOGS_FLAG = true
stackName = 'osm'
sshCommand remote: remote, command: """
/usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
)
} // stage("System Integration Test")
} finally {
- stage('Archive Container Logs') {
- // Archive logs to containers_logs.txt
- archive_logs(remote)
+ stage('After System Integration test') {
if (currentBuild.result != 'FAILURE') {
stage_archive = keep_artifacts
} else {
stage('Publish to Dockerhub') {
parallelSteps = [:]
for (buildStep in containerList) {
- module = buildStep
- moduleName = buildStep.toLowerCase()
- dockerTag = params.DOCKER_TAG
- moduleTag = containerName
+ // 'def' makes these per-iteration locals: the closure assigned to
+ // parallelSteps[module] below captures its own copies, instead of all
+ // parallel branches sharing (and last-write-winning) script-level
+ // bindings.
+ def module = buildStep
+ def moduleName = buildStep.toLowerCase()
+ def dockerTag = params.DOCKER_TAG
+ def moduleTag = containerName
parallelSteps[module] = {
dir("$module") {
} // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
} // dir(OSM_DEVOPS)
} finally {
- if ( params.DO_INSTALL && server_id != null) {
- delete_vm = true
- if (error && params.SAVE_CONTAINER_ON_FAIL ) {
- delete_vm = false
- }
- if (!error && params.SAVE_CONTAINER_ON_PASS ) {
- delete_vm = false
- }
+ stage('Archive Container Logs') {
+ // ARCHIVE_LOGS_FLAG is only set true once the 'OSM Health' stage is
+ // reached, so this skips archiving when the install never got that far.
+ if ( ARCHIVE_LOGS_FLAG ) {
+ // Archive logs
+ // Rebuild the connection map here: this stage runs inside a
+ // 'finally' block, so earlier stages that defined 'remote' may have
+ // been skipped or failed before assigning it.
+ remote = [
+ name: containerName,
+ host: IP_ADDRESS,
+ user: 'ubuntu',
+ identityFile: SSH_KEY,
+ allowAnyHosts: true,
+ logLevel: 'INFO',
+ pty: true
+ ]
+ println('Archiving container logs')
+ archive_logs(remote)
+ } // end if ( ARCHIVE_LOGS_FLAG )
+ }
+ stage('Cleanup') {
+ if ( params.DO_INSTALL && server_id != null) {
+ delete_vm = true
+ if (error && params.SAVE_CONTAINER_ON_FAIL ) {
+ delete_vm = false
+ }
+ if (!error && params.SAVE_CONTAINER_ON_PASS ) {
+ delete_vm = false
+ }
- if ( delete_vm ) {
- if (server_id != null) {
- println("Deleting VM: $server_id")
- sh """#!/bin/sh -e
- for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
- openstack server delete ${server_id}
- """
- } else {
- println("Saved VM $server_id in ETSI VIM")
+ if ( delete_vm ) {
+ if (server_id != null) {
+ println("Deleting VM: $server_id")
+ sh """#!/bin/sh -e
+ for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+ openstack server delete ${server_id}
+ """
+ } else {
+ // NOTE(review): unreachable — the enclosing guard already requires
+ // server_id != null; this "Saved VM" message probably belongs in an
+ // else branch of 'if ( delete_vm )' instead.
+ println("Saved VM $server_id in ETSI VIM")
+ }
}
}
- }
- if ( http_server_name != null ) {
- sh "docker stop ${http_server_name} || true"
- sh "docker rm ${http_server_name} || true"
- }
+ if ( http_server_name != null ) {
+ sh "docker stop ${http_server_name} || true"
+ sh "docker rm ${http_server_name} || true"
+ }
- if ( devopstempdir != null ) {
- sh "rm -rf ${devopstempdir}"
+ if ( devopstempdir != null ) {
+ sh "rm -rf ${devopstempdir}"
+ }
}
}
}