X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=jenkins%2Fci-pipelines%2Fci_stage_3.groovy;h=316fdb27f8ef8f6bebbb28e5bd3311eac06a4485;hb=b04e188d083711b5983ad2859fc5851f25dd046d;hp=83671d14bfd6ad22759ac097aeeeeb75593698d7;hpb=8c76829f25eb1b8ba2c2c8b00a606476ae0e91b0;p=osm%2Fdevops.git

diff --git a/jenkins/ci-pipelines/ci_stage_3.groovy b/jenkins/ci-pipelines/ci_stage_3.groovy
index 83671d14..316fdb27 100644
--- a/jenkins/ci-pipelines/ci_stage_3.groovy
+++ b/jenkins/ci-pipelines/ci_stage_3.groovy
@@ -153,7 +153,8 @@ void archive_logs(Map remote) {
     sshCommand remote: remote, command: '''ls -al logs'''
     sshGet remote: remote, from: 'logs', into: '.', override: true
     sh 'cp logs/* .'
-    archiveArtifacts artifacts: '*.log'
+    sshGet remote: remote, from: 'ens3.pcap', into: 'ens3.pcap', override: true
+    archiveArtifacts artifacts: '*.log, *.pcap'
 }
 
 String get_value(String key, String output) {
@@ -176,6 +177,7 @@ node("${params.NODE}") {
     INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
     APT_PROXY = 'http://172.21.1.1:3142'
     SSH_KEY = '~/hive/cicd_rsa'
+    ARCHIVE_LOGS_FLAG = false
     sh 'env'
 
     tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')
@@ -418,15 +420,44 @@ node("${params.NODE}") {
 
             println("Waiting for VM at ${IP_ADDRESS} to be reachable")
             alive = false
-            while (!alive) {
-                output = sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim()
-                println("output is [$output]")
-                alive = output.contains('succeeded')
+            timeout(time: 1, unit: 'MINUTES') {
+                while (!alive) {
+                    output = sh(
+                        returnStatus: true,
+                        script: "ssh -T -i ${SSH_KEY} " +
+                            "-o StrictHostKeyChecking=no " +
+                            "-o UserKnownHostsFile=/dev/null " +
+                            "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
+                    alive = (output == 0)
+                }
             }
             println('VM is ready and accepting ssh connections')
         } // stage("Spawn Remote VM")
 
 ///////////////////////////////////////////////////////////////////////////////////////
+// Checks before installation
+///////////////////////////////////////////////////////////////////////////////////////
+        stage('Checks before installation') {
+            remote = [
+                name: containerName,
+                host: IP_ADDRESS,
+                user: 'ubuntu',
+                identityFile: SSH_KEY,
+                allowAnyHosts: true,
+                logLevel: 'INFO',
+                pty: true
+            ]
+
+            // Force time sync to avoid clock drift and invalid certificates
+            sshCommand remote: remote, command: 'nohup sudo tcpdump -i ens3 -w ens3.pcap -s 400 & sleep 5'
+            sshCommand remote: remote, command: 'sudo apt-get update'
+            sshCommand remote: remote, command: 'sudo apt-get install -y chrony'
+            sshCommand remote: remote, command: 'sudo service chrony stop'
+            sshCommand remote: remote, command: 'sudo chronyd -vq'
+            sshCommand remote: remote, command: 'sudo service chrony start'
+
+        } // stage("Checks before installation")
+///////////////////////////////////////////////////////////////////////////////////////
 // Installation
 ///////////////////////////////////////////////////////////////////////////////////////
         stage('Install') {
@@ -453,24 +484,15 @@ node("${params.NODE}") {
                 repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
             }
 
-            remote.with {
-                name = containerName
-                host = IP_ADDRESS
-                user = 'ubuntu'
-                identityFile = SSH_KEY
-                allowAnyHosts = true
-                logLevel = 'INFO'
-                pty = true
-            }
-
-            // Force time sync to avoid clock drift and invalid certificates
-            sshCommand remote: remote, command: '''
-                sudo apt update
-                sudo apt install -y ntp
-                sudo service ntp stop
-                sudo ntpd -gq
-                sudo service ntp start
-            '''
+            remote = [
+                name: containerName,
+                host: IP_ADDRESS,
+                user: 'ubuntu',
+                identityFile: SSH_KEY,
+                allowAnyHosts: true,
+                logLevel: 'INFO',
+                pty: true
+            ]
 
             sshCommand remote: remote, command: '''
                 wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh
@@ -523,11 +545,17 @@ node("${params.NODE}") {
                 prometheusPort = 9091
                 osmHostname = IP_ADDRESS
             }
+            sshCommand remote: remote, command: """
+                sudo killall tcpdump
+            """
+
         } // stage("Install")
 ///////////////////////////////////////////////////////////////////////////////////////
 // Health check of installed OSM in remote vm
 ///////////////////////////////////////////////////////////////////////////////////////
         stage('OSM Health') {
+            // if this point is reached, logs should be archived
+            ARCHIVE_LOGS_FLAG = true
             stackName = 'osm'
             sshCommand remote: remote, command: """
                 /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
@@ -543,6 +571,7 @@ node("${params.NODE}") {
         if ( params.DO_ROBOT ) {
             try {
                 stage('System Integration Test') {
+
                     if (useCharmedInstaller) {
                         tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
                         sh(script: "touch ${tempdir}/hosts")
@@ -577,9 +606,7 @@ EOF"""
                     )
                 } // stage("System Integration Test")
             } finally {
-                stage('Archive Container Logs') {
-                    // Archive logs to containers_logs.txt
-                    archive_logs(remote)
+                stage('After System Integration test') {
                     if (currentBuild.result != 'FAILURE') {
                         stage_archive = keep_artifacts
                     } else {
@@ -594,9 +621,6 @@ EOF"""
 
         if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive) {
             stage('Archive') {
-                sh "echo ${containerName} > build_version.txt"
-                archiveArtifacts artifacts: 'build_version.txt', fingerprint: true
-
                 // Archive the tested repo
                 dir("${RELEASE_DIR}") {
                     ci_helper.archive(params.ARTIFACTORY_SERVER, RELEASE, GERRIT_BRANCH, 'tested')
@@ -605,10 +629,10 @@ EOF"""
             stage('Publish to Dockerhub') {
                 parallelSteps = [:]
                 for (buildStep in containerList) {
-                    module = buildStep
-                    moduleName = buildStep.toLowerCase()
-                    dockerTag = params.DOCKER_TAG
-                    moduleTag = containerName
+                    def module = buildStep
+                    def moduleName = buildStep.toLowerCase()
+                    def dockerTag = params.DOCKER_TAG
+                    def moduleTag = containerName
 
                     parallelSteps[module] = {
                         dir("$module") {
@@ -654,34 +678,52 @@ EOF"""
         } // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
     } // dir(OSM_DEVOPS)
     } finally {
-        if ( params.DO_INSTALL && server_id != null) {
-            delete_vm = true
-            if (error && params.SAVE_CONTAINER_ON_FAIL ) {
-                delete_vm = false
-            }
-            if (!error && params.SAVE_CONTAINER_ON_PASS ) {
-                delete_vm = false
-            }
+        stage('Archive Container Logs') {
+            if ( ARCHIVE_LOGS_FLAG ) {
+                // Archive logs
+                remote = [
+                    name: containerName,
+                    host: IP_ADDRESS,
+                    user: 'ubuntu',
+                    identityFile: SSH_KEY,
+                    allowAnyHosts: true,
+                    logLevel: 'INFO',
+                    pty: true
+                ]
+                println('Archiving container logs')
+                archive_logs(remote)
+            } // end if ( ARCHIVE_LOGS_FLAG )
+        }
+        stage('Cleanup') {
+            if ( params.DO_INSTALL && server_id != null) {
+                delete_vm = true
+                if (error && params.SAVE_CONTAINER_ON_FAIL ) {
+                    delete_vm = false
+                }
+                if (!error && params.SAVE_CONTAINER_ON_PASS ) {
+                    delete_vm = false
+                }
 
-            if ( delete_vm ) {
-                if (server_id != null) {
-                    println("Deleting VM: $server_id")
-                    sh """#!/bin/sh -e
-                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
-                        openstack server delete ${server_id}
-                    """
-                } else {
-                    println("Saved VM $server_id in ETSI VIM")
+                if ( delete_vm ) {
+                    if (server_id != null) {
+                        println("Deleting VM: $server_id")
+                        sh """#!/bin/sh -e
+                            for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+                            openstack server delete ${server_id}
+                        """
+                    } else {
+                        println("Saved VM $server_id in ETSI VIM")
+                    }
                 }
             }
-        }
-        if ( http_server_name != null ) {
-            sh "docker stop ${http_server_name} || true"
-            sh "docker rm ${http_server_name} || true"
-        }
+            if ( http_server_name != null ) {
+                sh "docker stop ${http_server_name} || true"
+                sh "docker rm ${http_server_name} || true"
+            }
 
-        if ( devopstempdir != null ) {
-            sh "rm -rf ${devopstempdir}"
+            if ( devopstempdir != null ) {
+                sh "rm -rf ${devopstempdir}"
+            }
         }
     }
 }