X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=jenkins%2Fci-pipelines%2Fci_stage_3.groovy;h=31eab9dc746beb2e1052b921081d7e6aaab63273;hb=80b2e177597d4d66afa568837aa58e0064f1a9d5;hp=83671d14bfd6ad22759ac097aeeeeb75593698d7;hpb=8c76829f25eb1b8ba2c2c8b00a606476ae0e91b0;p=osm%2Fdevops.git

diff --git a/jenkins/ci-pipelines/ci_stage_3.groovy b/jenkins/ci-pipelines/ci_stage_3.groovy
index 83671d14..31eab9dc 100644
--- a/jenkins/ci-pipelines/ci_stage_3.groovy
+++ b/jenkins/ci-pipelines/ci_stage_3.groovy
@@ -31,7 +31,7 @@ properties([
         string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
         string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
         string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
-        string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
+        string(defaultValue: 'ubuntu22.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
         booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
@@ -104,7 +104,8 @@ void run_robot_systest(String tagName,
                -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} \
                -c -t ${testName}""")
     } finally {
-        sh("cp ${tempdir}/* .")
+        sh("cp ${tempdir}/*.xml .")
+        sh("cp ${tempdir}/*.html .")
         outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
         println("Present Directory is : ${outputDirectory}")
         step([
@@ -123,13 +124,13 @@ void run_robot_systest(String tagName,
 void archive_logs(Map remote) {
-    sshCommand remote: remote, command: '''mkdir -p logs'''
+    sshCommand remote: remote, command: '''mkdir -p logs/dags'''
     if (useCharmedInstaller) {
         sshCommand remote: remote, command: '''
-            for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
-                logfile=`echo $container | cut -d- -f1`
+            for pod in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
+                logfile=`echo $pod | cut -d- -f1`
                 echo "Extracting log for $logfile"
-                kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
+                kubectl logs -n osm $pod --timestamps=true 2>&1 > logs/$logfile.log
             done
         '''
     } else {
@@ -147,12 +148,17 @@ void archive_logs(Map remote) {
                     > logs/$statefulset.log
             done
         '''
+        sshCommand remote: remote, command: '''
+            schedulerPod="$(kubectl get pods -n osm | grep airflow-scheduler| awk '{print $1; exit}')"; \
+            echo "Extracting logs from Airflow DAGs from pod ${schedulerPod}"; \
+            kubectl cp -n osm ${schedulerPod}:/opt/airflow/logs/scheduler/latest/dags logs/dags -c scheduler
+        '''
     }
     sh 'rm -rf logs'
     sshCommand remote: remote, command: '''ls -al logs'''
     sshGet remote: remote, from: 'logs', into: '.', override: true
-    sh 'cp logs/* .'
+    sh 'cp logs/*.log logs/dags/*.log .'
     archiveArtifacts artifacts: '*.log'
 }
 
@@ -176,6 +182,7 @@ node("${params.NODE}") {
     INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
     APT_PROXY = 'http://172.21.1.1:3142'
     SSH_KEY = '~/hive/cicd_rsa'
+    ARCHIVE_LOGS_FLAG = false
     sh 'env'
 
     tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')
@@ -214,7 +221,9 @@ node("${params.NODE}") {
///////////////////////////////////////////////////////////////////////////////////////
        stage('Copy Artifacts') {
            // cleanup any previous repo
-            sh 'rm -rf repo'
+            sh "tree -fD repo || exit 0"
+            sh 'rm -rvf repo'
+            sh "tree -fD repo && lsof repo || exit 0"
            dir('repo') {
                packageList = []
                dir("${RELEASE}") {
@@ -254,7 +263,7 @@ node("${params.NODE}") {
                    parallelSteps = [:]
                    list = ['RO', 'osmclient', 'IM', 'devops', 'MON', 'N2VC', 'NBI',
-                            'common', 'LCM', 'POL', 'NG-UI', 'PLA', 'tests']
+                            'common', 'LCM', 'POL', 'NG-UI', 'NG-SA', 'PLA', 'tests']
                    if (upstreamComponent.length() > 0) {
                        println("Skipping upstream fetch of ${upstreamComponent}")
                        list.remove(upstreamComponent)
@@ -316,11 +325,14 @@ node("${params.NODE}") {
                repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
                               'print(s.getsockname()[1]); s.close()\');',
                               returnStdout: true).trim()
-                repo_base_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
+                internal_docker_http_server_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
                NODE_IP_ADDRESS = sh(returnStdout: true, script:
                    "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
+                ci_helper.check_status_http_server(NODE_IP_ADDRESS, repo_port)
            }
 
+            sh "tree -fD repo"
+
            // Unpack devops package into temporary location so that we use it from upstream if it was part of a patch
            osm_devops_dpkg = sh(returnStdout: true, script: 'find ./repo/release/pool/ -name osm-devops*.deb').trim()
            devopstempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
@@ -418,15 +430,69 @@ node("${params.NODE}") {
                println("Waiting for VM at ${IP_ADDRESS} to be reachable")
 
                alive = false
-                while (!alive) {
-                    output = sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim()
-                    println("output is [$output]")
-                    alive = output.contains('succeeded')
+                timeout(time: 1, unit: 'MINUTES') {
+                    while (!alive) {
+                        output = sh(
+                            returnStatus: true,
+                            script: "ssh -T -i ${SSH_KEY} " +
+                                "-o StrictHostKeyChecking=no " +
+                                "-o UserKnownHostsFile=/dev/null " +
+                                "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
+                        alive = (output == 0)
+                    }
                }
                println('VM is ready and accepting ssh connections')
+
+                //////////////////////////////////////////////////////////////////////////////////////////////
+                println('Applying sshd config workaround for Ubuntu 22.04 and old jsch client in Jenkins...')
+
+                sh( returnStatus: true,
+                    script: "ssh -T -i ${SSH_KEY} " +
+                        "-o StrictHostKeyChecking=no " +
+                        "-o UserKnownHostsFile=/dev/null " +
+                        "ubuntu@${IP_ADDRESS} " +
+                        "'echo HostKeyAlgorithms +ssh-rsa | sudo tee -a /etc/ssh/sshd_config'")
+                sh( returnStatus: true,
+                    script: "ssh -T -i ${SSH_KEY} " +
+                        "-o StrictHostKeyChecking=no " +
+                        "-o UserKnownHostsFile=/dev/null " +
+                        "ubuntu@${IP_ADDRESS} " +
+                        "'echo PubkeyAcceptedKeyTypes +ssh-rsa | sudo tee -a /etc/ssh/sshd_config'")
+                sh( returnStatus: true,
+                    script: "ssh -T -i ${SSH_KEY} " +
+                        "-o StrictHostKeyChecking=no " +
+                        "-o UserKnownHostsFile=/dev/null " +
+                        "ubuntu@${IP_ADDRESS} " +
+                        "'sudo systemctl restart sshd'")
+                //////////////////////////////////////////////////////////////////////////////////////////////
+
            } // stage("Spawn Remote VM")
///////////////////////////////////////////////////////////////////////////////////////
+// Checks before installation
+///////////////////////////////////////////////////////////////////////////////////////
+            stage('Checks before installation') {
+                remote = [
+                    name: containerName,
+                    host: IP_ADDRESS,
+                    user: 'ubuntu',
+                    identityFile: SSH_KEY,
+                    allowAnyHosts: true,
+                    logLevel: 'INFO',
+                    pty: true
+                ]
+
+                // Ensure the VM is ready
+                sshCommand remote: remote, command: 'cloud-init status --wait'
+                // Force time sync to avoid clock drift and invalid certificates
+                sshCommand remote: remote, command: 'sudo apt-get -y update'
+                sshCommand remote: remote, command: 'sudo apt-get -y install chrony'
+                sshCommand remote: remote, command: 'sudo service chrony stop'
+                sshCommand remote: remote, command: 'sudo chronyd -vq'
+                sshCommand remote: remote, command: 'sudo service chrony start'
+
+            } // stage("Checks before installation")
+///////////////////////////////////////////////////////////////////////////////////////
// Installation
///////////////////////////////////////////////////////////////////////////////////////
            stage('Install') {
@@ -453,27 +519,18 @@ node("${params.NODE}") {
                    repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
                }
-                remote.with {
-                    name = containerName
-                    host = IP_ADDRESS
-                    user = 'ubuntu'
-                    identityFile = SSH_KEY
-                    allowAnyHosts = true
-                    logLevel = 'INFO'
-                    pty = true
-                }
-
-                // Force time sync to avoid clock drift and invalid certificates
-                sshCommand remote: remote, command: '''
-                    sudo apt update
-                    sudo apt install -y ntp
-                    sudo service ntp stop
-                    sudo ntpd -gq
-                    sudo service ntp start
-                '''
+                remote = [
+                    name: containerName,
+                    host: IP_ADDRESS,
+                    user: 'ubuntu',
+                    identityFile: SSH_KEY,
+                    allowAnyHosts: true,
+                    logLevel: 'INFO',
+                    pty: true
+                ]
                sshCommand remote: remote, command: '''
-                    wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh
+                    wget https://osm-download.etsi.org/ftp/osm-13.0-thirteen/install_osm.sh
                    chmod +x ./install_osm.sh
                    sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
                '''
@@ -528,6 +585,8 @@ node("${params.NODE}") {
// Health check of installed OSM in remote vm
///////////////////////////////////////////////////////////////////////////////////////
            stage('OSM Health') {
+                // if this point is reached, logs should be archived
+                ARCHIVE_LOGS_FLAG = true
                stackName = 'osm'
                sshCommand remote: remote, command: """
                    /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
@@ -577,9 +636,7 @@ node("${params.NODE}") {
                    )
                } // stage("System Integration Test")
            } finally {
-                stage('Archive Container Logs') {
-                    // Archive logs to containers_logs.txt
-                    archive_logs(remote)
+                stage('After System Integration test') {
                    if (currentBuild.result != 'FAILURE') {
                        stage_archive = keep_artifacts
                    } else {
@@ -594,9 +651,6 @@ EOF"""
        if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive) {
            stage('Archive') {
-                sh "echo ${containerName} > build_version.txt"
-                archiveArtifacts artifacts: 'build_version.txt', fingerprint: true
-
                // Archive the tested repo
                dir("${RELEASE_DIR}") {
                    ci_helper.archive(params.ARTIFACTORY_SERVER, RELEASE, GERRIT_BRANCH, 'tested')
@@ -605,14 +659,15 @@ EOF"""
                stage('Publish to Dockerhub') {
                    parallelSteps = [:]
                    for (buildStep in containerList) {
-                        module = buildStep
-                        moduleName = buildStep.toLowerCase()
-                        dockerTag = params.DOCKER_TAG
-                        moduleTag = containerName
+                        def module = buildStep
+                        def moduleName = buildStep.toLowerCase()
+                        def dockerTag = params.DOCKER_TAG
+                        def moduleTag = containerName
                        parallelSteps[module] = {
                            dir("$module") {
sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \ + sh("docker pull ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}") + sh("""docker tag ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag} \ opensourcemano/${moduleName}:${dockerTag}""") sh "docker push opensourcemano/${moduleName}:${dockerTag}" } @@ -620,68 +675,161 @@ EOF""" } parallel parallelSteps } - stage('Snap promotion') { - snaps = ['osmclient'] - sh 'snapcraft login --with ~/.snapcraft/config' - for (snap in snaps) { - channel = 'latest/' + withCredentials([string(credentialsId: 'Snapstore', variable: 'SNAPCRAFT_STORE_CREDENTIALS')]) { + snaps = ['osmclient'] + for (snap in snaps) { + channel = 'latest/' + if (BRANCH_NAME.startsWith('v')) { + channel = BRANCH_NAME.substring(1) + '/' + } else if (BRANCH_NAME != 'master') { + channel += '/' + BRANCH_NAME.replaceAll('/', '-') + } + track = channel + 'edge\\*' + edge_rev = sh(returnStdout: true, + script: "snapcraft revisions $snap | " + + "grep \"$track\" | tail -1 | awk '{print \$1}'").trim() + track = channel + 'beta\\*' + beta_rev = sh(returnStdout: true, + script: "snapcraft revisions $snap | " + + "grep \"$track\" | tail -1 | awk '{print \$1}'").trim() + + print "Edge: $edge_rev, Beta: $beta_rev" + + if (edge_rev != beta_rev) { + print "Promoting $edge_rev to beta in place of $beta_rev" + beta_track = channel + 'beta' + sh "snapcraft release $snap $edge_rev $beta_track" + } + } + } + } // stage('Snap promotion') + stage('Charm promotion') { + charms = [ + 'osm', // bundle + 'osm-ha', // bundle + 'osm-grafana', + 'osm-mariadb', + 'mongodb-exporter-k8s', + 'mysqld-exporter-k8s', + 'osm-lcm', + 'osm-mon', + 'osm-nbi', + 'osm-ng-ui', + 'osm-pol', + 'osm-ro', + 'osm-prometheus', + 'osm-update-db-operator', + 'osm-vca-integrator', + ] + for (charm in charms) { + + channel = 'latest' if (BRANCH_NAME.startsWith('v')) { - channel = BRANCH_NAME.substring(1) + '/' + channel = BRANCH_NAME.substring(1) } else if (BRANCH_NAME != 'master') { channel += '/' + BRANCH_NAME.replaceAll('/', '-') } - track = channel + 'edge\\*' - edge_rev = sh(returnStdout: true, - script: "snapcraft revisions $snap | " + - "grep \"$track\" | tail -1 | awk '{print \$1}'").trim() - print "edge rev is $edge_rev" - track = channel + 'beta\\*' - beta_rev = sh(returnStdout: true, - script: "snapcraft revisions $snap | " + - "grep \"$track\" | tail -1 | awk '{print \$1}'").trim() - print "beta rev is $beta_rev" - - if (edge_rev != beta_rev) { - print "Promoting $edge_rev to beta in place of $beta_rev" - beta_track = channel + 'beta' - sh "snapcraft release $snap $edge_rev $beta_track" + + withCredentials([string(credentialsId: 'Charmstore', variable: 'CHARMCRAFT_AUTH')]) { + sh "charmcraft status $charm --format json > ${charm}.json" + isCharm = sh(returnStdout: true, script: "grep architecture ${charm}.json | wc -l").trim() as int + resourceArgument = "" + if (isCharm) { + jqScriptEdge = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .version'|head -1" + jqScriptBeta = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/beta\")| .version'|head -1" + index=0 + while (index < 5) { + resourceNameScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | 
+                                    resourceNameScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .resources[$index].name'|head -1"
+                                    resourceRevsScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .resources[$index].revision'|head -1"
+                                    resourceName = sh(returnStdout: true, script: resourceNameScript).trim()
+                                    resourceRevs = sh(returnStdout: true, script: resourceRevsScript).trim()
+                                    if (resourceName != "null") {
+                                        resourceArgument += " --resource ${resourceName}:${resourceRevs}"
+                                    } else {
+                                        break
+                                    }
+                                    index ++
+                                }
+                            } else {
+                                jqScriptEdge = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[].releases[] | select(.channel==\"$channel/edge/merged\")| .version'|head -1"
+                                jqScriptBeta = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[].releases[] | select(.channel==\"$channel/beta\")| .version'|head -1"
+                            }
+                            // edge/merged is used in place of /edge as 10.1.0 LTS uses latest/edge
+                            edge_rev = sh(returnStdout: true, script: jqScriptEdge).trim()
+                            beta_rev = sh(returnStdout: true, script: jqScriptBeta).trim()
+                            try { edge_rev = edge_rev as int } catch (NumberFormatException nfe) {edge_rev = 0}
+                            try { beta_rev = beta_rev as int } catch (NumberFormatException nfe) {beta_rev = 0}
+
+                            print "Edge: $edge_rev, Beta: $beta_rev $resourceArgument"
+
+                            if (edge_rev > beta_rev) {
+                                print "Promoting $edge_rev to beta in place of $beta_rev"
+                                beta_track = channel + 'beta'
+                                sh "charmcraft release ${charm} --revision=${edge_rev} ${resourceArgument} --channel=${channel}/beta"
+                            }
+                        }
                    }
-                } // stage('Snap promotion')
+                } // stage('Charm promotion')
            } // if (params.DO_DOCKERPUSH)
        } // stage('Archive')
    } // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
  } // dir(OSM_DEVOPS)
} finally {
-    if ( params.DO_INSTALL && server_id != null) {
-        delete_vm = true
-        if (error && params.SAVE_CONTAINER_ON_FAIL ) {
-            delete_vm = false
-        }
-        if (!error && params.SAVE_CONTAINER_ON_PASS ) {
-            delete_vm = false
-        }
+    // stage('Debug') {
+    //     sleep 900
+    // }
+    stage('Archive Container Logs') {
+        if ( ARCHIVE_LOGS_FLAG ) {
+            try {
+                // Archive logs
+                remote = [
+                    name: containerName,
+                    host: IP_ADDRESS,
+                    user: 'ubuntu',
+                    identityFile: SSH_KEY,
+                    allowAnyHosts: true,
+                    logLevel: 'INFO',
+                    pty: true
+                ]
+                println('Archiving container logs')
+                archive_logs(remote)
+            } catch (Exception e) {
+                println('Error fetching logs: '+ e.getMessage())
+            }
+        } // end if ( ARCHIVE_LOGS_FLAG )
+    }
+    stage('Cleanup') {
+        if ( params.DO_INSTALL && server_id != null) {
+            delete_vm = true
+            if (error && params.SAVE_CONTAINER_ON_FAIL ) {
+                delete_vm = false
+            }
+            if (!error && params.SAVE_CONTAINER_ON_PASS ) {
+                delete_vm = false
+            }
-        if ( delete_vm ) {
-            if (server_id != null) {
-                println("Deleting VM: $server_id")
-                sh """#!/bin/sh -e
-                    for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
-                    openstack server delete ${server_id}
-                """
-            } else {
-                println("Saved VM $server_id in ETSI VIM")
+            if ( delete_vm ) {
+                if (server_id != null) {
+                    println("Deleting VM: $server_id")
+                    sh """#!/bin/sh -e
+                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+                        openstack server delete ${server_id}
+                    """
+                } else {
+                    println("Saved VM $server_id in ETSI VIM")
+                }
            }
        }
-    }
-    if ( http_server_name != null ) {
-        sh "docker stop ${http_server_name} || true"
-        sh "docker rm ${http_server_name} || true"
-    }
+        if ( http_server_name != null ) {
+            sh "docker stop ${http_server_name} || true"
+            sh "docker rm ${http_server_name} || true"
+        }
 
-    if ( devopstempdir != null ) {
-        sh "rm -rf ${devopstempdir}"
+        if ( devopstempdir != null ) {
+            sh "rm -rf ${devopstempdir}"
+        }
        }
    }
}