string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
- string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
+ string(defaultValue: 'ubuntu22.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
+ string(defaultValue: 'osm.sanity', description: '', name: 'OPENSTACK_OSM_FLAVOR'),
+ booleanParam(defaultValue: false, description: '', name: 'TRY_OLD_SERVICE_ASSURANCE'),
+ booleanParam(defaultValue: true, description: '', name: 'TRY_JUJU_INSTALLATION'),
booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
String osmHostname,
String prometheusHostname,
Integer prometheusPort=null,
+ String ociRegistryUrl,
String envfile=null,
String portmappingfile=null,
String kubeconfig=null,
}
try {
- sh("""docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} \
- ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${environmentFile} \
- -v ${clouds}:/etc/openstack/clouds.yaml \
- -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports \
- -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} \
- -c -t ${testName}""")
+ withCredentials([usernamePassword(credentialsId: 'gitlab-oci-test',
+ passwordVariable: 'OCI_REGISTRY_PSW', usernameVariable: 'OCI_REGISTRY_USR')]) {
+ sh("""docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} \
+ ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${environmentFile} \
+ --env OCI_REGISTRY_URL=${ociRegistryUrl} --env OCI_REGISTRY_USER=${OCI_REGISTRY_USR} \
+ --env OCI_REGISTRY_PASSWORD=${OCI_REGISTRY_PSW} \
+ -v ${clouds}:/etc/openstack/clouds.yaml -v ${osmRSAfile}:/root/osm_id_rsa \
+ -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports \
+ -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} \
+ -c -t ${testName}""")
+ }
} finally {
- sh("cp ${tempdir}/* .")
+ sh("cp ${tempdir}/*.xml .")
+ sh("cp ${tempdir}/*.html .")
outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
println("Present Directory is : ${outputDirectory}")
step([
void archive_logs(Map remote) {
- sshCommand remote: remote, command: '''mkdir -p logs'''
+ sshCommand remote: remote, command: '''mkdir -p logs/dags'''
if (useCharmedInstaller) {
sshCommand remote: remote, command: '''
- for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
- logfile=`echo $container | cut -d- -f1`
+ for pod in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
+ logfile=`echo $pod | cut -d- -f1`
echo "Extracting log for $logfile"
- kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
+ kubectl logs -n osm $pod --timestamps=true 2>&1 > logs/$logfile.log
done
'''
} else {
> logs/$statefulset.log
done
'''
+ sshCommand remote: remote, command: '''
+ schedulerPod="$(kubectl get pods -n osm | grep airflow-scheduler| awk '{print $1; exit}')"; \
+ echo "Extracting logs from Airflow DAGs from pod ${schedulerPod}"; \
+ kubectl cp -n osm ${schedulerPod}:/opt/airflow/logs/scheduler/latest/dags logs/dags -c scheduler
+ '''
}
sh 'rm -rf logs'
sshCommand remote: remote, command: '''ls -al logs'''
sshGet remote: remote, from: 'logs', into: '.', override: true
- sh 'cp logs/* .'
- archiveArtifacts artifacts: '*.log'
+ archiveArtifacts artifacts: 'logs/*.log, logs/dags/*.log'
}
String get_value(String key, String output) {
INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
APT_PROXY = 'http://172.21.1.1:3142'
SSH_KEY = '~/hive/cicd_rsa'
+ ARCHIVE_LOGS_FLAG = false
+ OCI_REGISTRY_URL = 'oci://osm.etsi.org:5050/devops/test'
sh 'env'
tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')
///////////////////////////////////////////////////////////////////////////////////////
stage('Copy Artifacts') {
// cleanup any previous repo
- sh 'rm -rf repo'
+ sh "tree -fD repo || exit 0"
+ sh 'rm -rvf repo'
+ sh "tree -fD repo && lsof repo || exit 0"
dir('repo') {
packageList = []
dir("${RELEASE}") {
parallelSteps = [:]
list = ['RO', 'osmclient', 'IM', 'devops', 'MON', 'N2VC', 'NBI',
- 'common', 'LCM', 'POL', 'NG-UI', 'PLA', 'tests']
+ 'common', 'LCM', 'POL', 'NG-UI', 'NG-SA', 'PLA', 'tests']
if (upstreamComponent.length() > 0) {
println("Skipping upstream fetch of ${upstreamComponent}")
list.remove(upstreamComponent)
repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
'print(s.getsockname()[1]); s.close()\');',
returnStdout: true).trim()
- repo_base_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
+ internal_docker_http_server_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
NODE_IP_ADDRESS = sh(returnStdout: true, script:
"echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
+ ci_helper.check_status_http_server(NODE_IP_ADDRESS, repo_port)
}
+ sh "tree -fD repo"
+
// Unpack devops package into temporary location so that we use it from upstream if it was part of a patch
osm_devops_dpkg = sh(returnStdout: true, script: 'find ./repo/release/pool/ -name osm-devops*.deb').trim()
devopstempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
println('Launching new VM')
output = sh(returnStdout: true, script: """#!/bin/sh -e
for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
- openstack server create --flavor osm.sanity \
+ openstack server create --flavor ${OPENSTACK_OSM_FLAVOR} \
--image ${OPENSTACK_BASE_IMAGE} \
--key-name CICD \
--property build_url="${BUILD_URL}" \
}
}
println('VM is ready and accepting ssh connections')
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ println('Applying sshd config workaround for Ubuntu 22.04 and old jsch client in Jenkins...')
+
+ sh( returnStatus: true,
+ script: "ssh -T -i ${SSH_KEY} " +
+ "-o StrictHostKeyChecking=no " +
+ "-o UserKnownHostsFile=/dev/null " +
+ "ubuntu@${IP_ADDRESS} " +
+ "'echo HostKeyAlgorithms +ssh-rsa | sudo tee -a /etc/ssh/sshd_config'")
+ sh( returnStatus: true,
+ script: "ssh -T -i ${SSH_KEY} " +
+ "-o StrictHostKeyChecking=no " +
+ "-o UserKnownHostsFile=/dev/null " +
+ "ubuntu@${IP_ADDRESS} " +
+ "'echo PubkeyAcceptedKeyTypes +ssh-rsa | sudo tee -a /etc/ssh/sshd_config'")
+ sh( returnStatus: true,
+ script: "ssh -T -i ${SSH_KEY} " +
+ "-o StrictHostKeyChecking=no " +
+ "-o UserKnownHostsFile=/dev/null " +
+ "ubuntu@${IP_ADDRESS} " +
+ "'sudo systemctl restart sshd'")
+ //////////////////////////////////////////////////////////////////////////////////////////////
+
} // stage("Spawn Remote VM")
///////////////////////////////////////////////////////////////////////////////////////
+// Checks before installation
+///////////////////////////////////////////////////////////////////////////////////////
+ stage('Checks before installation') {
+ remote = [
+ name: containerName,
+ host: IP_ADDRESS,
+ user: 'ubuntu',
+ identityFile: SSH_KEY,
+ allowAnyHosts: true,
+ logLevel: 'INFO',
+ pty: true
+ ]
+
+ // Ensure the VM is ready
+ sshCommand remote: remote, command: 'cloud-init status --wait'
+ // Force time sync to avoid clock drift and invalid certificates
+ sshCommand remote: remote, command: 'sudo apt-get -y update'
+ sshCommand remote: remote, command: 'sudo apt-get -y install chrony'
+ sshCommand remote: remote, command: 'sudo service chrony stop'
+ sshCommand remote: remote, command: 'sudo chronyd -vq'
+ sshCommand remote: remote, command: 'sudo service chrony start'
+
+ } // stage("Checks before installation")
+///////////////////////////////////////////////////////////////////////////////////////
// Installation
///////////////////////////////////////////////////////////////////////////////////////
stage('Install') {
pty: true
]
- // Force time sync to avoid clock drift and invalid certificates
- sshCommand remote: remote, command: '''
- sudo apt update
- sudo apt install -y ntp
- sudo service ntp stop
- sudo ntpd -gq
- sudo service ntp start
- '''
-
sshCommand remote: remote, command: '''
- wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh
+ wget https://osm-download.etsi.org/ftp/osm-15.0-fifteen/install_osm.sh
chmod +x ./install_osm.sh
sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
'''
osmHostname = "nbi.${IP_ADDRESS}.nip.io:443"
} else {
// Run -k8s installer here specifying internal docker registry and docker proxy
+ osm_installation_options = ""
+ if (params.TRY_OLD_SERVICE_ASSURANCE) {
+ osm_installation_options = "${osm_installation_options} --old-sa"
+ }
+ if (params.TRY_JUJU_INSTALLATION) {
+ osm_installation_options = "${osm_installation_options} --juju --lxd"
+ }
withCredentials([gitlabCredentialsMap]) {
sshCommand remote: remote, command: """
./install_osm.sh -y \
${release} -r unstable \
-d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
-p ${INTERNAL_DOCKER_PROXY} \
- -t ${containerName}
+ -t ${containerName} \
+ ${osm_installation_options}
"""
}
prometheusHostname = IP_ADDRESS
// Health check of installed OSM in remote vm
///////////////////////////////////////////////////////////////////////////////////////
stage('OSM Health') {
+            // If this point is reached, OSM installation completed, so container logs should be archived even if later stages fail.
+ ARCHIVE_LOGS_FLAG = true
stackName = 'osm'
sshCommand remote: remote, command: """
/usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
osmHostname,
prometheusHostname,
prometheusPort,
+ OCI_REGISTRY_URL,
params.ROBOT_VIM,
params.ROBOT_PORT_MAPPING_VIM,
params.KUBECONFIG,
)
} // stage("System Integration Test")
} finally {
- stage('Archive Container Logs') {
- // Archive logs to containers_logs.txt
- archive_logs(remote)
+ stage('After System Integration test') {
if (currentBuild.result != 'FAILURE') {
stage_archive = keep_artifacts
} else {
if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive) {
stage('Archive') {
- sh "echo ${containerName} > build_version.txt"
- archiveArtifacts artifacts: 'build_version.txt', fingerprint: true
-
// Archive the tested repo
dir("${RELEASE_DIR}") {
ci_helper.archive(params.ARTIFACTORY_SERVER, RELEASE, GERRIT_BRANCH, 'tested')
parallelSteps[module] = {
dir("$module") {
- sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
+ sh("docker pull ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}")
+ sh("""docker tag ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag} \
opensourcemano/${moduleName}:${dockerTag}""")
sh "docker push opensourcemano/${moduleName}:${dockerTag}"
}
}
parallel parallelSteps
}
-
- stage('Snap promotion') {
- snaps = ['osmclient']
- sh 'snapcraft login --with ~/.snapcraft/config'
- for (snap in snaps) {
- channel = 'latest/'
- if (BRANCH_NAME.startsWith('v')) {
- channel = BRANCH_NAME.substring(1) + '/'
- } else if (BRANCH_NAME != 'master') {
- channel += '/' + BRANCH_NAME.replaceAll('/', '-')
- }
- track = channel + 'edge\\*'
- edge_rev = sh(returnStdout: true,
- script: "snapcraft revisions $snap | " +
- "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
- print "edge rev is $edge_rev"
- track = channel + 'beta\\*'
- beta_rev = sh(returnStdout: true,
- script: "snapcraft revisions $snap | " +
- "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
- print "beta rev is $beta_rev"
-
- if (edge_rev != beta_rev) {
- print "Promoting $edge_rev to beta in place of $beta_rev"
- beta_track = channel + 'beta'
- sh "snapcraft release $snap $edge_rev $beta_track"
- }
- }
- } // stage('Snap promotion')
} // if (params.DO_DOCKERPUSH)
} // stage('Archive')
} // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
} // dir(OSM_DEVOPS)
} finally {
- if ( params.DO_INSTALL && server_id != null) {
- delete_vm = true
- if (error && params.SAVE_CONTAINER_ON_FAIL ) {
- delete_vm = false
- }
- if (!error && params.SAVE_CONTAINER_ON_PASS ) {
- delete_vm = false
- }
+ // stage('Debug') {
+ // sleep 900
+ // }
+ stage('Archive Container Logs') {
+ if ( ARCHIVE_LOGS_FLAG ) {
+ try {
+ // Archive logs
+ remote = [
+ name: containerName,
+ host: IP_ADDRESS,
+ user: 'ubuntu',
+ identityFile: SSH_KEY,
+ allowAnyHosts: true,
+ logLevel: 'INFO',
+ pty: true
+ ]
+ println('Archiving container logs')
+ archive_logs(remote)
+ } catch (Exception e) {
+ println('Error fetching logs: '+ e.getMessage())
+ }
+ } // end if ( ARCHIVE_LOGS_FLAG )
+ }
+ stage('Cleanup') {
+ if ( params.DO_INSTALL && server_id != null) {
+ delete_vm = true
+ if (error && params.SAVE_CONTAINER_ON_FAIL ) {
+ delete_vm = false
+ }
+ if (!error && params.SAVE_CONTAINER_ON_PASS ) {
+ delete_vm = false
+ }
- if ( delete_vm ) {
- if (server_id != null) {
- println("Deleting VM: $server_id")
- sh """#!/bin/sh -e
- for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
- openstack server delete ${server_id}
- """
- } else {
- println("Saved VM $server_id in ETSI VIM")
+ if ( delete_vm ) {
+ if (server_id != null) {
+ println("Deleting VM: $server_id")
+ sh """#!/bin/sh -e
+ for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+ openstack server delete ${server_id}
+ """
+ } else {
+ println("Saved VM $server_id in ETSI VIM")
+ }
}
}
- }
- if ( http_server_name != null ) {
- sh "docker stop ${http_server_name} || true"
- sh "docker rm ${http_server_name} || true"
- }
+ if ( http_server_name != null ) {
+ sh "docker stop ${http_server_name} || true"
+ sh "docker rm ${http_server_name} || true"
+ }
- if ( devopstempdir != null ) {
- sh "rm -rf ${devopstempdir}"
+ if ( devopstempdir != null ) {
+ sh "rm -rf ${devopstempdir}"
+ }
}
}
}