* under the License.
*/
-/* Change log:
- * 1. Bug 745 : Jayant Madavi, Mrityunjay Yadav : JM00553988@techmahindra.com : 23-july-2019 : Improvement to the code, typically we have 2 * or more branches whose build gets triggered, ex master & release branch, the previous code was removing any/all docker.
- * Now removing previous docker of the same branch, so that the other branch failed docker should not be removed. It also
- * acts as clean-up for previous docker remove failure.
- * 2. Feature 7829 : Mrityunjay Yadav, Jayant Madavi: MY00514913@techmahindra.com : 19-Aug-2019 : Added a parameters & function to invoke Robot test.
- */
-
properties([
parameters([
string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
+ string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
])
])
sh """
export OSM_USE_LOCAL_DEVOPS=true
export PATH=$PATH:/snap/bin
- installers/full_install_osm.sh -y -w /tmp/osm -t ${stackName} -s ${stackName} --test --nolxd --nodocker --nojuju --nohostports --nohostclient --uninstall
+ installers/full_install_osm.sh -y -c swarm -w /tmp/osm -t ${stackName} -s ${stackName} --test --nolxd --nodocker --nojuju --nohostports --nohostclient --uninstall
"""
}
junit '*.xml'
}
// Run the Robot Framework system tests inside the opensourcemano/tests
// container and publish the resulting reports with RobotPublisher.
//
// tagName:            docker tag of the tests container to run
// testName:           robot tag selecting which tests to execute (-t)
// osmHostname:        NBI endpoint exported as OSM_HOSTNAME
// prometheusHostname: Prometheus endpoint exported as PROMETHEUS_HOSTNAME
// prometheusPort:     optional Prometheus port (exported only when non-null)
// envfile:            optional --env-file; an empty temp file is used if null
// kubeconfig:         file mounted as /root/.kube/config in the container
// clouds:             file mounted as /etc/openstack/clouds.yaml
// hostfile:           optional file mounted as /etc/hosts (charmed installs)
// jujuPassword:       optional password exported as JUJU_PASSWORD
def run_robot_systest(tagName,testName,osmHostname,prometheusHostname,prometheusPort=null,envfile=null,kubeconfig=null,clouds=null,hostfile=null,jujuPassword=null) {
    tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
    if ( !envfile )
    {
        // No environment file supplied: create an empty one so that
        // --env-file below always receives a valid argument.
        sh(script: "touch ${tempdir}/env")
        envfile="${tempdir}/env"
    }
    // NOTE: parameter was previously declared as prometheus_port while the
    // body read prometheusPort, so the argument was silently ignored and a
    // pipeline-global binding was picked up instead. Names now agree.
    PROMETHEUS_PORT_VAR = ""
    if ( prometheusPort != null ) {
        PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT="+prometheusPort
    }
    hostfilemount=""
    if ( hostfile ) {
        hostfilemount="-v "+hostfile+":/etc/hosts"
    }

    JUJU_PASSWORD_VAR = ""
    if ( jujuPassword != null ) {
        JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD="+jujuPassword
    }

    try {
        sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}"
    } finally {
        // Always collect and publish the robot reports, even when the
        // test run itself failed, so failures can be diagnosed.
        sh "cp ${tempdir}/* ."
        outputDirectory = sh(returnStdout: true, script: "pwd").trim()
        println ("Present Directory is : ${outputDirectory}")
        step([
            $class : 'RobotPublisher',
            outputPath : "${outputDirectory}",
            outputFileName : "*.xml",
            disableArchiveOutput : false,
            reportFileName : "report.html",
            logFileName : "log.html",
            passThreshold : 0,
            unstableThreshold: 0,
            otherFiles : "*.png",
        ])
    }
}
// Collect per-service logs from the remote OSM host into a remote "logs"
// directory, pull them into the workspace, and archive them as Jenkins
// artifacts. Relies on the pipeline-global useCharmedInstaller flag to
// choose the log source (kubectl pods for charmed, docker swarm services
// otherwise).
def archive_logs(remote) {
    sshCommand remote: remote, command: '''mkdir -p logs'''
    if (useCharmedInstaller) {
        // Charmed install: one log file per pod, named after the pod's
        // name prefix (text before the first '-').
        sshCommand remote: remote, command: '''
            for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME | awk '{print $1}'`; do
                logfile=`echo $container | cut -d- -f1`
                echo "Extracting log for $logfile"
                kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
            done
        '''
    } else {
        // Swarm install: one log file per docker service. Filter out the
        // header row explicitly; the previous grep on ${stackName} never
        // interpolated inside this single-quoted string (expanding to ""
        // on the remote shell) and the awk quoting was unbalanced, which
        // broke the remote command.
        sshCommand remote: remote, command: '''
            for service in `docker service ls | grep -v NAME | awk '{print $2}'`; do
                echo "Extracting log for $service"
                docker service logs $service --timestamps 2>&1 > logs/$service.log
            done
        '''
    }

    // Remove any stale local copy before fetching the fresh remote logs.
    sh "rm -rf logs"
    sshCommand remote: remote, command: '''ls -al logs'''
    sshGet remote: remote, from: 'logs', into: '.', override: true
    sh "cp logs/* ."
    archiveArtifacts artifacts: '*.log'
}
+
// Parse openstack-CLI style table output ("| field | value |" rows) and
// return the value column for the requested key, or null when the key is
// not present in any row.
def get_value(key, output) {
    def row = output.split( '\n' ).find { line ->
        def cols = line.split( '\\|' )
        cols.length > 1 && cols[1].trim() == key
    }
    return row == null ? null : row.split( '\\|' )[2].trim()
}
node("${params.NODE}") {
+ INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
+ INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
+ SSH_KEY = '~/hive/cicd_rsa'
sh 'env'
tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./,"")
projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
// grab the build name/number
- //options = get_env_from_build('build.env')
build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER')
// grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge)
selector: [$class: 'SpecificBuildSelector', buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
])
- //options = get_env_from_build('build.env')
- // grab the build name/number
- //build_num = sh(returnStdout:true, script: "cat build.env | awk -F= '/BUILD_NUMBER/{print \$2}'").trim()
build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER')
component = ci_helper.get_mdg_from_project(ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
// copy the public key into the release folder
// this pulls the key from the home dir of the current user (jenkins)
+ sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
sh "cp ~/${REPO_KEY_NAME} ."
// merge the change logs
http_server_name = "${container_name}-apache"
pwd = sh(returnStdout:true, script: 'pwd').trim()
- repo_base_url = ci_helper.start_http_server(pwd,http_server_name)
+ repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim()
+ repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port)
+ NODE_IP_ADDRESS=sh(returnStdout: true, script:
+ "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
}
// now pull the devops package and install in temporary location
osm_devops_dpkg = sh(returnStdout: true, script: "find . -name osm-devops*.deb").trim()
sh "dpkg -x ${osm_devops_dpkg} ${tempdir}"
OSM_DEVOPS="${tempdir}/usr/share/osm-devops"
+ println("Repo base URL=${repo_base_url}")
}
dir(OSM_DEVOPS) {
+ def remote = [:]
error = null
+
if ( params.DO_BUILD ) {
stage("Build") {
sh "make -C docker clean"
sh "make -C docker -j `nproc` Q= CMD_DOCKER_ARGS= TAG=${container_name} RELEASE=${params.RELEASE} REPOSITORY_BASE=${repo_base_url} REPOSITORY_KEY=${params.REPO_KEY_NAME} REPOSITORY=${params.REPO_DISTRO}"
}
+
+ stage("Push to internal registry") {
+ withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
+ usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
+ sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
+ }
+ sh "make -C docker push INPUT_TAG=${container_name} TAG=${container_name} DOCKER_REGISTRY=${INTERNAL_DOCKER_REGISTRY}"
+ }
+
}
try {
+ useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")
+
if ( params.DO_INSTALL ) {
- stage("Install") {
- //will by default always delete containers on complete
- //sh "jenkins/system/delete_old_containers.sh ${container_name_prefix}"
+ stage("Spawn Remote VM") {
+ println("Launching new VM")
+ output=sh(returnStdout: true, script: """#!/bin/sh -e
+ for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+ openstack server create --flavor m1.xlarge \
+ --image ubuntu18.04 \
+ --key-name CICD \
+ --nic net-id=osm-ext \
+ ${container_name}
+ """).trim()
+
+ server_id = get_value('id', output)
+
+ if (server_id == null) {
+ println("VM launch output: ")
+ println(output)
+ throw new Exception("VM Launch failed")
+ }
+ println("Target VM is ${server_id}, waiting for IP address to be assigned")
+
+ IP_ADDRESS = ""
+ while (IP_ADDRESS == "") {
+ output=sh(returnStdout: true, script: """#!/bin/sh -e
+ for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+ openstack server show ${server_id}
+ """).trim()
+ IP_ADDRESS = get_value('addresses', output)
+ }
+ IP_ADDRESS = IP_ADDRESS.split('=')[1]
+ println("Waiting for VM at ${IP_ADDRESS} to be reachable")
+
+ alive = false
+ while (! alive) {
+ output=sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim()
+ println("output is [$output]")
+ alive = output.contains("succeeded")
+ }
+ println("VM is ready and accepting ssh connections")
+ }
+
+ stage("Install") {
commit_id = ''
repo_distro = ''
repo_key_name = ''
{
repo_base_url = "-u ${params.REPOSITORY_BASE}"
}
- if ( params.DO_STAGE_4 ) {
- try {
- sh "docker stack list |grep \"${container_name_prefix}\"| awk '{ print \$1 }'| xargs docker stack rm"
- }
- catch (caughtError) {
- println("Caught error: docker stack rm failed!")
- }
- }
- sh """
- export PATH=$PATH:/snap/bin
- installers/full_install_osm.sh -y -s ${container_name} --test --nolxd --nodocker --nojuju --nohostports --nohostclient \
- --nodockerbuild -t ${container_name} \
- -w /tmp/osm \
- ${commit_id} \
- ${repo_distro} \
- ${repo_base_url} \
- ${repo_key_name} \
- ${release} \
- ${params.BUILD_FROM_SOURCE}
- """
+ else
+ {
+ repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
+ }
+
+ remote.name = container_name
+ remote.host = IP_ADDRESS
+ remote.user = 'ubuntu'
+ remote.identityFile = SSH_KEY
+ remote.allowAnyHosts = true
+ remote.logLevel = 'INFO'
+ remote.pty = true
+
+ sshCommand remote: remote, command: """
+ wget https://osm-download.etsi.org/ftp/osm-8.0-eight/install_osm.sh
+ chmod +x ./install_osm.sh
+ sed -i '1 i\\export PATH=/snap/bin:\${PATH}' ~/.bashrc
+ """
+
+ if ( useCharmedInstaller ) {
+
+ // Use local proxy for docker hub
+ sshCommand remote: remote, command: '''
+ sudo snap install microk8s --classic --channel=1.19/stable
+ sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" /var/snap/microk8s/current/args/containerd-template.toml
+ sudo systemctl restart snap.microk8s.daemon-containerd.service
+ sudo snap alias microk8s.kubectl kubectl
+ '''
+
+ withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
+ usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
+ sshCommand remote: remote, command: """
+ ./install_osm.sh -y \
+ ${repo_base_url} \
+ ${repo_key_name} \
+ ${release} -r unstable \
+ --charmed \
+ --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
+ --tag ${container_name}
+ """
+ }
+ prometheusHostname = "prometheus."+IP_ADDRESS+".xip.io"
+ prometheusPort = 80
+ osmHostname = "nbi."+IP_ADDRESS+".xip.io:443"
+ } else {
+ // Run -k8s installer here specifying internal docker registry and docker proxy
+ withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
+ usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
+ sshCommand remote: remote, command: """
+ ./install_osm.sh -y \
+ ${repo_base_url} \
+ ${repo_key_name} \
+ ${release} -r unstable \
+ -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
+ -p ${INTERNAL_DOCKER_PROXY} \
+ -t ${container_name}
+ """
+ }
+ prometheusHostname = IP_ADDRESS
+ prometheusPort = 9091
+ osmHostname = IP_ADDRESS
+ }
}
}
stage_archive = false
if ( params.DO_SMOKE ) {
stage("OSM Health") {
- sh "installers/osm_health.sh -s ${container_name}"
- }
- stage("Smoke") {
- run_systest(container_name,container_name,"smoke")
- // archive smoke success until stage_4 is ready
-
- if ( ! currentBuild.result.equals('UNSTABLE') ) {
- stage_archive = keep_artifacts
- } else {
- error = new Exception("Smoke test failed")
- currentBuild.result = 'FAILURE'
- }
+ stackName = "osm"
+ sshCommand remote: remote, command: """
+ /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
+ """
}
}
if ( params.DO_STAGE_4 ) {
// override stage_archive to only archive on stable
stage_archive = false
- stage("System Integration Test") {
- if ( params.DO_ROBOT ) {
- run_robot_systest(container_name,container_name,params.TEST_NAME,params.ROBOT_VIM,params.KUBECONFIG,params.CLOUDS)
- } //else {
- run_systest(container_name,container_name,"openstack_stage_4",params.HIVE_VIM_1)
- //}
- // Archive logs to containers_logs.txt
- archive_logs(container_name)
- if ( ! currentBuild.result.equals('UNSTABLE') && ! currentBuild.result.equals('FAILURE')) {
- stage_archive = keep_artifacts
- } else {
- println ("Systest test failed, throwing error")
- error = new Exception("Systest test failed")
- currentBuild.result = 'FAILURE'
- throw error
- }
+ try {
+ stage("System Integration Test") {
+ if ( params.DO_ROBOT ) {
+ if( useCharmedInstaller ) {
+ tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
+ sh(script: "touch ${tempdir}/hosts")
+ hostfile="${tempdir}/hosts"
+ sh """cat << EOF > ${hostfile}
+127.0.0.1 localhost
+${remote.host} prometheus.${remote.host}.xip.io nbi.${remote.host}.xip.io
+EOF"""
+ } else {
+ hostfile=null
+ }
+
+ jujuPassword=sshCommand remote: remote, command: """
+ echo `juju gui 2>&1 | grep password | cut -d: -f2`
+ """
+
+ run_robot_systest(
+ container_name,
+ params.TEST_NAME,
+ osmHostname,
+ prometheusHostname,
+ prometheusPort,
+ params.ROBOT_VIM,
+ params.KUBECONFIG,
+ params.CLOUDS,
+ hostfile,
+ jujuPassword)
+ }
+ }
+ } finally {
+ stage("Archive Container Logs") {
+ // Archive logs to containers_logs.txt
+ archive_logs(remote)
+ if ( ! currentBuild.result.equals('UNSTABLE') && ! currentBuild.result.equals('FAILURE')) {
+ stage_archive = keep_artifacts
+ } else {
+ println ("Systest test failed, throwing error")
+ error = new Exception("Systest test failed")
+ currentBuild.result = 'FAILURE'
+ throw error
+ }
+ }
}
}
stage("Docker Push") {
sh "make -C docker push INPUT_TAG=${container_name} TAG=${params.DOCKER_TAG}"
}
+
+ stage("Snap promotion") {
+ def snaps = ["osmclient"]
+ for (snap in snaps) {
+ channel=""
+ if (BRANCH_NAME.startsWith("v")) {
+ channel=BRANCH_NAME.substring(1)+"/"
+ } else if (BRANCH_NAME!="master") {
+ channel+="/"+BRANCH_NAME.replaceAll('/','-')
+ }
+ track=channel+"edge\\*"
+ edge_rev=sh(returnStdout: true,
+ script: "sudo docker run -v ~/.snapcraft:/snapcraft -v ${WORKSPACE}:/build " +
+ "-w /build snapcore/snapcraft:stable /bin/bash -c " +
+ "\"snapcraft login --with /snapcraft/config &>/dev/null && " +
+ "snapcraft revisions $snap\" | " +
+ "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
+ track=channel+"beta\\*"
+ beta_rev=sh(returnStdout: true,
+ script: "sudo docker run -v ~/.snapcraft:/snapcraft -v ${WORKSPACE}:/build " +
+ "-w /build snapcore/snapcraft:stable /bin/bash -c " +
+ "\"snapcraft login --with /snapcraft/config &>/dev/null && " +
+ "snapcraft revisions $snap\" | " +
+ "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
+
+ if ( edge_rev != beta_rev ) {
+ print "Promoting $edge_rev to beta in place of $beta_rev"
+ beta_track=channel+"beta"
+ sh("sudo docker run -v ~/.snapcraft:/snapcraft -v ${WORKSPACE}:/build " +
+ "-w /build snapcore/snapcraft:stable /bin/bash -c " +
+ "\"snapcraft login --with /snapcraft/config &>/dev/null && " +
+ "snapcraft release $snap $edge_rev $beta_track\"")
+ }
+ }
+ }
}
}
}
catch(Exception ex) {
error = ex
currentBuild.result = 'FAILURE'
- println("Caught error")
- println(ex.getMessage())
+ println("Caught error: "+ex)
}
finally {
- if ( params.DO_INSTALL ) {
- if (error) {
- if ( !params.SAVE_CONTAINER_ON_FAIL ) {
- uninstall_osm container_name
- sh "docker stop ${http_server_name} || true"
- sh "docker rm ${http_server_name} || true"
- }
+ println("Entered finally block")
+ if ( params.DO_INSTALL && server_id != null) {
+ delete_vm = true
+ if (error && params.SAVE_CONTAINER_ON_FAIL ) {
+ delete_vm = false
}
- else {
- if ( !params.SAVE_CONTAINER_ON_PASS ) {
- uninstall_osm container_name
- sh "docker stop ${http_server_name} || true"
- sh "docker rm ${http_server_name} || true"
+ if (!error && params.SAVE_CONTAINER_ON_PASS ) {
+ delete_vm = false
+ }
+
+ if ( delete_vm ) {
+ if (server_id != null) {
+ println("Deleting VM: $server_id")
+ sh """#!/bin/sh -e
+ for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+ openstack server delete ${server_id}
+ """
+ } else {
+ println("Saved VM $server_id in ETSI VIM")
}
}
}
+ sh "docker stop ${http_server_name} || true"
+ sh "docker rm ${http_server_name} || true"
}
}
}