Run stage_3 install in new VM 13/10013/5
author: beierlm <mark.beierl@canonical.com>
Thu, 19 Nov 2020 16:39:36 +0000 (11:39 -0500)
committer: beierlm <mark.beierl@canonical.com>
Wed, 25 Nov 2020 11:58:47 +0000 (12:58 +0100)
Adds a parameter for installer to use in test.
Adds prometheus port to robot test script.
Adds a hostname mapping file for the robot container to avoid
use of xip.io, as it can sometimes fail.
Wraps execution of robot container in try block so we
can collect log files even if it fails.
Changes collection of container logs to use SSH and
SCP to pull logs back to Jenkins for archival.
Changes the Apache HTTPD to run on a specific port so
remote VM can access it.
After docker build, pushes docker images to GitLab
at osm.etsi.org:5050/devops/cicd
Launches new VM in ETSI VIM to run the installer.
Changes install to use officially published installer
from osm-download.etsi.org.
Runs installer in new VM using images from GitLab.
Sets K8s cluster to dockerhub images from internal
OSM1 dockerhub proxy.
Changes cleanup to remove VM instead of uninstalling OSM.
Ensures HTTP server is removed on job completion.
Changes the Makefile to use a registry for push if supplied.

Change-Id: I430173fb0b01720fa29fd971f924ff908ce69fbf
Signed-off-by: beierlm <mark.beierl@canonical.com>
docker/mk/Makefile.include
jenkins/ci-pipelines/ci_helper.groovy
jenkins/ci-pipelines/ci_stage_3.groovy

index e151748..e1a6aff 100644 (file)
@@ -26,6 +26,7 @@ RELEASE         ?= ReleaseEIGHT-daily
 REPOSITORY_KEY  ?= OSM%20ETSI%20Release%20Key.gpg
 REPOSITORY      ?= testing
 NO_CACHE        ?= --no-cache
+DOCKER_REGISTRY     ?= ""
 
 LOWER_MDG = $(shell echo $(MDG) | tr '[:upper:]' '[:lower:]')
 
@@ -81,7 +82,7 @@ clean:
        rm -f $(MKBUILD)/.dep*
 
 tag:
-       docker tag opensourcemano/$(CONTAINER_NAME):$(INPUT_TAG) opensourcemano/$(LOWER_MDG):$(TAG)
+       docker tag opensourcemano/$(CONTAINER_NAME):$(INPUT_TAG) $(DOCKER_REGISTRY)opensourcemano/$(LOWER_MDG):$(TAG)
 
 push: tag
-       docker push opensourcemano/$(LOWER_MDG):$(TAG)
+       docker push $(DOCKER_REGISTRY)opensourcemano/$(LOWER_MDG):$(TAG)
index 1e724ff..a2ededc 100644 (file)
@@ -55,8 +55,8 @@ def lxc_file_push(container_name,file,destination) {
 
 // start a http server
 // return the http server URL
-def start_http_server(repo_dir,server_name) {
-    sh "docker run -dit --name ${server_name} -v ${repo_dir}:/usr/local/apache2/htdocs/ httpd:2.4"
+def start_http_server(repo_dir,server_name,port) {
+    sh "docker run -dit --name ${server_name} -p ${port}:80 -v ${repo_dir}:/usr/local/apache2/htdocs/ httpd:2.4"
     def http_server_ip = sh(returnStdout:true,  script: "docker inspect --format '{{ .NetworkSettings.IPAddress }}' ${server_name}").trim()
     return "http://${http_server_ip}/"
 }
index d87ad95..6523deb 100644 (file)
  *   under the License.
  */
 
-/* Change log:
- * 1. Bug 745 : Jayant Madavi, Mrityunjay Yadav : JM00553988@techmahindra.com : 23-july-2019 : Improvement to the code, typically we have 2 *    or more branches whose build gets triggered, ex master & release branch, the previous code was removing any/all docker.
- *       Now removing previous docker of the same branch, so that the other branch failed docker should not be removed. It also
- *    acts as clean-up for previous docker remove failure.
- * 2. Feature 7829 : Mrityunjay Yadav, Jayant Madavi: MY00514913@techmahindra.com : 19-Aug-2019 : Added a parameters & function to invoke Robot test.
- */
-
 properties([
     parameters([
         string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
@@ -54,6 +47,7 @@ properties([
         string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
         string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
         string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
+        string(defaultValue: 'Charmed', description: '', name: 'INSTALLER'),
     ])
 ])
 
@@ -77,38 +71,79 @@ def run_systest(stackName,tagName,testName,envfile=null) {
     junit  '*.xml'
 }
 
-def run_robot_systest(stackName,tagName,testName,envfile=null,kubeconfig=null,clouds=null) {
+def run_robot_systest(tagName,testName,osmHostname,prometheusHostname,prometheus_port=null,envfile=null,kubeconfig=null,clouds=null,hostfile=null) {
     tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
     if ( !envfile )
     {
         sh(script: "touch ${tempdir}/env")
         envfile="${tempdir}/env"
     }
-    sh "docker run --network net${stackName} --env OSM_HOSTNAME=${stackName}_nbi --env PROMETHEUS_HOSTNAME=${stackName}_prometheus --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports opensourcemano/tests:${tagName} -c -t ${testName}"
-    sh "cp ${tempdir}/* ."
-    outputDirectory = sh(returnStdout: true, script: "pwd").trim()
-    println ("Present Directory is : ${outputDirectory}")
-    step([
-        $class : 'RobotPublisher',
-        outputPath : "${outputDirectory}",
-        outputFileName : "*.xml",
-        disableArchiveOutput : false,
-        reportFileName : "report.html",
-        logFileName : "log.html",
-        passThreshold : 0,
-        unstableThreshold: 0,
-        otherFiles : "*.png",
-    ])
+    PROMETHEUS_PORT_VAR = ""
+    if ( prometheusPort != null) {
+        PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT="+prometheusPort
+    }
+    hostfilemount=""
+    if ( hostfile ) {
+        hostfilemount="-v "+hostfile+":/etc/hosts"
+    }
+
+    try {
+        sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}"
+    } finally {
+        sh "cp ${tempdir}/* ."
+        outputDirectory = sh(returnStdout: true, script: "pwd").trim()
+        println ("Present Directory is : ${outputDirectory}")
+        step([
+            $class : 'RobotPublisher',
+            outputPath : "${outputDirectory}",
+            outputFileName : "*.xml",
+            disableArchiveOutput : false,
+            reportFileName : "report.html",
+            logFileName : "log.html",
+            passThreshold : 0,
+            unstableThreshold: 0,
+            otherFiles : "*.png",
+        ])
+    }
 }
 
-def archive_logs(stackName) {
-    sh "docker service ls |grep \"${stackName}\"| awk '{print \$2}' | xargs -iy docker ps -af name=y  --format \"{{.ID}} {{.Names}}\" --no-trunc | awk '{ print \"sudo cp /var/lib/docker/containers/\"\$1\"/\"\$1\"-json.log \"\$2\".log\"}' | xargs -iy bash -c y"
-    sh "sudo chown jenkins: osm*.log"
+def archive_logs(remote) {
+
+    sshCommand remote: remote, command: '''mkdir -p logs'''
+    if (useCharmedInstaller) {
+        sshCommand remote: remote, command: '''
+            for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
+                logfile=`echo $container | cut -d- -f1`
+                echo "Extracting log for $logfile"
+                kubectl logs -n osm $container 2>&1 > logs/$logfile.log
+            done
+        '''
+    } else {
+        // collect logs from k8s based installer...
+    }
+
+    sh "rm -rf logs"
+    sshCommand remote: remote, command: '''ls -al logs'''
+    sshGet remote: remote, from: 'logs', into: '.', override: true
+    sh "cp logs/* ."
     archiveArtifacts artifacts: '*.log'
 }
 
+def get_value(key, output) {
+    for (String line : output.split( '\n' )) {
+        data = line.split( '\\|' )
+        if (data.length > 1) {
+            if ( data[1].trim() == key ) {
+                return data[2].trim()
+            }
+        }
+    }
+}
+
 node("${params.NODE}") {
 
+    INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
+    SSH_KEY = '~/hive/cicd_rsa'
     sh 'env'
 
     tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./,"")
@@ -149,7 +184,6 @@ node("${params.NODE}") {
                            projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
 
                     // grab the build name/number
-                    //options = get_env_from_build('build.env')
                     build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER')
 
                     // grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge)
@@ -168,9 +202,6 @@ node("${params.NODE}") {
                            selector: [$class: 'SpecificBuildSelector', buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
                           ])
 
-                    //options = get_env_from_build('build.env')
-                    // grab the build name/number
-                    //build_num = sh(returnStdout:true,  script: "cat build.env | awk -F= '/BUILD_NUMBER/{print \$2}'").trim()
                     build_num = ci_helper.get_env_value('build.env','BUILD_NUMBER')
                     component = ci_helper.get_mdg_from_project(ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
 
@@ -203,6 +234,7 @@ node("${params.NODE}") {
 
                 // copy the public key into the release folder
                 // this pulls the key from the home dir of the current user (jenkins)
+                sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
                 sh "cp ~/${REPO_KEY_NAME} ."
 
                 // merge the change logs
@@ -216,7 +248,10 @@ node("${params.NODE}") {
             http_server_name = "${container_name}-apache"
 
             pwd = sh(returnStdout:true,  script: 'pwd').trim()
-            repo_base_url = ci_helper.start_http_server(pwd,http_server_name)
+            repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim()
+            repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port)
+            NODE_IP_ADDRESS=sh(returnStdout: true, script:
+                "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
         }
 
         // now pull the devops package and install in temporary location
@@ -224,24 +259,76 @@ node("${params.NODE}") {
         osm_devops_dpkg = sh(returnStdout: true, script: "find . -name osm-devops*.deb").trim()
         sh "dpkg -x ${osm_devops_dpkg} ${tempdir}"
         OSM_DEVOPS="${tempdir}/usr/share/osm-devops"
+        println("Repo base URL=${repo_base_url}")
     }
 
     dir(OSM_DEVOPS) {
+        def remote = [:]
         error = null
+
         if ( params.DO_BUILD ) {
             stage("Build") {
                 sh "make -C docker clean"
                 sh "make -C docker -j `nproc` Q= CMD_DOCKER_ARGS= TAG=${container_name} RELEASE=${params.RELEASE} REPOSITORY_BASE=${repo_base_url} REPOSITORY_KEY=${params.REPO_KEY_NAME} REPOSITORY=${params.REPO_DISTRO}"
             }
+
+            stage("Push to internal registry") {
+                withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
+                                usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
+                    sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
+                }
+                sh "make -C docker push INPUT_TAG=${container_name} TAG=${container_name} DOCKER_REGISTRY=${INTERNAL_DOCKER_REGISTRY}"
+            }
+
         }
 
         try {
+            useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")
+
             if ( params.DO_INSTALL ) {
-                stage("Install") {
 
-                    //will by default always delete containers on complete
-                    //sh "jenkins/system/delete_old_containers.sh ${container_name_prefix}"
+                stage("Spawn Remote VM") {
+                    println("Launching new VM")
+                    output=sh(returnStdout: true, script: """#!/bin/sh -e
+                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+                        openstack server create --flavor m1.xlarge \
+                                                --image ubuntu18.04 \
+                                                --key-name CICD \
+                                                --nic net-id=osm-ext \
+                                                ${container_name}
+                    """).trim()
+
+                    server_id = get_value('id', output)
+
+                    if (server_id == null) {
+                        println("VM launch output: ")
+                        println(output)
+                        throw new Exception("VM Launch failed")
+                    }
+                    println("Target VM is ${server_id}, waiting for IP address to be assigned")
+
+                    IP_ADDRESS = ""
 
+                    while (IP_ADDRESS == "") {
+                        output=sh(returnStdout: true, script: """#!/bin/sh -e
+                            for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+                            openstack server show ${server_id}
+                        """).trim()
+                        IP_ADDRESS = get_value('addresses', output)
+                    }
+                    IP_ADDRESS = IP_ADDRESS.split('=')[1]
+                    println("Waiting for VM at ${IP_ADDRESS} to be reachable")
+
+                    alive = false
+                    while (! alive) {
+                        output=sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim()
+                        println("output is [$output]")
+                        alive = output.contains("succeeded")
+                    }
+                    println("VM is ready and accepting ssh connections")
+                }
+
+                stage("Install") {
                     commit_id = ''
                     repo_distro = ''
                     repo_key_name = ''
@@ -271,65 +358,114 @@ node("${params.NODE}") {
                     {
                         repo_base_url = "-u ${params.REPOSITORY_BASE}"
                     }
-                    if ( params.DO_STAGE_4 ) {
-                        try {
-                        sh "docker stack list |grep \"${container_name_prefix}\"|  awk '{ print \$1 }'| xargs docker stack rm"
-                        }
-                        catch (caughtError) {
-                          println("Caught error: docker stack rm failed!")
+                    else
+                    {
+                        repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
+                    }
+
+                    remote.name = container_name
+                    remote.host = IP_ADDRESS
+                    remote.user = 'ubuntu'
+                    remote.identityFile = SSH_KEY
+                    remote.allowAnyHosts = true
+                    remote.logLevel = 'INFO'
+
+                    sshCommand remote: remote, command: """
+                        wget https://osm-download.etsi.org/ftp/osm-8.0-eight/install_osm.sh
+                        chmod +x ./install_osm.sh
+                    """
+
+                    if ( useCharmedInstaller ) {
+
+                        // Use local proxy for docker hub
+                        sshCommand remote: remote, command: '''
+                            sudo snap install microk8s --classic
+                            sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" /var/snap/microk8s/current/args/containerd-template.toml
+                            sudo systemctl restart snap.microk8s.daemon-containerd.service
+                            sudo snap alias microk8s.kubectl kubectl
+                            echo export PATH=/snap/bin:\${PATH} > ~/.bashrc
+                        '''
+
+                        withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
+                                        usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
+                            sshCommand remote: remote, command: """
+                                ./install_osm.sh -y \
+                                    ${repo_base_url} \
+                                    ${repo_key_name} \
+                                    ${release} -r unstable \
+                                    --charmed  \
+                                    --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
+                                    --tag ${container_name}
+                            """
                         }
+                        prometheusHostname = "prometheus."+IP_ADDRESS+".xip.io"
+                        prometheusPort = 80
+                        osmHostname = "nbi."+IP_ADDRESS+".xip.io:443"
+                    } else {
+                        // Run -k8s installer here
+                        // Update to use 172.21.1.1 as the dockerhub proxy
+                        // Specify registry to use for installer
+                        // set osmHostname, prometheusHostname, prometheusPort as needed
                     }
-                    sh """
-                        export PATH=$PATH:/snap/bin
-                        installers/full_install_osm.sh -y -s ${container_name} --test --nolxd --nodocker --nojuju --nohostports --nohostclient \
-                                                        --nodockerbuild -t ${container_name} \
-                                                        -w /tmp/osm \
-                                                        ${commit_id} \
-                                                        ${repo_distro} \
-                                                        ${repo_base_url} \
-                                                        ${repo_key_name} \
-                                                        ${release} \
-                                                        ${params.BUILD_FROM_SOURCE}
-                       """
                 }
             }
 
             stage_archive = false
             if ( params.DO_SMOKE ) {
                 stage("OSM Health") {
-                    sh "installers/osm_health.sh -s ${container_name}"
-                }
-                stage("Smoke") {
-                    run_systest(container_name,container_name,"smoke")
-                    // archive smoke success until stage_4 is ready
-
-                    if ( ! currentBuild.result.equals('UNSTABLE') ) {
-                        stage_archive = keep_artifacts
+                    if ( useCharmedInstaller ) {
+                        stackName = "osm"
                     } else {
-                       error = new Exception("Smoke test failed")
-                       currentBuild.result = 'FAILURE'
+                        stackName = container_name
                     }
+                    sshCommand remote: remote, command: """
+                        /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
+                    """
                 }
             }
 
             if ( params.DO_STAGE_4 ) {
                 // override stage_archive to only archive on stable
                 stage_archive = false
-                stage("System Integration Test") {
-                    if ( params.DO_ROBOT ) {
-                        run_robot_systest(container_name,container_name,params.TEST_NAME,params.ROBOT_VIM,params.KUBECONFIG,params.CLOUDS)
-                    } //else {
-                    run_systest(container_name,container_name,"openstack_stage_4",params.HIVE_VIM_1)
-                    //}
-                    // Archive logs to containers_logs.txt
-                    archive_logs(container_name)
-                    if ( ! currentBuild.result.equals('UNSTABLE') && ! currentBuild.result.equals('FAILURE')) {
-                        stage_archive = keep_artifacts
-                    } else {
-                       println ("Systest test failed, throwing error")
-                       error = new Exception("Systest test failed")
-                       currentBuild.result = 'FAILURE'
-                       throw error
+                try {
+                    stage("System Integration Test") {
+                        if ( params.DO_ROBOT ) {
+                            if( useCharmedInstaller ) {
+                                tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
+                                sh(script: "touch ${tempdir}/hosts")
+                                hostfile="${tempdir}/hosts"
+                                sh """cat << EOF > ${hostfile}
+127.0.0.1           localhost
+${remote.host}      prometheus.${remote.host}.xip.io nbi.${remote.host}.xip.io
+EOF"""
+                            } else {
+                                hostfile=null
+                            }
+
+                            run_robot_systest(
+                                container_name,
+                                params.TEST_NAME,
+                                osmHostname,
+                                prometheusHostname,
+                                prometheusPort,
+                                params.ROBOT_VIM,
+                                params.KUBECONFIG,
+                                params.CLOUDS,
+                                hostfile)
+                        }
+                    }
+                } finally {
+                    stage("Archive Contailer Logs") {
+                        // Archive logs to containers_logs.txt
+                        archive_logs(remote)
+                        if ( ! currentBuild.result.equals('UNSTABLE') && ! currentBuild.result.equals('FAILURE')) {
+                            stage_archive = keep_artifacts
+                        } else {
+                            println ("Systest test failed, throwing error")
+                            error = new Exception("Systest test failed")
+                            currentBuild.result = 'FAILURE'
+                            throw error
+                        }
                     }
                 }
             }
@@ -390,26 +526,33 @@ node("${params.NODE}") {
         catch(Exception ex) {
             error = ex
             currentBuild.result = 'FAILURE'
-            println("Caught error")
-            println(ex.getMessage())
+            println("Caught error: "+ex)
         }
         finally {
-            if ( params.DO_INSTALL ) {
-                if (error) {
-                    if ( !params.SAVE_CONTAINER_ON_FAIL ) {
-                        uninstall_osm container_name
-                        sh "docker stop ${http_server_name} || true"
-                        sh "docker rm ${http_server_name} || true"
-                    }
+            println("Entered finally block")
+            if ( params.DO_INSTALL && server_id != null) {
+                delete_vm = true
+                if (error && params.SAVE_CONTAINER_ON_FAIL ) {
+                    delete_vm = false
+                }
+                if (!error && params.SAVE_CONTAINER_ON_PASS ) {
+                    delete_vm = false
                 }
-                else {
-                    if ( !params.SAVE_CONTAINER_ON_PASS ) {
-                        uninstall_osm container_name
-                        sh "docker stop ${http_server_name} || true"
-                        sh "docker rm ${http_server_name} || true"
+
+                if ( delete_vm ) {
+                    if (server_id != null) {
+                        println("Deleting VM: $server_id")
+                        sh """#!/bin/sh -e
+                            for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
+                            openstack server delete ${server_id}
+                        """
+                    } else {
+                        println("Saved VM $server_id in ETSI VIM")
                     }
                 }
             }
+            sh "docker stop ${http_server_name} || true"
+            sh "docker rm ${http_server_name} || true"
         }
     }
-}
+}
\ No newline at end of file