Adding logging around repo
[osm/devops.git] / jenkins / ci-pipelines / ci_stage_3.groovy
1 /* Copyright ETSI Contributors and Others
2  *
3  * All Rights Reserved.
4  *
5  *   Licensed under the Apache License, Version 2.0 (the "License"); you may
6  *   not use this file except in compliance with the License. You may obtain
7  *   a copy of the License at
8  *
9  *        http://www.apache.org/licenses/LICENSE-2.0
10  *
11  *   Unless required by applicable law or agreed to in writing, software
12  *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13  *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14  *   License for the specific language governing permissions and limitations
15  *   under the License.
16  */
17
18 properties([
19     parameters([
20         string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
21         string(defaultValue: 'system', description: '', name: 'NODE'),
22         string(defaultValue: '', description: '', name: 'BUILD_FROM_SOURCE'),
23         string(defaultValue: 'unstable', description: '', name: 'REPO_DISTRO'),
24         string(defaultValue: '', description: '', name: 'COMMIT_ID'),
25         string(defaultValue: '-stage_2', description: '', name: 'UPSTREAM_SUFFIX'),
26         string(defaultValue: 'pubkey.asc', description: '', name: 'REPO_KEY_NAME'),
27         string(defaultValue: 'release', description: '', name: 'RELEASE'),
28         string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
29         string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
30         string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
31         string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
32         string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
33         string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
34         string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
35         string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
36         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
37         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
38         booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
39         booleanParam(defaultValue: true, description: '',  name: 'DO_BUILD'),
40         booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
41         booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
42         booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
43         string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
44         booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
45         string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options', name: 'ROBOT_TAG_NAME'),
46         string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
47         string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml', description: 'Port mapping file for SDN assist in ETSI VIM', name: 'ROBOT_PORT_MAPPING_VIM'),
48         string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
49         string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
50         string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
51         string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed', name: 'ROBOT_PASS_THRESHOLD'),
52         string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
53     ])
54 ])
55
56
57 ////////////////////////////////////////////////////////////////////////////////////////
58 // Helper Functions
59 ////////////////////////////////////////////////////////////////////////////////////////
60 void run_robot_systest(String tagName,
61                        String testName,
62                        String osmHostname,
63                        String prometheusHostname,
64                        Integer prometheusPort=null,
65                        String envfile=null,
66                        String portmappingfile=null,
67                        String kubeconfig=null,
68                        String clouds=null,
69                        String hostfile=null,
70                        String jujuPassword=null,
71                        String osmRSAfile=null,
72                        String pass_th='0.0',
73                        String unstable_th='0.0') {
74     tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
75     if ( !envfile ) {
76         sh(script: "touch ${tempdir}/env")
77         envfile="${tempdir}/env"
78     }
79     PROMETHEUS_PORT_VAR = ""
80     if ( prometheusPort != null) {
81         PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT="+prometheusPort
82     }
83     hostfilemount=""
84     if ( hostfile ) {
85         hostfilemount="-v "+hostfile+":/etc/hosts"
86     }
87
88     JUJU_PASSWORD_VAR = ""
89     if ( jujuPassword != null) {
90         JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD="+jujuPassword
91     }
92
93     try {
94         sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}"
95     } finally {
96         sh "cp ${tempdir}/* ."
97         outputDirectory = sh(returnStdout: true, script: "pwd").trim()
98         println ("Present Directory is : ${outputDirectory}")
99         step([
100             $class : 'RobotPublisher',
101             outputPath : "${outputDirectory}",
102             outputFileName : "*.xml",
103             disableArchiveOutput : false,
104             reportFileName : "report.html",
105             logFileName : "log.html",
106             passThreshold : pass_th,
107             unstableThreshold: unstable_th,
108             otherFiles : "*.png",
109         ])
110     }
111 }
112
113 def archive_logs(remote) {
114
115     sshCommand remote: remote, command: '''mkdir -p logs'''
116     if (useCharmedInstaller) {
117         sshCommand remote: remote, command: '''
118             for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
119                 logfile=`echo $container | cut -d- -f1`
120                 echo "Extracting log for $logfile"
121                 kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
122             done
123         '''
124     } else {
125         sshCommand remote: remote, command: '''
126             for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
127                 echo "Extracting log for $deployment"
128                 kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 > logs/$deployment.log
129             done
130         '''
131         sshCommand remote: remote, command: '''
132             for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
133                 echo "Extracting log for $statefulset"
134                 kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 > logs/$statefulset.log
135             done
136         '''
137     }
138
139     sh "rm -rf logs"
140     sshCommand remote: remote, command: '''ls -al logs'''
141     sshGet remote: remote, from: 'logs', into: '.', override: true
142     sh "cp logs/* ."
143     archiveArtifacts artifacts: '*.log'
144 }
145
146 def get_value(key, output) {
147     for (String line : output.split( '\n' )) {
148         data = line.split( '\\|' )
149         if (data.length > 1) {
150             if ( data[1].trim() == key ) {
151                 return data[2].trim()
152             }
153         }
154     }
155 }
156
157 ////////////////////////////////////////////////////////////////////////////////////////
158 // Main Script
159 ////////////////////////////////////////////////////////////////////////////////////////
160 node("${params.NODE}") {
161
162     INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
163     INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
164     SSH_KEY = '~/hive/cicd_rsa'
165     sh 'env'
166
167     tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./,"")
168
169     stage("Checkout") {
170         checkout scm
171     }
172
173     ci_helper = load "jenkins/ci-pipelines/ci_helper.groovy"
174
175     def upstream_main_job = params.UPSTREAM_SUFFIX
176
177     // upstream jobs always use merged artifacts
178     upstream_main_job += '-merge'
179     container_name_prefix = "osm-${tag_or_branch}"
180     container_name = "${container_name_prefix}"
181
182     keep_artifacts = false
183     if ( JOB_NAME.contains('merge') ) {
184         container_name += "-merge"
185
186         // On a merge job, we keep artifacts on smoke success
187         keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
188     }
189     container_name += "-${BUILD_NUMBER}"
190
191     server_id = null
192     http_server_name = null
193     devopstempdir = null
194     useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")
195
196     try {
197         builtModules = [:]
198 ///////////////////////////////////////////////////////////////////////////////////////
199 // Fetch stage 2 .deb artifacts
200 ///////////////////////////////////////////////////////////////////////////////////////
201         stage("Copy Artifacts") {
202             // cleanup any previous repo
203             sh "tree -fD repo || exit 0"
204             sh 'rm -rvf repo'
205             sh "tree -fD repo && lsof repo || exit 0"
206             dir("repo") {
207                 packageList = []
208                 dir("${RELEASE}") {
209                     RELEASE_DIR = sh(returnStdout:true,  script: 'pwd').trim()
210
211                     // check if an upstream artifact based on specific build number has been requested
212                     // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed
213                     // a successful build yet). The upstream job is calling this downstream job (with the its build artifiact)
214                     def upstreamComponent=""
215                     if ( params.UPSTREAM_JOB_NAME ) {
216                         println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
217                         lock('Artifactory') {
218                             step ([$class: 'CopyArtifact',
219                                 projectName: "${params.UPSTREAM_JOB_NAME}",
220                                 selector: [$class: 'SpecificBuildSelector',
221                                 buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
222                                 ])
223
224                             upstreamComponent = ci_helper.get_mdg_from_project(
225                                 ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
226                             def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
227                             dir("$upstreamComponent") {
228                                 // the upstream job name contains suffix with the project. Need this stripped off
229                                 def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
230                                 def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
231                                     upstreamComponent,
232                                     GERRIT_BRANCH,
233                                     "${project_without_branch} :: ${GERRIT_BRANCH}",
234                                     buildNumber)
235
236                                 packageList.addAll(packages)
237                                 println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
238                             }
239                         } // lock artifactory
240                     }
241
242                     parallelSteps = [:]
243                     def list = ["RO", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "NG-UI", "PLA", "tests"]
244                     if (upstreamComponent.length()>0) {
245                         println("Skipping upstream fetch of "+upstreamComponent)
246                         list.remove(upstreamComponent)
247                     }
248                     for (buildStep in list) {
249                         def component = buildStep
250                         parallelSteps[component] = {
251                             dir("$component") {
252                                 println("Fetching artifact for ${component}")
253                                 step ([$class: 'CopyArtifact',
254                                        projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
255
256                                 // grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge)
257                                 def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
258                                     component,
259                                     GERRIT_BRANCH,
260                                     "${component}${upstream_main_job} :: ${GERRIT_BRANCH}",
261                                     ci_helper.get_env_value('build.env','BUILD_NUMBER'))
262                                 packageList.addAll(packages)
263                                 println("Fetched ${component}: ${packages}")
264                                 sh "rm -rf dists"
265                             }
266                         }
267                     }
268                     lock('Artifactory') {
269                         parallel parallelSteps
270                     }
271
272 ///////////////////////////////////////////////////////////////////////////////////////
273 // Create Devops APT repository
274 ///////////////////////////////////////////////////////////////////////////////////////
275                     sh "mkdir -p pool"
276                     for (component in [ "devops", "IM", "osmclient" ]) {
277                         sh "ls -al ${component}/pool/"
278                         sh "cp -r ${component}/pool/* pool/"
279                         sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
280                         sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
281                         sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
282                         sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
283                     }
284
285                     // create and sign the release file
286                     sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
287                     sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"
288
289                     // copy the public key into the release folder
290                     // this pulls the key from the home dir of the current user (jenkins)
291                     sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
292                     sh "cp ~/${REPO_KEY_NAME} ."
293                 }
294
295                 // start an apache server to serve up the packages
296                 http_server_name = "${container_name}-apache"
297
298                 pwd = sh(returnStdout:true,  script: 'pwd').trim()
299                 repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
300                                'print(s.getsockname()[1]); s.close()\');',
301                                returnStdout: true).trim()
302                 internal_docker_http_server_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
303                 NODE_IP_ADDRESS = sh(returnStdout: true, script:
304                     "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
305                 ci_helper.check_status_http_server(NODE_IP_ADDRESS, repo_port)
306             }
307
308             sh "tree -fD repo"
309
310             // Unpack devops package into temporary location so that we use it from upstream if it was part of a patch
311             osm_devops_dpkg = sh(returnStdout: true, script: "find ./repo/release/pool/ -name osm-devops*.deb").trim()
312             devopstempdir = sh(returnStdout: true, script: "mktemp -d").trim()
313             println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
314             sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
315             OSM_DEVOPS="${devopstempdir}/usr/share/osm-devops"
316             // Convert URLs from stage 2 packages to arguments that can be passed to docker build
317             for (remotePath in packageList) {
318                 packageName=remotePath.substring(remotePath.lastIndexOf('/')+1)
319                 packageName=packageName.substring(0,packageName.indexOf('_'))
320                 builtModules[packageName]=remotePath
321             }
322         }
323
324 ///////////////////////////////////////////////////////////////////////////////////////
325 // Build docker containers
326 ///////////////////////////////////////////////////////////////////////////////////////
327         dir(OSM_DEVOPS) {
328             def remote = [:]
329             error = null
330             if ( params.DO_BUILD ) {
331                 withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
332                                 usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
333                     sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
334                 }
335                 moduleBuildArgs = ""
336                 for (packageName in builtModules.keySet()) {
337                     envName=packageName.replaceAll("-","_").toUpperCase()+"_URL"
338                     moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
339                 }
340                 dir ("docker") {
341                     stage("Build") {
342                         containerList = sh(returnStdout: true, script:
343                             "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
344                         containerList=Arrays.asList(containerList.split("\n"))
345                         print(containerList)
346                         parallelSteps = [:]
347                         for (buildStep in containerList) {
348                             def module = buildStep
349                             def moduleName = buildStep.toLowerCase()
350                             def moduleTag = container_name
351                             parallelSteps[module] = {
352                                 dir("$module") {
353                                     sh "docker build -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} ."
354                                     println("Tagging ${moduleName}:${moduleTag}")
355                                     sh "docker tag opensourcemano/${moduleName}:${moduleTag} ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
356                                     sh "docker push ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
357                                 }
358                             }
359                         }
360                         parallel parallelSteps
361                     }
362                 }
363             } // if ( params.DO_BUILD )
364
365             if ( params.DO_INSTALL ) {
366 ///////////////////////////////////////////////////////////////////////////////////////
367 // Launch VM
368 ///////////////////////////////////////////////////////////////////////////////////////
369                 stage("Spawn Remote VM") {
370                     println("Launching new VM")
371                     output=sh(returnStdout: true, script: """#!/bin/sh -e
372                         for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
373                         openstack server create --flavor osm.sanity \
374                                                 --image ${OPENSTACK_BASE_IMAGE} \
375                                                 --key-name CICD \
376                                                 --property build_url="${BUILD_URL}" \
377                                                 --nic net-id=osm-ext \
378                                                 ${container_name}
379                     """).trim()
380
381                     server_id = get_value('id', output)
382
383                     if (server_id == null) {
384                         println("VM launch output: ")
385                         println(output)
386                         throw new Exception("VM Launch failed")
387                     }
388                     println("Target VM is ${server_id}, waiting for IP address to be assigned")
389
390                     IP_ADDRESS = ""
391
392                     while (IP_ADDRESS == "") {
393                         output=sh(returnStdout: true, script: """#!/bin/sh -e
394                             for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
395                             openstack server show ${server_id}
396                         """).trim()
397                         IP_ADDRESS = get_value('addresses', output)
398                     }
399                     IP_ADDRESS = IP_ADDRESS.split('=')[1]
400                     println("Waiting for VM at ${IP_ADDRESS} to be reachable")
401
402                     alive = false
403                     timeout(time: 1, unit: 'MINUTES') {
404                         while (!alive) {
405                             output = sh(
406                                 returnStatus: true,
407                                 script: "ssh -T -i ${SSH_KEY} " +
408                                     "-o StrictHostKeyChecking=no " +
409                                     "-o UserKnownHostsFile=/dev/null " +
410                                     "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
411                             alive = (output == 0)
412                         }
413                     }
414                     println("VM is ready and accepting ssh connections")
415                 } // stage("Spawn Remote VM")
416
417 ///////////////////////////////////////////////////////////////////////////////////////
418 // Installation
419 ///////////////////////////////////////////////////////////////////////////////////////
420                 stage("Install") {
421                     commit_id = ''
422                     repo_distro = ''
423                     repo_key_name = ''
424                     release = ''
425
426                     if ( params.COMMIT_ID )
427                     {
428                         commit_id = "-b ${params.COMMIT_ID}"
429                     }
430
431                     if ( params.REPO_DISTRO )
432                     {
433                         repo_distro = "-r ${params.REPO_DISTRO}"
434                     }
435
436                     if ( params.REPO_KEY_NAME )
437                     {
438                         repo_key_name = "-k ${params.REPO_KEY_NAME}"
439                     }
440
441                     if ( params.RELEASE )
442                     {
443                         release = "-R ${params.RELEASE}"
444                     }
445
446                     if ( params.REPOSITORY_BASE )
447                     {
448                         repo_base_url = "-u ${params.REPOSITORY_BASE}"
449                     }
450                     else
451                     {
452                         repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
453                     }
454
455                     remote.name = container_name
456                     remote.host = IP_ADDRESS
457                     remote.user = 'ubuntu'
458                     remote.identityFile = SSH_KEY
459                     remote.allowAnyHosts = true
460                     remote.logLevel = 'INFO'
461                     remote.pty = true
462
463                     // Ensure the VM is ready
464                     sshCommand remote: remote, command: 'cloud-init status --wait'
465
466                     // Force time sync to avoid clock drift and invalid certificates
467                     sshCommand remote: remote, command: """
468                         sudo apt update
469                         sudo apt install -y chrony
470                         sudo service chrony stop
471                         sudo chrony -vq
472                         sudo service chrony start
473                     """
474
475                     sshCommand remote: remote, command: """
476                         wget https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh
477                         chmod +x ./install_osm.sh
478                         sed -i '1 i\\export PATH=/snap/bin:\${PATH}' ~/.bashrc
479                     """
480
481                     if ( useCharmedInstaller ) {
482                         // Use local proxy for docker hub
483                         sshCommand remote: remote, command: '''
484                             sudo snap install microk8s --classic --channel=1.19/stable
485                             sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" /var/snap/microk8s/current/args/containerd-template.toml
486                             sudo systemctl restart snap.microk8s.daemon-containerd.service
487                             sudo snap alias microk8s.kubectl kubectl
488                         '''
489
490                         withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
491                                         usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
492                             sshCommand remote: remote, command: """
493                                 ./install_osm.sh -y \
494                                     ${repo_base_url} \
495                                     ${repo_key_name} \
496                                     ${release} -r unstable \
497                                     --charmed  \
498                                     --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
499                                     --tag ${container_name}
500                             """
501                         }
502                         prometheusHostname = "prometheus."+IP_ADDRESS+".nip.io"
503                         prometheusPort = 80
504                         osmHostname = "nbi."+IP_ADDRESS+".nip.io:443"
505                     } else {
506                         // Run -k8s installer here specifying internal docker registry and docker proxy
507                         withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
508                                         usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
509                             sshCommand remote: remote, command: """
510                                 ./install_osm.sh -y \
511                                     ${repo_base_url} \
512                                     ${repo_key_name} \
513                                     ${release} -r unstable \
514                                     -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
515                                     -p ${INTERNAL_DOCKER_PROXY} \
516                                     -t ${container_name} \
517                                     --nocachelxdimages
518                             """
519                         }
520                         prometheusHostname = IP_ADDRESS
521                         prometheusPort = 9091
522                         osmHostname = IP_ADDRESS
523                     }
524                 } // stage("Install")
525 ///////////////////////////////////////////////////////////////////////////////////////
526 // Health check of installed OSM in remote vm
527 ///////////////////////////////////////////////////////////////////////////////////////
528                 stage("OSM Health") {
529                     stackName = "osm"
530                     sshCommand remote: remote, command: """
531                         /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
532                     """
533                 } // stage("OSM Health")
534             } // if ( params.DO_INSTALL )
535
536
537 ///////////////////////////////////////////////////////////////////////////////////////
538 // Execute Robot tests
539 ///////////////////////////////////////////////////////////////////////////////////////
540             stage_archive = false
541             if ( params.DO_ROBOT ) {
542                 try {
543                     stage("System Integration Test") {
544                         if ( useCharmedInstaller ) {
545                             tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
546                             sh(script: "touch ${tempdir}/hosts")
547                             hostfile="${tempdir}/hosts"
548                             sh """cat << EOF > ${hostfile}
549 127.0.0.1           localhost
550 ${remote.host}      prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
551 EOF"""
552                         } else {
553                             hostfile=null
554                         }
555
556                         jujuPassword=sshCommand remote: remote, command: """
557                             echo `juju gui 2>&1 | grep password | cut -d: -f2`
558                         """
559
560                         run_robot_systest(
561                             container_name,
562                             params.ROBOT_TAG_NAME,
563                             osmHostname,
564                             prometheusHostname,
565                             prometheusPort,
566                             params.ROBOT_VIM,
567                             params.ROBOT_PORT_MAPPING_VIM,
568                             params.KUBECONFIG,
569                             params.CLOUDS,
570                             hostfile,
571                             jujuPassword,
572                             SSH_KEY,
573                             params.ROBOT_PASS_THRESHOLD,
574                             params.ROBOT_UNSTABLE_THRESHOLD
575                         )
576                     } // stage("System Integration Test")
577                 } finally {
578                     stage("Archive Container Logs") {
579                         // Archive logs to containers_logs.txt
580                         archive_logs(remote)
581                         if ( ! currentBuild.result.equals('FAILURE') ) {
582                             stage_archive = keep_artifacts
583                         } else {
584                             println ("Systest test failed, throwing error")
585                             error = new Exception("Systest test failed")
586                             currentBuild.result = 'FAILURE'
587                             throw error
588                         }
589                     }
590                 }
591             } // if ( params.DO_ROBOT )
592
593             if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) {
594                 stage("Archive") {
595                     sh "echo ${container_name} > build_version.txt"
596                     archiveArtifacts artifacts: "build_version.txt", fingerprint: true
597
598                     // Archive the tested repo
599                     dir("${RELEASE_DIR}") {
600                         ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested')
601                     }
602                     if ( params.DO_DOCKERPUSH ) {
603                         stage("Publish to Dockerhub") {
604                             parallelSteps = [:]
605                             for (buildStep in containerList) {
606                                 def module = buildStep
607                                 def moduleName = buildStep.toLowerCase()
608                                 def dockerTag = params.DOCKER_TAG
609                                 def moduleTag = container_name
610
611                                 parallelSteps[module] = {
612                                     dir("$module") {
613                                         sh("docker pull ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}")
614                                         sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
615                                            opensourcemano/${moduleName}:${dockerTag}""")
616                                         sh "docker push opensourcemano/${moduleName}:${dockerTag}"
617                                     }
618                                 }
619                             }
620                             parallel parallelSteps
621                         }
622
623                         stage('Snap promotion') {
624                             withCredentials([string(credentialsId: 'Snapstore', variable: 'SNAPCRAFT_STORE_CREDENTIALS')]) {
625                                 snaps = ['osmclient']
626                                 for (snap in snaps) {
627                                     channel = 'latest/'
628                                     if (BRANCH_NAME.startsWith('v')) {
629                                         channel = BRANCH_NAME.substring(1) + '/'
630                                     } else if (BRANCH_NAME != 'master') {
631                                         channel += '/' + BRANCH_NAME.replaceAll('/', '-')
632                                     }
633                                     track = channel + 'edge\\*'
634                                     edge_rev = sh(returnStdout: true,
635                                         script: "snapcraft revisions $snap | " +
636                                         "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
637                                     print "edge rev is $edge_rev"
638                                     track = channel + 'beta\\*'
639                                     beta_rev = sh(returnStdout: true,
640                                         script: "snapcraft revisions $snap | " +
641                                         "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
642                                     print "beta rev is $beta_rev"
643
644                                     if (edge_rev != beta_rev) {
645                                         print "Promoting $edge_rev to beta in place of $beta_rev"
646                                         beta_track = channel + 'beta'
647                                         sh "snapcraft release $snap $edge_rev $beta_track"
648                                     }
649                                 }
650                             }
651                         } // stage("Snap promotion")
652                     } // if ( params.DO_DOCKERPUSH )
653                 } // stage("Archive")
654             } // if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive )
655         } // dir(OSM_DEVOPS)
656     } finally {
657         if ( params.DO_INSTALL && server_id != null) {
658             delete_vm = true
659             if (error && params.SAVE_CONTAINER_ON_FAIL ) {
660                 delete_vm = false
661             }
662             if (!error && params.SAVE_CONTAINER_ON_PASS ) {
663                 delete_vm = false
664             }
665
666             if ( delete_vm ) {
667                 if (server_id != null) {
668                     println("Deleting VM: $server_id")
669                     sh """#!/bin/sh -e
670                         for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
671                         openstack server delete ${server_id}
672                     """
673                 } else {
674                     println("Saved VM $server_id in ETSI VIM")
675                 }
676             }
677         }
678         if ( http_server_name != null ) {
679             sh "docker stop ${http_server_name} || true"
680             sh "docker rm ${http_server_name} || true"
681         }
682
683         if ( devopstempdir != null ) {
684             sh "rm -rf ${devopstempdir}"
685         }
686     }
687 }