/* Copyright ETSI Contributors and Others
 *
 * All Rights Reserved.
 *
 *   Licensed under the Apache License, Version 2.0 (the "License"); you may
 *   not use this file except in compliance with the License. You may obtain
 *   a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *   License for the specific language governing permissions and limitations
 *   under the License.
 */

properties([
    parameters([
        string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
        string(defaultValue: 'system', description: '', name: 'NODE'),
        string(defaultValue: '', description: '', name: 'BUILD_FROM_SOURCE'),
        string(defaultValue: 'unstable', description: '', name: 'REPO_DISTRO'),
        string(defaultValue: '', description: '', name: 'COMMIT_ID'),
        string(defaultValue: '-stage_2', description: '', name: 'UPSTREAM_SUFFIX'),
        string(defaultValue: 'pubkey.asc', description: '', name: 'REPO_KEY_NAME'),
        string(defaultValue: 'release', description: '', name: 'RELEASE'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
        string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
        string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
        string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
        string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
        string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
        booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
        booleanParam(defaultValue: true, description: '', name: 'DO_BUILD'),
        booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
        booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
        string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
        booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
        string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options', name: 'ROBOT_TAG_NAME'),
        string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
        string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml', description: 'Port mapping file for SDN assist in ETSI VIM', name: 'ROBOT_PORT_MAPPING_VIM'),
        string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
        string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
        string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
        string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed', name: 'ROBOT_PASS_THRESHOLD'),
        string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
    ])
])


////////////////////////////////////////////////////////////////////////////////////////
// Helper Functions
////////////////////////////////////////////////////////////////////////////////////////
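// Runs the Robot tests in the 'opensourcemano/tests:<tagName>' container against the given OSM
// instance and publishes the results with RobotPublisher, using pass_th/unstable_th as the
// pass/unstable thresholds. The clouds, kubeconfig, port-mapping and OSM ssh key files are
// mounted into the container; the hosts file, Juju password and Prometheus port are optional.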
void run_robot_systest(String tagName,
                       String testName,
                       String osmHostname,
                       String prometheusHostname,
                       Integer prometheusPort=null,
                       String envfile=null,
                       String portmappingfile=null,
                       String kubeconfig=null,
                       String clouds=null,
                       String hostfile=null,
                       String jujuPassword=null,
                       String osmRSAfile=null,
                       String pass_th='0.0',
                       String unstable_th='0.0') {
    tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    if ( !envfile ) {
        sh(script: "touch ${tempdir}/env")
        envfile="${tempdir}/env"
    }
    PROMETHEUS_PORT_VAR = ""
    if ( prometheusPort != null) {
        PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT="+prometheusPort
    }
    hostfilemount=""
    if ( hostfile ) {
        hostfilemount="-v "+hostfile+":/etc/hosts"
    }

    JUJU_PASSWORD_VAR = ""
    if ( jujuPassword != null) {
        JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD="+jujuPassword
    }

    try {
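        // Launch the tests container with the OSM/Prometheus endpoints in its environment and the
        // credential files mounted; reports are written to ${tempdir} and collected in the finally block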
94         sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}"
95     } finally {
96         sh "cp ${tempdir}/* ."
97         outputDirectory = sh(returnStdout: true, script: "pwd").trim()
98         println ("Present Directory is : ${outputDirectory}")
99         step([
100             $class : 'RobotPublisher',
101             outputPath : "${outputDirectory}",
102             outputFileName : "*.xml",
103             disableArchiveOutput : false,
104             reportFileName : "report.html",
105             logFileName : "log.html",
106             passThreshold : pass_th,
107             unstableThreshold: unstable_th,
108             otherFiles : "*.png",
109         ])
110     }
111 }
112
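// Collects logs from the OSM workloads running on the remote VM (per pod for the charmed
// installer, per deployment/statefulset otherwise), copies them into the workspace and
// archives them as *.log build artifacts.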
def archive_logs(remote) {

    sshCommand remote: remote, command: '''mkdir -p logs'''
    if (useCharmedInstaller) {
        sshCommand remote: remote, command: '''
            for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                logfile=`echo $container | cut -d- -f1`
                echo "Extracting log for $logfile"
                kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
            done
        '''
    } else {
        sshCommand remote: remote, command: '''
            for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $deployment"
                kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 > logs/$deployment.log
            done
        '''
        sshCommand remote: remote, command: '''
            for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $statefulset"
                kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 > logs/$statefulset.log
            done
        '''
    }

    sh "rm -rf logs"
    sshCommand remote: remote, command: '''ls -al logs'''
    sshGet remote: remote, from: 'logs', into: '.', override: true
    sh "cp logs/* ."
    archiveArtifacts artifacts: '*.log'
}

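// Parses 'openstack' CLI table output ("| key | value |" rows) and returns the value for
// the given key, or null if the key is not found.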
def get_value(key, output) {
    for (String line : output.split( '\n' )) {
        data = line.split( '\\|' )
        if (data.length > 1) {
            if ( data[1].trim() == key ) {
                return data[2].trim()
            }
        }
    }
}

////////////////////////////////////////////////////////////////////////////////////////
// Main Script
////////////////////////////////////////////////////////////////////////////////////////
node("${params.NODE}") {

    INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
    INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
    APT_PROXY="http://172.21.1.1:3142"
    SSH_KEY = '~/hive/cicd_rsa'
    sh 'env'

    tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./,"")

    stage("Checkout") {
        checkout scm
    }

    ci_helper = load "jenkins/ci-pipelines/ci_helper.groovy"

    def upstream_main_job = params.UPSTREAM_SUFFIX

    // upstream jobs always use merged artifacts
    upstream_main_job += '-merge'
    container_name_prefix = "osm-${tag_or_branch}"
    container_name = "${container_name_prefix}"

    keep_artifacts = false
    if ( JOB_NAME.contains('merge') ) {
        container_name += "-merge"

        // On a merge job, we keep artifacts on smoke success
        keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
    }
    container_name += "-${BUILD_NUMBER}"

    server_id = null
    http_server_name = null
    devopstempdir = null
    useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")

    try {
        builtModules = [:]
///////////////////////////////////////////////////////////////////////////////////////
// Fetch stage 2 .deb artifacts
///////////////////////////////////////////////////////////////////////////////////////
        stage("Copy Artifacts") {
            // cleanup any previous repo
            sh 'rm -rf repo'
            dir("repo") {
                packageList = []
                dir("${RELEASE}") {
                    RELEASE_DIR = sh(returnStdout:true,  script: 'pwd').trim()

                    // Check whether an upstream artifact from a specific build number has been requested.
                    // This is the case of a merge build whose upstream merge build is not yet complete (it is
                    // not yet deemed a successful build): the upstream job calls this downstream job with its
                    // own build artifact.
                    def upstreamComponent=""
                    if ( params.UPSTREAM_JOB_NAME ) {
                        println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")

                        step ([$class: 'CopyArtifact',
                               projectName: "${params.UPSTREAM_JOB_NAME}",
                               selector: [$class: 'SpecificBuildSelector',
                               buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
                              ])

                        upstreamComponent = ci_helper.get_mdg_from_project(
                            ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
                        def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
                        dir("$upstreamComponent") {
                            // the upstream job name contains a suffix with the project, which needs to be stripped off
                            def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
                            def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
                                upstreamComponent,
                                GERRIT_BRANCH,
                                "${project_without_branch} :: ${GERRIT_BRANCH}",
                                buildNumber)

                            packageList.addAll(packages)
                            println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
                        }
                    }

                    parallelSteps = [:]
                    def list = ["RO", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "NG-UI", "PLA", "tests"]
                    if (upstreamComponent.length()>0) {
                        println("Skipping upstream fetch of "+upstreamComponent)
                        list.remove(upstreamComponent)
                    }
                    for (buildStep in list) {
                        def component = buildStep
                        parallelSteps[component] = {
                            dir("$component") {
                                println("Fetching artifact for ${component}")
                                step ([$class: 'CopyArtifact',
                                       projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])

                                // grab the archives from the stage_2 builds (i.e. the artifacts stored on a merge)
                                def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
                                    component,
                                    GERRIT_BRANCH,
                                    "${component}${upstream_main_job} :: ${GERRIT_BRANCH}",
                                    ci_helper.get_env_value('build.env','BUILD_NUMBER'))
                                packageList.addAll(packages)
                                println("Fetched ${component}: ${packages}")
                                sh "rm -rf dists"
                            }
                        }
                    }
                    parallel parallelSteps

///////////////////////////////////////////////////////////////////////////////////////
// Create Devops APT repository
///////////////////////////////////////////////////////////////////////////////////////
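                    // Build a signed local apt repository with the devops, IM and osmclient packages;
                    // the installer on the target VM fetches them over HTTP from this Jenkins node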
271                     sh "mkdir -p pool"
272                     for (component in [ "devops", "IM", "osmclient" ]) {
273                         sh "ls -al ${component}/pool/"
274                         sh "cp -r ${component}/pool/* pool/"
275                         sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
276                         sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
277                         sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
278                         sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
279                     }
280
281                     // create and sign the release file
282                     sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
283                     sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"
284
285                     // copy the public key into the release folder
286                     // this pulls the key from the home dir of the current user (jenkins)
287                     sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
288                     sh "cp ~/${REPO_KEY_NAME} ."
289                 }
290
291                 // start an apache server to serve up the packages
292                 http_server_name = "${container_name}-apache"
293
294                 pwd = sh(returnStdout:true,  script: 'pwd').trim()
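                // Pick a free TCP port for the package repository by letting Python bind a socket to port 0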
                repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim()
                repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port)
                NODE_IP_ADDRESS=sh(returnStdout: true, script:
                    "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
            }

            // Unpack the devops package into a temporary location so that the upstream version is used for the docker build if devops was part of the patch
            osm_devops_dpkg = sh(returnStdout: true, script: "find ./repo/release/pool/ -name osm-devops*.deb").trim()
            devopstempdir = sh(returnStdout: true, script: "mktemp -d").trim()
            println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
            sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
            OSM_DEVOPS="${devopstempdir}/usr/share/osm-devops"
            // Convert URLs from stage 2 packages to arguments that can be passed to docker build
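            // e.g. a remotePath ending in 'osm-common_1.0_all.deb' (illustrative name) yields
            // builtModules['osm-common'], which the build step below turns into an OSM_COMMON_URL build arg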
            for (remotePath in packageList) {
                packageName=remotePath.substring(remotePath.lastIndexOf('/')+1)
                packageName=packageName.substring(0,packageName.indexOf('_'))
                builtModules[packageName]=remotePath
            }
        }

///////////////////////////////////////////////////////////////////////////////////////
// Build docker containers
///////////////////////////////////////////////////////////////////////////////////////
        dir(OSM_DEVOPS) {
            def remote = [:]
            error = null
            if ( params.DO_BUILD ) {
                withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
                                usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
                    sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
                }
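                // CACHE_DATE changes on every run so that docker layer caching does not reuse
                // previously downloaded stage 2 packages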
                datetime = sh(returnStdout: true, script: "date +%Y-%m-%d:%H:%M:%S").trim()
                moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}"
                for (packageName in builtModules.keySet()) {
                    envName=packageName.replaceAll("-","_").toUpperCase()+"_URL"
                    moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
                }
                dir ("docker") {
                    stage("Build") {
                        containerList = sh(returnStdout: true, script:
                            "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
                        containerList=Arrays.asList(containerList.split("\n"))
                        print(containerList)
                        parallelSteps = [:]
                        for (buildStep in containerList) {
                            def module = buildStep
                            def moduleName = buildStep.toLowerCase()
                            def moduleTag = container_name
                            parallelSteps[module] = {
                                dir("$module") {
                                    sh "docker build --build-arg APT_PROXY=${APT_PROXY} -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} ."
                                    println("Tagging ${moduleName}:${moduleTag}")
                                    sh "docker tag opensourcemano/${moduleName}:${moduleTag} ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
                                    sh "docker push ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
                                }
                            }
                        }
                        parallel parallelSteps
                    }
                }
            } // if ( params.DO_BUILD )

            if ( params.DO_INSTALL ) {
///////////////////////////////////////////////////////////////////////////////////////
// Launch VM
///////////////////////////////////////////////////////////////////////////////////////
                stage("Spawn Remote VM") {
                    println("Launching new VM")
                    output=sh(returnStdout: true, script: """#!/bin/sh -e
                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                        openstack server create --flavor osm.sanity \
                                                --image ${OPENSTACK_BASE_IMAGE} \
                                                --key-name CICD \
                                                --property build_url="${BUILD_URL}" \
                                                --nic net-id=osm-ext \
                                                ${container_name}
                    """).trim()

                    server_id = get_value('id', output)

                    if (server_id == null) {
                        println("VM launch output: ")
                        println(output)
                        throw new Exception("VM Launch failed")
                    }
                    println("Target VM is ${server_id}, waiting for IP address to be assigned")

                    IP_ADDRESS = ""

                    while (IP_ADDRESS == "") {
                        output=sh(returnStdout: true, script: """#!/bin/sh -e
                            for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                            openstack server show ${server_id}
                        """).trim()
                        IP_ADDRESS = get_value('addresses', output)
                    }
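                    // The 'addresses' field looks like '<network>=<ip>'; keep only the address part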
                    IP_ADDRESS = IP_ADDRESS.split('=')[1]
                    println("Waiting for VM at ${IP_ADDRESS} to be reachable")

                    alive = false
                    while (! alive) {
                        output=sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim()
                        println("output is [$output]")
                        alive = output.contains("succeeded")
                    }
                    println("VM is ready and accepting ssh connections")
                } // stage("Spawn Remote VM")

///////////////////////////////////////////////////////////////////////////////////////
// Installation
///////////////////////////////////////////////////////////////////////////////////////
                stage("Install") {
                    commit_id = ''
                    repo_distro = ''
                    repo_key_name = ''
                    release = ''

                    if ( params.COMMIT_ID )
                    {
                        commit_id = "-b ${params.COMMIT_ID}"
                    }

                    if ( params.REPO_DISTRO )
                    {
                        repo_distro = "-r ${params.REPO_DISTRO}"
                    }

                    if ( params.REPO_KEY_NAME )
                    {
                        repo_key_name = "-k ${params.REPO_KEY_NAME}"
                    }

                    if ( params.RELEASE )
                    {
                        release = "-R ${params.RELEASE}"
                    }

                    if ( params.REPOSITORY_BASE )
                    {
                        repo_base_url = "-u ${params.REPOSITORY_BASE}"
                    }
                    else
                    {
                        repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
                    }

                    remote.name = container_name
                    remote.host = IP_ADDRESS
                    remote.user = 'ubuntu'
                    remote.identityFile = SSH_KEY
                    remote.allowAnyHosts = true
                    remote.logLevel = 'INFO'
                    remote.pty = true

                    // Force time sync to avoid clock drift and invalid certificates
                    sshCommand remote: remote, command: """
                        sudo apt update
                        sudo apt install -y ntp
                        sudo service ntp stop
                        sudo ntpd -gq
                        sudo service ntp start
                    """

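                    // Download the Release ELEVEN installer and prepend /snap/bin to PATH in ~/.bashrc so
                    // snap-installed tools are found in the ssh sessions that follow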
                    sshCommand remote: remote, command: """
                        wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh
                        chmod +x ./install_osm.sh
                        sed -i '1 i\\export PATH=/snap/bin:\${PATH}' ~/.bashrc
                    """

                    if ( useCharmedInstaller ) {
                        // Use local proxy for docker hub
                        sshCommand remote: remote, command: '''
                            sudo snap install microk8s --classic --channel=1.19/stable
                            sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" /var/snap/microk8s/current/args/containerd-template.toml
                            sudo systemctl restart snap.microk8s.daemon-containerd.service
                            sudo snap alias microk8s.kubectl kubectl
                        '''

                        withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
                                        usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
                            sshCommand remote: remote, command: """
                                ./install_osm.sh -y \
                                    ${repo_base_url} \
                                    ${repo_key_name} \
                                    ${release} -r unstable \
                                    --charmed  \
                                    --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
                                    --tag ${container_name}
                            """
                        }
                        prometheusHostname = "prometheus."+IP_ADDRESS+".nip.io"
                        prometheusPort = 80
                        osmHostname = "nbi."+IP_ADDRESS+".nip.io:443"
                    } else {
                        // Run -k8s installer here specifying internal docker registry and docker proxy
                        withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
                                        usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
                            sshCommand remote: remote, command: """
                                ./install_osm.sh -y \
                                    ${repo_base_url} \
                                    ${repo_key_name} \
                                    ${release} -r unstable \
                                    -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
                                    -p ${INTERNAL_DOCKER_PROXY} \
                                    -t ${container_name}
                            """
                        }
                        prometheusHostname = IP_ADDRESS
                        prometheusPort = 9091
                        osmHostname = IP_ADDRESS
                    }
                } // stage("Install")
///////////////////////////////////////////////////////////////////////////////////////
// Health check of installed OSM in remote vm
///////////////////////////////////////////////////////////////////////////////////////
                stage("OSM Health") {
                    stackName = "osm"
                    sshCommand remote: remote, command: """
                        /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
                    """
                } // stage("OSM Health")
            } // if ( params.DO_INSTALL )


///////////////////////////////////////////////////////////////////////////////////////
// Execute Robot tests
///////////////////////////////////////////////////////////////////////////////////////
            stage_archive = false
            if ( params.DO_ROBOT ) {
                try {
                    stage("System Integration Test") {
                        if ( useCharmedInstaller ) {
                            tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
                            sh(script: "touch ${tempdir}/hosts")
                            hostfile="${tempdir}/hosts"
                            sh """cat << EOF > ${hostfile}
127.0.0.1           localhost
${remote.host}      prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
EOF"""
                        } else {
                            hostfile=null
                        }

                        jujuPassword=sshCommand remote: remote, command: """
                            echo `juju gui 2>&1 | grep password | cut -d: -f2`
                        """

                        run_robot_systest(
                            container_name,
                            params.ROBOT_TAG_NAME,
                            osmHostname,
                            prometheusHostname,
                            prometheusPort,
                            params.ROBOT_VIM,
                            params.ROBOT_PORT_MAPPING_VIM,
                            params.KUBECONFIG,
                            params.CLOUDS,
                            hostfile,
                            jujuPassword,
                            SSH_KEY,
                            params.ROBOT_PASS_THRESHOLD,
                            params.ROBOT_UNSTABLE_THRESHOLD
                        )
                    } // stage("System Integration Test")
                } finally {
                    stage("Archive Container Logs") {
                        // Archive the container logs collected from the remote VM
                        archive_logs(remote)
                        if ( currentBuild.result != 'FAILURE' ) {
                            stage_archive = keep_artifacts
                        } else {
                            println ("Systest test failed, throwing error")
                            error = new Exception("Systest test failed")
                            currentBuild.result = 'FAILURE'
                            throw error
                        }
                    }
                }
            } // if ( params.DO_ROBOT )

            if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) {
                stage("Archive") {
                    sh "echo ${container_name} > build_version.txt"
                    archiveArtifacts artifacts: "build_version.txt", fingerprint: true

                    // Archive the tested repo
                    dir("${RELEASE_DIR}") {
                        ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested')
                    }
                    if ( params.DO_DOCKERPUSH ) {
                        stage("Publish to Dockerhub") {
                            parallelSteps = [:]
                            for (buildStep in containerList) {
                                def module = buildStep
                                def moduleName = buildStep.toLowerCase()
                                def dockerTag = params.DOCKER_TAG
                                def moduleTag = container_name

                                parallelSteps[module] = {
                                    dir("$module") {
                                        sh "docker tag opensourcemano/${moduleName}:${moduleTag} opensourcemano/${moduleName}:${dockerTag}"
                                        sh "docker push opensourcemano/${moduleName}:${dockerTag}"
                                    }
                                }
                            }
                            parallel parallelSteps
                        }

                        stage("Snap promotion") {
                            def snaps = ["osmclient"]
                            sh "snapcraft login --with ~/.snapcraft/config"
                            for (snap in snaps) {
                                channel="latest/"
                                if (BRANCH_NAME.startsWith("v")) {
                                    channel=BRANCH_NAME.substring(1)+"/"
                                } else if (BRANCH_NAME!="master") {
                                    channel+="/"+BRANCH_NAME.replaceAll('/','-')
                                }
                                track=channel+"edge\\*"
                                edge_rev=sh(returnStdout: true,
                                    script: "snapcraft revisions $snap | " +
                                    "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
                                print "edge rev is $edge_rev"
                                track=channel+"beta\\*"
                                beta_rev=sh(returnStdout: true,
                                    script: "snapcraft revisions $snap | " +
                                    "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
                                print "beta rev is $beta_rev"

                                if ( edge_rev != beta_rev ) {
                                    print "Promoting $edge_rev to beta in place of $beta_rev"
                                    beta_track=channel+"beta"
                                    sh "snapcraft release $snap $edge_rev $beta_track"
                                }
                            }
                        } // stage("Snap promotion")
                    } // if ( params.DO_DOCKERPUSH )
                } // stage("Archive")
            } // if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive )
        } // dir(OSM_DEVOPS)
    } finally {
        if ( params.DO_INSTALL && server_id != null) {
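            // Keep the VM for post-mortem debugging when SAVE_CONTAINER_ON_FAIL / SAVE_CONTAINER_ON_PASS
            // applies; otherwise delete it from the ETSI VIM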
            delete_vm = true
            if (error && params.SAVE_CONTAINER_ON_FAIL ) {
                delete_vm = false
            }
            if (!error && params.SAVE_CONTAINER_ON_PASS ) {
                delete_vm = false
            }

            if ( delete_vm ) {
                if (server_id != null) {
                    println("Deleting VM: $server_id")
                    sh """#!/bin/sh -e
                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                        openstack server delete ${server_id}
                    """
                } else {
                    println("Saved VM $server_id in ETSI VIM")
                }
            }
        }
        if ( http_server_name != null ) {
            sh "docker stop ${http_server_name} || true"
            sh "docker rm ${http_server_name} || true"
        }

        if ( devopstempdir != null ) {
            sh "rm -rf ${devopstempdir}"
        }
    }
}