Bug 1873: Artifactory times out
[osm/devops.git] / jenkins / ci-pipelines / ci_stage_3.groovy
1 /* Copyright ETSI Contributors and Others
2  *
3  * All Rights Reserved.
4  *
5  *   Licensed under the Apache License, Version 2.0 (the "License"); you may
6  *   not use this file except in compliance with the License. You may obtain
7  *   a copy of the License at
8  *
9  *        http://www.apache.org/licenses/LICENSE-2.0
10  *
11  *   Unless required by applicable law or agreed to in writing, software
12  *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13  *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14  *   License for the specific language governing permissions and limitations
15  *   under the License.
16  */
17
18 properties([
19     parameters([
20         string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
21         string(defaultValue: 'system', description: '', name: 'NODE'),
22         string(defaultValue: '', description: '', name: 'BUILD_FROM_SOURCE'),
23         string(defaultValue: 'unstable', description: '', name: 'REPO_DISTRO'),
24         string(defaultValue: '', description: '', name: 'COMMIT_ID'),
25         string(defaultValue: '-stage_2', description: '', name: 'UPSTREAM_SUFFIX'),
26         string(defaultValue: 'pubkey.asc', description: '', name: 'REPO_KEY_NAME'),
27         string(defaultValue: 'release', description: '', name: 'RELEASE'),
28         string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
29         string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
31         string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
32         string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
33         string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
34         string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
35         string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
36         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
37         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
38         booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
39         booleanParam(defaultValue: true, description: '',  name: 'DO_BUILD'),
40         booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
41         booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
42         booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
43         string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
44         booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
45         string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options', name: 'ROBOT_TAG_NAME'),
46         string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
47         string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml', description: 'Port mapping file for SDN assist in ETSI VIM', name: 'ROBOT_PORT_MAPPING_VIM'),
48         string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
49         string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
50         string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
51         string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed', name: 'ROBOT_PASS_THRESHOLD'),
52         string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
53     ])
54 ])
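// All parameters above are exposed to the pipeline as params.<NAME>, e.g. params.NODE selects the build
// agent and params.DO_BUILD / params.DO_INSTALL / params.DO_ROBOT gate the main stages below. Defaults
// apply when the job is triggered without explicit values (typically by an upstream stage_2 job).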
55
56
57 ////////////////////////////////////////////////////////////////////////////////////////
58 // Helper Functions
59 ////////////////////////////////////////////////////////////////////////////////////////
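// Run the Robot system tests from the opensourcemano/tests container against a deployed OSM and publish
// the results with RobotPublisher; pass_th and unstable_th are the pass/unstable thresholds (in %).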
60 void run_robot_systest(String tagName,
61                        String testName,
62                        String osmHostname,
63                        String prometheusHostname,
64                        Integer prometheusPort=null,
65                        String envfile=null,
66                        String portmappingfile=null,
67                        String kubeconfig=null,
68                        String clouds=null,
69                        String hostfile=null,
70                        String jujuPassword=null,
71                        String osmRSAfile=null,
72                        String pass_th='0.0',
73                        String unstable_th='0.0') {
74     tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
75     if ( !envfile ) {
76         sh(script: "touch ${tempdir}/env")
77         envfile="${tempdir}/env"
78     }
79     PROMETHEUS_PORT_VAR = ""
80     if ( prometheusPort != null) {
81         PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT="+prometheusPort
82     }
83     hostfilemount=""
84     if ( hostfile ) {
85         hostfilemount="-v "+hostfile+":/etc/hosts"
86     }
87
88     JUJU_PASSWORD_VAR = ""
89     if ( jujuPassword != null) {
90         JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD="+jujuPassword
91     }
92
93     try {
94         sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}"
95     } finally {
96         sh "cp ${tempdir}/* ."
97         outputDirectory = sh(returnStdout: true, script: "pwd").trim()
98         println("Present directory is: ${outputDirectory}")
99         step([
100             $class : 'RobotPublisher',
101             outputPath : "${outputDirectory}",
102             outputFileName : "*.xml",
103             disableArchiveOutput : false,
104             reportFileName : "report.html",
105             logFileName : "log.html",
106             passThreshold : pass_th,
107             unstableThreshold: unstable_th,
108             otherFiles : "*.png",
109         ])
110     }
111 }
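// Illustrative invocation (mirrors the call made in the "System Integration Test" stage below):
//   run_robot_systest(container_name, params.ROBOT_TAG_NAME, osmHostname, prometheusHostname,
//                     prometheusPort, params.ROBOT_VIM, params.ROBOT_PORT_MAPPING_VIM, params.KUBECONFIG,
//                     params.CLOUDS, hostfile, jujuPassword, SSH_KEY,
//                     params.ROBOT_PASS_THRESHOLD, params.ROBOT_UNSTABLE_THRESHOLD)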
112
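// Collect OSM logs from the remote installation and archive them as Jenkins artifacts:
// per pod for charmed (microk8s) installs, per deployment/statefulset otherwise.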
113 def archive_logs(remote) {
114
115     sshCommand remote: remote, command: '''mkdir -p logs'''
116     if (useCharmedInstaller) {
117         sshCommand remote: remote, command: '''
118             for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
119                 logfile=`echo $container | cut -d- -f1`
120                 echo "Extracting log for $logfile"
121                 kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
122             done
123         '''
124     } else {
125         sshCommand remote: remote, command: '''
126             for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
127                 echo "Extracting log for $deployment"
128                 kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 > logs/$deployment.log
129             done
130         '''
131         sshCommand remote: remote, command: '''
132             for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
133                 echo "Extracting log for $statefulset"
134                 kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 > logs/$statefulset.log
135             done
136         '''
137     }
138
139     sh "rm -rf logs"
140     sshCommand remote: remote, command: '''ls -al logs'''
141     sshGet remote: remote, from: 'logs', into: '.', override: true
142     sh "cp logs/* ."
143     archiveArtifacts artifacts: '*.log'
144 }
145
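// Return the 'Value' column for a given 'Field' from an OpenStack CLI table, e.g. (illustrative output):
//   +-----------+----------------------------------+
//   | Field     | Value                            |
//   +-----------+----------------------------------+
//   | id        | 0b6f0e...                        |
//   | addresses | osm-ext=172.21.x.y               |
//   +-----------+----------------------------------+
// so get_value('id', output) returns the server UUID.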
146 def get_value(key, output) {
147     for (String line : output.split( '\n' )) {
148         data = line.split( '\\|' )
149         if (data.length > 1) {
150             if ( data[1].trim() == key ) {
151                 return data[2].trim()
152             }
153         }
154     }
155 }
156
157 ////////////////////////////////////////////////////////////////////////////////////////
158 // Main Script
159 ////////////////////////////////////////////////////////////////////////////////////////
160 node("${params.NODE}") {
161
162     INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
163     INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
164     APT_PROXY="http://172.21.1.1:3142"
165     SSH_KEY = '~/hive/cicd_rsa'
166     sh 'env'
167
168     tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./,"")
169
170     stage("Checkout") {
171         checkout scm
172     }
173
174     ci_helper = load "jenkins/ci-pipelines/ci_helper.groovy"
175
176     def upstream_main_job = params.UPSTREAM_SUFFIX
177
178     // upstream jobs always use merged artifacts
179     upstream_main_job += '-merge'
180     container_name_prefix = "osm-${tag_or_branch}"
181     container_name = "${container_name_prefix}"
182
183     keep_artifacts = false
184     if ( JOB_NAME.contains('merge') ) {
185         container_name += "-merge"
186
187         // On a merge job, we keep artifacts on smoke success
188         keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
189     }
190     container_name += "-${BUILD_NUMBER}"
191
192     server_id = null
193     http_server_name = null
194     devopstempdir = null
195     useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")
196
197     try {
198         builtModules = [:]
199 ///////////////////////////////////////////////////////////////////////////////////////
200 // Fetch stage 2 .deb artifacts
201 ///////////////////////////////////////////////////////////////////////////////////////
202         stage("Copy Artifacts") {
203             // cleanup any previous repo
204             sh 'rm -rf repo'
205             dir("repo") {
206                 packageList = []
207                 dir("${RELEASE}") {
208                     RELEASE_DIR = sh(returnStdout:true,  script: 'pwd').trim()
209
210                     // check if an upstream artifact based on specific build number has been requested
211                     // This is the case for a merge build whose upstream merge build is not yet complete (it is not yet
212                     // deemed a successful build). The upstream job calls this downstream job with its build artifact.
213                     def upstreamComponent=""
214                     if ( params.UPSTREAM_JOB_NAME ) {
215                         println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
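                        // Serialize access to Artifactory (see Bug 1873, "Artifactory times out")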
216                         lock('Artifactory') {
217                             step ([$class: 'CopyArtifact',
218                                 projectName: "${params.UPSTREAM_JOB_NAME}",
219                                 selector: [$class: 'SpecificBuildSelector',
220                                 buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
221                                 ])
222
223                             upstreamComponent = ci_helper.get_mdg_from_project(
224                                 ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
225                             def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
226                             dir("$upstreamComponent") {
227                                 // the upstream job name contains a suffix with the project; strip it off
228                                 def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
229                                 def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
230                                     upstreamComponent,
231                                     GERRIT_BRANCH,
232                                     "${project_without_branch} :: ${GERRIT_BRANCH}",
233                                     buildNumber)
234
235                                 packageList.addAll(packages)
236                                 println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
237                             }
238                         } // lock artifactory
239                     }
240
241                     parallelSteps = [:]
242                     def list = ["RO", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "NG-UI", "PLA", "tests"]
243                     if (upstreamComponent.length()>0) {
244                         println("Skipping upstream fetch of "+upstreamComponent)
245                         list.remove(upstreamComponent)
246                     }
247                     for (buildStep in list) {
248                         def component = buildStep
249                         parallelSteps[component] = {
250                             dir("$component") {
251                                 println("Fetching artifact for ${component}")
252                                 step ([$class: 'CopyArtifact',
253                                        projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
254
255                                 // grab the archives from the stage_2 builds (i.e. the artifacts stored by the merge jobs)
256                                 def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
257                                     component,
258                                     GERRIT_BRANCH,
259                                     "${component}${upstream_main_job} :: ${GERRIT_BRANCH}",
260                                     ci_helper.get_env_value('build.env','BUILD_NUMBER'))
261                                 packageList.addAll(packages)
262                                 println("Fetched ${component}: ${packages}")
263                                 sh "rm -rf dists"
264                             }
265                         }
266                     }
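                    // Fetch all components in parallel while holding the Artifactory lock (see Bug 1873, "Artifactory times out")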
267                     lock('Artifactory') {
268                         parallel parallelSteps
269                     }
270
271 ///////////////////////////////////////////////////////////////////////////////////////
272 // Create Devops APT repository
273 ///////////////////////////////////////////////////////////////////////////////////////
274                     sh "mkdir -p pool"
275                     for (component in [ "devops", "IM", "osmclient" ]) {
276                         sh "ls -al ${component}/pool/"
277                         sh "cp -r ${component}/pool/* pool/"
278                         sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
279                         sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
280                         sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
281                         sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
282                     }
283
284                     // create and sign the release file
285                     sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
286                     sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"
287
288                     // copy the public key into the release folder
289                     // this pulls the key from the home dir of the current user (jenkins)
290                     sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
291                     sh "cp ~/${REPO_KEY_NAME} ."
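                    // The resulting repository layout, served over HTTP below, is roughly:
                    //   pool/<component>/*.deb
                    //   dists/<REPO_DISTRO>/<component>/binary-amd64/Packages{,.gz}
                    //   dists/<REPO_DISTRO>/Release{,.gpg}
                    //   <REPO_KEY_NAME> (public key, also copied as 'OSM ETSI Release Key.gpg')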
292                 }
293
294                 // start an apache server to serve up the packages
295                 http_server_name = "${container_name}-apache"
296
297                 pwd = sh(returnStdout:true,  script: 'pwd').trim()
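                // Ask the kernel for a free TCP port (bind to port 0) on which to publish the temporary apt repository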
298                 repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim()
299                 repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port)
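                // $SSH_CONNECTION is "<client ip> <client port> <server ip> <server port>"; field 3 is this
                // node's own IP, which the remote VM later uses to reach the repository (see repo_base_url)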
300                 NODE_IP_ADDRESS=sh(returnStdout: true, script:
301                     "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
302             }
303
304             // Unpack the devops package into a temporary location so that the fetched (possibly patched) version is used for the docker build step
305             osm_devops_dpkg = sh(returnStdout: true, script: "find ./repo/release/pool/ -name osm-devops*.deb").trim()
306             devopstempdir = sh(returnStdout: true, script: "mktemp -d").trim()
307             println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
308             sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
309             OSM_DEVOPS="${devopstempdir}/usr/share/osm-devops"
310             // Convert URLs from stage 2 packages to arguments that can be passed to docker build
311             for (remotePath in packageList) {
312                 packageName=remotePath.substring(remotePath.lastIndexOf('/')+1)
313                 packageName=packageName.substring(0,packageName.indexOf('_'))
314                 builtModules[packageName]=remotePath
315             }
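            // e.g. a hypothetical entry '.../pool/foo/osm-foo_1.0_amd64.deb' ends up as
            // builtModules['osm-foo'] = '<that URL>', which the docker build stage below turns into a build argument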
316         }
317
318 ///////////////////////////////////////////////////////////////////////////////////////
319 // Build docker containers
320 ///////////////////////////////////////////////////////////////////////////////////////
321         dir(OSM_DEVOPS) {
322             def remote = [:]
323             error = null
324             if ( params.DO_BUILD ) {
325                 withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
326                                 usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
327                     sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
328                 }
329                 datetime = sh(returnStdout: true, script: "date +%Y-%m-%d:%H:%M:%S").trim()
330                 moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}"
331                 for (packageName in builtModules.keySet()) {
332                     envName=packageName.replaceAll("-","_").toUpperCase()+"_URL"
333                     moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
334                 }
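                // e.g. the hypothetical entry builtModules['osm-foo'] adds " --build-arg OSM_FOO_URL=<that URL>"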
335                 dir ("docker") {
336                     stage("Build") {
337                         containerList = sh(returnStdout: true, script:
338                             "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
339                         containerList=Arrays.asList(containerList.split("\n"))
340                         print(containerList)
341                         parallelSteps = [:]
342                         for (buildStep in containerList) {
343                             def module = buildStep
344                             def moduleName = buildStep.toLowerCase()
345                             def moduleTag = container_name
346                             parallelSteps[module] = {
347                                 dir("$module") {
348                                     sh "docker build --build-arg APT_PROXY=${APT_PROXY} -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} ."
349                                     println("Tagging ${moduleName}:${moduleTag}")
350                                     sh "docker tag opensourcemano/${moduleName}:${moduleTag} ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
351                                     sh "docker push ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
352                                 }
353                             }
354                         }
355                         parallel parallelSteps
356                     }
357                 }
358             } // if ( params.DO_BUILD )
359
360             if ( params.DO_INSTALL ) {
361 ///////////////////////////////////////////////////////////////////////////////////////
362 // Launch VM
363 ///////////////////////////////////////////////////////////////////////////////////////
364                 stage("Spawn Remote VM") {
365                     println("Launching new VM")
366                     output=sh(returnStdout: true, script: """#!/bin/sh -e
367                         for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
368                         openstack server create --flavor osm.sanity \
369                                                 --image ${OPENSTACK_BASE_IMAGE} \
370                                                 --key-name CICD \
371                                                 --property build_url="${BUILD_URL}" \
372                                                 --nic net-id=osm-ext \
373                                                 ${container_name}
374                     """).trim()
375
376                     server_id = get_value('id', output)
377
378                     if (server_id == null) {
379                         println("VM launch output: ")
380                         println(output)
381                         throw new Exception("VM Launch failed")
382                     }
383                     println("Target VM is ${server_id}, waiting for IP address to be assigned")
384
385                     IP_ADDRESS = ""
386
387                     while (IP_ADDRESS == "") {
388                         output=sh(returnStdout: true, script: """#!/bin/sh -e
389                             for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
390                             openstack server show ${server_id}
391                         """).trim()
392                         IP_ADDRESS = get_value('addresses', output)
393                     }
394                     IP_ADDRESS = IP_ADDRESS.split('=')[1]
395                     println("Waiting for VM at ${IP_ADDRESS} to be reachable")
396
397                     alive = false
398                     while (! alive) {
399                         output=sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim()
400                         println("output is [$output]")
401                         alive = output.contains("succeeded")
402                     }
403                     println("VM is ready and accepting ssh connections")
404                 } // stage("Spawn Remote VM")
405
406 ///////////////////////////////////////////////////////////////////////////////////////
407 // Installation
408 ///////////////////////////////////////////////////////////////////////////////////////
409                 stage("Install") {
410                     commit_id = ''
411                     repo_distro = ''
412                     repo_key_name = ''
413                     release = ''
414
415                     if ( params.COMMIT_ID )
416                     {
417                         commit_id = "-b ${params.COMMIT_ID}"
418                     }
419
420                     if ( params.REPO_DISTRO )
421                     {
422                         repo_distro = "-r ${params.REPO_DISTRO}"
423                     }
424
425                     if ( params.REPO_KEY_NAME )
426                     {
427                         repo_key_name = "-k ${params.REPO_KEY_NAME}"
428                     }
429
430                     if ( params.RELEASE )
431                     {
432                         release = "-R ${params.RELEASE}"
433                     }
434
435                     if ( params.REPOSITORY_BASE )
436                     {
437                         repo_base_url = "-u ${params.REPOSITORY_BASE}"
438                     }
439                     else
440                     {
441                         repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
442                     }
443
444                     remote.name = container_name
445                     remote.host = IP_ADDRESS
446                     remote.user = 'ubuntu'
447                     remote.identityFile = SSH_KEY
448                     remote.allowAnyHosts = true
449                     remote.logLevel = 'INFO'
450                     remote.pty = true
451
452                     // Force time sync to avoid clock drift and invalid certificates
453                     sshCommand remote: remote, command: """
454                         sudo apt update
455                         sudo apt install -y ntp
456                         sudo service ntp stop
457                         sudo ntpd -gq
458                         sudo service ntp start
459                     """
460
461                     sshCommand remote: remote, command: """
462                         wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh
463                         chmod +x ./install_osm.sh
464                         sed -i '1 i\\export PATH=/snap/bin:\${PATH}' ~/.bashrc
465                     """
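                    // The sed line above prepends /snap/bin to PATH in ~/.bashrc so that snap-installed tools
                    // (e.g. kubectl, juju) are found by the non-interactive ssh commands that follow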
466
467                     if ( useCharmedInstaller ) {
468                         // Use local proxy for docker hub
469                         sshCommand remote: remote, command: '''
470                             sudo snap install microk8s --classic --channel=1.19/stable
471                             sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" /var/snap/microk8s/current/args/containerd-template.toml
472                             sudo systemctl restart snap.microk8s.daemon-containerd.service
473                             sudo snap alias microk8s.kubectl kubectl
474                         '''
475
476                         withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
477                                         usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
478                             sshCommand remote: remote, command: """
479                                 ./install_osm.sh -y \
480                                     ${repo_base_url} \
481                                     ${repo_key_name} \
482                                     ${release} -r unstable \
483                                     --charmed  \
484                                     --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
485                                     --tag ${container_name}
486                             """
487                         }
488                         prometheusHostname = "prometheus."+IP_ADDRESS+".nip.io"
489                         prometheusPort = 80
490                         osmHostname = "nbi."+IP_ADDRESS+".nip.io:443"
491                     } else {
492                         // Run the -k8s installer, specifying the internal docker registry and docker proxy
493                         withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
494                                         usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
495                             sshCommand remote: remote, command: """
496                                 ./install_osm.sh -y \
497                                     ${repo_base_url} \
498                                     ${repo_key_name} \
499                                     ${release} -r unstable \
500                                     -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
501                                     -p ${INTERNAL_DOCKER_PROXY} \
502                                     -t ${container_name}
503                             """
504                         }
505                         prometheusHostname = IP_ADDRESS
506                         prometheusPort = 9091
507                         osmHostname = IP_ADDRESS
508                     }
509                 } // stage("Install")
510 ///////////////////////////////////////////////////////////////////////////////////////
511 // Health check of installed OSM in remote vm
512 ///////////////////////////////////////////////////////////////////////////////////////
513                 stage("OSM Health") {
514                     stackName = "osm"
515                     sshCommand remote: remote, command: """
516                         /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
517                     """
518                 } // stage("OSM Health")
519             } // if ( params.DO_INSTALL )
520
521
522 ///////////////////////////////////////////////////////////////////////////////////////
523 // Execute Robot tests
524 ///////////////////////////////////////////////////////////////////////////////////////
525             stage_archive = false
526             if ( params.DO_ROBOT ) {
527                 try {
528                     stage("System Integration Test") {
529                         if ( useCharmedInstaller ) {
530                             tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
531                             sh(script: "touch ${tempdir}/hosts")
532                             hostfile="${tempdir}/hosts"
533                             sh """cat << EOF > ${hostfile}
534 127.0.0.1           localhost
535 ${remote.host}      prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
536 EOF"""
537                         } else {
538                             hostfile=null
539                         }
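                        // For charmed installs the hosts file above is bind-mounted as /etc/hosts inside the tests
                        // container (see run_robot_systest) so that the nbi/prometheus *.nip.io names resolve there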
540
541                         jujuPassword=sshCommand remote: remote, command: """
542                             echo `juju gui 2>&1 | grep password | cut -d: -f2`
543                         """
544
545                         run_robot_systest(
546                             container_name,
547                             params.ROBOT_TAG_NAME,
548                             osmHostname,
549                             prometheusHostname,
550                             prometheusPort,
551                             params.ROBOT_VIM,
552                             params.ROBOT_PORT_MAPPING_VIM,
553                             params.KUBECONFIG,
554                             params.CLOUDS,
555                             hostfile,
556                             jujuPassword,
557                             SSH_KEY,
558                             params.ROBOT_PASS_THRESHOLD,
559                             params.ROBOT_UNSTABLE_THRESHOLD
560                         )
561                     } // stage("System Integration Test")
562                 } finally {
563                     stage("Archive Container Logs") {
564                         // Collect the container logs from the remote VM and archive them
565                         archive_logs(remote)
566                         if ( currentBuild.result != 'FAILURE' ) {
567                             stage_archive = keep_artifacts
568                         } else {
569                             println("System test failed, throwing error")
570                             error = new Exception("System test failed")
571                             currentBuild.result = 'FAILURE'
572                             throw error
573                         }
574                     }
575                 }
576             } // if ( params.DO_ROBOT )
577
578             if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) {
579                 stage("Archive") {
580                     sh "echo ${container_name} > build_version.txt"
581                     archiveArtifacts artifacts: "build_version.txt", fingerprint: true
582
583                     // Archive the tested repo
584                     dir("${RELEASE_DIR}") {
585                         ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested')
586                     }
587                     if ( params.DO_DOCKERPUSH ) {
588                         stage("Publish to Dockerhub") {
589                             parallelSteps = [:]
590                             for (buildStep in containerList) {
591                                 def module = buildStep
592                                 def moduleName = buildStep.toLowerCase()
593                                 def dockerTag = params.DOCKER_TAG
594                                 def moduleTag = container_name
595
596                                 parallelSteps[module] = {
597                                     dir("$module") {
598                                         sh "docker tag opensourcemano/${moduleName}:${moduleTag} opensourcemano/${moduleName}:${dockerTag}"
599                                         sh "docker push opensourcemano/${moduleName}:${dockerTag}"
600                                     }
601                                 }
602                             }
603                             parallel parallelSteps
604                         }
605
606                         stage("Snap promotion") {
607                             def snaps = ["osmclient"]
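                            // Snap channels are <track>/<risk>; promote the newest 'edge' revision of each snap
                            // to 'beta' within the same track when the two revisions differ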
608                             sh "snapcraft login --with ~/.snapcraft/config"
609                             for (snap in snaps) {
610                                 channel="latest/"
611                                 if (BRANCH_NAME.startsWith("v")) {
612                                     channel=BRANCH_NAME.substring(1)+"/"
613                                 } else if (BRANCH_NAME!="master") {
614                                     channel+="/"+BRANCH_NAME.replaceAll('/','-')
615                                 }
616                                 track=channel+"edge\\*"
617                                 edge_rev=sh(returnStdout: true,
618                                     script: "snapcraft revisions $snap | " +
619                                     "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
620                                 print "edge rev is $edge_rev"
621                                 track=channel+"beta\\*"
622                                 beta_rev=sh(returnStdout: true,
623                                     script: "snapcraft revisions $snap | " +
624                                     "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
625                                 print "beta rev is $beta_rev"
626
627                                 if ( edge_rev != beta_rev ) {
628                                     print "Promoting $edge_rev to beta in place of $beta_rev"
629                                     beta_track=channel+"beta"
630                                     sh "snapcraft release $snap $edge_rev $beta_track"
631                                 }
632                             }
633                         } // stage("Snap promotion")
634                     } // if ( params.DO_DOCKERPUSH )
635                 } // stage("Archive")
636             } // if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive )
637         } // dir(OSM_DEVOPS)
638     } finally {
639         if ( params.DO_INSTALL && server_id != null) {
640             delete_vm = true
641             if (error && params.SAVE_CONTAINER_ON_FAIL ) {
642                 delete_vm = false
643             }
644             if (!error && params.SAVE_CONTAINER_ON_PASS ) {
645                 delete_vm = false
646             }
647
648             if ( delete_vm ) {
649                 println("Deleting VM: $server_id")
650                 sh """#!/bin/sh -e
651                     for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
652                     openstack server delete ${server_id}
653                 """
654             } else {
655                 println("Saved VM $server_id in ETSI VIM")
656             }
659         }
660         if ( http_server_name != null ) {
661             sh "docker stop ${http_server_name} || true"
662             sh "docker rm ${http_server_name} || true"
663         }
664
665         if ( devopstempdir != null ) {
666             sh "rm -rf ${devopstempdir}"
667         }
668     }
669 }