Add NTP Sync
osm/devops.git: jenkins/ci-pipelines/ci_stage_3.groovy
/* Copyright ETSI Contributors and Others
 *
 * All Rights Reserved.
 *
 *   Licensed under the Apache License, Version 2.0 (the "License"); you may
 *   not use this file except in compliance with the License. You may obtain
 *   a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *   License for the specific language governing permissions and limitations
 *   under the License.
 */

properties([
    parameters([
        string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
        string(defaultValue: 'system', description: '', name: 'NODE'),
        string(defaultValue: '', description: '', name: 'BUILD_FROM_SOURCE'),
        string(defaultValue: 'unstable', description: '', name: 'REPO_DISTRO'),
        string(defaultValue: '', description: '', name: 'COMMIT_ID'),
        string(defaultValue: '-stage_2', description: '', name: 'UPSTREAM_SUFFIX'),
        string(defaultValue: 'pubkey.asc', description: '', name: 'REPO_KEY_NAME'),
        string(defaultValue: 'release', description: '', name: 'RELEASE'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
        string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
        string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
        string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
        string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
        booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
        booleanParam(defaultValue: true, description: '', name: 'DO_BUILD'),
        booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
        booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
        string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
        booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
        string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options', name: 'ROBOT_TAG_NAME'),
        string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
        string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml', description: 'Port mapping file for SDN assist in ETSI VIM', name: 'ROBOT_PORT_MAPPING_VIM'),
        string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
        string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
        string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
        string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed', name: 'ROBOT_PASS_THRESHOLD'),
        string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
    ])
])


////////////////////////////////////////////////////////////////////////////////////////
// Helper Functions
////////////////////////////////////////////////////////////////////////////////////////
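// Run the Robot Framework system tests from the opensourcemano/tests container against
// the freshly installed OSM, then publish the results with the Jenkins Robot publisher.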
void run_robot_systest(String tagName,
                       String testName,
                       String osmHostname,
                       String prometheusHostname,
                       Integer prometheusPort=null,
                       String envfile=null,
                       String portmappingfile=null,
                       String kubeconfig=null,
                       String clouds=null,
                       String hostfile=null,
                       String jujuPassword=null,
                       String osmRSAfile=null,
                       String pass_th='0.0',
                       String unstable_th='0.0') {
    tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    if ( !envfile ) {
        sh(script: "touch ${tempdir}/env")
        envfile="${tempdir}/env"
    }
    PROMETHEUS_PORT_VAR = ""
    if ( prometheusPort != null) {
        PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT="+prometheusPort
    }
    hostfilemount=""
    if ( hostfile ) {
        hostfilemount="-v "+hostfile+":/etc/hosts"
    }

    JUJU_PASSWORD_VAR = ""
    if ( jujuPassword != null) {
        JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD="+jujuPassword
    }

    try {
        sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}"
    } finally {
        sh "cp ${tempdir}/* ."
        outputDirectory = sh(returnStdout: true, script: "pwd").trim()
        println ("Present Directory is : ${outputDirectory}")
        step([
            $class : 'RobotPublisher',
            outputPath : "${outputDirectory}",
            outputFileName : "*.xml",
            disableArchiveOutput : false,
            reportFileName : "report.html",
            logFileName : "log.html",
            passThreshold : pass_th,
            unstableThreshold: unstable_th,
            otherFiles : "*.png",
        ])
    }
}

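// Collect logs from every OSM workload on the remote installation (pods for the charmed
// installer, deployments and statefulsets otherwise) and archive them as Jenkins artifacts.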
def archive_logs(remote) {

    sshCommand remote: remote, command: '''mkdir -p logs'''
    if (useCharmedInstaller) {
        sshCommand remote: remote, command: '''
            for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                logfile=`echo $container | cut -d- -f1`
                echo "Extracting log for $logfile"
                kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
            done
        '''
    } else {
        sshCommand remote: remote, command: '''
            for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $deployment"
                kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 > logs/$deployment.log
            done
        '''
        sshCommand remote: remote, command: '''
            for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $statefulset"
                kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 > logs/$statefulset.log
            done
        '''
    }

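    // remove any stale local logs directory before fetching the remote one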
    sh "rm -rf logs"
    sshCommand remote: remote, command: '''ls -al logs'''
    sshGet remote: remote, from: 'logs', into: '.', override: true
    sh "cp logs/* ."
    archiveArtifacts artifacts: '*.log'
}

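// Parse the table output of an openstack CLI command ("| key | value |" rows) and
// return the value associated with the given key.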
def get_value(key, output) {
    for (String line : output.split( '\n' )) {
        data = line.split( '\\|' )
        if (data.length > 1) {
            if ( data[1].trim() == key ) {
                return data[2].trim()
            }
        }
    }
}

////////////////////////////////////////////////////////////////////////////////////////
// Main Script
////////////////////////////////////////////////////////////////////////////////////////
node("${params.NODE}") {

    INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
    INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
    SSH_KEY = '~/hive/cicd_rsa'
    sh 'env'

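    // branch name with the dots removed, used to derive container and image tag names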
    tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./,"")

    stage("Checkout") {
        checkout scm
    }

    ci_helper = load "jenkins/ci-pipelines/ci_helper.groovy"

    def upstream_main_job = params.UPSTREAM_SUFFIX

    // upstream jobs always use merged artifacts
    upstream_main_job += '-merge'
    container_name_prefix = "osm-${tag_or_branch}"
    container_name = "${container_name_prefix}"

    keep_artifacts = false
    if ( JOB_NAME.contains('merge') ) {
        container_name += "-merge"

        // On a merge job, we keep artifacts on smoke success
        keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
    }
    container_name += "-${BUILD_NUMBER}"

    server_id = null
    http_server_name = null
    devopstempdir = null
    useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")

    try {
        builtModules = [:]
///////////////////////////////////////////////////////////////////////////////////////
// Fetch stage 2 .deb artifacts
///////////////////////////////////////////////////////////////////////////////////////
        stage("Copy Artifacts") {
            // cleanup any previous repo
            sh 'rm -rf repo'
            dir("repo") {
                packageList = []
                dir("${RELEASE}") {
                    RELEASE_DIR = sh(returnStdout: true, script: 'pwd').trim()

                    // Check whether an upstream artifact from a specific build number has been requested.
                    // This is the case for a merge build whose upstream merge build is not yet complete (and so
                    // not yet deemed successful): the upstream job calls this downstream job with its build artifact.
                    def upstreamComponent=""
                    if ( params.UPSTREAM_JOB_NAME ) {
                        println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")

                        step ([$class: 'CopyArtifact',
                               projectName: "${params.UPSTREAM_JOB_NAME}",
                               selector: [$class: 'SpecificBuildSelector',
                               buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
                              ])

                        upstreamComponent = ci_helper.get_mdg_from_project(
                            ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
                        def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
                        dir("$upstreamComponent") {
                            // the upstream job name carries a project suffix, which needs to be stripped off
                            def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
                            def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
                                upstreamComponent,
                                GERRIT_BRANCH,
                                "${project_without_branch} :: ${GERRIT_BRANCH}",
                                buildNumber)

                            packageList.addAll(packages)
                            println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
                        }
                    }

                    parallelSteps = [:]
                    def list = ["RO", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "NG-UI", "PLA", "tests"]
                    if (upstreamComponent.length()>0) {
                        println("Skipping upstream fetch of "+upstreamComponent)
                        list.remove(upstreamComponent)
                    }
                    for (buildStep in list) {
                        def component = buildStep
                        parallelSteps[component] = {
                            dir("$component") {
                                println("Fetching artifact for ${component}")
                                step ([$class: 'CopyArtifact',
                                       projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])

                                // grab the archives from the stage_2 builds (i.e. the artifacts stored from a merge)
                                def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
                                    component,
                                    GERRIT_BRANCH,
                                    "${component}${upstream_main_job} :: ${GERRIT_BRANCH}",
                                    ci_helper.get_env_value('build.env','BUILD_NUMBER'))
                                packageList.addAll(packages)
                                println("Fetched ${component}: ${packages}")
                                sh "rm -rf dists"
                            }
                        }
                    }
                    parallel parallelSteps

///////////////////////////////////////////////////////////////////////////////////////
// Create Devops APT repository
///////////////////////////////////////////////////////////////////////////////////////
                    sh "mkdir -p pool"
                    for (component in [ "devops", "IM", "osmclient" ]) {
                        sh "ls -al ${component}/pool/"
                        sh "cp -r ${component}/pool/* pool/"
                        sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
                        sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
                        sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
                        sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
                    }

                    // create and sign the release file
                    sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
                    sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"

                    // copy the public key into the release folder
                    // this pulls the key from the home dir of the current user (jenkins)
                    sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
                    sh "cp ~/${REPO_KEY_NAME} ."
                }

                // start an apache server to serve up the packages
                http_server_name = "${container_name}-apache"

                pwd = sh(returnStdout: true, script: 'pwd').trim()
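                // ask the OS for a random free TCP port on which to publish the repository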
                repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim()
                repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port)
                NODE_IP_ADDRESS=sh(returnStdout: true, script:
                    "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
            }

            // Unpack the devops package into a temporary location so that the upstream version is used if it was part of a patch
            osm_devops_dpkg = sh(returnStdout: true, script: "find ./repo/release/pool/ -name osm-devops*.deb").trim()
            devopstempdir = sh(returnStdout: true, script: "mktemp -d").trim()
            println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
            sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
            OSM_DEVOPS="${devopstempdir}/usr/share/osm-devops"
            // Convert URLs from stage 2 packages to arguments that can be passed to docker build
            for (remotePath in packageList) {
                packageName=remotePath.substring(remotePath.lastIndexOf('/')+1)
                packageName=packageName.substring(0,packageName.indexOf('_'))
                builtModules[packageName]=remotePath
            }
        }

///////////////////////////////////////////////////////////////////////////////////////
// Build docker containers
///////////////////////////////////////////////////////////////////////////////////////
        dir(OSM_DEVOPS) {
            def remote = [:]
            error = null
            if ( params.DO_BUILD ) {
                withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
                                usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
                    sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
                }
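                // expose each stage-2 package URL to the Dockerfiles as a <MODULE>_URL build argument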
                moduleBuildArgs = ""
                for (packageName in builtModules.keySet()) {
                    envName=packageName.replaceAll("-","_").toUpperCase()+"_URL"
                    moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
                }
                dir ("docker") {
                    stage("Build") {
                        containerList = sh(returnStdout: true, script:
                            "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
                        containerList=Arrays.asList(containerList.split("\n"))
                        print(containerList)
                        parallelSteps = [:]
                        for (buildStep in containerList) {
                            def module = buildStep
                            def moduleName = buildStep.toLowerCase()
                            def moduleTag = container_name
                            parallelSteps[module] = {
                                dir("$module") {
                                    sh "docker build -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} ."
                                    println("Tagging ${moduleName}:${moduleTag}")
                                    sh "docker tag opensourcemano/${moduleName}:${moduleTag} ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
                                    sh "docker push ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
                                }
                            }
                        }
                        parallel parallelSteps
                    }
                }
            } // if ( params.DO_BUILD )

            if ( params.DO_INSTALL ) {
///////////////////////////////////////////////////////////////////////////////////////
// Launch VM
///////////////////////////////////////////////////////////////////////////////////////
                stage("Spawn Remote VM") {
                    println("Launching new VM")
                    output=sh(returnStdout: true, script: """#!/bin/sh -e
                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                        openstack server create --flavor osm.sanity \
                                                --image ubuntu18.04 \
                                                --key-name CICD \
                                                --property build_url="${BUILD_URL}" \
                                                --nic net-id=osm-ext \
                                                ${container_name}
                    """).trim()

                    server_id = get_value('id', output)

                    if (server_id == null) {
                        println("VM launch output: ")
                        println(output)
                        throw new Exception("VM Launch failed")
                    }
                    println("Target VM is ${server_id}, waiting for IP address to be assigned")

                    IP_ADDRESS = ""

                    while (IP_ADDRESS == "") {
                        output=sh(returnStdout: true, script: """#!/bin/sh -e
                            for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                            openstack server show ${server_id}
                        """).trim()
                        IP_ADDRESS = get_value('addresses', output)
                    }
                    IP_ADDRESS = IP_ADDRESS.split('=')[1]
                    println("Waiting for VM at ${IP_ADDRESS} to be reachable")

                    alive = false
                    while (! alive) {
                        output=sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim()
                        println("output is [$output]")
                        alive = output.contains("succeeded")
                    }
                    println("VM is ready and accepting ssh connections")
                } // stage("Spawn Remote VM")

///////////////////////////////////////////////////////////////////////////////////////
// Installation
///////////////////////////////////////////////////////////////////////////////////////
                stage("Install") {
                    commit_id = ''
                    repo_distro = ''
                    repo_key_name = ''
                    release = ''

                    if ( params.COMMIT_ID )
                    {
                        commit_id = "-b ${params.COMMIT_ID}"
                    }

                    if ( params.REPO_DISTRO )
                    {
                        repo_distro = "-r ${params.REPO_DISTRO}"
                    }

                    if ( params.REPO_KEY_NAME )
                    {
                        repo_key_name = "-k ${params.REPO_KEY_NAME}"
                    }

                    if ( params.RELEASE )
                    {
                        release = "-R ${params.RELEASE}"
                    }

                    if ( params.REPOSITORY_BASE )
                    {
                        repo_base_url = "-u ${params.REPOSITORY_BASE}"
                    }
                    else
                    {
                        repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
                    }

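                    // connection settings for the remote VM, used by the sshCommand/sshGet steps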
                    remote.name = container_name
                    remote.host = IP_ADDRESS
                    remote.user = 'ubuntu'
                    remote.identityFile = SSH_KEY
                    remote.allowAnyHosts = true
                    remote.logLevel = 'INFO'
                    remote.pty = true

                    // Force time sync to avoid clock drift and invalid certificates
                    sshCommand remote: remote, command: """
                        sudo apt update
                        sudo apt install -y ntp
                        sudo service ntp stop
                        sudo ntpd -gq
                        sudo service ntp start
                    """

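                    // download the OSM installer and make the snap binaries visible on the PATH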
                    sshCommand remote: remote, command: """
                        wget https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh
                        chmod +x ./install_osm.sh
                        sed -i '1 i\\export PATH=/snap/bin:\${PATH}' ~/.bashrc
                    """

                    if ( useCharmedInstaller ) {
                        // Use local proxy for docker hub
                        sshCommand remote: remote, command: '''
                            sudo snap install microk8s --classic --channel=1.19/stable
                            sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" /var/snap/microk8s/current/args/containerd-template.toml
                            sudo systemctl restart snap.microk8s.daemon-containerd.service
                            sudo snap alias microk8s.kubectl kubectl
                        '''

                        withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
                                        usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
                            sshCommand remote: remote, command: """
                                ./install_osm.sh -y \
                                    ${repo_base_url} \
                                    ${repo_key_name} \
                                    ${release} -r unstable \
                                    --charmed  \
                                    --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
                                    --tag ${container_name}
                            """
                        }
                        prometheusHostname = "prometheus."+IP_ADDRESS+".nip.io"
                        prometheusPort = 80
                        osmHostname = "nbi."+IP_ADDRESS+".nip.io:443"
                    } else {
                        // Run -k8s installer here specifying internal docker registry and docker proxy
                        withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
                                        usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
                            sshCommand remote: remote, command: """
                                ./install_osm.sh -y \
                                    ${repo_base_url} \
                                    ${repo_key_name} \
                                    ${release} -r unstable \
                                    -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
                                    -p ${INTERNAL_DOCKER_PROXY} \
                                    -t ${container_name} \
                                    --nocachelxdimages
                            """
                        }
                        prometheusHostname = IP_ADDRESS
                        prometheusPort = 9091
                        osmHostname = IP_ADDRESS
                    }
                } // stage("Install")
///////////////////////////////////////////////////////////////////////////////////////
// Health check of installed OSM in remote vm
///////////////////////////////////////////////////////////////////////////////////////
                stage("OSM Health") {
                    stackName = "osm"
                    sshCommand remote: remote, command: """
                        /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
                    """
                } // stage("OSM Health")
            } // if ( params.DO_INSTALL )


///////////////////////////////////////////////////////////////////////////////////////
// Execute Robot tests
///////////////////////////////////////////////////////////////////////////////////////
            stage_archive = false
            if ( params.DO_ROBOT ) {
                try {
                    stage("System Integration Test") {
                        if ( useCharmedInstaller ) {
                            tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
                            sh(script: "touch ${tempdir}/hosts")
                            hostfile="${tempdir}/hosts"
                            sh """cat << EOF > ${hostfile}
127.0.0.1           localhost
${remote.host}      prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
EOF"""
                        } else {
                            hostfile=null
                        }

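                        // recover the Juju dashboard password from the remote installation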
                        jujuPassword=sshCommand remote: remote, command: """
                            echo `juju gui 2>&1 | grep password | cut -d: -f2`
                        """

                        run_robot_systest(
                            container_name,
                            params.ROBOT_TAG_NAME,
                            osmHostname,
                            prometheusHostname,
                            prometheusPort,
                            params.ROBOT_VIM,
                            params.ROBOT_PORT_MAPPING_VIM,
                            params.KUBECONFIG,
                            params.CLOUDS,
                            hostfile,
                            jujuPassword,
                            SSH_KEY,
                            params.ROBOT_PASS_THRESHOLD,
                            params.ROBOT_UNSTABLE_THRESHOLD
                        )
                    } // stage("System Integration Test")
                } finally {
                    stage("Archive Container Logs") {
                        // Collect and archive the container logs from the remote installation
                        archive_logs(remote)
                        if ( currentBuild.result != 'FAILURE' ) {
                            stage_archive = keep_artifacts
                        } else {
                            println ("System test failed, throwing error")
                            error = new Exception("System test failed")
                            currentBuild.result = 'FAILURE'
                            throw error
                        }
                    }
                }
            } // if ( params.DO_ROBOT )

            if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) {
                stage("Archive") {
                    sh "echo ${container_name} > build_version.txt"
                    archiveArtifacts artifacts: "build_version.txt", fingerprint: true

                    // Archive the tested repo
                    dir("${RELEASE_DIR}") {
                        ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested')
                    }
                    if ( params.DO_DOCKERPUSH ) {
                        stage("Publish to Dockerhub") {
                            parallelSteps = [:]
                            for (buildStep in containerList) {
                                def module = buildStep
                                def moduleName = buildStep.toLowerCase()
                                def dockerTag = params.DOCKER_TAG
                                def moduleTag = container_name

                                parallelSteps[module] = {
                                    dir("$module") {
                                        sh "docker tag opensourcemano/${moduleName}:${moduleTag} opensourcemano/${moduleName}:${dockerTag}"
                                        sh "docker push opensourcemano/${moduleName}:${dockerTag}"
                                    }
                                }
                            }
                            parallel parallelSteps
                        }

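                        // Promote the newest edge revision of each snap to the beta channel if it has changed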
                        stage("Snap promotion") {
                            def snaps = ["osmclient"]
                            sh "snapcraft login --with ~/.snapcraft/config"
                            for (snap in snaps) {
                                channel="latest/"
                                if (BRANCH_NAME.startsWith("v")) {
                                    channel=BRANCH_NAME.substring(1)+"/"
                                } else if (BRANCH_NAME!="master") {
                                    channel+="/"+BRANCH_NAME.replaceAll('/','-')
                                }
                                track=channel+"edge\\*"
                                edge_rev=sh(returnStdout: true,
                                    script: "snapcraft revisions $snap | " +
                                    "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
                                print "edge rev is $edge_rev"
                                track=channel+"beta\\*"
                                beta_rev=sh(returnStdout: true,
                                    script: "snapcraft revisions $snap | " +
                                    "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
                                print "beta rev is $beta_rev"

                                if ( edge_rev != beta_rev ) {
                                    print "Promoting $edge_rev to beta in place of $beta_rev"
                                    beta_track=channel+"beta"
                                    sh "snapcraft release $snap $edge_rev $beta_track"
                                }
                            }
                        } // stage("Snap promotion")
                    } // if ( params.DO_DOCKERPUSH )
                } // stage("Archive")
            } // if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive )
        } // dir(OSM_DEVOPS)
    } finally {
        if ( params.DO_INSTALL && server_id != null) {
            delete_vm = true
            if (error && params.SAVE_CONTAINER_ON_FAIL ) {
                delete_vm = false
            }
            if (!error && params.SAVE_CONTAINER_ON_PASS ) {
                delete_vm = false
            }

            if ( delete_vm ) {
                println("Deleting VM: $server_id")
                sh """#!/bin/sh -e
                    for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                    openstack server delete ${server_id}
                """
            } else {
                println("Saved VM $server_id in ETSI VIM")
            }
        }
        if ( http_server_name != null ) {
            sh "docker stop ${http_server_name} || true"
            sh "docker rm ${http_server_name} || true"
        }

        if ( devopstempdir != null ) {
            sh "rm -rf ${devopstempdir}"
        }
    }
}