osm/devops.git: jenkins/ci-pipelines/ci_stage_3.groovy
1 /* Copyright ETSI Contributors and Others
2  *
3  * All Rights Reserved.
4  *
5  *   Licensed under the Apache License, Version 2.0 (the "License"); you may
6  *   not use this file except in compliance with the License. You may obtain
7  *   a copy of the License at
8  *
9  *        http://www.apache.org/licenses/LICENSE-2.0
10  *
11  *   Unless required by applicable law or agreed to in writing, software
12  *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13  *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14  *   License for the specific language governing permissions and limitations
15  *   under the License.
16  */
17
18 properties([
19     parameters([
20         string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
21         string(defaultValue: 'system', description: '', name: 'NODE'),
22         string(defaultValue: '', description: '', name: 'BUILD_FROM_SOURCE'),
23         string(defaultValue: 'unstable', description: '', name: 'REPO_DISTRO'),
24         string(defaultValue: '', description: '', name: 'COMMIT_ID'),
25         string(defaultValue: '-stage_2', description: '', name: 'UPSTREAM_SUFFIX'),
26         string(defaultValue: 'pubkey.asc', description: '', name: 'REPO_KEY_NAME'),
27         string(defaultValue: 'release', description: '', name: 'RELEASE'),
28         string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
29         string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
30         string(defaultValue: '', description: '', name: 'REPOSITORY_BASE'),
31         string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
32         string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
33         string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
34         string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
35         string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
36         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
37         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
38         booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
39         booleanParam(defaultValue: true, description: '', name: 'DO_BUILD'),
40         booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
41         booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
42         booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
43         string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
44         booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
45         string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options', name: 'ROBOT_TAG_NAME'),
46         string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
47         string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml', description: 'Port mapping file for SDN assist in ETSI VIM', name: 'ROBOT_PORT_MAPPING_VIM'),
48         string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
49         string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
50         string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
51         string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed', name: 'ROBOT_PASS_THRESHOLD'),
52         string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
53     ])
54 ])
55
56
57 ////////////////////////////////////////////////////////////////////////////////////////
58 // Helper Functions
59 ////////////////////////////////////////////////////////////////////////////////////////
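   // Run the Robot Framework system tests from the opensourcemano/tests container against the given
   // OSM/Prometheus endpoints, then publish results with RobotPublisher using the pass/unstable thresholds.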
60 void run_robot_systest(String tagName,
61                        String testName,
62                        String osmHostname,
63                        String prometheusHostname,
64                        Integer prometheusPort=null,
65                        String envfile=null,
66                        String portmappingfile=null,
67                        String kubeconfig=null,
68                        String clouds=null,
69                        String hostfile=null,
70                        String jujuPassword=null,
71                        String osmRSAfile=null,
72                        String pass_th='0.0',
73                        String unstable_th='0.0') {
74     tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
75     if ( !envfile ) {
76         sh(script: "touch ${tempdir}/env")
77         envfile="${tempdir}/env"
78     }
79     PROMETHEUS_PORT_VAR = ""
80     if ( prometheusPort != null) {
81         PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT="+prometheusPort
82     }
83     hostfilemount=""
84     if ( hostfile ) {
85         hostfilemount="-v "+hostfile+":/etc/hosts"
86     }
87
88     JUJU_PASSWORD_VAR = ""
89     if ( jujuPassword != null) {
90         JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD="+jujuPassword
91     }
92
93     try {
94         sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}"
95     } finally {
96         sh "cp ${tempdir}/* ."
97         outputDirectory = sh(returnStdout: true, script: "pwd").trim()
98         println ("Present Directory is : ${outputDirectory}")
99         step([
100             $class : 'RobotPublisher',
101             outputPath : "${outputDirectory}",
102             outputFileName : "*.xml",
103             disableArchiveOutput : false,
104             reportFileName : "report.html",
105             logFileName : "log.html",
106             passThreshold : pass_th,
107             unstableThreshold: unstable_th,
108             otherFiles : "*.png",
109         ])
110     }
111 }
112
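    // Collect OSM logs from the remote VM over SSH (per pod with the charmed installer, per
    // deployment/statefulset otherwise) and archive them as Jenkins build artifacts.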
113 def archive_logs(remote) {
114
115     sshCommand remote: remote, command: '''mkdir -p logs'''
116     if (useCharmedInstaller) {
117         sshCommand remote: remote, command: '''
118             for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
119                 logfile=`echo $container | cut -d- -f1`
120                 echo "Extracting log for $logfile"
121                 kubectl logs -n osm $container --timestamps=true > logs/$logfile.log 2>&1
122             done
123         '''
124     } else {
125         sshCommand remote: remote, command: '''
126             for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
127                 echo "Extracting log for $deployment"
128                 kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers > logs/$deployment.log 2>&1
129             done
130         '''
131         sshCommand remote: remote, command: '''
132             for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
133                 echo "Extracting log for $statefulset"
134                 kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers > logs/$statefulset.log 2>&1
135             done
136         '''
137     }
138
139     sh "rm -rf logs"
140     sshCommand remote: remote, command: '''ls -al logs'''
141     sshGet remote: remote, from: 'logs', into: '.', override: true
142     sh "cp logs/* ."
143     archiveArtifacts artifacts: '*.log'
144 }
145
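    // Parse the pipe-separated table printed by the OpenStack CLI and return the value for the
    // given key (e.g. 'id' or 'addresses'); returns null when the key is not present.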
146 def get_value(key, output) {
147     for (String line : output.split( '\n' )) {
148         data = line.split( '\\|' )
149         if (data.length > 1) {
150             if ( data[1].trim() == key ) {
151                 return data[2].trim()
152             }
153         }
154     }
155 }
156
157 ////////////////////////////////////////////////////////////////////////////////////////
158 // Main Script
159 ////////////////////////////////////////////////////////////////////////////////////////
160 node("${params.NODE}") {
161
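    // CI infrastructure constants: internal Docker registry, local Docker Hub proxy and SSH key for test VMs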
162     INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
163     INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
164     SSH_KEY = '~/hive/cicd_rsa'
165     sh 'env'
166
167     tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./,"")
168
169     stage("Checkout") {
170         checkout scm
171     }
172
173     ci_helper = load "jenkins/ci-pipelines/ci_helper.groovy"
174
175     def upstream_main_job = params.UPSTREAM_SUFFIX
176
177     // upstream jobs always use merged artifacts
178     upstream_main_job += '-merge'
179     container_name_prefix = "osm-${tag_or_branch}"
180     container_name = "${container_name_prefix}"
181
182     keep_artifacts = false
183     if ( JOB_NAME.contains('merge') ) {
184         container_name += "-merge"
185
186         // On a merge job, we keep artifacts on smoke success
187         keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
188     }
189     container_name += "-${BUILD_NUMBER}"
190
191     server_id = null
192     http_server_name = null
193     devopstempdir = null
194     useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")
195
196     try {
197         builtModules = [:]
198 ///////////////////////////////////////////////////////////////////////////////////////
199 // Fetch stage 2 .deb artifacts
200 ///////////////////////////////////////////////////////////////////////////////////////
201         stage("Copy Artifacts") {
202             // cleanup any previous repo
203             sh 'rm -rf repo'
204             dir("repo") {
205                 packageList = []
206                 dir("${RELEASE}") {
207                     RELEASE_DIR = sh(returnStdout:true,  script: 'pwd').trim()
208
209                     // Check whether an upstream artifact based on a specific build number has been requested.
210                     // This is the case of a merge build when the upstream merge build is not yet complete (it is
211                     // not deemed a successful build yet); the upstream job calls this downstream job with its build artifact.
212                     def upstreamComponent=""
213                     if ( params.UPSTREAM_JOB_NAME ) {
214                         println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
215                         lock('Artifactory') {
216                             step ([$class: 'CopyArtifact',
217                                 projectName: "${params.UPSTREAM_JOB_NAME}",
218                                 selector: [$class: 'SpecificBuildSelector',
219                                 buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
220                                 ])
221
222                             upstreamComponent = ci_helper.get_mdg_from_project(
223                                 ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
224                             def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
225                             dir("$upstreamComponent") {
226                                 // the upstream job name contains a suffix with the project; strip it off
227                                 def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
228                                 def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
229                                     upstreamComponent,
230                                     GERRIT_BRANCH,
231                                     "${project_without_branch} :: ${GERRIT_BRANCH}",
232                                     buildNumber)
233
234                                 packageList.addAll(packages)
235                                 println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
236                             }
237                         } // lock artifactory
238                     }
239
240                     parallelSteps = [:]
241                     def list = ["RO", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "NG-UI", "PLA", "tests"]
242                     if (upstreamComponent.length()>0) {
243                         println("Skipping upstream fetch of "+upstreamComponent)
244                         list.remove(upstreamComponent)
245                     }
246                     for (buildStep in list) {
247                         def component = buildStep
248                         parallelSteps[component] = {
249                             dir("$component") {
250                                 println("Fetching artifact for ${component}")
251                                 step ([$class: 'CopyArtifact',
252                                        projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
253
254                                 // grab the archives from the stage_2 builds (i.e. the artifacts stored on merge)
255                                 def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
256                                     component,
257                                     GERRIT_BRANCH,
258                                     "${component}${upstream_main_job} :: ${GERRIT_BRANCH}",
259                                     ci_helper.get_env_value('build.env','BUILD_NUMBER'))
260                                 packageList.addAll(packages)
261                                 println("Fetched ${component}: ${packages}")
262                                 sh "rm -rf dists"
263                             }
264                         }
265                     }
266                     lock('Artifactory') {
267                         parallel parallelSteps
268                     }
269
270 ///////////////////////////////////////////////////////////////////////////////////////
271 // Create Devops APT repository
272 ///////////////////////////////////////////////////////////////////////////////////////
273                     sh "mkdir -p pool"
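                        // Copy the .debs of each locally served module into the pool, sign them,
                        // and build a per-component Packages index under dists/<REPO_DISTRO>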
274                     for (component in [ "devops", "IM", "osmclient" ]) {
275                         sh "ls -al ${component}/pool/"
276                         sh "cp -r ${component}/pool/* pool/"
277                         sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
278                         sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
279                         sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
280                         sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
281                     }
282
283                     // create and sign the release file
284                     sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
285                     sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"
286
287                     // copy the public key into the release folder
288                     // this pulls the key from the home dir of the current user (jenkins)
289                     sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
290                     sh "cp ~/${REPO_KEY_NAME} ."
291                 }
292
293                 // start an apache server to serve up the packages
294                 http_server_name = "${container_name}-apache"
295
296                 pwd = sh(returnStdout:true,  script: 'pwd').trim()
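                    // Pick a free TCP port (by binding to port 0) on which to serve the local APT repository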
297                 repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
298                                'print(s.getsockname()[1]); s.close()\');',
299                                returnStdout: true).trim()
300                 internal_docker_http_server_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
301                 NODE_IP_ADDRESS = sh(returnStdout: true, script:
302                     "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
303                 ci_helper.check_status_http_server(NODE_IP_ADDRESS, repo_port)
304             }
305
306             // Unpack the devops package into a temporary location so that the upstream version is used if it was part of this patch
307             osm_devops_dpkg = sh(returnStdout: true, script: "find ./repo/release/pool/ -name osm-devops*.deb").trim()
308             devopstempdir = sh(returnStdout: true, script: "mktemp -d").trim()
309             println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
310             sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
311             OSM_DEVOPS="${devopstempdir}/usr/share/osm-devops"
312             // Convert URLs from stage 2 packages to arguments that can be passed to docker build
313             for (remotePath in packageList) {
314                 packageName=remotePath.substring(remotePath.lastIndexOf('/')+1)
315                 packageName=packageName.substring(0,packageName.indexOf('_'))
316                 builtModules[packageName]=remotePath
317             }
318         }
319
320 ///////////////////////////////////////////////////////////////////////////////////////
321 // Build docker containers
322 ///////////////////////////////////////////////////////////////////////////////////////
323         dir(OSM_DEVOPS) {
324             def remote = [:]
325             error = null
326             if ( params.DO_BUILD ) {
327                 withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
328                                 usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
329                     sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
330                 }
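                    // Pass the URL of each stage-2 package to docker build as a <MODULE>_URL build argument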
331                 moduleBuildArgs = ""
332                 for (packageName in builtModules.keySet()) {
333                     envName=packageName.replaceAll("-","_").toUpperCase()+"_URL"
334                     moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
335                 }
336                 dir ("docker") {
337                     stage("Build") {
338                         containerList = sh(returnStdout: true, script:
339                             "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
340                         containerList=Arrays.asList(containerList.split("\n"))
341                         print(containerList)
342                         parallelSteps = [:]
343                         for (buildStep in containerList) {
344                             def module = buildStep
345                             def moduleName = buildStep.toLowerCase()
346                             def moduleTag = container_name
347                             parallelSteps[module] = {
348                                 dir("$module") {
349                                     sh "docker build -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} ."
350                                     println("Tagging ${moduleName}:${moduleTag}")
351                                     sh "docker tag opensourcemano/${moduleName}:${moduleTag} ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
352                                     sh "docker push ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
353                                 }
354                             }
355                         }
356                         parallel parallelSteps
357                     }
358                 }
359             } // if ( params.DO_BUILD )
360
361             if ( params.DO_INSTALL ) {
362 ///////////////////////////////////////////////////////////////////////////////////////
363 // Launch VM
364 ///////////////////////////////////////////////////////////////////////////////////////
365                 stage("Spawn Remote VM") {
366                     println("Launching new VM")
367                     output=sh(returnStdout: true, script: """#!/bin/sh -e
368                         for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
369                         openstack server create --flavor osm.sanity \
370                                                 --image ${OPENSTACK_BASE_IMAGE} \
371                                                 --key-name CICD \
372                                                 --property build_url="${BUILD_URL}" \
373                                                 --nic net-id=osm-ext \
374                                                 ${container_name}
375                     """).trim()
376
377                     server_id = get_value('id', output)
378
379                     if (server_id == null) {
380                         println("VM launch output: ")
381                         println(output)
382                         throw new Exception("VM Launch failed")
383                     }
384                     println("Target VM is ${server_id}, waiting for IP address to be assigned")
385
386                     IP_ADDRESS = ""
387
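                        // Poll 'openstack server show' until an address of the form "net=IP" is reported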
388                     while (IP_ADDRESS == "") {
389                         output=sh(returnStdout: true, script: """#!/bin/sh -e
390                             for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
391                             openstack server show ${server_id}
392                         """).trim()
393                         IP_ADDRESS = get_value('addresses', output)
394                     }
395                     IP_ADDRESS = IP_ADDRESS.split('=')[1]
396                     println("Waiting for VM at ${IP_ADDRESS} to be reachable")
397
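                        // Retry SSH (5 s connect timeout) until the VM answers, giving up after one minute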
398                     alive = false
399                     timeout(time: 1, unit: 'MINUTES') {
400                         while (!alive) {
401                             output = sh(
402                                 returnStatus: true,
403                                 script: "ssh -T -i ${SSH_KEY} " +
404                                     "-o StrictHostKeyChecking=no " +
405                                     "-o UserKnownHostsFile=/dev/null " +
406                                     "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
407                             alive = (output == 0)
408                         }
409                     }
410                     println("VM is ready and accepting ssh connections")
411                 } // stage("Spawn Remote VM")
412
413 ///////////////////////////////////////////////////////////////////////////////////////
414 // Installation
415 ///////////////////////////////////////////////////////////////////////////////////////
416                 stage("Install") {
417                     commit_id = ''
418                     repo_distro = ''
419                     repo_key_name = ''
420                     release = ''
421
422                     if ( params.COMMIT_ID )
423                     {
424                         commit_id = "-b ${params.COMMIT_ID}"
425                     }
426
427                     if ( params.REPO_DISTRO )
428                     {
429                         repo_distro = "-r ${params.REPO_DISTRO}"
430                     }
431
432                     if ( params.REPO_KEY_NAME )
433                     {
434                         repo_key_name = "-k ${params.REPO_KEY_NAME}"
435                     }
436
437                     if ( params.RELEASE )
438                     {
439                         release = "-R ${params.RELEASE}"
440                     }
441
442                     if ( params.REPOSITORY_BASE )
443                     {
444                         repo_base_url = "-u ${params.REPOSITORY_BASE}"
445                     }
446                     else
447                     {
448                         repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
449                     }
450
451                     remote.name = container_name
452                     remote.host = IP_ADDRESS
453                     remote.user = 'ubuntu'
454                     remote.identityFile = SSH_KEY
455                     remote.allowAnyHosts = true
456                     remote.logLevel = 'INFO'
457                     remote.pty = true
458
459                     // Ensure the VM is ready
460                     sshCommand remote: remote, command: 'cloud-init status --wait'
461
462                     // Force time sync to avoid clock drift and invalid certificates
463                     sshCommand remote: remote, command: """
464                         sudo apt update
465                         sudo apt install -y chrony
466                         sudo service chrony stop
467                         sudo chronyd -vq
468                         sudo service chrony start
469                     """
470
471                     sshCommand remote: remote, command: """
472                         wget https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh
473                         chmod +x ./install_osm.sh
474                         sed -i '1 i\\export PATH=/snap/bin:\${PATH}' ~/.bashrc
475                     """
476
477                     if ( useCharmedInstaller ) {
478                         // Use local proxy for docker hub
479                         sshCommand remote: remote, command: '''
480                             sudo snap install microk8s --classic --channel=1.19/stable
481                             sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" /var/snap/microk8s/current/args/containerd-template.toml
482                             sudo systemctl restart snap.microk8s.daemon-containerd.service
483                             sudo snap alias microk8s.kubectl kubectl
484                         '''
485
486                         withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
487                                         usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
488                             sshCommand remote: remote, command: """
489                                 ./install_osm.sh -y \
490                                     ${repo_base_url} \
491                                     ${repo_key_name} \
492                                     ${release} -r unstable \
493                                     --charmed  \
494                                     --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
495                                     --tag ${container_name}
496                             """
497                         }
498                         prometheusHostname = "prometheus."+IP_ADDRESS+".nip.io"
499                         prometheusPort = 80
500                         osmHostname = "nbi."+IP_ADDRESS+".nip.io:443"
501                     } else {
502                         // Run -k8s installer here specifying internal docker registry and docker proxy
503                         withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
504                                         usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
505                             sshCommand remote: remote, command: """
506                                 ./install_osm.sh -y \
507                                     ${repo_base_url} \
508                                     ${repo_key_name} \
509                                     ${release} -r unstable \
510                                     -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
511                                     -p ${INTERNAL_DOCKER_PROXY} \
512                                     -t ${container_name} \
513                                     --nocachelxdimages
514                             """
515                         }
516                         prometheusHostname = IP_ADDRESS
517                         prometheusPort = 9091
518                         osmHostname = IP_ADDRESS
519                     }
520                 } // stage("Install")
521 ///////////////////////////////////////////////////////////////////////////////////////
522 // Health check of installed OSM in remote vm
523 ///////////////////////////////////////////////////////////////////////////////////////
524                 stage("OSM Health") {
525                     stackName = "osm"
526                     sshCommand remote: remote, command: """
527                         /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
528                     """
529                 } // stage("OSM Health")
530             } // if ( params.DO_INSTALL )
531
532
533 ///////////////////////////////////////////////////////////////////////////////////////
534 // Execute Robot tests
535 ///////////////////////////////////////////////////////////////////////////////////////
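                // stage_archive is only switched to keep_artifacts after a robot run that did not fail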
536             stage_archive = false
537             if ( params.DO_ROBOT ) {
538                 try {
539                     stage("System Integration Test") {
540                         if ( useCharmedInstaller ) {
541                             tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
542                             sh(script: "touch ${tempdir}/hosts")
543                             hostfile="${tempdir}/hosts"
544                             sh """cat << EOF > ${hostfile}
545 127.0.0.1           localhost
546 ${remote.host}      prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
547 EOF"""
548                         } else {
549                             hostfile=null
550                         }
551
552                         jujuPassword=sshCommand remote: remote, command: """
553                             echo `juju gui 2>&1 | grep password | cut -d: -f2`
554                         """
555
556                         run_robot_systest(
557                             container_name,
558                             params.ROBOT_TAG_NAME,
559                             osmHostname,
560                             prometheusHostname,
561                             prometheusPort,
562                             params.ROBOT_VIM,
563                             params.ROBOT_PORT_MAPPING_VIM,
564                             params.KUBECONFIG,
565                             params.CLOUDS,
566                             hostfile,
567                             jujuPassword,
568                             SSH_KEY,
569                             params.ROBOT_PASS_THRESHOLD,
570                             params.ROBOT_UNSTABLE_THRESHOLD
571                         )
572                     } // stage("System Integration Test")
573                 } finally {
574                     stage("Archive Container Logs") {
575                         // Archive the OSM container logs from the remote VM
576                         archive_logs(remote)
577                         if ( currentBuild.result != 'FAILURE' ) {
578                             stage_archive = keep_artifacts
579                         } else {
580                             println ("Systest test failed, throwing error")
581                             error = new Exception("Systest test failed")
582                             currentBuild.result = 'FAILURE'
583                             throw error
584                         }
585                     }
586                 }
587             } // if ( params.DO_ROBOT )
588
589             if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) {
590                 stage("Archive") {
591                     sh "echo ${container_name} > build_version.txt"
592                     archiveArtifacts artifacts: "build_version.txt", fingerprint: true
593
594                     // Archive the tested repo
595                     dir("${RELEASE_DIR}") {
596                         ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested')
597                     }
598                     if ( params.DO_DOCKERPUSH ) {
599                         stage("Publish to Dockerhub") {
600                             parallelSteps = [:]
601                             for (buildStep in containerList) {
602                                 def module = buildStep
603                                 def moduleName = buildStep.toLowerCase()
604                                 def dockerTag = params.DOCKER_TAG
605                                 def moduleTag = container_name
606
607                                 parallelSteps[module] = {
608                                     dir("$module") {
609                                         sh("docker pull ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}")
610                                         sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
611                                            opensourcemano/${moduleName}:${dockerTag}""")
612                                         sh "docker push opensourcemano/${moduleName}:${dockerTag}"
613                                     }
614                                 }
615                             }
616                             parallel parallelSteps
617                         }
618
619                         stage('Snap promotion') {
620                             withCredentials([string(credentialsId: 'Snapstore', variable: 'SNAPCRAFT_STORE_CREDENTIALS')]) {
621                                 snaps = ['osmclient']
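                                    // Promote the latest edge revision of each snap to beta when beta is behind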
622                                 for (snap in snaps) {
623                                     channel = 'latest/'
624                                     if (BRANCH_NAME.startsWith('v')) {
625                                         channel = BRANCH_NAME.substring(1) + '/'
626                                     } else if (BRANCH_NAME != 'master') {
627                                         channel += '/' + BRANCH_NAME.replaceAll('/', '-')
628                                     }
629                                     track = channel + 'edge\\*'
630                                     edge_rev = sh(returnStdout: true,
631                                         script: "snapcraft revisions $snap | " +
632                                         "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
633                                     print "edge rev is $edge_rev"
634                                     track = channel + 'beta\\*'
635                                     beta_rev = sh(returnStdout: true,
636                                         script: "snapcraft revisions $snap | " +
637                                         "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
638                                     print "beta rev is $beta_rev"
639
640                                     if (edge_rev != beta_rev) {
641                                         print "Promoting $edge_rev to beta in place of $beta_rev"
642                                         beta_track = channel + 'beta'
643                                         sh "snapcraft release $snap $edge_rev $beta_track"
644                                     }
645                                 }
646                             }
647                         } // stage("Snap promotion")
648                     } // if ( params.DO_DOCKERPUSH )
649                 } // stage("Archive")
650             } // if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive )
651         } // dir(OSM_DEVOPS)
652     } finally {
653         if ( params.DO_INSTALL && server_id != null) {
654             delete_vm = true
655             if (error && params.SAVE_CONTAINER_ON_FAIL ) {
656                 delete_vm = false
657             }
658             if (!error && params.SAVE_CONTAINER_ON_PASS ) {
659                 delete_vm = false
660             }
661
662             if ( delete_vm ) {
663                 println("Deleting VM: $server_id")
664                 sh """#!/bin/sh -e
665                     for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
666                     openstack server delete ${server_id}
667                 """
668             } else {
669                 println("Saved VM $server_id in ETSI VIM")
670             }
673         }
674         if ( http_server_name != null ) {
675             sh "docker stop ${http_server_name} || true"
676             sh "docker rm ${http_server_name} || true"
677         }
678
679         if ( devopstempdir != null ) {
680             sh "rm -rf ${devopstempdir}"
681         }
682     }
683 }