Adding TCP Dump
[osm/devops.git] jenkins/ci-pipelines/ci_stage_3.groovy
/* Copyright ETSI Contributors and Others
 *
 * All Rights Reserved.
 *
 *   Licensed under the Apache License, Version 2.0 (the "License"); you may
 *   not use this file except in compliance with the License. You may obtain
 *   a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *   License for the specific language governing permissions and limitations
 *   under the License.
 */

properties([
    parameters([
        string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
        string(defaultValue: 'system', description: '', name: 'NODE'),
        string(defaultValue: '', description: '', name: 'BUILD_FROM_SOURCE'),
        string(defaultValue: 'unstable', description: '', name: 'REPO_DISTRO'),
        string(defaultValue: '', description: '', name: 'COMMIT_ID'),
        string(defaultValue: '-stage_2', description: '', name: 'UPSTREAM_SUFFIX'),
        string(defaultValue: 'pubkey.asc', description: '', name: 'REPO_KEY_NAME'),
        string(defaultValue: 'release', description: '', name: 'RELEASE'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
        string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
        string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
        string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
        string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
        string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
        booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
        booleanParam(defaultValue: true, description: '', name: 'DO_BUILD'),
        booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
        booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
        string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
        booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
        string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options',
               name: 'ROBOT_TAG_NAME'),
        string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
        string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml',
               description: 'Port mapping file for SDN assist in ETSI VIM',
               name: 'ROBOT_PORT_MAPPING_VIM'),
        string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
        string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
        string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
        string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed',
               name: 'ROBOT_PASS_THRESHOLD'),
        string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable ' +
               '(if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
    ])
])

////////////////////////////////////////////////////////////////////////////////////////
// Helper Functions
////////////////////////////////////////////////////////////////////////////////////////
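// Runs the Robot Framework system tests from the opensourcemano/tests container against a
// deployed OSM instance. The clouds.yaml, kubeconfig, SSH key and port-mapping files are
// mounted into the container, reports are written to a temporary directory that is copied
// back into the workspace, and results are published with RobotPublisher using the given
// pass/unstable thresholds.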
void run_robot_systest(String tagName,
                       String testName,
                       String osmHostname,
                       String prometheusHostname,
                       Integer prometheusPort=null,
                       String envfile=null,
                       String portmappingfile=null,
                       String kubeconfig=null,
                       String clouds=null,
                       String hostfile=null,
                       String jujuPassword=null,
                       String osmRSAfile=null,
                       String passThreshold='0.0',
                       String unstableThreshold='0.0') {
    tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = ''
    if (envfile) {
        environmentFile = envfile
    } else {
        sh(script: "touch ${tempdir}/env")
        environmentFile = "${tempdir}/env"
    }
    PROMETHEUS_PORT_VAR = ''
    if (prometheusPort != null) {
        PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT=${prometheusPort}"
    }
    hostfilemount = ''
    if (hostfile) {
        hostfilemount = "-v ${hostfile}:/etc/hosts"
    }

    JUJU_PASSWORD_VAR = ''
    if (jujuPassword != null) {
        JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD=${jujuPassword}"
    }

    try {
        sh("""docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} \
           ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${environmentFile} \
           -v ${clouds}:/etc/openstack/clouds.yaml \
           -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports \
           -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} \
           -c -t ${testName}""")
    } finally {
        sh("cp ${tempdir}/* .")
        outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
        println("Present Directory is : ${outputDirectory}")
        step([
            $class : 'RobotPublisher',
            outputPath : "${outputDirectory}",
            outputFileName : '*.xml',
            disableArchiveOutput : false,
            reportFileName : 'report.html',
            logFileName : 'log.html',
            passThreshold : passThreshold,
            unstableThreshold: unstableThreshold,
            otherFiles : '*.png',
        ])
    }
}

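// Collects logs from the remote VM for archiving: per-pod logs when the charmed installer
// was used, otherwise per-deployment and per-statefulset logs from the osm namespace. The
// ens3.pcap packet capture started before installation is fetched as well, and everything
// is attached to the build as *.log / *.pcap artifacts.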
void archive_logs(Map remote) {

    sshCommand remote: remote, command: '''mkdir -p logs'''
    if (useCharmedInstaller) {
        sshCommand remote: remote, command: '''
            for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                logfile=`echo $container | cut -d- -f1`
                echo "Extracting log for $logfile"
                kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
            done
        '''
    } else {
        sshCommand remote: remote, command: '''
            for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $deployment"
                kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 \
                > logs/$deployment.log
            done
        '''
        sshCommand remote: remote, command: '''
            for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $statefulset"
                kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 \
                > logs/$statefulset.log
            done
        '''
    }

    sh 'rm -rf logs'
    sshCommand remote: remote, command: '''ls -al logs'''
    sshGet remote: remote, from: 'logs', into: '.', override: true
    sh 'cp logs/* .'
    sshGet remote: remote, from: 'ens3.pcap', into: 'ens3.pcap', override: true
    archiveArtifacts artifacts: '*.log, *.pcap'
}

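// Extracts a single field from the ASCII-table output of the openstack CLI, where each row
// looks like '| key | value |'. For illustration (hypothetical output), given a row such as
// '| id | 1234-abcd |', get_value('id', output) would return '1234-abcd'.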
String get_value(String key, String output) {
    for (String line : output.split( '\n' )) {
        data = line.split( '\\|' )
        if (data.length > 1) {
            if ( data[1].trim() == key ) {
                return data[2].trim()
            }
        }
    }
}

////////////////////////////////////////////////////////////////////////////////////////
// Main Script
////////////////////////////////////////////////////////////////////////////////////////
node("${params.NODE}") {

    INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
    INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
    APT_PROXY = 'http://172.21.1.1:3142'
    SSH_KEY = '~/hive/cicd_rsa'
    ARCHIVE_LOGS_FLAG = false
    sh 'env'

    tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')

    stage('Checkout') {
        checkout scm
    }

    ci_helper = load 'jenkins/ci-pipelines/ci_helper.groovy'

    def upstreamMainJob = params.UPSTREAM_SUFFIX

    // upstream jobs always use merged artifacts
    upstreamMainJob += '-merge'
    containerNamePrefix = "osm-${tag_or_branch}"
    containerName = "${containerNamePrefix}"

    keep_artifacts = false
    if ( JOB_NAME.contains('merge') ) {
        containerName += '-merge'

        // On a merge job, we keep artifacts on smoke success
        keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
    }
    containerName += "-${BUILD_NUMBER}"

    server_id = null
    http_server_name = null
    devopstempdir = null
    useCharmedInstaller = params.INSTALLER.equalsIgnoreCase('charmed')

    try {
        builtModules = [:]
///////////////////////////////////////////////////////////////////////////////////////
// Fetch stage 2 .deb artifacts
///////////////////////////////////////////////////////////////////////////////////////
        stage('Copy Artifacts') {
            // cleanup any previous repo
            sh 'rm -rf repo'
            dir('repo') {
                packageList = []
                dir("${RELEASE}") {
                    RELEASE_DIR = sh(returnStdout: true, script: 'pwd').trim()

                    // Check if an upstream artifact based on a specific build number has been requested.
                    // This is the case of a merge build where the upstream merge build is not yet complete
                    // (it is not deemed a successful build yet), so the upstream job calls this downstream
                    // job with its build artifact.
                    def upstreamComponent = ''
                    if (params.UPSTREAM_JOB_NAME) {
                        println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
                        lock('Artifactory') {
                            step ([$class: 'CopyArtifact',
                                projectName: "${params.UPSTREAM_JOB_NAME}",
                                selector: [$class: 'SpecificBuildSelector',
                                buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
                                ])

                            upstreamComponent = ci_helper.get_mdg_from_project(
                                ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
                            def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
                            dir("$upstreamComponent") {
                                // the upstream job name contains a suffix with the project; strip it off
                                project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
                                packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
                                    upstreamComponent,
                                    GERRIT_BRANCH,
                                    "${project_without_branch} :: ${GERRIT_BRANCH}",
                                    buildNumber)

                                packageList.addAll(packages)
                                println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
                            }
                        } // lock artifactory
                    }

                    parallelSteps = [:]
                    list = ['RO', 'osmclient', 'IM', 'devops', 'MON', 'N2VC', 'NBI',
                            'common', 'LCM', 'POL', 'NG-UI', 'PLA', 'tests']
                    if (upstreamComponent.length() > 0) {
                        println("Skipping upstream fetch of ${upstreamComponent}")
                        list.remove(upstreamComponent)
                    }
                    for (buildStep in list) {
                        def component = buildStep
                        parallelSteps[component] = {
                            dir("$component") {
                                println("Fetching artifact for ${component}")
                                step([$class: 'CopyArtifact',
                                       projectName: "${component}${upstreamMainJob}/${GERRIT_BRANCH}"])

                                // grab the archives from the stage_2 builds
                                // (i.e. the artifacts that were stored on a merge)
                                packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
                                    component,
                                    GERRIT_BRANCH,
                                    "${component}${upstreamMainJob} :: ${GERRIT_BRANCH}",
                                    ci_helper.get_env_value('build.env', 'BUILD_NUMBER'))
                                packageList.addAll(packages)
                                println("Fetched ${component}: ${packages}")
                                sh 'rm -rf dists'
                            }
                        }
                    }
                    lock('Artifactory') {
                        parallel parallelSteps
                    }

///////////////////////////////////////////////////////////////////////////////////////
// Create Devops APT repository
///////////////////////////////////////////////////////////////////////////////////////
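                    // For each of the devops, IM and osmclient components: copy its .deb files
                    // into a common pool, sign them with dpkg-sig, and generate the Packages
                    // index with apt-ftparchive. A signed Release file is then created so the
                    // target VM can install from this ad-hoc repository over HTTP.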
                    sh 'mkdir -p pool'
                    for (component in [ 'devops', 'IM', 'osmclient' ]) {
                        sh "ls -al ${component}/pool/"
                        sh "cp -r ${component}/pool/* pool/"
                        sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
                        sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
                        sh("""apt-ftparchive packages pool/${component} \
                           > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages""")
                        sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
                    }

                    // create and sign the release file
                    sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
                    sh("""gpg --yes -abs -u ${GPG_KEY_NAME} \
                       -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release""")

                    // copy the public key into the release folder
                    // this pulls the key from the home dir of the current user (jenkins)
                    sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
                    sh "cp ~/${REPO_KEY_NAME} ."
                }

                // start an apache server to serve up the packages
                http_server_name = "${containerName}-apache"

                pwd = sh(returnStdout: true, script: 'pwd').trim()
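                // Ask the OS for a free ephemeral port by binding a socket to port 0; the
                // local APT repository will be served on this port by the apache container
                // started just below.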
                repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
                               'print(s.getsockname()[1]); s.close()\');',
                               returnStdout: true).trim()
                repo_base_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
                NODE_IP_ADDRESS = sh(returnStdout: true, script:
                    "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
            }

            // Unpack the devops package into a temporary location so that the upstream version
            // is used if it was part of this patch
            osm_devops_dpkg = sh(returnStdout: true, script: 'find ./repo/release/pool/ -name osm-devops*.deb').trim()
            devopstempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
            println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
            sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
            OSM_DEVOPS = "${devopstempdir}/usr/share/osm-devops"
            // Convert URLs from stage 2 packages to arguments that can be passed to docker build
            for (remotePath in packageList) {
                packageName = remotePath[remotePath.lastIndexOf('/') + 1 .. -1]
                packageName = packageName[0 .. packageName.indexOf('_') - 1]
                builtModules[packageName] = remotePath
            }
        }

///////////////////////////////////////////////////////////////////////////////////////
// Build docker containers
///////////////////////////////////////////////////////////////////////////////////////
        dir(OSM_DEVOPS) {
            Map remote = [:]
            error = null
            if ( params.DO_BUILD ) {
                withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
                                usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
                    sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
                }
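                // Each stage_2 .deb URL is passed to docker build as a <MODULE>_URL build
                // argument, and CACHE_DATE changes on every run so that the corresponding
                // layers are rebuilt instead of being taken from the Docker cache.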
                datetime = sh(returnStdout: true, script: 'date +%Y-%m-%d:%H:%M:%S').trim()
                moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}"
                for (packageName in builtModules.keySet()) {
                    envName = packageName.replaceAll('-', '_').toUpperCase() + '_URL'
                    moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
                }
                dir('docker') {
                    stage('Build') {
                        containerList = sh(returnStdout: true, script:
                            "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
                        containerList = Arrays.asList(containerList.split('\n'))
                        print(containerList)
                        parallelSteps = [:]
                        for (buildStep in containerList) {
                            def module = buildStep
                            def moduleName = buildStep.toLowerCase()
                            def moduleTag = containerName
                            parallelSteps[module] = {
                                dir("$module") {
                                    sh("""docker build --build-arg APT_PROXY=${APT_PROXY} \
                                    -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} .""")
                                    println("Tagging ${moduleName}:${moduleTag}")
                                    sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
                                    ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
                                    sh("""docker push \
                                    ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
                                }
                            }
                        }
                        parallel parallelSteps
                    }
                }
            } // if (params.DO_BUILD)

            if (params.DO_INSTALL) {
///////////////////////////////////////////////////////////////////////////////////////
// Launch VM
///////////////////////////////////////////////////////////////////////////////////////
                stage('Spawn Remote VM') {
                    println('Launching new VM')
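                    // Source the OS_* credentials from robot-systest.cfg, create the VM in the
                    // ETSI VIM, then poll 'openstack server show' until an address is assigned
                    // and the instance answers on SSH.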
                    output = sh(returnStdout: true, script: """#!/bin/sh -e
                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                        openstack server create --flavor osm.sanity \
                                                --image ${OPENSTACK_BASE_IMAGE} \
                                                --key-name CICD \
                                                --property build_url="${BUILD_URL}" \
                                                --nic net-id=osm-ext \
                                                ${containerName}
                    """).trim()

                    server_id = get_value('id', output)

                    if (server_id == null) {
                        println('VM launch output: ')
                        println(output)
                        throw new Exception('VM Launch failed')
                    }
                    println("Target VM is ${server_id}, waiting for IP address to be assigned")

                    IP_ADDRESS = ''

                    while (IP_ADDRESS == '') {
                        output = sh(returnStdout: true, script: """#!/bin/sh -e
                            for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                            openstack server show ${server_id}
                        """).trim()
                        IP_ADDRESS = get_value('addresses', output)
                    }
                    IP_ADDRESS = IP_ADDRESS.split('=')[1]
                    println("Waiting for VM at ${IP_ADDRESS} to be reachable")

                    alive = false
                    timeout(time: 1, unit: 'MINUTES') {
                        while (!alive) {
                            output = sh(
                                returnStatus: true,
                                script: "ssh -T -i ${SSH_KEY} " +
                                    "-o StrictHostKeyChecking=no " +
                                    "-o UserKnownHostsFile=/dev/null " +
                                    "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
                            alive = (output == 0)
                        }
                    }
                    println('VM is ready and accepting ssh connections')
                } // stage("Spawn Remote VM")

///////////////////////////////////////////////////////////////////////////////////////
// Checks before installation
///////////////////////////////////////////////////////////////////////////////////////
                stage('Checks before installation') {
                    remote = [
                        name: containerName,
                        host: IP_ADDRESS,
                        user: 'ubuntu',
                        identityFile: SSH_KEY,
                        allowAnyHosts: true,
                        logLevel: 'INFO',
                        pty: true
                    ]

                    // Start a background packet capture on ens3; it is stopped after the install
                    // and the resulting ens3.pcap is collected by archive_logs()
                    sshCommand remote: remote, command: 'nohup sudo tcpdump -i ens3 -w ens3.pcap -s 400 & sleep 5'

                    // Force time sync to avoid clock drift and invalid certificates
                    sshCommand remote: remote, command: 'sudo apt-get update'
                    sshCommand remote: remote, command: 'sudo apt-get install -y chrony'
                    sshCommand remote: remote, command: 'sudo service chrony stop'
                    sshCommand remote: remote, command: 'sudo chronyd -vq'
                    sshCommand remote: remote, command: 'sudo service chrony start'

                } // stage("Checks before installation")
///////////////////////////////////////////////////////////////////////////////////////
// Installation
///////////////////////////////////////////////////////////////////////////////////////
                stage('Install') {
                    commit_id = ''
                    repo_distro = ''
                    repo_key_name = ''
                    release = ''

                    if (params.COMMIT_ID) {
                        commit_id = "-b ${params.COMMIT_ID}"
                    }
                    if (params.REPO_DISTRO) {
                        repo_distro = "-r ${params.REPO_DISTRO}"
                    }
                    if (params.REPO_KEY_NAME) {
                        repo_key_name = "-k ${params.REPO_KEY_NAME}"
                    }
                    if (params.RELEASE) {
                        release = "-R ${params.RELEASE}"
                    }
                    if (params.REPOSITORY_BASE) {
                        repo_base_url = "-u ${params.REPOSITORY_BASE}"
                    } else {
                        repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
                    }

                    remote = [
                        name: containerName,
                        host: IP_ADDRESS,
                        user: 'ubuntu',
                        identityFile: SSH_KEY,
                        allowAnyHosts: true,
                        logLevel: 'INFO',
                        pty: true
                    ]

                    sshCommand remote: remote, command: '''
                        wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh
                        chmod +x ./install_osm.sh
                        sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
                    '''

                    Map gitlabCredentialsMap = [$class: 'UsernamePasswordMultiBinding',
                                                credentialsId: 'gitlab-registry',
                                                usernameVariable: 'USERNAME',
                                                passwordVariable: 'PASSWORD']
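                    // Two installer paths: the charmed installer runs on microk8s with a local
                    // mirror of Docker Hub, exposing NBI and Prometheus through nip.io hostnames
                    // (ports 443/80); the default (-k8s) installer is pointed at the internal
                    // registry and proxy and exposes them directly on the VM IP (Prometheus on 9091).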
                    if (useCharmedInstaller) {
                        // Use local proxy for docker hub
                        sshCommand remote: remote, command: '''
                            sudo snap install microk8s --classic --channel=1.19/stable
                            sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" \
                            /var/snap/microk8s/current/args/containerd-template.toml
                            sudo systemctl restart snap.microk8s.daemon-containerd.service
                            sudo snap alias microk8s.kubectl kubectl
                        '''

                        withCredentials([gitlabCredentialsMap]) {
                            sshCommand remote: remote, command: """
                                ./install_osm.sh -y \
                                    ${repo_base_url} \
                                    ${repo_key_name} \
                                    ${release} -r unstable \
                                    --charmed  \
                                    --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
                                    --tag ${containerName}
                            """
                        }
                        prometheusHostname = "prometheus.${IP_ADDRESS}.nip.io"
                        prometheusPort = 80
                        osmHostname = "nbi.${IP_ADDRESS}.nip.io:443"
                    } else {
                        // Run -k8s installer here specifying internal docker registry and docker proxy
                        withCredentials([gitlabCredentialsMap]) {
                            sshCommand remote: remote, command: """
                                ./install_osm.sh -y \
                                    ${repo_base_url} \
                                    ${repo_key_name} \
                                    ${release} -r unstable \
                                    -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
                                    -p ${INTERNAL_DOCKER_PROXY} \
                                    -t ${containerName}
                            """
                        }
                        prometheusHostname = IP_ADDRESS
                        prometheusPort = 9091
                        osmHostname = IP_ADDRESS
                    }
                    sshCommand remote: remote, command: """
                        sudo killall tcpdump
                    """

                } // stage("Install")
///////////////////////////////////////////////////////////////////////////////////////
// Health check of installed OSM in remote vm
///////////////////////////////////////////////////////////////////////////////////////
                stage('OSM Health') {
                    // if this point is reached, logs should be archived
                    ARCHIVE_LOGS_FLAG = true
                    stackName = 'osm'
                    sshCommand remote: remote, command: """
                        /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
                    """
                } // stage("OSM Health")
            } // if ( params.DO_INSTALL )


///////////////////////////////////////////////////////////////////////////////////////
// Execute Robot tests
///////////////////////////////////////////////////////////////////////////////////////
            stage_archive = false
            if ( params.DO_ROBOT ) {
                try {
                    stage('System Integration Test') {

                        if (useCharmedInstaller) {
                            tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
                            sh(script: "touch ${tempdir}/hosts")
                            hostfile = "${tempdir}/hosts"
                            sh """cat << EOF > ${hostfile}
127.0.0.1           localhost
${remote.host}      prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
EOF"""
                        } else {
                            hostfile = null
                        }

                        jujuPassword = sshCommand remote: remote, command: '''
                            echo `juju gui 2>&1 | grep password | cut -d: -f2`
                        '''

                        run_robot_systest(
                            containerName,
                            params.ROBOT_TAG_NAME,
                            osmHostname,
                            prometheusHostname,
                            prometheusPort,
                            params.ROBOT_VIM,
                            params.ROBOT_PORT_MAPPING_VIM,
                            params.KUBECONFIG,
                            params.CLOUDS,
                            hostfile,
                            jujuPassword,
                            SSH_KEY,
                            params.ROBOT_PASS_THRESHOLD,
                            params.ROBOT_UNSTABLE_THRESHOLD
                        )
                    } // stage("System Integration Test")
                } finally {
                    stage('After System Integration test') {
                        if (currentBuild.result != 'FAILURE') {
                            stage_archive = keep_artifacts
                        } else {
                            println('System test failed, throwing error')
                            error = new Exception('System test failed')
                            currentBuild.result = 'FAILURE'
                            throw error
                        }
                    }
                }
            } // if ( params.DO_ROBOT )

            if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive) {
                stage('Archive') {
                    // Archive the tested repo
                    dir("${RELEASE_DIR}") {
                        ci_helper.archive(params.ARTIFACTORY_SERVER, RELEASE, GERRIT_BRANCH, 'tested')
                    }
                    if (params.DO_DOCKERPUSH) {
                        stage('Publish to Dockerhub') {
                            parallelSteps = [:]
                            for (buildStep in containerList) {
                                def module = buildStep
                                def moduleName = buildStep.toLowerCase()
                                def dockerTag = params.DOCKER_TAG
                                def moduleTag = containerName

                                parallelSteps[module] = {
                                    dir("$module") {
                                        sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
                                           opensourcemano/${moduleName}:${dockerTag}""")
                                        sh "docker push opensourcemano/${moduleName}:${dockerTag}"
                                    }
                                }
                            }
                            parallel parallelSteps
                        }

                        stage('Snap promotion') {
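                            // For each snap, look up the newest revision published to the
                            // edge and beta channels of the branch's track; if they differ,
                            // promote the edge revision to beta.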
                            snaps = ['osmclient']
                            sh 'snapcraft login --with ~/.snapcraft/config'
                            for (snap in snaps) {
                                channel = 'latest/'
                                if (BRANCH_NAME.startsWith('v')) {
                                    channel = BRANCH_NAME.substring(1) + '/'
                                } else if (BRANCH_NAME != 'master') {
                                    channel += '/' + BRANCH_NAME.replaceAll('/', '-')
                                }
                                track = channel + 'edge\\*'
                                edge_rev = sh(returnStdout: true,
                                    script: "snapcraft revisions $snap | " +
                                    "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
                                print "edge rev is $edge_rev"
                                track = channel + 'beta\\*'
                                beta_rev = sh(returnStdout: true,
                                    script: "snapcraft revisions $snap | " +
                                    "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
                                print "beta rev is $beta_rev"

                                if (edge_rev != beta_rev) {
                                    print "Promoting $edge_rev to beta in place of $beta_rev"
                                    beta_track = channel + 'beta'
                                    sh "snapcraft release $snap $edge_rev $beta_track"
                                }
                            }
                        } // stage('Snap promotion')
                    } // if (params.DO_DOCKERPUSH)
                } // stage('Archive')
            } // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
        } // dir(OSM_DEVOPS)
    } finally {
        stage('Archive Container Logs') {
            if ( ARCHIVE_LOGS_FLAG ) {
                // Archive logs
                remote = [
                    name: containerName,
                    host: IP_ADDRESS,
                    user: 'ubuntu',
                    identityFile: SSH_KEY,
                    allowAnyHosts: true,
                    logLevel: 'INFO',
                    pty: true
                ]
                println('Archiving container logs')
                archive_logs(remote)
            } // end if ( ARCHIVE_LOGS_FLAG )
        }
        stage('Cleanup') {
            if ( params.DO_INSTALL && server_id != null) {
                delete_vm = true
                if (error && params.SAVE_CONTAINER_ON_FAIL ) {
                    delete_vm = false
                }
                if (!error && params.SAVE_CONTAINER_ON_PASS ) {
                    delete_vm = false
                }

                if ( delete_vm ) {
                    println("Deleting VM: $server_id")
                    sh """#!/bin/sh -e
                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                        openstack server delete ${server_id}
                    """
                } else {
                    println("Saved VM $server_id in ETSI VIM")
                }
            }
            if ( http_server_name != null ) {
                sh "docker stop ${http_server_name} || true"
                sh "docker rm ${http_server_name} || true"
            }

            if ( devopstempdir != null ) {
                sh "rm -rf ${devopstempdir}"
            }
        }
    }
}