Fixes Bug 1993 - Substitutes NTP with Chrony
[osm/devops.git] / jenkins / ci-pipelines / ci_stage_3.groovy
/* Copyright ETSI Contributors and Others
 *
 * All Rights Reserved.
 *
 *   Licensed under the Apache License, Version 2.0 (the "License"); you may
 *   not use this file except in compliance with the License. You may obtain
 *   a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *   License for the specific language governing permissions and limitations
 *   under the License.
 */

properties([
    parameters([
        string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
        string(defaultValue: 'system', description: '', name: 'NODE'),
        string(defaultValue: '', description: '', name: 'BUILD_FROM_SOURCE'),
        string(defaultValue: 'unstable', description: '', name: 'REPO_DISTRO'),
        string(defaultValue: '', description: '', name: 'COMMIT_ID'),
        string(defaultValue: '-stage_2', description: '', name: 'UPSTREAM_SUFFIX'),
        string(defaultValue: 'pubkey.asc', description: '', name: 'REPO_KEY_NAME'),
        string(defaultValue: 'release', description: '', name: 'RELEASE'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
        string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
        string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
        string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
        string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
        string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
        booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
        booleanParam(defaultValue: true, description: '', name: 'DO_BUILD'),
        booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
        booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
        string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
        booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
        string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options',
               name: 'ROBOT_TAG_NAME'),
        string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
        string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml',
               description: 'Port mapping file for SDN assist in ETSI VIM',
               name: 'ROBOT_PORT_MAPPING_VIM'),
        string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
        string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
        string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
        string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed',
               name: 'ROBOT_PASS_THRESHOLD'),
        string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable ' +
               '(if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
    ])
])

////////////////////////////////////////////////////////////////////////////////////////
// Helper Functions
////////////////////////////////////////////////////////////////////////////////////////
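// Runs the opensourcemano/tests container against a deployed OSM instance and publishes
// the resulting Robot Framework reports through RobotPublisher. Optional inputs
// (environment file, port-mapping file, kubeconfig, clouds.yaml, hosts file, Juju
// password, SSH key) are only mounted or exported into the container when provided.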
void run_robot_systest(String tagName,
                       String testName,
                       String osmHostname,
                       String prometheusHostname,
                       Integer prometheusPort=null,
                       String envfile=null,
                       String portmappingfile=null,
                       String kubeconfig=null,
                       String clouds=null,
                       String hostfile=null,
                       String jujuPassword=null,
                       String osmRSAfile=null,
                       String passThreshold='0.0',
                       String unstableThreshold='0.0') {
    tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = ''
    if (envfile) {
        environmentFile = envfile
    } else {
        sh(script: "touch ${tempdir}/env")
        environmentFile = "${tempdir}/env"
    }
    PROMETHEUS_PORT_VAR = ''
    if (prometheusPort != null) {
        PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT=${prometheusPort}"
    }
    hostfilemount = ''
    if (hostfile) {
        hostfilemount = "-v ${hostfile}:/etc/hosts"
    }

    JUJU_PASSWORD_VAR = ''
    if (jujuPassword != null) {
        JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD=${jujuPassword}"
    }

    try {
        sh("""docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} \
           ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${environmentFile} \
           -v ${clouds}:/etc/openstack/clouds.yaml \
           -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports \
           -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} \
           -c -t ${testName}""")
    } finally {
        sh("cp ${tempdir}/* .")
        outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
        println("Present Directory is : ${outputDirectory}")
        step([
            $class : 'RobotPublisher',
            outputPath : "${outputDirectory}",
            outputFileName : '*.xml',
            disableArchiveOutput : false,
            reportFileName : 'report.html',
            logFileName : 'log.html',
            passThreshold : passThreshold,
            unstableThreshold: unstableThreshold,
            otherFiles : '*.png',
        ])
    }
}

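// Collects OSM container logs from the remote VM (per pod for the charmed installer,
// per deployment/statefulset otherwise), copies them into the workspace and archives
// them as build artifacts.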
void archive_logs(Map remote) {

    sshCommand remote: remote, command: '''mkdir -p logs'''
    if (useCharmedInstaller) {
        sshCommand remote: remote, command: '''
            for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME | awk '{print $1}'`; do
                logfile=`echo $container | cut -d- -f1`
                echo "Extracting log for $logfile"
                kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
            done
        '''
    } else {
        sshCommand remote: remote, command: '''
            for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME | awk '{print $1}'`; do
                echo "Extracting log for $deployment"
                kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 \
                > logs/$deployment.log
            done
        '''
        sshCommand remote: remote, command: '''
            for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME | awk '{print $1}'`; do
                echo "Extracting log for $statefulset"
                kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 \
                > logs/$statefulset.log
            done
        '''
    }

    sh 'rm -rf logs'
    sshCommand remote: remote, command: '''ls -al logs'''
    sshGet remote: remote, from: 'logs', into: '.', override: true
    sh 'cp logs/* .'
    archiveArtifacts artifacts: '*.log'
}

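// Parses the table output of an OpenStack CLI command and returns the value in the row
// whose first column matches the given key. Illustrative usage (the actual table
// contents depend on the OpenStack deployment):
//   get_value('id', output)        // server UUID from `openstack server create/show`
//   get_value('addresses', output) // e.g. "osm-ext=172.21.x.y"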
String get_value(String key, String output) {
    for (String line : output.split( '\n' )) {
        data = line.split( '\\|' )
        if (data.length > 1) {
            if ( data[1].trim() == key ) {
                return data[2].trim()
            }
        }
    }
}

////////////////////////////////////////////////////////////////////////////////////////
// Main Script
////////////////////////////////////////////////////////////////////////////////////////
node("${params.NODE}") {

    INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
    INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
    APT_PROXY = 'http://172.21.1.1:3142'
    SSH_KEY = '~/hive/cicd_rsa'
    ARCHIVE_LOGS_FLAG = false
    sh 'env'

    tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')

    stage('Checkout') {
        checkout scm
    }

    ci_helper = load 'jenkins/ci-pipelines/ci_helper.groovy'

    def upstreamMainJob = params.UPSTREAM_SUFFIX

    // upstream jobs always use merged artifacts
    upstreamMainJob += '-merge'
    containerNamePrefix = "osm-${tag_or_branch}"
    containerName = "${containerNamePrefix}"

    keep_artifacts = false
    if ( JOB_NAME.contains('merge') ) {
        containerName += '-merge'

        // On a merge job, we keep artifacts on smoke success
        keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
    }
    containerName += "-${BUILD_NUMBER}"

    server_id = null
    http_server_name = null
    devopstempdir = null
    useCharmedInstaller = params.INSTALLER.equalsIgnoreCase('charmed')

    try {
        builtModules = [:]
///////////////////////////////////////////////////////////////////////////////////////
// Fetch stage 2 .deb artifacts
///////////////////////////////////////////////////////////////////////////////////////
        stage('Copy Artifacts') {
            // cleanup any previous repo
            sh 'rm -rf repo'
            dir('repo') {
                packageList = []
                dir("${RELEASE}") {
                    RELEASE_DIR = sh(returnStdout: true, script: 'pwd').trim()

                    // Check if an upstream artifact based on a specific build number has been requested.
                    // This is the case of a merge build where the upstream merge build is not yet complete
                    // (it is not deemed a successful build yet). The upstream job is calling this downstream
                    // job with its build artifact.
                    def upstreamComponent = ''
                    if (params.UPSTREAM_JOB_NAME) {
                        println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
                        lock('Artifactory') {
                            step([$class: 'CopyArtifact',
                                projectName: "${params.UPSTREAM_JOB_NAME}",
                                selector: [$class: 'SpecificBuildSelector',
                                buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
                                ])

                            upstreamComponent = ci_helper.get_mdg_from_project(
                                ci_helper.get_env_value('build.env', 'GERRIT_PROJECT'))
                            def buildNumber = ci_helper.get_env_value('build.env', 'BUILD_NUMBER')
                            dir("$upstreamComponent") {
                                // the upstream job name contains a suffix with the project; this needs to be stripped off
                                project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
                                packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
                                    upstreamComponent,
                                    GERRIT_BRANCH,
                                    "${project_without_branch} :: ${GERRIT_BRANCH}",
                                    buildNumber)

                                packageList.addAll(packages)
                                println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
                            }
                        } // lock artifactory
                    }

                    parallelSteps = [:]
                    list = ['RO', 'osmclient', 'IM', 'devops', 'MON', 'N2VC', 'NBI',
                            'common', 'LCM', 'POL', 'NG-UI', 'PLA', 'tests']
                    if (upstreamComponent.length() > 0) {
                        println("Skipping upstream fetch of ${upstreamComponent}")
                        list.remove(upstreamComponent)
                    }
                    for (buildStep in list) {
                        def component = buildStep
                        parallelSteps[component] = {
                            dir("$component") {
                                println("Fetching artifact for ${component}")
                                step([$class: 'CopyArtifact',
                                       projectName: "${component}${upstreamMainJob}/${GERRIT_BRANCH}"])

                                // grab the archives from the stage_2 builds
                                // (ie. this will be the artifacts stored based on a merge)
                                packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
                                    component,
                                    GERRIT_BRANCH,
                                    "${component}${upstreamMainJob} :: ${GERRIT_BRANCH}",
                                    ci_helper.get_env_value('build.env', 'BUILD_NUMBER'))
                                packageList.addAll(packages)
                                println("Fetched ${component}: ${packages}")
                                sh 'rm -rf dists'
                            }
                        }
                    }
                    lock('Artifactory') {
                        parallel parallelSteps
                    }

///////////////////////////////////////////////////////////////////////////////////////
// Create Devops APT repository
///////////////////////////////////////////////////////////////////////////////////////
                    sh 'mkdir -p pool'
                    for (component in [ 'devops', 'IM', 'osmclient' ]) {
                        sh "ls -al ${component}/pool/"
                        sh "cp -r ${component}/pool/* pool/"
                        sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
                        sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
                        sh("""apt-ftparchive packages pool/${component} \
                           > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages""")
                        sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
                    }

                    // create and sign the release file
                    sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
                    sh("""gpg --yes -abs -u ${GPG_KEY_NAME} \
                       -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release""")

                    // copy the public key into the release folder
                    // this pulls the key from the home dir of the current user (jenkins)
                    sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
                    sh "cp ~/${REPO_KEY_NAME} ."
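                    // At this point the release directory holds a flat, signed APT repository
                    // (pool/ + dists/). Once it is served over HTTP below, it can be consumed
                    // with a sources.list entry of the form (illustrative only):
                    //   deb [arch=amd64] http://<node-ip>:<repo-port>/<release> <repo-distro> devops IM osmclient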
                }

                // start an apache server to serve up the packages
                http_server_name = "${containerName}-apache"

                pwd = sh(returnStdout: true, script: 'pwd').trim()
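                // Pick a free TCP port for the repository server by binding to port 0 and
                // letting the kernel choose one (the socket is closed again immediately)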
                repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
                               'print(s.getsockname()[1]); s.close()\');',
                               returnStdout: true).trim()
                repo_base_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
                NODE_IP_ADDRESS = sh(returnStdout: true, script:
                    "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
            }

            // Unpack the devops package into a temporary location so that the version from the upstream artifacts is used if devops was part of the patch
            osm_devops_dpkg = sh(returnStdout: true, script: 'find ./repo/release/pool/ -name osm-devops*.deb').trim()
            devopstempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
            println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
            sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
            OSM_DEVOPS = "${devopstempdir}/usr/share/osm-devops"
            // Convert URLs from stage 2 packages to arguments that can be passed to docker build
            for (remotePath in packageList) {
                packageName = remotePath[remotePath.lastIndexOf('/') + 1 .. -1]
                packageName = packageName[0 .. packageName.indexOf('_') - 1]
                builtModules[packageName] = remotePath
            }
        }

///////////////////////////////////////////////////////////////////////////////////////
// Build docker containers
///////////////////////////////////////////////////////////////////////////////////////
        dir(OSM_DEVOPS) {
            Map remote = [:]
            error = null
            if ( params.DO_BUILD ) {
                withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
                                usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
                    sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
                }
                datetime = sh(returnStdout: true, script: 'date +%Y-%m-%d:%H:%M:%S').trim()
                moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}"
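                // Illustrative example: a fetched package such as 'osm-common_<version>_all.deb'
                // results in '--build-arg OSM_COMMON_URL=<URL of that .deb>'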
                for (packageName in builtModules.keySet()) {
                    envName = packageName.replaceAll('-', '_').toUpperCase() + '_URL'
                    moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
                }
                dir('docker') {
                    stage('Build') {
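                        // Every directory under docker/ that contains a Dockerfile is treated as a
                        // module image; each is built, tagged and pushed to the internal registry in parallel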
                        containerList = sh(returnStdout: true, script:
                            "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
                        containerList = Arrays.asList(containerList.split('\n'))
                        print(containerList)
                        parallelSteps = [:]
                        for (buildStep in containerList) {
                            def module = buildStep
                            def moduleName = buildStep.toLowerCase()
                            def moduleTag = containerName
                            parallelSteps[module] = {
                                dir("$module") {
                                    sh("""docker build --build-arg APT_PROXY=${APT_PROXY} \
                                    -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} .""")
                                    println("Tagging ${moduleName}:${moduleTag}")
                                    sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
                                    ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
                                    sh("""docker push \
                                    ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
                                }
                            }
                        }
                        parallel parallelSteps
                    }
                }
            } // if (params.DO_BUILD)

            if (params.DO_INSTALL) {
///////////////////////////////////////////////////////////////////////////////////////
// Launch VM
///////////////////////////////////////////////////////////////////////////////////////
                stage('Spawn Remote VM') {
                    println('Launching new VM')
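                    // OS_* credentials (except OS_CLOUD) are exported from robot-systest.cfg
                    // before every openstack CLI call in this stage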
                    output = sh(returnStdout: true, script: """#!/bin/sh -e
                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                        openstack server create --flavor osm.sanity \
                                                --image ${OPENSTACK_BASE_IMAGE} \
                                                --key-name CICD \
                                                --property build_url="${BUILD_URL}" \
                                                --nic net-id=osm-ext \
                                                ${containerName}
                    """).trim()

                    server_id = get_value('id', output)

                    if (server_id == null) {
                        println('VM launch output: ')
                        println(output)
                        throw new Exception('VM Launch failed')
                    }
                    println("Target VM is ${server_id}, waiting for IP address to be assigned")

                    IP_ADDRESS = ''

                    while (IP_ADDRESS == '') {
                        output = sh(returnStdout: true, script: """#!/bin/sh -e
                            for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                            openstack server show ${server_id}
                        """).trim()
                        IP_ADDRESS = get_value('addresses', output)
                    }
                    IP_ADDRESS = IP_ADDRESS.split('=')[1]
                    println("Waiting for VM at ${IP_ADDRESS} to be reachable")

                    alive = false
                    timeout(time: 1, unit: 'MINUTES') {
                        while (!alive) {
                            output = sh(
                                returnStatus: true,
                                script: "ssh -T -i ${SSH_KEY} " +
                                    "-o StrictHostKeyChecking=no " +
                                    "-o UserKnownHostsFile=/dev/null " +
                                    "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
                            alive = (output == 0)
                        }
                    }
                    println('VM is ready and accepting ssh connections')
                } // stage("Spawn Remote VM")

///////////////////////////////////////////////////////////////////////////////////////
// Checks before installation
///////////////////////////////////////////////////////////////////////////////////////
                stage('Checks before installation') {
                    remote = [
                        name: containerName,
                        host: IP_ADDRESS,
                        user: 'ubuntu',
                        identityFile: SSH_KEY,
                        allowAnyHosts: true,
                        logLevel: 'INFO',
                        pty: true
                    ]

                    // Force time sync to avoid clock drift and invalid certificates
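                    // chrony replaces ntp here (Bug 1993): stop the service, step the clock once
                    // with chronyd in one-shot mode, then restart the service to keep it in sync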
                    sshCommand remote: remote, command: 'sudo apt-get update'
                    sshCommand remote: remote, command: 'sudo apt-get install -y chrony'
                    sshCommand remote: remote, command: 'sudo service chrony stop'
                    sshCommand remote: remote, command: 'sudo chronyd -vq'
                    sshCommand remote: remote, command: 'sudo service chrony start'

                } // stage("Checks before installation")
///////////////////////////////////////////////////////////////////////////////////////
// Installation
///////////////////////////////////////////////////////////////////////////////////////
                stage('Install') {
                    commit_id = ''
                    repo_distro = ''
                    repo_key_name = ''
                    release = ''

                    if (params.COMMIT_ID) {
                        commit_id = "-b ${params.COMMIT_ID}"
                    }
                    if (params.REPO_DISTRO) {
                        repo_distro = "-r ${params.REPO_DISTRO}"
                    }
                    if (params.REPO_KEY_NAME) {
                        repo_key_name = "-k ${params.REPO_KEY_NAME}"
                    }
                    if (params.RELEASE) {
                        release = "-R ${params.RELEASE}"
                    }
                    if (params.REPOSITORY_BASE) {
                        repo_base_url = "-u ${params.REPOSITORY_BASE}"
                    } else {
                        repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
                    }

                    remote = [
                        name: containerName,
                        host: IP_ADDRESS,
                        user: 'ubuntu',
                        identityFile: SSH_KEY,
                        allowAnyHosts: true,
                        logLevel: 'INFO',
                        pty: true
                    ]

                    sshCommand remote: remote, command: '''
                        wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh
                        chmod +x ./install_osm.sh
                        sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
                    '''

                    Map gitlabCredentialsMap = [$class: 'UsernamePasswordMultiBinding',
                                                credentialsId: 'gitlab-registry',
                                                usernameVariable: 'USERNAME',
                                                passwordVariable: 'PASSWORD']
                    if (useCharmedInstaller) {
                        // Use local proxy for docker hub
                        sshCommand remote: remote, command: '''
                            sudo snap install microk8s --classic --channel=1.19/stable
                            sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" \
                            /var/snap/microk8s/current/args/containerd-template.toml
                            sudo systemctl restart snap.microk8s.daemon-containerd.service
                            sudo snap alias microk8s.kubectl kubectl
                        '''

                        withCredentials([gitlabCredentialsMap]) {
                            sshCommand remote: remote, command: """
                                ./install_osm.sh -y \
                                    ${repo_base_url} \
                                    ${repo_key_name} \
                                    ${release} -r unstable \
                                    --charmed \
                                    --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
                                    --tag ${containerName}
                            """
                        }
                        prometheusHostname = "prometheus.${IP_ADDRESS}.nip.io"
                        prometheusPort = 80
                        osmHostname = "nbi.${IP_ADDRESS}.nip.io:443"
                    } else {
                        // Run -k8s installer here specifying internal docker registry and docker proxy
                        withCredentials([gitlabCredentialsMap]) {
                            sshCommand remote: remote, command: """
                                ./install_osm.sh -y \
                                    ${repo_base_url} \
                                    ${repo_key_name} \
                                    ${release} -r unstable \
                                    -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
                                    -p ${INTERNAL_DOCKER_PROXY} \
                                    -t ${containerName}
                            """
                        }
                        prometheusHostname = IP_ADDRESS
                        prometheusPort = 9091
                        osmHostname = IP_ADDRESS
                    }
                } // stage("Install")
///////////////////////////////////////////////////////////////////////////////////////
// Health check of installed OSM in remote vm
///////////////////////////////////////////////////////////////////////////////////////
                stage('OSM Health') {
                    // if this point is reached, logs should be archived
                    ARCHIVE_LOGS_FLAG = true
                    stackName = 'osm'
                    sshCommand remote: remote, command: """
                        /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
                    """
                } // stage("OSM Health")
            } // if ( params.DO_INSTALL )


///////////////////////////////////////////////////////////////////////////////////////
// Execute Robot tests
///////////////////////////////////////////////////////////////////////////////////////
            stage_archive = false
            if ( params.DO_ROBOT ) {
                try {
                    stage('System Integration Test') {
                        if (useCharmedInstaller) {
                            tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
                            sh(script: "touch ${tempdir}/hosts")
                            hostfile = "${tempdir}/hosts"
                            sh """cat << EOF > ${hostfile}
127.0.0.1           localhost
${remote.host}      prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
EOF"""
                        } else {
                            hostfile = null
                        }

                        jujuPassword = sshCommand remote: remote, command: '''
                            echo `juju gui 2>&1 | grep password | cut -d: -f2`
                        '''

                        run_robot_systest(
                            containerName,
                            params.ROBOT_TAG_NAME,
                            osmHostname,
                            prometheusHostname,
                            prometheusPort,
                            params.ROBOT_VIM,
                            params.ROBOT_PORT_MAPPING_VIM,
                            params.KUBECONFIG,
                            params.CLOUDS,
                            hostfile,
                            jujuPassword,
                            SSH_KEY,
                            params.ROBOT_PASS_THRESHOLD,
                            params.ROBOT_UNSTABLE_THRESHOLD
                        )
                    } // stage("System Integration Test")
                } finally {
                    stage('After System Integration test') {
                        if (currentBuild.result != 'FAILURE') {
                            stage_archive = keep_artifacts
                        } else {
                            println('System test failed, throwing error')
                            error = new Exception('System test failed')
                            currentBuild.result = 'FAILURE'
                            throw error
                        }
                    }
                }
            } // if ( params.DO_ROBOT )

            if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive) {
                stage('Archive') {
                    sh "echo ${containerName} > build_version.txt"
                    archiveArtifacts artifacts: 'build_version.txt', fingerprint: true

                    // Archive the tested repo
                    dir("${RELEASE_DIR}") {
                        ci_helper.archive(params.ARTIFACTORY_SERVER, RELEASE, GERRIT_BRANCH, 'tested')
                    }
                    if (params.DO_DOCKERPUSH) {
                        stage('Publish to Dockerhub') {
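                            // Retag the per-build images (opensourcemano/<module>:<containerName>)
                            // with params.DOCKER_TAG and push them to Docker Hub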
                            parallelSteps = [:]
                            for (buildStep in containerList) {
                                def module = buildStep
                                def moduleName = buildStep.toLowerCase()
                                def dockerTag = params.DOCKER_TAG
                                def moduleTag = containerName

                                parallelSteps[module] = {
                                    dir("$module") {
                                        sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
                                           opensourcemano/${moduleName}:${dockerTag}""")
                                        sh "docker push opensourcemano/${moduleName}:${dockerTag}"
                                    }
                                }
                            }
                            parallel parallelSteps
                        }

                        stage('Snap promotion') {
                            snaps = ['osmclient']
                            sh 'snapcraft login --with ~/.snapcraft/config'
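                            // For each snap, look up the newest revision published to the edge and
                            // beta channels of the branch track; if they differ, promote the edge
                            // revision to beta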
                            for (snap in snaps) {
                                channel = 'latest/'
                                if (BRANCH_NAME.startsWith('v')) {
                                    channel = BRANCH_NAME.substring(1) + '/'
                                } else if (BRANCH_NAME != 'master') {
                                    channel += '/' + BRANCH_NAME.replaceAll('/', '-')
                                }
                                track = channel + 'edge\\*'
                                edge_rev = sh(returnStdout: true,
                                    script: "snapcraft revisions $snap | " +
                                    "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
                                print "edge rev is $edge_rev"
                                track = channel + 'beta\\*'
                                beta_rev = sh(returnStdout: true,
                                    script: "snapcraft revisions $snap | " +
                                    "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
                                print "beta rev is $beta_rev"

                                if (edge_rev != beta_rev) {
                                    print "Promoting $edge_rev to beta in place of $beta_rev"
                                    beta_track = channel + 'beta'
                                    sh "snapcraft release $snap $edge_rev $beta_track"
                                }
                            }
                        } // stage('Snap promotion')
                    } // if (params.DO_DOCKERPUSH)
                } // stage('Archive')
            } // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
        } // dir(OSM_DEVOPS)
    } finally {
        stage('Archive Container Logs') {
            if ( ARCHIVE_LOGS_FLAG ) {
                // Archive logs
                remote = [
                    name: containerName,
                    host: IP_ADDRESS,
                    user: 'ubuntu',
                    identityFile: SSH_KEY,
                    allowAnyHosts: true,
                    logLevel: 'INFO',
                    pty: true
                ]
                println('Archiving container logs')
                archive_logs(remote)
            } // end if ( ARCHIVE_LOGS_FLAG )
        }
        stage('Cleanup') {
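            // The VM is deleted unless SAVE_CONTAINER_ON_FAIL / SAVE_CONTAINER_ON_PASS requests
            // keeping it; the local repository container and the temporary devops unpack
            // directory are always removed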
            if ( params.DO_INSTALL && server_id != null) {
                delete_vm = true
                if (error && params.SAVE_CONTAINER_ON_FAIL ) {
                    delete_vm = false
                }
                if (!error && params.SAVE_CONTAINER_ON_PASS ) {
                    delete_vm = false
                }

                if ( delete_vm ) {
                    println("Deleting VM: $server_id")
                    sh """#!/bin/sh -e
                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                        openstack server delete ${server_id}
                    """
                } else {
                    println("Saved VM $server_id in ETSI VIM")
                }
            }
            if ( http_server_name != null ) {
                sh "docker stop ${http_server_name} || true"
                sh "docker rm ${http_server_name} || true"
            }

            if ( devopstempdir != null ) {
                sh "rm -rf ${devopstempdir}"
            }
        }
    }
}