2c2e008b79787a3185fad5101a80abd9dd874543
[osm/devops.git] / jenkins / ci-pipelines / ci_stage_3.groovy
1 /* Copyright ETSI Contributors and Others
2  *
3  * All Rights Reserved.
4  *
5  *   Licensed under the Apache License, Version 2.0 (the "License"); you may
6  *   not use this file except in compliance with the License. You may obtain
7  *   a copy of the License at
8  *
9  *        http://www.apache.org/licenses/LICENSE-2.0
10  *
11  *   Unless required by applicable law or agreed to in writing, software
12  *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13  *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14  *   License for the specific language governing permissions and limitations
15  *   under the License.
16  */
17
// Job parameters. Defaults target the ETSI OSM CI environment; most are
// forwarded from the upstream stage-2 job or overridden per-branch.
properties([
    parameters([
        // --- Source / upstream job selection ---
        string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
        string(defaultValue: 'system', description: '', name: 'NODE'),
        string(defaultValue: '', description: '', name: 'BUILD_FROM_SOURCE'),
        string(defaultValue: 'unstable', description: '', name: 'REPO_DISTRO'),
        string(defaultValue: '', description: '', name: 'COMMIT_ID'),
        string(defaultValue: '-stage_2', description: '', name: 'UPSTREAM_SUFFIX'),
        string(defaultValue: 'pubkey.asc', description: '', name: 'REPO_KEY_NAME'),
        string(defaultValue: 'release', description: '', name: 'RELEASE'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
        // --- Signing / artifact storage ---
        string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
        string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
        string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
        string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
        string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
        // --- Container / artifact retention toggles ---
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
        booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
        // --- Pipeline stage switches ---
        booleanParam(defaultValue: true, description: '',  name: 'DO_BUILD'),
        booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
        booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
        string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
        // --- Robot system-test configuration ---
        booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
        string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options',
               name: 'ROBOT_TAG_NAME'),
        string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
        string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml',
               description: 'Port mapping file for SDN assist in ETSI VIM',
               name: 'ROBOT_PORT_MAPPING_VIM'),
        string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
        string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
        string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
        // Result thresholds forwarded to RobotPublisher (percent, as strings)
        string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed',
               name: 'ROBOT_PASS_THRESHOLD'),
        string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable ' +
               '(if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
    ])
])
59
60 ////////////////////////////////////////////////////////////////////////////////////////
61 // Helper Functions
62 ////////////////////////////////////////////////////////////////////////////////////////
void run_robot_systest(String tagName,
                       String testName,
                       String osmHostname,
                       String prometheusHostname,
                       Integer prometheusPort=null,
                       String envfile=null,
                       String portmappingfile=null,
                       String kubeconfig=null,
                       String clouds=null,
                       String hostfile=null,
                       String jujuPassword=null,
                       String osmRSAfile=null,
                       String passThreshold='0.0',
                       String unstableThreshold='0.0') {
    // Run the containerized Robot Framework system tests (opensourcemano/tests)
    // against the given OSM/Prometheus endpoints, then publish the results via
    // the Jenkins Robot plugin (RobotPublisher) even when the run fails.
    //
    // tagName:           docker tag of the opensourcemano/tests image to run
    // testName:          Robot tag selecting the test set (passed as -t)
    // prometheusPort:    exported as PROMETHEUS_PORT inside the container when non-null
    // envfile:           optional --env-file for docker run; an empty temp file
    //                    is substituted when not provided (docker requires the file to exist)
    // portmappingfile:   mounted at /root/port-mapping.yaml
    // kubeconfig/clouds: mounted at /root/.kube/config and /etc/openstack/clouds.yaml
    // hostfile:          optional file mounted over the container's /etc/hosts
    // jujuPassword:      exported as JUJU_PASSWORD when non-null
    // passThreshold/unstableThreshold: forwarded to RobotPublisher
    //
    // Fix vs original: helper variables are declared as locals (they previously
    // leaked into the global script binding), and the report copies in `finally`
    // are best-effort so a missing report cannot mask the original test failure.
    String tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = ''
    if (envfile) {
        environmentFile = envfile
    } else {
        // docker run --env-file fails on a missing file; create an empty one
        sh(script: "touch ${tempdir}/env")
        environmentFile = "${tempdir}/env"
    }
    String prometheusPortVar = ''
    if (prometheusPort != null) {
        prometheusPortVar = "--env PROMETHEUS_PORT=${prometheusPort}"
    }
    String hostfilemount = ''
    if (hostfile) {
        hostfilemount = "-v ${hostfile}:/etc/hosts"
    }

    String jujuPasswordVar = ''
    if (jujuPassword != null) {
        jujuPasswordVar = "--env JUJU_PASSWORD=${jujuPassword}"
    }

    try {
        sh("""docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} \
           ${prometheusPortVar} ${jujuPasswordVar} --env-file ${environmentFile} \
           -v ${clouds}:/etc/openstack/clouds.yaml \
           -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports \
           -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} \
           -c -t ${testName}""")
    } finally {
        // Best-effort copy of reports: if the test container died before
        // writing any *.xml/*.html, a failing cp here must not throw and
        // mask the exception from the docker run above.
        sh(returnStatus: true, script: "cp ${tempdir}/*.xml .")
        sh(returnStatus: true, script: "cp ${tempdir}/*.html .")
        String outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
        println("Present Directory is : ${outputDirectory}")
        step([
            $class : 'RobotPublisher',
            outputPath : "${outputDirectory}",
            outputFileName : '*.xml',
            disableArchiveOutput : false,
            reportFileName : 'report.html',
            logFileName : 'log.html',
            passThreshold : passThreshold,
            unstableThreshold: unstableThreshold,
            otherFiles : '*.png',
        ])
    }
}
124
void archive_logs(Map remote) {
    // Collect OSM logs from the remote installation VM over SSH, download the
    // resulting `logs/` tree into the workspace, and archive every *.log file
    // as a Jenkins build artifact.
    // Relies on `useCharmedInstaller`, a script-level binding set by the main
    // script from params.INSTALLER.

    sshCommand remote: remote, command: '''mkdir -p logs/dags'''
    if (useCharmedInstaller) {
        // Charmed installer: one log file per pod, named after the pod-name
        // prefix (text before the first '-').
        sshCommand remote: remote, command: '''
            for pod in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                logfile=`echo $pod | cut -d- -f1`
                echo "Extracting log for $logfile"
                kubectl logs -n osm $pod --timestamps=true 2>&1 > logs/$logfile.log
            done
        '''
    } else {
        // Kubernetes installer: capture logs per deployment and per statefulset.
        // NOTE(review): `2>&1 > file` redirects only stdout into the file
        // (stderr still goes to the terminal) — confirm whether capturing
        // stderr was intended; `> file 2>&1` would capture both.
        sshCommand remote: remote, command: '''
            for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $deployment"
                kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 \
                > logs/$deployment.log
            done
        '''
        sshCommand remote: remote, command: '''
            for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $statefulset"
                kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 \
                > logs/$statefulset.log
            done
        '''
        // Copy the Airflow scheduler's per-DAG logs out of the scheduler pod
        // (first pod matching 'airflow-scheduler') into logs/dags.
        sshCommand remote: remote, command: '''
            schedulerPod="$(kubectl get pods -n osm | grep airflow-scheduler| awk '{print $1; exit}')"; \
            echo "Extracting logs from Airflow DAGs from pod ${schedulerPod}"; \
            kubectl cp -n osm ${schedulerPod}:/opt/airflow/logs/scheduler/latest/dags logs/dags -c scheduler
        '''
    }

    // Remove any stale local logs/ directory before pulling the remote one.
    sh 'rm -rf logs'
    sshCommand remote: remote, command: '''ls -al logs'''
    sshGet remote: remote, from: 'logs', into: '.', override: true
    sh 'cp logs/*.log logs/dags/*.log .'
    archiveArtifacts artifacts: '*.log'
}
164
String get_value(String key, String output) {
    // Parse an OpenStack-CLI-style table ("| field | value |" rows) and return
    // the trimmed value column for the row whose field column equals `key`.
    // Returns null when the key is not present.
    //
    // Fix vs original: the guard was `data.length > 1` while `data[2]` was
    // read, so a row that splits into exactly two fields (e.g. "| id |")
    // threw ArrayIndexOutOfBoundsException. Splitting "| field | value |"
    // yields ["", " field ", " value "], so data[2] is only safe when the
    // array has more than two elements.
    for (String line : output.split('\n')) {
        String[] data = line.split('\\|')
        if (data.length > 2 && data[1].trim() == key) {
            return data[2].trim()
        }
    }
    return null
}
175
176 ////////////////////////////////////////////////////////////////////////////////////////
177 // Main Script
178 ////////////////////////////////////////////////////////////////////////////////////////
179 node("${params.NODE}") {
180
181     INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
182     INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
183     APT_PROXY = 'http://172.21.1.1:3142'
184     SSH_KEY = '~/hive/cicd_rsa'
185     ARCHIVE_LOGS_FLAG = false
186     sh 'env'
187
188     tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')
189
190     stage('Checkout') {
191         checkout scm
192     }
193
194     ci_helper = load 'jenkins/ci-pipelines/ci_helper.groovy'
195
196     def upstreamMainJob = params.UPSTREAM_SUFFIX
197
198     // upstream jobs always use merged artifacts
199     upstreamMainJob += '-merge'
200     containerNamePrefix = "osm-${tag_or_branch}"
201     containerName = "${containerNamePrefix}"
202
203     keep_artifacts = false
204     if ( JOB_NAME.contains('merge') ) {
205         containerName += '-merge'
206
207         // On a merge job, we keep artifacts on smoke success
208         keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
209     }
210     containerName += "-${BUILD_NUMBER}"
211
212     server_id = null
213     http_server_name = null
214     devopstempdir = null
215     useCharmedInstaller = params.INSTALLER.equalsIgnoreCase('charmed')
216
217     try {
218         builtModules = [:]
219 ///////////////////////////////////////////////////////////////////////////////////////
220 // Fetch stage 2 .deb artifacts
221 ///////////////////////////////////////////////////////////////////////////////////////
222         stage('Copy Artifacts') {
223             // cleanup any previous repo
224             sh "tree -fD repo || exit 0"
225             sh 'rm -rvf repo'
226             sh "tree -fD repo && lsof repo || exit 0"
227             dir('repo') {
228                 packageList = []
229                 dir("${RELEASE}") {
230                     RELEASE_DIR = sh(returnStdout:true,  script: 'pwd').trim()
231
232                     // check if an upstream artifact based on specific build number has been requested
233                     // This is the case of a merge build and the upstream merge build is not yet complete
234                     // (it is not deemed a successful build yet). The upstream job is calling this downstream
235                     // job (with the its build artifact)
236                     def upstreamComponent = ''
237                     if (params.UPSTREAM_JOB_NAME) {
238                         println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
239                         lock('Artifactory') {
240                             step ([$class: 'CopyArtifact',
241                                 projectName: "${params.UPSTREAM_JOB_NAME}",
242                                 selector: [$class: 'SpecificBuildSelector',
243                                 buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
244                                 ])
245
246                             upstreamComponent = ci_helper.get_mdg_from_project(
247                                 ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
248                             def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
249                             dir("$upstreamComponent") {
250                                 // the upstream job name contains suffix with the project. Need this stripped off
251                                 project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
252                                 packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
253                                     upstreamComponent,
254                                     GERRIT_BRANCH,
255                                     "${project_without_branch} :: ${GERRIT_BRANCH}",
256                                     buildNumber)
257
258                                 packageList.addAll(packages)
259                                 println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
260                             }
261                         } // lock artifactory
262                     }
263
264                     parallelSteps = [:]
265                     list = ['RO', 'osmclient', 'IM', 'devops', 'MON', 'N2VC', 'NBI',
266                             'common', 'LCM', 'POL', 'NG-UI', 'NG-SA', 'PLA', 'tests']
267                     if (upstreamComponent.length() > 0) {
268                         println("Skipping upstream fetch of ${upstreamComponent}")
269                         list.remove(upstreamComponent)
270                     }
271                     for (buildStep in list) {
272                         def component = buildStep
273                         parallelSteps[component] = {
274                             dir("$component") {
275                                 println("Fetching artifact for ${component}")
276                                 step([$class: 'CopyArtifact',
277                                        projectName: "${component}${upstreamMainJob}/${GERRIT_BRANCH}"])
278
279                                 // grab the archives from the stage_2 builds
280                                 // (ie. this will be the artifacts stored based on a merge)
281                                 packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
282                                     component,
283                                     GERRIT_BRANCH,
284                                     "${component}${upstreamMainJob} :: ${GERRIT_BRANCH}",
285                                     ci_helper.get_env_value('build.env', 'BUILD_NUMBER'))
286                                 packageList.addAll(packages)
287                                 println("Fetched ${component}: ${packages}")
288                                 sh 'rm -rf dists'
289                             }
290                         }
291                     }
292                     lock('Artifactory') {
293                         parallel parallelSteps
294                     }
295
296 ///////////////////////////////////////////////////////////////////////////////////////
297 // Create Devops APT repository
298 ///////////////////////////////////////////////////////////////////////////////////////
299                     sh 'mkdir -p pool'
300                     for (component in [ 'devops', 'IM', 'osmclient' ]) {
301                         sh "ls -al ${component}/pool/"
302                         sh "cp -r ${component}/pool/* pool/"
303                         sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
304                         sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
305                         sh("""apt-ftparchive packages pool/${component} \
306                            > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages""")
307                         sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
308                     }
309
310                     // create and sign the release file
311                     sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
312                     sh("""gpg --yes -abs -u ${GPG_KEY_NAME} \
313                        -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release""")
314
315                     // copy the public key into the release folder
316                     // this pulls the key from the home dir of the current user (jenkins)
317                     sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
318                     sh "cp ~/${REPO_KEY_NAME} ."
319                 }
320
321                 // start an apache server to serve up the packages
322                 http_server_name = "${containerName}-apache"
323
324                 pwd = sh(returnStdout:true,  script: 'pwd').trim()
325                 repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
326                                'print(s.getsockname()[1]); s.close()\');',
327                                returnStdout: true).trim()
328                 internal_docker_http_server_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
329                 NODE_IP_ADDRESS = sh(returnStdout: true, script:
330                     "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
331                 ci_helper.check_status_http_server(NODE_IP_ADDRESS, repo_port)
332             }
333
334             sh "tree -fD repo"
335
336             // Unpack devops package into temporary location so that we use it from upstream if it was part of a patch
337             osm_devops_dpkg = sh(returnStdout: true, script: 'find ./repo/release/pool/ -name osm-devops*.deb').trim()
338             devopstempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
339             println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
340             sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
341             OSM_DEVOPS = "${devopstempdir}/usr/share/osm-devops"
342             // Convert URLs from stage 2 packages to arguments that can be passed to docker build
343             for (remotePath in packageList) {
344                 packageName = remotePath[remotePath.lastIndexOf('/') + 1 .. -1]
345                 packageName = packageName[0 .. packageName.indexOf('_') - 1]
346                 builtModules[packageName] = remotePath
347             }
348         }
349
350 ///////////////////////////////////////////////////////////////////////////////////////
351 // Build docker containers
352 ///////////////////////////////////////////////////////////////////////////////////////
353         dir(OSM_DEVOPS) {
354             Map remote = [:]
355             error = null
356             if ( params.DO_BUILD ) {
357                 withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
358                                 usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
359                     sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
360                 }
361                 datetime = sh(returnStdout: true, script: 'date +%Y-%m-%d:%H:%M:%S').trim()
362                 moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}"
363                 for (packageName in builtModules.keySet()) {
364                     envName = packageName.replaceAll('-', '_').toUpperCase() + '_URL'
365                     moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
366                 }
367                 dir('docker') {
368                     stage('Build') {
369                         containerList = sh(returnStdout: true, script:
370                             "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
371                         containerList = Arrays.asList(containerList.split('\n'))
372                         print(containerList)
373                         parallelSteps = [:]
374                         for (buildStep in containerList) {
375                             def module = buildStep
376                             def moduleName = buildStep.toLowerCase()
377                             def moduleTag = containerName
378                             parallelSteps[module] = {
379                                 dir("$module") {
380                                     sh("""docker build --build-arg APT_PROXY=${APT_PROXY} \
381                                     -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} .""")
382                                     println("Tagging ${moduleName}:${moduleTag}")
383                                     sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
384                                     ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
385                                     sh("""docker push \
386                                     ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
387                                 }
388                             }
389                         }
390                         parallel parallelSteps
391                     }
392                 }
393             } // if (params.DO_BUILD)
394
395             if (params.DO_INSTALL) {
396 ///////////////////////////////////////////////////////////////////////////////////////
397 // Launch VM
398 ///////////////////////////////////////////////////////////////////////////////////////
399                 stage('Spawn Remote VM') {
400                     println('Launching new VM')
401                     output = sh(returnStdout: true, script: """#!/bin/sh -e
402                         for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
403                         openstack server create --flavor osm.sanity \
404                                                 --image ${OPENSTACK_BASE_IMAGE} \
405                                                 --key-name CICD \
406                                                 --property build_url="${BUILD_URL}" \
407                                                 --nic net-id=osm-ext \
408                                                 ${containerName}
409                     """).trim()
410
411                     server_id = get_value('id', output)
412
413                     if (server_id == null) {
414                         println('VM launch output: ')
415                         println(output)
416                         throw new Exception('VM Launch failed')
417                     }
418                     println("Target VM is ${server_id}, waiting for IP address to be assigned")
419
420                     IP_ADDRESS = ''
421
422                     while (IP_ADDRESS == '') {
423                         output = sh(returnStdout: true, script: """#!/bin/sh -e
424                             for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
425                             openstack server show ${server_id}
426                         """).trim()
427                         IP_ADDRESS = get_value('addresses', output)
428                     }
429                     IP_ADDRESS = IP_ADDRESS.split('=')[1]
430                     println("Waiting for VM at ${IP_ADDRESS} to be reachable")
431
432                     alive = false
433                     timeout(time: 1, unit: 'MINUTES') {
434                         while (!alive) {
435                             output = sh(
436                                 returnStatus: true,
437                                 script: "ssh -T -i ${SSH_KEY} " +
438                                     "-o StrictHostKeyChecking=no " +
439                                     "-o UserKnownHostsFile=/dev/null " +
440                                     "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
441                             alive = (output == 0)
442                         }
443                     }
444                     println('VM is ready and accepting ssh connections')
445                 } // stage("Spawn Remote VM")
446
447 ///////////////////////////////////////////////////////////////////////////////////////
448 // Checks before installation
449 ///////////////////////////////////////////////////////////////////////////////////////
450                 stage('Checks before installation') {
451                     remote = [
452                         name: containerName,
453                         host: IP_ADDRESS,
454                         user: 'ubuntu',
455                         identityFile: SSH_KEY,
456                         allowAnyHosts: true,
457                         logLevel: 'INFO',
458                         pty: true
459                     ]
460
461                     // Ensure the VM is ready
462                     sshCommand remote: remote, command: 'cloud-init status --wait'
463                     // Force time sync to avoid clock drift and invalid certificates
464                     sshCommand remote: remote, command: 'sudo apt-get -y update'
465                     sshCommand remote: remote, command: 'sudo apt-get -y install chrony'
466                     sshCommand remote: remote, command: 'sudo service chrony stop'
467                     sshCommand remote: remote, command: 'sudo chronyd -vq'
468                     sshCommand remote: remote, command: 'sudo service chrony start'
469
470                  } // stage("Checks before installation")
471 ///////////////////////////////////////////////////////////////////////////////////////
472 // Installation
473 ///////////////////////////////////////////////////////////////////////////////////////
474                 stage('Install') {
475                     commit_id = ''
476                     repo_distro = ''
477                     repo_key_name = ''
478                     release = ''
479
480                     if (params.COMMIT_ID) {
481                         commit_id = "-b ${params.COMMIT_ID}"
482                     }
483                     if (params.REPO_DISTRO) {
484                         repo_distro = "-r ${params.REPO_DISTRO}"
485                     }
486                     if (params.REPO_KEY_NAME) {
487                         repo_key_name = "-k ${params.REPO_KEY_NAME}"
488                     }
489                     if (params.RELEASE) {
490                         release = "-R ${params.RELEASE}"
491                     }
492                     if (params.REPOSITORY_BASE) {
493                         repo_base_url = "-u ${params.REPOSITORY_BASE}"
494                     } else {
495                         repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
496                     }
497
498                     remote = [
499                         name: containerName,
500                         host: IP_ADDRESS,
501                         user: 'ubuntu',
502                         identityFile: SSH_KEY,
503                         allowAnyHosts: true,
504                         logLevel: 'INFO',
505                         pty: true
506                     ]
507
508                     sshCommand remote: remote, command: '''
509                         wget https://osm-download.etsi.org/ftp/osm-13.0-thirteen/install_osm.sh
510                         chmod +x ./install_osm.sh
511                         sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
512                     '''
513
514                     Map gitlabCredentialsMap = [$class: 'UsernamePasswordMultiBinding',
515                                                 credentialsId: 'gitlab-registry',
516                                                 usernameVariable: 'USERNAME',
517                                                 passwordVariable: 'PASSWORD']
518                     if (useCharmedInstaller) {
519                         // Use local proxy for docker hub
520                         sshCommand remote: remote, command: '''
521                             sudo snap install microk8s --classic --channel=1.19/stable
522                             sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" \
523                             /var/snap/microk8s/current/args/containerd-template.toml
524                             sudo systemctl restart snap.microk8s.daemon-containerd.service
525                             sudo snap alias microk8s.kubectl kubectl
526                         '''
527
528                         withCredentials([gitlabCredentialsMap]) {
529                             sshCommand remote: remote, command: """
530                                 ./install_osm.sh -y \
531                                     ${repo_base_url} \
532                                     ${repo_key_name} \
533                                     ${release} -r unstable \
534                                     --charmed  \
535                                     --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
536                                     --tag ${containerName}
537                             """
538                         }
539                         prometheusHostname = "prometheus.${IP_ADDRESS}.nip.io"
540                         prometheusPort = 80
541                         osmHostname = "nbi.${IP_ADDRESS}.nip.io:443"
542                     } else {
543                         // Run -k8s installer here specifying internal docker registry and docker proxy
544                         withCredentials([gitlabCredentialsMap]) {
545                             sshCommand remote: remote, command: """
546                                 ./install_osm.sh -y \
547                                     ${repo_base_url} \
548                                     ${repo_key_name} \
549                                     ${release} -r unstable \
550                                     -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
551                                     -p ${INTERNAL_DOCKER_PROXY} \
552                                     -t ${containerName}
553                             """
554                         }
555                         prometheusHostname = IP_ADDRESS
556                         prometheusPort = 9091
557                         osmHostname = IP_ADDRESS
558                     }
559                 } // stage("Install")
560 ///////////////////////////////////////////////////////////////////////////////////////
561 // Health check of installed OSM in remote vm
562 ///////////////////////////////////////////////////////////////////////////////////////
563                 stage('OSM Health') {
564                     // if this point is reached, logs should be archived
565                     ARCHIVE_LOGS_FLAG = true
566                     stackName = 'osm'
567                     sshCommand remote: remote, command: """
568                         /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
569                     """
570                 } // stage("OSM Health")
571             } // if ( params.DO_INSTALL )
572
573
574 ///////////////////////////////////////////////////////////////////////////////////////
575 // Execute Robot tests
576 ///////////////////////////////////////////////////////////////////////////////////////
            // Default: do not archive artifacts. Set to keep_artifacts only after a
            // non-failing robot run (see the finally block below); the archive stage
            // can still be forced via params.SAVE_ARTIFACTS_OVERRIDE.
            stage_archive = false
            if ( params.DO_ROBOT ) {
                try {
                    stage('System Integration Test') {
                        // The charmed installer exposes services via nip.io hostnames
                        // (see the osmHostname assignment in the install stage), so
                        // build a hosts file mapping those names to the remote host.
                        if (useCharmedInstaller) {
                            tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
                            sh(script: "touch ${tempdir}/hosts")
                            hostfile = "${tempdir}/hosts"
                            // Heredoc body must stay at column 0.
                            sh """cat << EOF > ${hostfile}
127.0.0.1           localhost
${remote.host}      prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
EOF"""
                        } else {
                            hostfile = null
                        }

                        // Scrape the Juju password from the remote host's `juju gui`
                        // output; passed on to the robot test suite below.
                        jujuPassword = sshCommand remote: remote, command: '''
                            echo `juju gui 2>&1 | grep password | cut -d: -f2`
                        '''

                        run_robot_systest(
                            containerName,
                            params.ROBOT_TAG_NAME,
                            osmHostname,
                            prometheusHostname,
                            prometheusPort,
                            params.ROBOT_VIM,
                            params.ROBOT_PORT_MAPPING_VIM,
                            params.KUBECONFIG,
                            params.CLOUDS,
                            hostfile,
                            jujuPassword,
                            SSH_KEY,
                            params.ROBOT_PASS_THRESHOLD,
                            params.ROBOT_UNSTABLE_THRESHOLD
                        )
                    } // stage("System Integration Test")
                } finally {
                    // Runs whether or not the robot stage threw: decide archiving and
                    // normalize the build result.
                    stage('After System Integration test') {
                        if (currentBuild.result != 'FAILURE') {
                            stage_archive = keep_artifacts
                        } else {
                            println('Systest test failed, throwing error')
                            // NOTE(review): assigning to 'error' shadows the Jenkins
                            // 'error' step name for the rest of the script; the outer
                            // Cleanup stage relies on this variable's truthiness.
                            error = new Exception('Systest test failed')
                            currentBuild.result = 'FAILURE'
                            throw error
                        }
                    }
                }
            } // if ( params.DO_ROBOT )
627
            if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive) {
                stage('Archive') {
                    // Archive the tested repo to Artifactory under the 'tested' label.
                    dir("${RELEASE_DIR}") {
                        ci_helper.archive(params.ARTIFACTORY_SERVER, RELEASE, GERRIT_BRANCH, 'tested')
                    }
                    if (params.DO_DOCKERPUSH) {
                        stage('Publish to Dockerhub') {
                            // Re-tag each tested module image from the internal registry
                            // and push it to Dockerhub, one parallel branch per module.
                            parallelSteps = [:]
                            for (buildStep in containerList) {
                                // Local copies so each closure captures its own values
                                // (loop variables would otherwise be shared).
                                def module = buildStep
                                def moduleName = buildStep.toLowerCase()
                                def dockerTag = params.DOCKER_TAG
                                def moduleTag = containerName

                                parallelSteps[module] = {
                                    dir("$module") {
                                        sh("docker pull ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}")
                                        sh("""docker tag ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag} \
                                           opensourcemano/${moduleName}:${dockerTag}""")
                                        sh "docker push opensourcemano/${moduleName}:${dockerTag}"
                                    }
                                }
                            }
                            parallel parallelSteps
                        }
                        stage('Snap promotion') {
                            withCredentials([string(credentialsId: 'Snapstore', variable: 'SNAPCRAFT_STORE_CREDENTIALS')]) {
                                snaps = ['osmclient']
                                for (snap in snaps) {
                                    // Map the git branch to a snap track prefix:
                                    //   master  -> 'latest/'
                                    //   vX.Y    -> 'X.Y/'
                                    channel = 'latest/'
                                    if (BRANCH_NAME.startsWith('v')) {
                                        channel = BRANCH_NAME.substring(1) + '/'
                                    } else if (BRANCH_NAME != 'master') {
                                        // NOTE(review): this yields 'latest//<branch>' and,
                                        // below, a grep pattern 'latest//<branch>edge*' with
                                        // no separator before 'edge' — looks broken for
                                        // non-master, non-v* branches. Compare the charm
                                        // promotion logic below; confirm intended snap
                                        // channel naming before relying on this path.
                                        channel += '/' + BRANCH_NAME.replaceAll('/', '-')
                                    }
                                    // 'snapcraft revisions' marks current channels with a
                                    // trailing '*'; grep for the literal asterisk.
                                    track = channel + 'edge\\*'
                                    edge_rev = sh(returnStdout: true,
                                        script: "snapcraft revisions $snap | " +
                                        "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
                                    track = channel + 'beta\\*'
                                    beta_rev = sh(returnStdout: true,
                                        script: "snapcraft revisions $snap | " +
                                        "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()

                                    print "Edge: $edge_rev, Beta: $beta_rev"

                                    // Promote edge -> beta only when they differ.
                                    if (edge_rev != beta_rev) {
                                        print "Promoting $edge_rev to beta in place of $beta_rev"
                                        beta_track = channel + 'beta'
                                        sh "snapcraft release $snap $edge_rev $beta_track"
                                    }
                                }
                            }
                        } // stage('Snap promotion')
                        stage('Charm promotion') {
                            charms = [
                                'osm', // bundle
                                'osm-ha', // bundle
                                'osm-grafana',
                                'osm-mariadb',
                                'mongodb-exporter-k8s',
                                'mysqld-exporter-k8s',
                                'osm-lcm',
                                'osm-mon',
                                'osm-nbi',
                                'osm-ng-ui',
                                'osm-pol',
                                'osm-ro',
                                'osm-prometheus',
                                'osm-update-db-operator',
                                'osm-vca-integrator',
                            ]
                            for (charm in charms) {

                                // Map the git branch to a Charmhub track:
                                //   master -> 'latest', vX.Y -> 'X.Y',
                                //   other  -> 'latest/<branch-with-dashes>'
                                channel = 'latest'
                                if (BRANCH_NAME.startsWith('v')) {
                                    channel = BRANCH_NAME.substring(1)
                                } else if (BRANCH_NAME != 'master') {
                                    channel += '/' + BRANCH_NAME.replaceAll('/', '-')
                                }

                                withCredentials([string(credentialsId: 'Charmstore', variable: 'CHARMCRAFT_AUTH')]) {
                                    sh "charmcraft status $charm --format json > ${charm}.json"
                                    // Bundles have no 'architecture' key in their status
                                    // JSON; use that to distinguish charm vs bundle.
                                    isCharm = sh(returnStdout: true, script: "grep architecture ${charm}.json | wc -l").trim() as int
                                    resourceArgument = ""
                                    if (isCharm) {
                                        jqScriptEdge = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .version'|head -1"
                                        jqScriptBeta = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/beta\")| .version'|head -1"
                                        // Collect up to 5 attached resources of the edge
                                        // release so they can be re-attached on promotion.
                                        index=0
                                        while (index < 5) {
                                            resourceNameScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .resources[$index].name'|head -1"
                                            resourceRevsScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .resources[$index].revision'|head -1"
                                            resourceName = sh(returnStdout: true, script: resourceNameScript).trim()
                                            resourceRevs = sh(returnStdout: true, script: resourceRevsScript).trim()
                                            if (resourceName != "null") {
                                                resourceArgument += " --resource ${resourceName}:${resourceRevs}"
                                            } else {
                                                break
                                            }
                                            index ++
                                        }
                                    } else {
                                        jqScriptEdge = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[].releases[] | select(.channel==\"$channel/edge/merged\")| .version'|head -1"
                                        jqScriptBeta = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[].releases[] | select(.channel==\"$channel/beta\")| .version'|head -1"
                                    }
                                    // edge/merged is used in place of /edge as 10.1.0 LTS uses latest/edge
                                    edge_rev = sh(returnStdout: true, script: jqScriptEdge).trim()
                                    beta_rev = sh(returnStdout: true, script: jqScriptBeta).trim()
                                    // Non-numeric output (e.g. empty/null) counts as rev 0.
                                    try { edge_rev = edge_rev as int } catch (NumberFormatException nfe) {edge_rev = 0}
                                    try { beta_rev = beta_rev as int } catch (NumberFormatException nfe) {beta_rev = 0}

                                    print "Edge: $edge_rev, Beta: $beta_rev $resourceArgument"

                                    if (edge_rev > beta_rev) {
                                        print "Promoting $edge_rev to beta in place of $beta_rev"
                                        // NOTE(review): beta_track is assigned but unused in
                                        // this branch; the release command below builds the
                                        // channel directly from ${channel}/beta.
                                        beta_track = channel + 'beta'
                                        sh "charmcraft release ${charm} --revision=${edge_rev}  ${resourceArgument} --channel=${channel}/beta"
                                    }

                                }
                            }
                        } // stage('Charm promotion')
                    } // if (params.DO_DOCKERPUSH)
                } // stage('Archive')
            } // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
754         } // dir(OSM_DEVOPS)
755     } finally {
756         // stage('Debug') {
757         //     sleep 900
758         // }
        stage('Archive Container Logs') {
            // ARCHIVE_LOGS_FLAG is set once the install/health stages progressed far
            // enough for logs to exist; skip collection otherwise.
            if ( ARCHIVE_LOGS_FLAG ) {
                try {
                    // Archive logs
                    // Rebuild the SSH connection map: this runs on the finally path,
                    // where the 'remote' built earlier may not have been set.
                    remote = [
                        name: containerName,
                        host: IP_ADDRESS,
                        user: 'ubuntu',
                        identityFile: SSH_KEY,
                        allowAnyHosts: true,
                        logLevel: 'INFO',
                        pty: true
                    ]
                    println('Archiving container logs')
                    archive_logs(remote)
                } catch (Exception e) {
                    // Log collection is best-effort: report and continue, never fail
                    // the build from here.
                    println('Error fetching logs: '+ e.getMessage())
                }
            } // end if ( ARCHIVE_LOGS_FLAG )
        }
779         stage('Cleanup') {
780             if ( params.DO_INSTALL && server_id != null) {
781                 delete_vm = true
782                 if (error && params.SAVE_CONTAINER_ON_FAIL ) {
783                     delete_vm = false
784                 }
785                 if (!error && params.SAVE_CONTAINER_ON_PASS ) {
786                     delete_vm = false
787                 }
788
789                 if ( delete_vm ) {
790                     if (server_id != null) {
791                         println("Deleting VM: $server_id")
792                         sh """#!/bin/sh -e
793                             for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
794                             openstack server delete ${server_id}
795                         """
796                     } else {
797                         println("Saved VM $server_id in ETSI VIM")
798                     }
799                 }
800             }
801             if ( http_server_name != null ) {
802                 sh "docker stop ${http_server_name} || true"
803                 sh "docker rm ${http_server_name} || true"
804             }
805
806             if ( devopstempdir != null ) {
807                 sh "rm -rf ${devopstempdir}"
808             }
809         }
810     }
811 }