Archive Airflow DAG logs in stage 3
[osm/devops.git] / jenkins / ci-pipelines / ci_stage_3.groovy
1 /* Copyright ETSI Contributors and Others
2  *
3  * All Rights Reserved.
4  *
5  *   Licensed under the Apache License, Version 2.0 (the "License"); you may
6  *   not use this file except in compliance with the License. You may obtain
7  *   a copy of the License at
8  *
9  *        http://www.apache.org/licenses/LICENSE-2.0
10  *
11  *   Unless required by applicable law or agreed to in writing, software
12  *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13  *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14  *   License for the specific language governing permissions and limitations
15  *   under the License.
16  */
17
// Job parameters for the OSM CI stage-3 pipeline (build, install, system test).
properties([
    parameters([
        // --- Source / upstream-artifact selection ---
        string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
        string(defaultValue: 'system', description: '', name: 'NODE'),
        string(defaultValue: '', description: '', name: 'BUILD_FROM_SOURCE'),
        string(defaultValue: 'unstable', description: '', name: 'REPO_DISTRO'),
        string(defaultValue: '', description: '', name: 'COMMIT_ID'),
        string(defaultValue: '-stage_2', description: '', name: 'UPSTREAM_SUFFIX'),
        string(defaultValue: 'pubkey.asc', description: '', name: 'REPO_KEY_NAME'),
        string(defaultValue: 'release', description: '', name: 'RELEASE'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
        // --- Signing / artifact publishing ---
        string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
        string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
        string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
        string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
        string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
        // --- Pipeline stage toggles ---
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
        booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
        booleanParam(defaultValue: true, description: '',  name: 'DO_BUILD'),
        booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
        booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
        string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
        // --- Robot system-test configuration ---
        booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
        string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options',
               name: 'ROBOT_TAG_NAME'),
        string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
        string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml',
               description: 'Port mapping file for SDN assist in ETSI VIM',
               name: 'ROBOT_PORT_MAPPING_VIM'),
        string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
        string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
        string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
        string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed',
               name: 'ROBOT_PASS_THRESHOLD'),
        string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable ' +
               '(if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
    ])
])
59
60 ////////////////////////////////////////////////////////////////////////////////////////
61 // Helper Functions
62 ////////////////////////////////////////////////////////////////////////////////////////
/**
 * Runs the Robot Framework system tests inside the opensourcemano/tests
 * container against a deployed OSM instance, then publishes the results
 * with RobotPublisher.
 *
 * @param tagName            tag of the opensourcemano/tests image to run
 * @param testName           Robot tag selecting which tests to execute (-t)
 * @param osmHostname        OSM NBI hostname exported to the container
 * @param prometheusHostname Prometheus hostname exported to the container
 * @param prometheusPort     optional Prometheus port (omitted when null)
 * @param envfile            optional env file passed via --env-file
 * @param portmappingfile    SDN-assist port-mapping file mounted in the container
 * @param kubeconfig         kubeconfig mounted as /root/.kube/config
 * @param clouds             clouds.yaml mounted as /etc/openstack/clouds.yaml
 * @param hostfile           optional /etc/hosts override mounted in the container
 * @param jujuPassword       optional Juju password exported to the container
 * @param osmRSAfile         SSH key mounted as /root/osm_id_rsa
 * @param passThreshold      % passed tests to mark the build as passed
 * @param unstableThreshold  % passed tests to mark the build as unstable
 */
void run_robot_systest(String tagName,
                       String testName,
                       String osmHostname,
                       String prometheusHostname,
                       Integer prometheusPort=null,
                       String envfile=null,
                       String portmappingfile=null,
                       String kubeconfig=null,
                       String clouds=null,
                       String hostfile=null,
                       String jujuPassword=null,
                       String osmRSAfile=null,
                       String passThreshold='0.0',
                       String unstableThreshold='0.0') {
    // 'def'/'String' keep all of these function-local; previously they were
    // undeclared and leaked into the script binding (the main script also
    // uses a 'tempdir' variable, so the leak risked cross-contamination).
    def tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = ''
    if (envfile) {
        environmentFile = envfile
    } else {
        // docker --env-file requires an existing file, so create an empty one
        sh(script: "touch ${tempdir}/env")
        environmentFile = "${tempdir}/env"
    }
    String prometheusPortVar = ''
    if (prometheusPort != null) {
        prometheusPortVar = "--env PROMETHEUS_PORT=${prometheusPort}"
    }
    String hostfilemount = ''
    if (hostfile) {
        hostfilemount = "-v ${hostfile}:/etc/hosts"
    }

    String jujuPasswordVar = ''
    if (jujuPassword != null) {
        jujuPasswordVar = "--env JUJU_PASSWORD=${jujuPassword}"
    }

    try {
        sh("""docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} \
           ${prometheusPortVar} ${jujuPasswordVar} --env-file ${environmentFile} \
           -v ${clouds}:/etc/openstack/clouds.yaml \
           -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports \
           -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} \
           -c -t ${testName}""")
    } finally {
        // Guard the report copies with returnStatus: if the docker run died
        // before writing any report, a plain 'cp' would exit non-zero here
        // and mask the original failure, skipping RobotPublisher entirely.
        sh(returnStatus: true, script: "cp ${tempdir}/*.xml .")
        sh(returnStatus: true, script: "cp ${tempdir}/*.html .")
        def outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
        println("Present Directory is : ${outputDirectory}")
        step([
            $class : 'RobotPublisher',
            outputPath : "${outputDirectory}",
            outputFileName : '*.xml',
            disableArchiveOutput : false,
            reportFileName : 'report.html',
            logFileName : 'log.html',
            passThreshold : passThreshold,
            unstableThreshold: unstableThreshold,
            otherFiles : '*.png',
        ])
    }
}
124
/**
 * Collects OSM component logs (and Airflow DAG logs) on the remote VM,
 * fetches them into the workspace, and archives them as build artifacts.
 *
 * Relies on the script-level 'useCharmedInstaller' flag to decide between
 * per-pod log extraction (charmed) and per-deployment/statefulset
 * extraction plus Airflow scheduler DAG logs (k8s installer).
 *
 * @param remote SSH Steps remote map describing the target VM
 */
void archive_logs(Map remote) {

    // Prepare the remote log tree (dags/ holds Airflow DAG logs)
    sshCommand remote: remote, command: '''mkdir -p logs/dags'''
    if (useCharmedInstaller) {
        // Charmed installer: one log file per pod, named after the pod prefix
        sshCommand remote: remote, command: '''
            for pod in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                logfile=`echo $pod | cut -d- -f1`
                echo "Extracting log for $logfile"
                kubectl logs -n osm $pod --timestamps=true 2>&1 > logs/$logfile.log
            done
        '''
    } else {
        // k8s installer: capture every deployment and statefulset, all containers
        sshCommand remote: remote, command: '''
            for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $deployment"
                kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 \
                > logs/$deployment.log
            done
        '''
        sshCommand remote: remote, command: '''
            for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $statefulset"
                kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 \
                > logs/$statefulset.log
            done
        '''
        // Copy the Airflow scheduler's per-DAG logs out of the scheduler pod
        sshCommand remote: remote, command: '''
            schedulerPod="$(kubectl get pods -n osm | grep airflow-scheduler| awk '{print $1; exit}')"; \
            echo "Extracting logs from Airflow DAGs from pod ${schedulerPod}"; \
            kubectl cp -n osm ${schedulerPod}:/opt/airflow/logs/scheduler/latest/dags logs/dags -c scheduler
        '''
    }

    // Remove any stale local copy, then fetch the remote logs/ tree
    sh 'rm -rf logs'
    sshCommand remote: remote, command: '''ls -al logs'''
    sshGet remote: remote, from: 'logs', into: '.', override: true
    // Flatten log files into the workspace for archiveArtifacts ('*.log').
    // returnStatus guards both copies: 'cp logs/* .' always exits non-zero
    // ("omitting directory") because logs/ contains the dags/ subdirectory,
    // and dags/ may be empty (charmed path never populates it) — neither
    // condition should abort log archiving.
    sh(returnStatus: true, script: 'cp logs/* .')
    sh(returnStatus: true, script: 'cp logs/dags/* .')
    archiveArtifacts artifacts: '*.log'
}
165
/**
 * Parses openstack-CLI table output ("| key | value |" rows) and returns
 * the value in the second column of the row whose first column matches
 * the given key.
 *
 * @param key    first-column field name to look up (e.g. 'id', 'addresses')
 * @param output full table text as produced by e.g. 'openstack server show'
 * @return the trimmed value for the key, or null if the key is not found
 */
String get_value(String key, String output) {
    for (String line : output.split('\n')) {
        // 'def' keeps data function-local; it previously leaked into the
        // global script binding as an undeclared variable.
        def data = line.split('\\|')
        if (data.length > 1 && data[1].trim() == key) {
            return data[2].trim()
        }
    }
    // Explicit miss result (was an implicit null fall-through)
    return null
}
176
177 ////////////////////////////////////////////////////////////////////////////////////////
178 // Main Script
179 ////////////////////////////////////////////////////////////////////////////////////////
180 node("${params.NODE}") {
181
182     INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
183     INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
184     APT_PROXY = 'http://172.21.1.1:3142'
185     SSH_KEY = '~/hive/cicd_rsa'
186     ARCHIVE_LOGS_FLAG = false
187     sh 'env'
188
189     tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')
190
191     stage('Checkout') {
192         checkout scm
193     }
194
195     ci_helper = load 'jenkins/ci-pipelines/ci_helper.groovy'
196
197     def upstreamMainJob = params.UPSTREAM_SUFFIX
198
199     // upstream jobs always use merged artifacts
200     upstreamMainJob += '-merge'
201     containerNamePrefix = "osm-${tag_or_branch}"
202     containerName = "${containerNamePrefix}"
203
204     keep_artifacts = false
205     if ( JOB_NAME.contains('merge') ) {
206         containerName += '-merge'
207
208         // On a merge job, we keep artifacts on smoke success
209         keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
210     }
211     containerName += "-${BUILD_NUMBER}"
212
213     server_id = null
214     http_server_name = null
215     devopstempdir = null
216     useCharmedInstaller = params.INSTALLER.equalsIgnoreCase('charmed')
217
218     try {
219         builtModules = [:]
220 ///////////////////////////////////////////////////////////////////////////////////////
221 // Fetch stage 2 .deb artifacts
222 ///////////////////////////////////////////////////////////////////////////////////////
223         stage('Copy Artifacts') {
224             // cleanup any previous repo
225             sh "tree -fD repo || exit 0"
226             sh 'rm -rvf repo'
227             sh "tree -fD repo && lsof repo || exit 0"
228             dir('repo') {
229                 packageList = []
230                 dir("${RELEASE}") {
231                     RELEASE_DIR = sh(returnStdout:true,  script: 'pwd').trim()
232
233                     // check if an upstream artifact based on specific build number has been requested
234                     // This is the case of a merge build and the upstream merge build is not yet complete
235                     // (it is not deemed a successful build yet). The upstream job is calling this downstream
236                     // job (with the its build artifact)
237                     def upstreamComponent = ''
238                     if (params.UPSTREAM_JOB_NAME) {
239                         println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
240                         lock('Artifactory') {
241                             step ([$class: 'CopyArtifact',
242                                 projectName: "${params.UPSTREAM_JOB_NAME}",
243                                 selector: [$class: 'SpecificBuildSelector',
244                                 buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
245                                 ])
246
247                             upstreamComponent = ci_helper.get_mdg_from_project(
248                                 ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
249                             def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
250                             dir("$upstreamComponent") {
251                                 // the upstream job name contains suffix with the project. Need this stripped off
252                                 project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
253                                 packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
254                                     upstreamComponent,
255                                     GERRIT_BRANCH,
256                                     "${project_without_branch} :: ${GERRIT_BRANCH}",
257                                     buildNumber)
258
259                                 packageList.addAll(packages)
260                                 println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
261                             }
262                         } // lock artifactory
263                     }
264
265                     parallelSteps = [:]
266                     list = ['RO', 'osmclient', 'IM', 'devops', 'MON', 'N2VC', 'NBI',
267                             'common', 'LCM', 'POL', 'NG-UI', 'NG-SA', 'PLA', 'tests']
268                     if (upstreamComponent.length() > 0) {
269                         println("Skipping upstream fetch of ${upstreamComponent}")
270                         list.remove(upstreamComponent)
271                     }
272                     for (buildStep in list) {
273                         def component = buildStep
274                         parallelSteps[component] = {
275                             dir("$component") {
276                                 println("Fetching artifact for ${component}")
277                                 step([$class: 'CopyArtifact',
278                                        projectName: "${component}${upstreamMainJob}/${GERRIT_BRANCH}"])
279
280                                 // grab the archives from the stage_2 builds
281                                 // (ie. this will be the artifacts stored based on a merge)
282                                 packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
283                                     component,
284                                     GERRIT_BRANCH,
285                                     "${component}${upstreamMainJob} :: ${GERRIT_BRANCH}",
286                                     ci_helper.get_env_value('build.env', 'BUILD_NUMBER'))
287                                 packageList.addAll(packages)
288                                 println("Fetched ${component}: ${packages}")
289                                 sh 'rm -rf dists'
290                             }
291                         }
292                     }
293                     lock('Artifactory') {
294                         parallel parallelSteps
295                     }
296
297 ///////////////////////////////////////////////////////////////////////////////////////
298 // Create Devops APT repository
299 ///////////////////////////////////////////////////////////////////////////////////////
300                     sh 'mkdir -p pool'
301                     for (component in [ 'devops', 'IM', 'osmclient' ]) {
302                         sh "ls -al ${component}/pool/"
303                         sh "cp -r ${component}/pool/* pool/"
304                         sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
305                         sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
306                         sh("""apt-ftparchive packages pool/${component} \
307                            > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages""")
308                         sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
309                     }
310
311                     // create and sign the release file
312                     sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
313                     sh("""gpg --yes -abs -u ${GPG_KEY_NAME} \
314                        -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release""")
315
316                     // copy the public key into the release folder
317                     // this pulls the key from the home dir of the current user (jenkins)
318                     sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
319                     sh "cp ~/${REPO_KEY_NAME} ."
320                 }
321
322                 // start an apache server to serve up the packages
323                 http_server_name = "${containerName}-apache"
324
325                 pwd = sh(returnStdout:true,  script: 'pwd').trim()
326                 repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
327                                'print(s.getsockname()[1]); s.close()\');',
328                                returnStdout: true).trim()
329                 internal_docker_http_server_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
330                 NODE_IP_ADDRESS = sh(returnStdout: true, script:
331                     "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
332                 ci_helper.check_status_http_server(NODE_IP_ADDRESS, repo_port)
333             }
334
335             sh "tree -fD repo"
336
337             // Unpack devops package into temporary location so that we use it from upstream if it was part of a patch
338             osm_devops_dpkg = sh(returnStdout: true, script: 'find ./repo/release/pool/ -name osm-devops*.deb').trim()
339             devopstempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
340             println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
341             sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
342             OSM_DEVOPS = "${devopstempdir}/usr/share/osm-devops"
343             // Convert URLs from stage 2 packages to arguments that can be passed to docker build
344             for (remotePath in packageList) {
345                 packageName = remotePath[remotePath.lastIndexOf('/') + 1 .. -1]
346                 packageName = packageName[0 .. packageName.indexOf('_') - 1]
347                 builtModules[packageName] = remotePath
348             }
349         }
350
351 ///////////////////////////////////////////////////////////////////////////////////////
352 // Build docker containers
353 ///////////////////////////////////////////////////////////////////////////////////////
354         dir(OSM_DEVOPS) {
355             Map remote = [:]
356             error = null
357             if ( params.DO_BUILD ) {
358                 withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
359                                 usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
360                     sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
361                 }
362                 datetime = sh(returnStdout: true, script: 'date +%Y-%m-%d:%H:%M:%S').trim()
363                 moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}"
364                 for (packageName in builtModules.keySet()) {
365                     envName = packageName.replaceAll('-', '_').toUpperCase() + '_URL'
366                     moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
367                 }
368                 dir('docker') {
369                     stage('Build') {
370                         containerList = sh(returnStdout: true, script:
371                             "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
372                         containerList = Arrays.asList(containerList.split('\n'))
373                         print(containerList)
374                         parallelSteps = [:]
375                         for (buildStep in containerList) {
376                             def module = buildStep
377                             def moduleName = buildStep.toLowerCase()
378                             def moduleTag = containerName
379                             parallelSteps[module] = {
380                                 dir("$module") {
381                                     sh("""docker build --build-arg APT_PROXY=${APT_PROXY} \
382                                     -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} .""")
383                                     println("Tagging ${moduleName}:${moduleTag}")
384                                     sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
385                                     ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
386                                     sh("""docker push \
387                                     ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
388                                 }
389                             }
390                         }
391                         parallel parallelSteps
392                     }
393                 }
394             } // if (params.DO_BUILD)
395
396             if (params.DO_INSTALL) {
397 ///////////////////////////////////////////////////////////////////////////////////////
398 // Launch VM
399 ///////////////////////////////////////////////////////////////////////////////////////
400                 stage('Spawn Remote VM') {
401                     println('Launching new VM')
402                     output = sh(returnStdout: true, script: """#!/bin/sh -e
403                         for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
404                         openstack server create --flavor osm.sanity \
405                                                 --image ${OPENSTACK_BASE_IMAGE} \
406                                                 --key-name CICD \
407                                                 --property build_url="${BUILD_URL}" \
408                                                 --nic net-id=osm-ext \
409                                                 ${containerName}
410                     """).trim()
411
412                     server_id = get_value('id', output)
413
414                     if (server_id == null) {
415                         println('VM launch output: ')
416                         println(output)
417                         throw new Exception('VM Launch failed')
418                     }
419                     println("Target VM is ${server_id}, waiting for IP address to be assigned")
420
421                     IP_ADDRESS = ''
422
423                     while (IP_ADDRESS == '') {
424                         output = sh(returnStdout: true, script: """#!/bin/sh -e
425                             for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
426                             openstack server show ${server_id}
427                         """).trim()
428                         IP_ADDRESS = get_value('addresses', output)
429                     }
430                     IP_ADDRESS = IP_ADDRESS.split('=')[1]
431                     println("Waiting for VM at ${IP_ADDRESS} to be reachable")
432
433                     alive = false
434                     timeout(time: 1, unit: 'MINUTES') {
435                         while (!alive) {
436                             output = sh(
437                                 returnStatus: true,
438                                 script: "ssh -T -i ${SSH_KEY} " +
439                                     "-o StrictHostKeyChecking=no " +
440                                     "-o UserKnownHostsFile=/dev/null " +
441                                     "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
442                             alive = (output == 0)
443                         }
444                     }
445                     println('VM is ready and accepting ssh connections')
446                 } // stage("Spawn Remote VM")
447
448 ///////////////////////////////////////////////////////////////////////////////////////
449 // Checks before installation
450 ///////////////////////////////////////////////////////////////////////////////////////
451                 stage('Checks before installation') {
452                     remote = [
453                         name: containerName,
454                         host: IP_ADDRESS,
455                         user: 'ubuntu',
456                         identityFile: SSH_KEY,
457                         allowAnyHosts: true,
458                         logLevel: 'INFO',
459                         pty: true
460                     ]
461
462                     // Ensure the VM is ready
463                     sshCommand remote: remote, command: 'cloud-init status --wait'
464                     // Force time sync to avoid clock drift and invalid certificates
465                     sshCommand remote: remote, command: 'sudo apt-get -y update'
466                     sshCommand remote: remote, command: 'sudo apt-get -y install chrony'
467                     sshCommand remote: remote, command: 'sudo service chrony stop'
468                     sshCommand remote: remote, command: 'sudo chronyd -vq'
469                     sshCommand remote: remote, command: 'sudo service chrony start'
470
471                  } // stage("Checks before installation")
472 ///////////////////////////////////////////////////////////////////////////////////////
473 // Installation
474 ///////////////////////////////////////////////////////////////////////////////////////
475                 stage('Install') {
476                     commit_id = ''
477                     repo_distro = ''
478                     repo_key_name = ''
479                     release = ''
480
481                     if (params.COMMIT_ID) {
482                         commit_id = "-b ${params.COMMIT_ID}"
483                     }
484                     if (params.REPO_DISTRO) {
485                         repo_distro = "-r ${params.REPO_DISTRO}"
486                     }
487                     if (params.REPO_KEY_NAME) {
488                         repo_key_name = "-k ${params.REPO_KEY_NAME}"
489                     }
490                     if (params.RELEASE) {
491                         release = "-R ${params.RELEASE}"
492                     }
493                     if (params.REPOSITORY_BASE) {
494                         repo_base_url = "-u ${params.REPOSITORY_BASE}"
495                     } else {
496                         repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
497                     }
498
499                     remote = [
500                         name: containerName,
501                         host: IP_ADDRESS,
502                         user: 'ubuntu',
503                         identityFile: SSH_KEY,
504                         allowAnyHosts: true,
505                         logLevel: 'INFO',
506                         pty: true
507                     ]
508
509                     sshCommand remote: remote, command: '''
510                         wget https://osm-download.etsi.org/ftp/osm-13.0-thirteen/install_osm.sh
511                         chmod +x ./install_osm.sh
512                         sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
513                     '''
514
515                     Map gitlabCredentialsMap = [$class: 'UsernamePasswordMultiBinding',
516                                                 credentialsId: 'gitlab-registry',
517                                                 usernameVariable: 'USERNAME',
518                                                 passwordVariable: 'PASSWORD']
519                     if (useCharmedInstaller) {
520                         // Use local proxy for docker hub
521                         sshCommand remote: remote, command: '''
522                             sudo snap install microk8s --classic --channel=1.19/stable
523                             sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" \
524                             /var/snap/microk8s/current/args/containerd-template.toml
525                             sudo systemctl restart snap.microk8s.daemon-containerd.service
526                             sudo snap alias microk8s.kubectl kubectl
527                         '''
528
529                         withCredentials([gitlabCredentialsMap]) {
530                             sshCommand remote: remote, command: """
531                                 ./install_osm.sh -y \
532                                     ${repo_base_url} \
533                                     ${repo_key_name} \
534                                     ${release} -r unstable \
535                                     --charmed  \
536                                     --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
537                                     --tag ${containerName}
538                             """
539                         }
540                         prometheusHostname = "prometheus.${IP_ADDRESS}.nip.io"
541                         prometheusPort = 80
542                         osmHostname = "nbi.${IP_ADDRESS}.nip.io:443"
543                     } else {
544                         // Run -k8s installer here specifying internal docker registry and docker proxy
545                         withCredentials([gitlabCredentialsMap]) {
546                             sshCommand remote: remote, command: """
547                                 ./install_osm.sh -y \
548                                     ${repo_base_url} \
549                                     ${repo_key_name} \
550                                     ${release} -r unstable \
551                                     -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
552                                     -p ${INTERNAL_DOCKER_PROXY} \
553                                     -t ${containerName}
554                             """
555                         }
556                         prometheusHostname = IP_ADDRESS
557                         prometheusPort = 9091
558                         osmHostname = IP_ADDRESS
559                     }
560                 } // stage("Install")
561 ///////////////////////////////////////////////////////////////////////////////////////
562 // Health check of installed OSM in remote vm
563 ///////////////////////////////////////////////////////////////////////////////////////
564                 stage('OSM Health') {
565                     // if this point is reached, logs should be archived
566                     ARCHIVE_LOGS_FLAG = true
567                     stackName = 'osm'
568                     sshCommand remote: remote, command: """
569                         /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
570                     """
571                 } // stage("OSM Health")
572             } // if ( params.DO_INSTALL )
573
574
575 ///////////////////////////////////////////////////////////////////////////////////////
576 // Execute Robot tests
577 ///////////////////////////////////////////////////////////////////////////////////////
578             stage_archive = false
579             if ( params.DO_ROBOT ) {
580                 try {
581                     stage('System Integration Test') {
582                         if (useCharmedInstaller) {
583                             tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
584                             sh(script: "touch ${tempdir}/hosts")
585                             hostfile = "${tempdir}/hosts"
586                             sh """cat << EOF > ${hostfile}
587 127.0.0.1           localhost
588 ${remote.host}      prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
589 EOF"""
590                         } else {
591                             hostfile = null
592                         }
593
594                         jujuPassword = sshCommand remote: remote, command: '''
595                             echo `juju gui 2>&1 | grep password | cut -d: -f2`
596                         '''
597
598                         run_robot_systest(
599                             containerName,
600                             params.ROBOT_TAG_NAME,
601                             osmHostname,
602                             prometheusHostname,
603                             prometheusPort,
604                             params.ROBOT_VIM,
605                             params.ROBOT_PORT_MAPPING_VIM,
606                             params.KUBECONFIG,
607                             params.CLOUDS,
608                             hostfile,
609                             jujuPassword,
610                             SSH_KEY,
611                             params.ROBOT_PASS_THRESHOLD,
612                             params.ROBOT_UNSTABLE_THRESHOLD
613                         )
614                     } // stage("System Integration Test")
615                 } finally {
616                     stage('After System Integration test') {
617                         if (currentBuild.result != 'FAILURE') {
618                             stage_archive = keep_artifacts
619                         } else {
620                             println('Systest test failed, throwing error')
621                             error = new Exception('Systest test failed')
622                             currentBuild.result = 'FAILURE'
623                             throw error
624                         }
625                     }
626                 }
627             } // if ( params.DO_ROBOT )
628
            // Archive/publish phase: runs when the operator forces it or when the
            // systest stage passed (stage_archive). Publishes the tested repo to
            // artifactory and, for DO_DOCKERPUSH builds, promotes docker images,
            // snaps and charms.
            if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive) {
                stage('Archive') {
                    // Archive the tested repo
                    dir("${RELEASE_DIR}") {
                        ci_helper.archive(params.ARTIFACTORY_SERVER, RELEASE, GERRIT_BRANCH, 'tested')
                    }
                    if (params.DO_DOCKERPUSH) {
                        // Retag every tested module image from the internal registry
                        // and push it to Dockerhub, all modules in parallel.
                        stage('Publish to Dockerhub') {
                            parallelSteps = [:]
                            for (buildStep in containerList) {
                                // Capture per-iteration values in defs so each closure
                                // sees its own module (Groovy closures bind variables).
                                def module = buildStep
                                def moduleName = buildStep.toLowerCase()
                                def dockerTag = params.DOCKER_TAG
                                def moduleTag = containerName

                                parallelSteps[module] = {
                                    dir("$module") {
                                        sh("docker pull ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}")
                                        sh("""docker tag ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag} \
                                           opensourcemano/${moduleName}:${dockerTag}""")
                                        sh "docker push opensourcemano/${moduleName}:${dockerTag}"
                                    }
                                }
                            }
                            parallel parallelSteps
                        }
                        // Promote the latest edge snap revision to the beta channel
                        // of the track matching this branch.
                        stage('Snap promotion') {
                            withCredentials([string(credentialsId: 'Snapstore', variable: 'SNAPCRAFT_STORE_CREDENTIALS')]) {
                                snaps = ['osmclient']
                                for (snap in snaps) {
                                    // Track selection: 'latest/' for master, '<ver>/'
                                    // for v-prefixed release branches.
                                    channel = 'latest/'
                                    if (BRANCH_NAME.startsWith('v')) {
                                        channel = BRANCH_NAME.substring(1) + '/'
                                    } else if (BRANCH_NAME != 'master') {
                                        // NOTE(review): this yields 'latest//<branch>'
                                        // (double slash) since channel already ends in
                                        // '/' — confirm intended snapcraft track name;
                                        // the charm promotion below uses 'latest/<branch>'.
                                        channel += '/' + BRANCH_NAME.replaceAll('/', '-')
                                    }
                                    // Latest revision published to <track>edge*.
                                    track = channel + 'edge\\*'
                                    edge_rev = sh(returnStdout: true,
                                        script: "snapcraft revisions $snap | " +
                                        "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
                                    // Latest revision already on <track>beta*.
                                    track = channel + 'beta\\*'
                                    beta_rev = sh(returnStdout: true,
                                        script: "snapcraft revisions $snap | " +
                                        "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()

                                    print "Edge: $edge_rev, Beta: $beta_rev"

                                    // Only release when beta is behind edge.
                                    if (edge_rev != beta_rev) {
                                        print "Promoting $edge_rev to beta in place of $beta_rev"
                                        beta_track = channel + 'beta'
                                        sh "snapcraft release $snap $edge_rev $beta_track"
                                    }
                                }
                            }
                        } // stage('Snap promotion')
                        // Promote every OSM charm/bundle from <channel>/edge/merged
                        // to <channel>/beta when edge carries a newer revision.
                        stage('Charm promotion') {
                            charms = [
                                'osm', // bundle
                                'osm-ha', // bundle
                                'osm-grafana',
                                'osm-mariadb',
                                'mongodb-exporter-k8s',
                                'mysqld-exporter-k8s',
                                'osm-lcm',
                                'osm-mon',
                                'osm-nbi',
                                'osm-ng-ui',
                                'osm-pol',
                                'osm-ro',
                                'osm-prometheus',
                                'osm-update-db-operator',
                                'osm-vca-integrator',
                            ]
                            for (charm in charms) {

                                // Charmhub track: 'latest' on master, '<ver>' on
                                // v-branches, 'latest/<branch>' otherwise.
                                channel = 'latest'
                                if (BRANCH_NAME.startsWith('v')) {
                                    channel = BRANCH_NAME.substring(1)
                                } else if (BRANCH_NAME != 'master') {
                                    channel += '/' + BRANCH_NAME.replaceAll('/', '-')
                                }

                                withCredentials([string(credentialsId: 'Charmstore', variable: 'CHARMCRAFT_AUTH')]) {
                                    sh "charmcraft status $charm --format json > ${charm}.json"
                                    // Heuristic: charm status JSON mentions
                                    // 'architecture'; bundles do not.
                                    isCharm = sh(returnStdout: true, script: "grep architecture ${charm}.json | wc -l").trim() as int
                                    resourceArgument = ""
                                    if (isCharm) {
                                        // jq filters: pick the amd64/20.04 mapping of
                                        // this track and read the released version on
                                        // edge/merged and beta respectively.
                                        jqScriptEdge = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .version'|head -1"
                                        jqScriptBeta = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/beta\")| .version'|head -1"
                                        // Collect up to 5 attached resources so the
                                        // release carries the same resource revisions
                                        // as edge/merged.
                                        index=0
                                        while (index < 5) {
                                            resourceNameScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .resources[$index].name'|head -1"
                                            resourceRevsScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .resources[$index].revision'|head -1"
                                            resourceName = sh(returnStdout: true, script: resourceNameScript).trim()
                                            resourceRevs = sh(returnStdout: true, script: resourceRevsScript).trim()
                                            if (resourceName != "null") {
                                                resourceArgument += " --resource ${resourceName}:${resourceRevs}"
                                            } else {
                                                // jq prints "null" past the last resource
                                                break
                                            }
                                            index ++
                                        }
                                    } else {
                                        // Bundles have no base/architecture mapping.
                                        jqScriptEdge = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[].releases[] | select(.channel==\"$channel/edge/merged\")| .version'|head -1"
                                        jqScriptBeta = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[].releases[] | select(.channel==\"$channel/beta\")| .version'|head -1"
                                    }
                                    // edge/merged is used in place of /edge as 10.1.0 LTS uses latest/edge
                                    edge_rev = sh(returnStdout: true, script: jqScriptEdge).trim()
                                    beta_rev = sh(returnStdout: true, script: jqScriptBeta).trim()
                                    // Missing/non-numeric versions count as revision 0.
                                    try { edge_rev = edge_rev as int } catch (NumberFormatException nfe) {edge_rev = 0}
                                    try { beta_rev = beta_rev as int } catch (NumberFormatException nfe) {beta_rev = 0}

                                    print "Edge: $edge_rev, Beta: $beta_rev $resourceArgument"

                                    if (edge_rev > beta_rev) {
                                        print "Promoting $edge_rev to beta in place of $beta_rev"
                                        // NOTE(review): beta_track is assigned but never
                                        // used here (the sh command builds --channel
                                        // directly); looks like leftover from the snap
                                        // promotion flow above — candidate for removal.
                                        beta_track = channel + 'beta'
                                        sh "charmcraft release ${charm} --revision=${edge_rev}  ${resourceArgument} --channel=${channel}/beta"
                                    }

                                }
                            }
                        } // stage('Charm promotion')
                    } // if (params.DO_DOCKERPUSH)
                } // stage('Archive')
            } // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
755         } // dir(OSM_DEVOPS)
756     } finally {
757         // stage('Debug') {
758         //     sleep 900
759         // }
        // Best-effort log collection from the test VM; runs in the finally path so
        // it executes on both success and failure, but only once the installation
        // got far enough to set ARCHIVE_LOGS_FLAG.
        stage('Archive Container Logs') {
            if ( ARCHIVE_LOGS_FLAG ) {
                try {
                    // Archive logs
                    // Rebuild the ssh connection descriptor here: the finally block
                    // may run after an exception, so `remote` from the main path is
                    // not guaranteed to be set/valid.
                    remote = [
                        name: containerName,
                        host: IP_ADDRESS,
                        user: 'ubuntu',
                        identityFile: SSH_KEY,
                        allowAnyHosts: true,
                        logLevel: 'INFO',
                        pty: true
                    ]
                    println('Archiving container logs')
                    archive_logs(remote)
                } catch (Exception e) {
                    // Deliberately swallowed: failing to fetch logs must not mask
                    // the build result or prevent the Cleanup stage from running.
                    println('Error fetching logs: '+ e.getMessage())
                }
            } // end if ( ARCHIVE_LOGS_FLAG )
        }
780         stage('Cleanup') {
781             if ( params.DO_INSTALL && server_id != null) {
782                 delete_vm = true
783                 if (error && params.SAVE_CONTAINER_ON_FAIL ) {
784                     delete_vm = false
785                 }
786                 if (!error && params.SAVE_CONTAINER_ON_PASS ) {
787                     delete_vm = false
788                 }
789
790                 if ( delete_vm ) {
791                     if (server_id != null) {
792                         println("Deleting VM: $server_id")
793                         sh """#!/bin/sh -e
794                             for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
795                             openstack server delete ${server_id}
796                         """
797                     } else {
798                         println("Saved VM $server_id in ETSI VIM")
799                     }
800                 }
801             }
802             if ( http_server_name != null ) {
803                 sh "docker stop ${http_server_name} || true"
804                 sh "docker rm ${http_server_name} || true"
805             }
806
807             if ( devopstempdir != null ) {
808                 sh "rm -rf ${devopstempdir}"
809             }
810         }
811     }
812 }