Feature 11006: new boolean param in stage3 to enable juju installation
[osm/devops.git] / jenkins / ci-pipelines / ci_stage_3.groovy
/* Copyright ETSI Contributors and Others
 *
 * All Rights Reserved.
 *
 *   Licensed under the Apache License, Version 2.0 (the "License"); you may
 *   not use this file except in compliance with the License. You may obtain
 *   a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 *   License for the specific language governing permissions and limitations
 *   under the License.
 */

properties([
    parameters([
        string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
        string(defaultValue: 'system', description: '', name: 'NODE'),
        string(defaultValue: '', description: '', name: 'BUILD_FROM_SOURCE'),
        string(defaultValue: 'unstable', description: '', name: 'REPO_DISTRO'),
        string(defaultValue: '', description: '', name: 'COMMIT_ID'),
        string(defaultValue: '-stage_2', description: '', name: 'UPSTREAM_SUFFIX'),
        string(defaultValue: 'pubkey.asc', description: '', name: 'REPO_KEY_NAME'),
        string(defaultValue: 'release', description: '', name: 'RELEASE'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
        string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
        string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
        string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
        string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
        string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
        string(defaultValue: 'ubuntu22.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
        string(defaultValue: 'osm.sanity', description: '', name: 'OPENSTACK_OSM_FLAVOR'),
        booleanParam(defaultValue: false, description: '', name: 'TRY_OLD_SERVICE_ASSURANCE'),
        booleanParam(defaultValue: true, description: 'Enable Juju installation', name: 'TRY_JUJU_INSTALLATION'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
        booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
        booleanParam(defaultValue: true, description: '', name: 'DO_BUILD'),
        booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
        booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
        booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
        string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
        booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
        string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options',
               name: 'ROBOT_TAG_NAME'),
        string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
        string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml',
               description: 'Port mapping file for SDN assist in ETSI VIM',
               name: 'ROBOT_PORT_MAPPING_VIM'),
        string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
        string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
        string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
        string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed',
               name: 'ROBOT_PASS_THRESHOLD'),
        string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable ' +
               '(if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
    ])
])

////////////////////////////////////////////////////////////////////////////////////////
// Helper Functions
////////////////////////////////////////////////////////////////////////////////////////
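// Run the Robot Framework system tests for the given tag inside the opensourcemano/tests container
// against the deployed OSM instance, then publish the results with RobotPublisher. The thresholds
// are the percentage of passed tests required to mark the build as passed or unstable.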
void run_robot_systest(String tagName,
                       String testName,
                       String osmHostname,
                       String prometheusHostname,
                       Integer prometheusPort=null,
                       String envfile=null,
                       String portmappingfile=null,
                       String kubeconfig=null,
                       String clouds=null,
                       String hostfile=null,
                       String jujuPassword=null,
                       String osmRSAfile=null,
                       String passThreshold='0.0',
                       String unstableThreshold='0.0') {
    tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
    String environmentFile = ''
    if (envfile) {
        environmentFile = envfile
    } else {
        sh(script: "touch ${tempdir}/env")
        environmentFile = "${tempdir}/env"
    }
    PROMETHEUS_PORT_VAR = ''
    if (prometheusPort != null) {
        PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT=${prometheusPort}"
    }
    hostfilemount = ''
    if (hostfile) {
        hostfilemount = "-v ${hostfile}:/etc/hosts"
    }

    JUJU_PASSWORD_VAR = ''
    if (jujuPassword != null) {
        JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD=${jujuPassword}"
    }

    try {
        sh("""docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} \
           ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${environmentFile} \
           -v ${clouds}:/etc/openstack/clouds.yaml \
           -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports \
           -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} \
           -c -t ${testName}""")
    } finally {
        sh("cp ${tempdir}/*.xml .")
        sh("cp ${tempdir}/*.html .")
        outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
113         println("Present Directory is : ${outputDirectory}")
        step([
            $class : 'RobotPublisher',
            outputPath : "${outputDirectory}",
            outputFileName : '*.xml',
            disableArchiveOutput : false,
            reportFileName : 'report.html',
            logFileName : 'log.html',
            passThreshold : passThreshold,
            unstableThreshold: unstableThreshold,
            otherFiles : '*.png',
        ])
    }
}

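// Collect OSM logs from the remote VM over SSH (per pod for the charmed installer, per
// deployment/statefulset plus Airflow DAG logs otherwise) and archive them as build artifacts.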
void archive_logs(Map remote) {

    sshCommand remote: remote, command: '''mkdir -p logs/dags'''
    if (useCharmedInstaller) {
        sshCommand remote: remote, command: '''
            for pod in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                logfile=`echo $pod | cut -d- -f1`
                echo "Extracting log for $logfile"
                kubectl logs -n osm $pod --timestamps=true 2>&1 > logs/$logfile.log
            done
        '''
    } else {
        sshCommand remote: remote, command: '''
            for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $deployment"
                kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 \
                > logs/$deployment.log
            done
        '''
        sshCommand remote: remote, command: '''
            for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
                echo "Extracting log for $statefulset"
                kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 \
                > logs/$statefulset.log
            done
        '''
        sshCommand remote: remote, command: '''
            schedulerPod="$(kubectl get pods -n osm | grep airflow-scheduler| awk '{print $1; exit}')"; \
156             echo "Extracting logs from Airflow DAGs from pod ${schedulerPod}"; \
            kubectl cp -n osm ${schedulerPod}:/opt/airflow/logs/scheduler/latest/dags logs/dags -c scheduler
        '''
    }

    sh 'rm -rf logs'
    sshCommand remote: remote, command: '''ls -al logs'''
    sshGet remote: remote, from: 'logs', into: '.', override: true
    archiveArtifacts artifacts: 'logs/*.log, logs/dags/*.log'
}

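// Parse openstack CLI table output (rows like '| key | value |') and return the value for the
// given key, or null when the key is not found.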
String get_value(String key, String output) {
    for (String line : output.split( '\n' )) {
        data = line.split( '\\|' )
        if (data.length > 1) {
            if ( data[1].trim() == key ) {
                return data[2].trim()
            }
        }
    }
}

////////////////////////////////////////////////////////////////////////////////////////
// Main Script
////////////////////////////////////////////////////////////////////////////////////////
node("${params.NODE}") {

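    // ETSI CI infrastructure endpoints and defaults used throughout the pipeline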
    INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
    INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
    APT_PROXY = 'http://172.21.1.1:3142'
    SSH_KEY = '~/hive/cicd_rsa'
    ARCHIVE_LOGS_FLAG = false
    sh 'env'

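    // Strip dots from the branch name (e.g. 'v14.0' -> 'v140') for use in container and image names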
    tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')

    stage('Checkout') {
        checkout scm
    }

    ci_helper = load 'jenkins/ci-pipelines/ci_helper.groovy'

    def upstreamMainJob = params.UPSTREAM_SUFFIX

    // upstream jobs always use merged artifacts
    upstreamMainJob += '-merge'
    containerNamePrefix = "osm-${tag_or_branch}"
    containerName = "${containerNamePrefix}"

    keep_artifacts = false
    if ( JOB_NAME.contains('merge') ) {
        containerName += '-merge'

        // On a merge job, we keep artifacts on smoke success
        keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
    }
    containerName += "-${BUILD_NUMBER}"

    server_id = null
    http_server_name = null
    devopstempdir = null
    useCharmedInstaller = params.INSTALLER.equalsIgnoreCase('charmed')

    try {
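        // Map of package name -> artifact URL, filled while copying the stage 2 artifacts and
        // later passed to 'docker build' as build arguments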
        builtModules = [:]
///////////////////////////////////////////////////////////////////////////////////////
// Fetch stage 2 .deb artifacts
///////////////////////////////////////////////////////////////////////////////////////
        stage('Copy Artifacts') {
            // cleanup any previous repo
            sh "tree -fD repo || exit 0"
            sh 'rm -rvf repo'
            sh "tree -fD repo && lsof repo || exit 0"
            dir('repo') {
                packageList = []
                dir("${RELEASE}") {
                    RELEASE_DIR = sh(returnStdout: true, script: 'pwd').trim()

                    // Check whether an upstream artifact from a specific build number has been requested.
                    // This is the case for a merge build whose upstream merge build is not yet complete
                    // (it is not yet deemed a successful build): the upstream job calls this downstream
                    // job with its own build artifact.
                    def upstreamComponent = ''
                    if (params.UPSTREAM_JOB_NAME) {
                        println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
                        lock('Artifactory') {
                            step ([$class: 'CopyArtifact',
                                projectName: "${params.UPSTREAM_JOB_NAME}",
                                selector: [$class: 'SpecificBuildSelector',
                                buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
                                ])

                            upstreamComponent = ci_helper.get_mdg_from_project(
                                ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
                            def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
                            dir("$upstreamComponent") {
                                // the upstream job name carries the branch as a suffix (project/branch); keep only the project
                                project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
                                packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
                                    upstreamComponent,
                                    GERRIT_BRANCH,
                                    "${project_without_branch} :: ${GERRIT_BRANCH}",
                                    buildNumber)

                                packageList.addAll(packages)
                                println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
                            }
                        } // lock artifactory
                    }

                    parallelSteps = [:]
                    list = ['RO', 'osmclient', 'IM', 'devops', 'MON', 'N2VC', 'NBI',
                            'common', 'LCM', 'POL', 'NG-UI', 'NG-SA', 'PLA', 'tests']
                    if (upstreamComponent.length() > 0) {
                        println("Skipping upstream fetch of ${upstreamComponent}")
                        list.remove(upstreamComponent)
                    }
                    for (buildStep in list) {
                        def component = buildStep
                        parallelSteps[component] = {
                            dir("$component") {
                                println("Fetching artifact for ${component}")
                                step([$class: 'CopyArtifact',
                                       projectName: "${component}${upstreamMainJob}/${GERRIT_BRANCH}"])

                                // grab the archives from the stage_2 builds
                                // (ie. this will be the artifacts stored based on a merge)
                                packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
                                    component,
                                    GERRIT_BRANCH,
                                    "${component}${upstreamMainJob} :: ${GERRIT_BRANCH}",
                                    ci_helper.get_env_value('build.env', 'BUILD_NUMBER'))
                                packageList.addAll(packages)
                                println("Fetched ${component}: ${packages}")
                                sh 'rm -rf dists'
                            }
                        }
                    }
                    lock('Artifactory') {
                        parallel parallelSteps
                    }

///////////////////////////////////////////////////////////////////////////////////////
// Create Devops APT repository
///////////////////////////////////////////////////////////////////////////////////////
                    sh 'mkdir -p pool'
                    for (component in [ 'devops', 'IM', 'osmclient' ]) {
                        sh "ls -al ${component}/pool/"
                        sh "cp -r ${component}/pool/* pool/"
                        sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
                        sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
                        sh("""apt-ftparchive packages pool/${component} \
                           > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages""")
                        sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
                    }

                    // create and sign the release file
                    sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
                    sh("""gpg --yes -abs -u ${GPG_KEY_NAME} \
                       -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release""")

                    // copy the public key into the release folder
                    // this pulls the key from the home dir of the current user (jenkins)
                    sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
                    sh "cp ~/${REPO_KEY_NAME} ."
                }

                // start an apache server to serve up the packages
                http_server_name = "${containerName}-apache"

                pwd = sh(returnStdout: true, script: 'pwd').trim()
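                // Pick a free TCP port for the local repository by binding to port 0 and reading back the assigned port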
                repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
                               'print(s.getsockname()[1]); s.close()\');',
                               returnStdout: true).trim()
                internal_docker_http_server_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
                NODE_IP_ADDRESS = sh(returnStdout: true, script:
                    "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
                ci_helper.check_status_http_server(NODE_IP_ADDRESS, repo_port)
            }

            sh "tree -fD repo"

            // Unpack the devops package into a temporary location so that the upstream version is used if it was part of a patch
            osm_devops_dpkg = sh(returnStdout: true, script: 'find ./repo/release/pool/ -name osm-devops*.deb').trim()
            devopstempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
            println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
            sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
            OSM_DEVOPS = "${devopstempdir}/usr/share/osm-devops"
            // Convert URLs from stage 2 packages to arguments that can be passed to docker build
            for (remotePath in packageList) {
                packageName = remotePath[remotePath.lastIndexOf('/') + 1 .. -1]
                packageName = packageName[0 .. packageName.indexOf('_') - 1]
                builtModules[packageName] = remotePath
            }
        }

///////////////////////////////////////////////////////////////////////////////////////
// Build docker containers
///////////////////////////////////////////////////////////////////////////////////////
        dir(OSM_DEVOPS) {
            Map remote = [:]
            error = null
            if ( params.DO_BUILD ) {
                withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
                                usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
                    sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
                }
                datetime = sh(returnStdout: true, script: 'date +%Y-%m-%d:%H:%M:%S').trim()
                moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}"
                for (packageName in builtModules.keySet()) {
                    envName = packageName.replaceAll('-', '_').toUpperCase() + '_URL'
                    moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
                }
                dir('docker') {
                    stage('Build') {
                        containerList = sh(returnStdout: true, script:
                            "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
                        containerList = Arrays.asList(containerList.split('\n'))
                        print(containerList)
                        parallelSteps = [:]
                        for (buildStep in containerList) {
                            def module = buildStep
                            def moduleName = buildStep.toLowerCase()
                            def moduleTag = containerName
                            parallelSteps[module] = {
                                dir("$module") {
                                    sh("""docker build --build-arg APT_PROXY=${APT_PROXY} \
                                    -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} .""")
                                    println("Tagging ${moduleName}:${moduleTag}")
                                    sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
                                    ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
                                    sh("""docker push \
                                    ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
                                }
                            }
                        }
                        parallel parallelSteps
                    }
                }
            } // if (params.DO_BUILD)

            if (params.DO_INSTALL) {
///////////////////////////////////////////////////////////////////////////////////////
// Launch VM
///////////////////////////////////////////////////////////////////////////////////////
                stage('Spawn Remote VM') {
                    println('Launching new VM')
                    output = sh(returnStdout: true, script: """#!/bin/sh -e
                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                        openstack server create --flavor ${OPENSTACK_OSM_FLAVOR} \
                                                --image ${OPENSTACK_BASE_IMAGE} \
                                                --key-name CICD \
                                                --property build_url="${BUILD_URL}" \
                                                --nic net-id=osm-ext \
                                                ${containerName}
                    """).trim()

                    server_id = get_value('id', output)

                    if (server_id == null) {
                        println('VM launch output: ')
                        println(output)
                        throw new Exception('VM Launch failed')
                    }
                    println("Target VM is ${server_id}, waiting for IP address to be assigned")

                    IP_ADDRESS = ''

                    while (IP_ADDRESS == '') {
                        output = sh(returnStdout: true, script: """#!/bin/sh -e
                            for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                            openstack server show ${server_id}
                        """).trim()
                        IP_ADDRESS = get_value('addresses', output)
                    }
                    IP_ADDRESS = IP_ADDRESS.split('=')[1]
                    println("Waiting for VM at ${IP_ADDRESS} to be reachable")

                    alive = false
                    timeout(time: 1, unit: 'MINUTES') {
                        while (!alive) {
                            output = sh(
                                returnStatus: true,
                                script: "ssh -T -i ${SSH_KEY} " +
                                    "-o StrictHostKeyChecking=no " +
                                    "-o UserKnownHostsFile=/dev/null " +
                                    "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
                            alive = (output == 0)
                        }
                    }
                    println('VM is ready and accepting ssh connections')

                    //////////////////////////////////////////////////////////////////////////////////////////////
                    println('Applying sshd config workaround for Ubuntu 22.04 and old jsch client in Jenkins...')

                    sh( returnStatus: true,
                        script: "ssh -T -i ${SSH_KEY} " +
                            "-o StrictHostKeyChecking=no " +
                            "-o UserKnownHostsFile=/dev/null " +
                            "ubuntu@${IP_ADDRESS} " +
                            "'echo HostKeyAlgorithms +ssh-rsa | sudo tee -a /etc/ssh/sshd_config'")
                    sh( returnStatus: true,
                        script: "ssh -T -i ${SSH_KEY} " +
                            "-o StrictHostKeyChecking=no " +
                            "-o UserKnownHostsFile=/dev/null " +
                            "ubuntu@${IP_ADDRESS} " +
                            "'echo PubkeyAcceptedKeyTypes +ssh-rsa | sudo tee -a /etc/ssh/sshd_config'")
                    sh( returnStatus: true,
                        script: "ssh -T -i ${SSH_KEY} " +
                            "-o StrictHostKeyChecking=no " +
                            "-o UserKnownHostsFile=/dev/null " +
                            "ubuntu@${IP_ADDRESS} " +
                            "'sudo systemctl restart sshd'")
                    //////////////////////////////////////////////////////////////////////////////////////////////

                } // stage("Spawn Remote VM")

///////////////////////////////////////////////////////////////////////////////////////
// Checks before installation
///////////////////////////////////////////////////////////////////////////////////////
                stage('Checks before installation') {
                    remote = [
                        name: containerName,
                        host: IP_ADDRESS,
                        user: 'ubuntu',
                        identityFile: SSH_KEY,
                        allowAnyHosts: true,
                        logLevel: 'INFO',
                        pty: true
                    ]

                    // Ensure the VM is ready
                    sshCommand remote: remote, command: 'cloud-init status --wait'
                    // Force time sync to avoid clock drift and invalid certificates
                    sshCommand remote: remote, command: 'sudo apt-get -y update'
                    sshCommand remote: remote, command: 'sudo apt-get -y install chrony'
                    sshCommand remote: remote, command: 'sudo service chrony stop'
                    sshCommand remote: remote, command: 'sudo chronyd -vq'
                    sshCommand remote: remote, command: 'sudo service chrony start'

                } // stage("Checks before installation")
///////////////////////////////////////////////////////////////////////////////////////
// Installation
///////////////////////////////////////////////////////////////////////////////////////
                stage('Install') {
                    commit_id = ''
                    repo_distro = ''
                    repo_key_name = ''
                    release = ''

                    if (params.COMMIT_ID) {
                        commit_id = "-b ${params.COMMIT_ID}"
                    }
                    if (params.REPO_DISTRO) {
                        repo_distro = "-r ${params.REPO_DISTRO}"
                    }
                    if (params.REPO_KEY_NAME) {
                        repo_key_name = "-k ${params.REPO_KEY_NAME}"
                    }
                    if (params.RELEASE) {
                        release = "-R ${params.RELEASE}"
                    }
                    if (params.REPOSITORY_BASE) {
                        repo_base_url = "-u ${params.REPOSITORY_BASE}"
                    } else {
                        repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
                    }

                    remote = [
                        name: containerName,
                        host: IP_ADDRESS,
                        user: 'ubuntu',
                        identityFile: SSH_KEY,
                        allowAnyHosts: true,
                        logLevel: 'INFO',
                        pty: true
                    ]

                    sshCommand remote: remote, command: '''
                        wget https://osm-download.etsi.org/ftp/osm-14.0-fourteen/install_osm.sh
                        chmod +x ./install_osm.sh
                        sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
                    '''

                    Map gitlabCredentialsMap = [$class: 'UsernamePasswordMultiBinding',
                                                credentialsId: 'gitlab-registry',
                                                usernameVariable: 'USERNAME',
                                                passwordVariable: 'PASSWORD']
                    if (useCharmedInstaller) {
                        // Use local proxy for docker hub
                        sshCommand remote: remote, command: '''
                            sudo snap install microk8s --classic --channel=1.19/stable
                            sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" \
                            /var/snap/microk8s/current/args/containerd-template.toml
                            sudo systemctl restart snap.microk8s.daemon-containerd.service
                            sudo snap alias microk8s.kubectl kubectl
                        '''

                        withCredentials([gitlabCredentialsMap]) {
                            sshCommand remote: remote, command: """
                                ./install_osm.sh -y \
                                    ${repo_base_url} \
                                    ${repo_key_name} \
                                    ${release} -r unstable \
                                    --charmed \
                                    --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
                                    --tag ${containerName}
                            """
                        }
                        prometheusHostname = "prometheus.${IP_ADDRESS}.nip.io"
                        prometheusPort = 80
                        osmHostname = "nbi.${IP_ADDRESS}.nip.io:443"
                    } else {
                        // Run the default (k8s-based) installer, pointing it at the internal docker registry and docker proxy
                        osm_installation_options = ""
                        if (params.TRY_OLD_SERVICE_ASSURANCE) {
                            osm_installation_options = "--old-sa"
                        }
                        withCredentials([gitlabCredentialsMap]) {
                            sshCommand remote: remote, command: """
                                ./install_osm.sh -y \
                                    ${repo_base_url} \
                                    ${repo_key_name} \
                                    ${release} -r unstable \
                                    -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
                                    -p ${INTERNAL_DOCKER_PROXY} \
                                    -t ${containerName} \
                                    ${osm_installation_options}
                            """
                        }
                        prometheusHostname = IP_ADDRESS
                        prometheusPort = 9091
                        osmHostname = IP_ADDRESS
                    }
                } // stage("Install")
///////////////////////////////////////////////////////////////////////////////////////
// Health check of installed OSM in remote vm
///////////////////////////////////////////////////////////////////////////////////////
                stage('OSM Health') {
                    // if this point is reached, logs should be archived
                    ARCHIVE_LOGS_FLAG = true
                    stackName = 'osm'
                    sshCommand remote: remote, command: """
                        /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
                    """
                } // stage("OSM Health")
            } // if ( params.DO_INSTALL )


///////////////////////////////////////////////////////////////////////////////////////
// Execute Robot tests
///////////////////////////////////////////////////////////////////////////////////////
            stage_archive = false
            if ( params.DO_ROBOT ) {
                try {
                    stage('System Integration Test') {
                        if (useCharmedInstaller) {
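                            // The charmed installer exposes services as <name>.<IP>.nip.io, so map
                            // those hostnames to the VM address for the tests container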
                            tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
                            sh(script: "touch ${tempdir}/hosts")
                            hostfile = "${tempdir}/hosts"
                            sh """cat << EOF > ${hostfile}
127.0.0.1           localhost
${remote.host}      prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
EOF"""
                        } else {
                            hostfile = null
                        }

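                        // Retrieve the Juju password reported by 'juju gui' so the Robot tests can use it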
                        jujuPassword = sshCommand remote: remote, command: '''
                            echo `juju gui 2>&1 | grep password | cut -d: -f2`
                        '''

                        run_robot_systest(
                            containerName,
                            params.ROBOT_TAG_NAME,
                            osmHostname,
                            prometheusHostname,
                            prometheusPort,
                            params.ROBOT_VIM,
                            params.ROBOT_PORT_MAPPING_VIM,
                            params.KUBECONFIG,
                            params.CLOUDS,
                            hostfile,
                            jujuPassword,
                            SSH_KEY,
                            params.ROBOT_PASS_THRESHOLD,
                            params.ROBOT_UNSTABLE_THRESHOLD
                        )
                    } // stage("System Integration Test")
                } finally {
                    stage('After System Integration test') {
                        if (currentBuild.result != 'FAILURE') {
                            stage_archive = keep_artifacts
                        } else {
                            println('System test failed, throwing error')
                            error = new Exception('System test failed')
                            currentBuild.result = 'FAILURE'
                            throw error
                        }
                    }
                }
            } // if ( params.DO_ROBOT )

            if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive) {
                stage('Archive') {
                    // Archive the tested repo
                    dir("${RELEASE_DIR}") {
                        ci_helper.archive(params.ARTIFACTORY_SERVER, RELEASE, GERRIT_BRANCH, 'tested')
                    }
                    if (params.DO_DOCKERPUSH) {
                        stage('Publish to Dockerhub') {
                            parallelSteps = [:]
                            for (buildStep in containerList) {
                                def module = buildStep
                                def moduleName = buildStep.toLowerCase()
                                def dockerTag = params.DOCKER_TAG
                                def moduleTag = containerName

                                parallelSteps[module] = {
                                    dir("$module") {
                                        sh("docker pull ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}")
                                        sh("""docker tag ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag} \
                                           opensourcemano/${moduleName}:${dockerTag}""")
                                        sh "docker push opensourcemano/${moduleName}:${dockerTag}"
                                    }
                                }
                            }
                            parallel parallelSteps
                        }
                        stage('Snap promotion') {
                            withCredentials([string(credentialsId: 'Snapstore', variable: 'SNAPCRAFT_STORE_CREDENTIALS')]) {
                                snaps = ['osmclient']
                                for (snap in snaps) {
                                    channel = 'latest/'
                                    if (BRANCH_NAME.startsWith('v')) {
                                        channel = BRANCH_NAME.substring(1) + '/'
                                    } else if (BRANCH_NAME != 'master') {
                                        channel += '/' + BRANCH_NAME.replaceAll('/', '-')
                                    }
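                                    // Find the newest revisions published to the edge and beta channels;
                                    // promote edge to beta only when they differ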
                                    track = channel + 'edge\\*'
                                    edge_rev = sh(returnStdout: true,
                                        script: "snapcraft revisions $snap | " +
                                        "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
                                    track = channel + 'beta\\*'
                                    beta_rev = sh(returnStdout: true,
                                        script: "snapcraft revisions $snap | " +
                                        "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()

                                    print "Edge: $edge_rev, Beta: $beta_rev"

                                    if (edge_rev != beta_rev) {
                                        print "Promoting $edge_rev to beta in place of $beta_rev"
                                        beta_track = channel + 'beta'
                                        sh "snapcraft release $snap $edge_rev $beta_track"
                                    }
                                }
                            }
                        } // stage('Snap promotion')
                        stage('Charm promotion') {
                            charms = [
                                [name: 'osm',                       base: "22.04"],
                                [name: 'osm-ha',                    base: "22.04"],
                                [name: 'mongodb-exporter-k8s',      base: "20.04"],
                                [name: 'mysqld-exporter-k8s',       base: "20.04"],
                                [name: 'osm-grafana',               base: "20.04"],
                                [name: 'osm-keystone',              base: "22.04"],
                                [name: 'osm-lcm',                   base: "22.04"],
                                [name: 'osm-mon',                   base: "22.04"],
                                [name: 'osm-nbi',                   base: "22.04"],
                                [name: 'osm-ng-ui',                 base: "22.04"],
                                [name: 'osm-pol',                   base: "22.04"],
                                [name: 'osm-ro',                    base: "22.04"],
                                [name: 'osm-prometheus',            base: "20.04"],
                                [name: 'osm-update-db-operator',    base: "20.04"],
                                [name: 'osm-vca-integrator',        base: "22.04"],
                            ]
                            for (entry in charms) {
                                charm = entry.name
                                base = entry.base
                                channel = 'latest'
                                if (BRANCH_NAME.startsWith('v')) {
                                    channel = BRANCH_NAME.substring(1)
                                } else if (BRANCH_NAME != 'master') {
                                    channel += '/' + BRANCH_NAME.replaceAll('/', '-')
                                }

                                withCredentials([string(credentialsId: 'Charmstore', variable: 'CHARMCRAFT_AUTH')]) {
                                    sh "charmcraft status $charm --format json > ${charm}.json"
                                    isCharm = sh(returnStdout: true, script: "grep architecture ${charm}.json | wc -l 2>/dev/null").trim() as int
                                    resourceArgument = ""
                                    if (isCharm) {
                                        jqScriptEdge = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"$base\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .version'|head -1"
                                        jqScriptBeta = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"$base\") | .releases[] | select(.channel==\"$channel/beta\")| .version'|head -1"
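                                        // Collect up to five resources attached to the edge/merged release
                                        // so they can be re-attached when promoting to beta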
                                        index=0
                                        while (index < 5) {
                                            resourceNameScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"$base\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .resources[$index].name'|head -1"
                                            resourceRevsScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"$base\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .resources[$index].revision'|head -1"
                                            resourceName = sh(returnStdout: true, script: resourceNameScript).trim()
                                            resourceRevs = sh(returnStdout: true, script: resourceRevsScript).trim()
                                            if (resourceName != "null") {
                                                resourceArgument += " --resource ${resourceName}:${resourceRevs}"
                                            } else {
                                                break
                                            }
                                            index++
                                        }
                                    } else {
                                        jqScriptEdge = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[].releases[] | select(.channel==\"$channel/edge/merged\")| .version'|head -1"
                                        jqScriptBeta = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[].releases[] | select(.channel==\"$channel/beta\")| .version'|head -1"
                                    }
                                    // edge/merged is used in place of /edge as 10.1.0 LTS uses latest/edge
                                    edge_rev = sh(returnStdout: true, script: jqScriptEdge).trim()
                                    beta_rev = sh(returnStdout: true, script: jqScriptBeta).trim()
                                    try { edge_rev = edge_rev as int } catch (NumberFormatException nfe) {edge_rev = 0}
                                    try { beta_rev = beta_rev as int } catch (NumberFormatException nfe) {beta_rev = 0}

                                    print "Edge: $edge_rev, Beta: $beta_rev $resourceArgument"

                                    if (edge_rev > beta_rev) {
                                        print "Promoting $edge_rev to beta in place of $beta_rev"
                                        beta_track = channel + 'beta'
                                        sh "charmcraft release ${charm} --revision=${edge_rev} ${resourceArgument} --channel=${channel}/beta"
                                    }

                                }
                            }
                        } // stage('Charm promotion')
                    } // if (params.DO_DOCKERPUSH)
                } // stage('Archive')
            } // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
        } // dir(OSM_DEVOPS)
    } finally {
        // stage('Debug') {
        //     sleep 900
        // }
        stage('Archive Container Logs') {
            if ( ARCHIVE_LOGS_FLAG ) {
                try {
                    // Archive logs
                    remote = [
                        name: containerName,
                        host: IP_ADDRESS,
                        user: 'ubuntu',
                        identityFile: SSH_KEY,
                        allowAnyHosts: true,
                        logLevel: 'INFO',
                        pty: true
                    ]
                    println('Archiving container logs')
                    archive_logs(remote)
                } catch (Exception e) {
                    println('Error fetching logs: ' + e.getMessage())
                }
            } // end if ( ARCHIVE_LOGS_FLAG )
        }
        stage('Cleanup') {
            if ( params.DO_INSTALL && server_id != null) {
                delete_vm = true
                if (error && params.SAVE_CONTAINER_ON_FAIL ) {
                    delete_vm = false
                }
                if (!error && params.SAVE_CONTAINER_ON_PASS ) {
                    delete_vm = false
                }

                if ( delete_vm ) {
                    println("Deleting VM: $server_id")
                    sh """#!/bin/sh -e
                        for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
                        openstack server delete ${server_id}
                    """
                } else {
                    println("Saved VM $server_id in ETSI VIM")
                }
            }
            if ( http_server_name != null ) {
                sh "docker stop ${http_server_name} || true"
                sh "docker rm ${http_server_name} || true"
            }

            if ( devopstempdir != null ) {
                sh "rm -rf ${devopstempdir}"
            }
        }
    }
}