Update Jenkins stage 3 to allow setting the VM flavor for OSM installation and tests
[osm/devops.git] / jenkins / ci-pipelines / ci_stage_3.groovy
1 /* Copyright ETSI Contributors and Others
2  *
3  * All Rights Reserved.
4  *
5  *   Licensed under the Apache License, Version 2.0 (the "License"); you may
6  *   not use this file except in compliance with the License. You may obtain
7  *   a copy of the License at
8  *
9  *        http://www.apache.org/licenses/LICENSE-2.0
10  *
11  *   Unless required by applicable law or agreed to in writing, software
12  *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13  *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14  *   License for the specific language governing permissions and limitations
15  *   under the License.
16  */
17
18 properties([
19     parameters([
20         string(defaultValue: env.GERRIT_BRANCH, description: '', name: 'GERRIT_BRANCH'),
21         string(defaultValue: 'system', description: '', name: 'NODE'),
22         string(defaultValue: '', description: '', name: 'BUILD_FROM_SOURCE'),
23         string(defaultValue: 'unstable', description: '', name: 'REPO_DISTRO'),
24         string(defaultValue: '', description: '', name: 'COMMIT_ID'),
25         string(defaultValue: '-stage_2', description: '', name: 'UPSTREAM_SUFFIX'),
26         string(defaultValue: 'pubkey.asc', description: '', name: 'REPO_KEY_NAME'),
27         string(defaultValue: 'release', description: '', name: 'RELEASE'),
28         string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
29         string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
30         string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
31         string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
32         string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
33         string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
34         string(defaultValue: 'ubuntu22.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
35         string(defaultValue: 'osm.sanity', description: '', name: 'OPENSTACK_OSM_FLAVOR'),
36         booleanParam(defaultValue: false, description: '', name: 'TRY_OLD_SERVICE_ASSURANCE'),
37         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
38         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
39         booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
40         booleanParam(defaultValue: true, description: '',  name: 'DO_BUILD'),
41         booleanParam(defaultValue: true, description: '', name: 'DO_INSTALL'),
42         booleanParam(defaultValue: true, description: '', name: 'DO_DOCKERPUSH'),
43         booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
44         string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
45         booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
46         string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options',
47                name: 'ROBOT_TAG_NAME'),
48         string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
49         string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml',
50                description: 'Port mapping file for SDN assist in ETSI VIM',
51                name: 'ROBOT_PORT_MAPPING_VIM'),
52         string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
53         string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
54         string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
55         string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed',
56                name: 'ROBOT_PASS_THRESHOLD'),
57         string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable ' +
58                '(if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
59     ])
60 ])
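// The two OpenStack parameters above (OPENSTACK_BASE_IMAGE and OPENSTACK_OSM_FLAVOR) control the
// image and flavor of the VM spawned for installation and tests. A minimal sketch of how an
// upstream job could override them when triggering this pipeline; the job name 'osm-stage_3' and
// the flavor value 'osm.regression' are illustrative assumptions, not values defined in this file:
//
//     build(job: 'osm-stage_3', parameters: [
//         string(name: 'OPENSTACK_OSM_FLAVOR', value: 'osm.regression'),
//         string(name: 'OPENSTACK_BASE_IMAGE', value: 'ubuntu22.04'),
//     ])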
61
62 ////////////////////////////////////////////////////////////////////////////////////////
63 // Helper Functions
64 ////////////////////////////////////////////////////////////////////////////////////////
65 void run_robot_systest(String tagName,
66                        String testName,
67                        String osmHostname,
68                        String prometheusHostname,
69                        Integer prometheusPort=null,
70                        String envfile=null,
71                        String portmappingfile=null,
72                        String kubeconfig=null,
73                        String clouds=null,
74                        String hostfile=null,
75                        String jujuPassword=null,
76                        String osmRSAfile=null,
77                        String passThreshold='0.0',
78                        String unstableThreshold='0.0') {
79     tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
80     String environmentFile = ''
81     if (envfile) {
82         environmentFile = envfile
83     } else {
84         sh(script: "touch ${tempdir}/env")
85         environmentFile = "${tempdir}/env"
86     }
87     PROMETHEUS_PORT_VAR = ''
88     if (prometheusPort != null) {
89         PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT=${prometheusPort}"
90     }
91     hostfilemount = ''
92     if (hostfile) {
93         hostfilemount = "-v ${hostfile}:/etc/hosts"
94     }
95
96     JUJU_PASSWORD_VAR = ''
97     if (jujuPassword != null) {
98         JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD=${jujuPassword}"
99     }
100
101     try {
102         sh("""docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} \
103            ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${environmentFile} \
104            -v ${clouds}:/etc/openstack/clouds.yaml \
105            -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports \
106            -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} \
107            -c -t ${testName}""")
108     } finally {
109         sh("cp ${tempdir}/*.xml .")
110         sh("cp ${tempdir}/*.html .")
111         outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
112             println("Current directory is ${outputDirectory}")
113         step([
114             $class : 'RobotPublisher',
115             outputPath : "${outputDirectory}",
116             outputFileName : '*.xml',
117             disableArchiveOutput : false,
118             reportFileName : 'report.html',
119             logFileName : 'log.html',
120             passThreshold : passThreshold,
121             unstableThreshold: unstableThreshold,
122             otherFiles : '*.png',
123         ])
124     }
125 }
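// For illustration only: with the default parameter values and assumed tagName='osm-v140-merge-57',
// testName='sanity', osmHostname and prometheusHostname both '172.21.248.10' and prometheusPort=9091,
// the docker invocation above expands to roughly the following (the IP address and the mktemp
// directory are placeholders, and the JUJU_PASSWORD variable is omitted):
//
//     docker run --env OSM_HOSTNAME=172.21.248.10 --env PROMETHEUS_HOSTNAME=172.21.248.10 \
//         --env PROMETHEUS_PORT=9091 --env-file /home/jenkins/hive/robot-systest.cfg \
//         -v /home/jenkins/hive/clouds.yaml:/etc/openstack/clouds.yaml \
//         -v ~/hive/cicd_rsa:/root/osm_id_rsa -v /home/jenkins/hive/kubeconfig.yaml:/root/.kube/config \
//         -v /tmp/tmp.XXXXXX:/robot-systest/reports \
//         -v /home/jenkins/hive/port-mapping-etsi-vim.yaml:/root/port-mapping.yaml \
//         opensourcemano/tests:osm-v140-merge-57 -c -t sanity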
126
127 void archive_logs(Map remote) {
128
129     sshCommand remote: remote, command: '''mkdir -p logs/dags'''
130     if (useCharmedInstaller) {
131         sshCommand remote: remote, command: '''
132             for pod in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
133                 logfile=`echo $pod | cut -d- -f1`
134                 echo "Extracting log for $logfile"
135                 kubectl logs -n osm $pod --timestamps=true 2>&1 > logs/$logfile.log
136             done
137         '''
138     } else {
139         sshCommand remote: remote, command: '''
140             for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
141                 echo "Extracting log for $deployment"
142                 kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 \
143                 > logs/$deployment.log
144             done
145         '''
146         sshCommand remote: remote, command: '''
147             for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
148                 echo "Extracting log for $statefulset"
149                 kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 \
150                 > logs/$statefulset.log
151             done
152         '''
153         sshCommand remote: remote, command: '''
154             schedulerPod="$(kubectl get pods -n osm | grep airflow-scheduler| awk '{print $1; exit}')"; \
155             echo "Extracting Airflow DAG logs from pod ${schedulerPod}"; \
156             kubectl cp -n osm ${schedulerPod}:/opt/airflow/logs/scheduler/latest/dags logs/dags -c scheduler
157         '''
158     }
159
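    // Remove any logs left over from a previous run in this workspace before pulling the fresh
    // ones from the VM; sshGet below copies the remote 'logs' directory into the current directory.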
160     sh 'rm -rf logs'
161     sshCommand remote: remote, command: '''ls -al logs'''
162     sshGet remote: remote, from: 'logs', into: '.', override: true
163     archiveArtifacts artifacts: 'logs/*.log, logs/dags/*.log'
164 }
165
166 String get_value(String key, String output) {
167     for (String line : output.split( '\n' )) {
168         data = line.split( '\\|' )
169         if (data.length > 1) {
170             if ( data[1].trim() == key ) {
171                 return data[2].trim()
172             }
173         }
174     }
175 }
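// get_value() parses the ASCII table printed by the openstack CLI. A sketch with an assumed,
// abbreviated 'openstack server show' output:
//
//     String sample = '| id        | 9a1f0c2e |\n| addresses | osm-ext=172.21.248.10 |'
//     assert get_value('addresses', sample) == 'osm-ext=172.21.248.10'
//
// When the key is not found, the function falls through and implicitly returns null, which the
// callers below check for.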
176
177 ////////////////////////////////////////////////////////////////////////////////////////
178 // Main Script
179 ////////////////////////////////////////////////////////////////////////////////////////
180 node("${params.NODE}") {
181
182     INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
183     INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
184     APT_PROXY = 'http://172.21.1.1:3142'
185     SSH_KEY = '~/hive/cicd_rsa'
186     ARCHIVE_LOGS_FLAG = false
187     sh 'env'
188
189     tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')
190
191     stage('Checkout') {
192         checkout scm
193     }
194
195     ci_helper = load 'jenkins/ci-pipelines/ci_helper.groovy'
196
197     def upstreamMainJob = params.UPSTREAM_SUFFIX
198
199     // upstream jobs always use merged artifacts
200     upstreamMainJob += '-merge'
201     containerNamePrefix = "osm-${tag_or_branch}"
202     containerName = "${containerNamePrefix}"
203
204     keep_artifacts = false
205     if ( JOB_NAME.contains('merge') ) {
206         containerName += '-merge'
207
208         // On a merge job, we keep artifacts on smoke success
209         keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
210     }
211     containerName += "-${BUILD_NUMBER}"
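    // For illustration: with assumed values GERRIT_BRANCH='v14.0', a merge job and BUILD_NUMBER=57,
    // tag_or_branch becomes 'v140' and containerName becomes 'osm-v140-merge-57'; this name is reused
    // below as the docker image tag, the OpenStack VM name and the temporary apache container prefix.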
212
213     server_id = null
214     http_server_name = null
215     devopstempdir = null
216     useCharmedInstaller = params.INSTALLER.equalsIgnoreCase('charmed')
217
218     try {
219         builtModules = [:]
220 ///////////////////////////////////////////////////////////////////////////////////////
221 // Fetch stage 2 .deb artifacts
222 ///////////////////////////////////////////////////////////////////////////////////////
223         stage('Copy Artifacts') {
224             // Clean up any previous repo
225             sh "tree -fD repo || exit 0"
226             sh 'rm -rvf repo'
227             sh "tree -fD repo && lsof repo || exit 0"
228             dir('repo') {
229                 packageList = []
230                 dir("${RELEASE}") {
231                     RELEASE_DIR = sh(returnStdout:true,  script: 'pwd').trim()
232
233                     // Check if an upstream artifact based on a specific build number has been requested.
234                     // This is the case for a merge build whose upstream merge build is not yet complete
235                     // (it is not yet deemed a successful build). The upstream job calls this downstream
236                     // job with its build artifact.
237                     def upstreamComponent = ''
238                     if (params.UPSTREAM_JOB_NAME) {
239                         println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
240                         lock('Artifactory') {
241                             step ([$class: 'CopyArtifact',
242                                 projectName: "${params.UPSTREAM_JOB_NAME}",
243                                 selector: [$class: 'SpecificBuildSelector',
244                                 buildNumber: "${params.UPSTREAM_JOB_NUMBER}"]
245                                 ])
246
247                             upstreamComponent = ci_helper.get_mdg_from_project(
248                                 ci_helper.get_env_value('build.env','GERRIT_PROJECT'))
249                             def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
250                             dir("$upstreamComponent") {
251                                 // The upstream job name contains a suffix with the project name, which needs to be stripped off
252                                 project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
253                                 packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
254                                     upstreamComponent,
255                                     GERRIT_BRANCH,
256                                     "${project_without_branch} :: ${GERRIT_BRANCH}",
257                                     buildNumber)
258
259                                 packageList.addAll(packages)
260                                 println("Fetched pre-merge ${params.UPSTREAM_JOB_NAME}: ${packages}")
261                             }
262                         } // lock artifactory
263                     }
264
265                     parallelSteps = [:]
266                     list = ['RO', 'osmclient', 'IM', 'devops', 'MON', 'N2VC', 'NBI',
267                             'common', 'LCM', 'POL', 'NG-UI', 'NG-SA', 'PLA', 'tests']
268                     if (upstreamComponent.length() > 0) {
269                         println("Skipping upstream fetch of ${upstreamComponent}")
270                         list.remove(upstreamComponent)
271                     }
272                     for (buildStep in list) {
273                         def component = buildStep
274                         parallelSteps[component] = {
275                             dir("$component") {
276                                 println("Fetching artifact for ${component}")
277                                 step([$class: 'CopyArtifact',
278                                        projectName: "${component}${upstreamMainJob}/${GERRIT_BRANCH}"])
279
280                                 // Grab the archives from the stage_2 builds
281                                 // (i.e. these will be the artifacts stored from a merge)
282                                 packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
283                                     component,
284                                     GERRIT_BRANCH,
285                                     "${component}${upstreamMainJob} :: ${GERRIT_BRANCH}",
286                                     ci_helper.get_env_value('build.env', 'BUILD_NUMBER'))
287                                 packageList.addAll(packages)
288                                 println("Fetched ${component}: ${packages}")
289                                 sh 'rm -rf dists'
290                             }
291                         }
292                     }
293                     lock('Artifactory') {
294                         parallel parallelSteps
295                     }
296
297 ///////////////////////////////////////////////////////////////////////////////////////
298 // Create Devops APT repository
299 ///////////////////////////////////////////////////////////////////////////////////////
300                     sh 'mkdir -p pool'
301                     for (component in [ 'devops', 'IM', 'osmclient' ]) {
302                         sh "ls -al ${component}/pool/"
303                         sh "cp -r ${component}/pool/* pool/"
304                         sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
305                         sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
306                         sh("""apt-ftparchive packages pool/${component} \
307                            > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages""")
308                         sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
309                     }
310
311                     // create and sign the release file
312                     sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
313                     sh("""gpg --yes -abs -u ${GPG_KEY_NAME} \
314                        -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release""")
315
316                     // copy the public key into the release folder
317                     // this pulls the key from the home dir of the current user (jenkins)
318                     sh "cp ~/${REPO_KEY_NAME} 'OSM ETSI Release Key.gpg'"
319                     sh "cp ~/${REPO_KEY_NAME} ."
320                 }
321
322                 // start an apache server to serve up the packages
323                 http_server_name = "${containerName}-apache"
324
325                 pwd = sh(returnStdout:true,  script: 'pwd').trim()
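                // Ask the kernel for a free TCP port by binding a socket to port 0 and reading back
                // the assigned port; the temporary apache repo server is then published on that port.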
326                 repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
327                                'print(s.getsockname()[1]); s.close()\');',
328                                returnStdout: true).trim()
329                 internal_docker_http_server_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
330                 NODE_IP_ADDRESS = sh(returnStdout: true, script:
331                     "echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
332                 ci_helper.check_status_http_server(NODE_IP_ADDRESS, repo_port)
333             }
334
335             sh "tree -fD repo"
336
337             // Unpack the devops package into a temporary location so that, if devops was part of the patch, the upstream version is used for the docker build step
338             osm_devops_dpkg = sh(returnStdout: true, script: 'find ./repo/release/pool/ -name osm-devops*.deb').trim()
339             devopstempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
340             println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
341             sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
342             OSM_DEVOPS = "${devopstempdir}/usr/share/osm-devops"
343             // Convert URLs from stage 2 packages to arguments that can be passed to docker build
344             for (remotePath in packageList) {
345                 packageName = remotePath[remotePath.lastIndexOf('/') + 1 .. -1]
346                 packageName = packageName[0 .. packageName.indexOf('_') - 1]
347                 builtModules[packageName] = remotePath
348             }
349         }
350
351 ///////////////////////////////////////////////////////////////////////////////////////
352 // Build docker containers
353 ///////////////////////////////////////////////////////////////////////////////////////
354         dir(OSM_DEVOPS) {
355             Map remote = [:]
356             error = null
357             if ( params.DO_BUILD ) {
358                 withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
359                                 usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
360                     sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
361                 }
362                 datetime = sh(returnStdout: true, script: 'date +%Y-%m-%d:%H:%M:%S').trim()
363                 moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}"
364                 for (packageName in builtModules.keySet()) {
365                     envName = packageName.replaceAll('-', '_').toUpperCase() + '_URL'
366                     moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
367                 }
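                // For illustration (assumed artifact path): a stage_2 package URL such as
                //     http://osm.etsi.org/artifactory/.../pool/osm-common_14.0.0-1_all.deb
                // was mapped to builtModules['osm-common'] in the Copy Artifacts stage, and here
                // becomes the docker build argument --build-arg OSM_COMMON_URL=<that URL>.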
368                 dir('docker') {
369                     stage('Build') {
370                         containerList = sh(returnStdout: true, script:
371                             "find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
372                         containerList = Arrays.asList(containerList.split('\n'))
373                         print(containerList)
374                         parallelSteps = [:]
375                         for (buildStep in containerList) {
376                             def module = buildStep
377                             def moduleName = buildStep.toLowerCase()
378                             def moduleTag = containerName
379                             parallelSteps[module] = {
380                                 dir("$module") {
381                                     sh("""docker build --build-arg APT_PROXY=${APT_PROXY} \
382                                     -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} .""")
383                                     println("Tagging ${moduleName}:${moduleTag}")
384                                     sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
385                                     ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
386                                     sh("""docker push \
387                                     ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
388                                 }
389                             }
390                         }
391                         parallel parallelSteps
392                     }
393                 }
394             } // if (params.DO_BUILD)
395
396             if (params.DO_INSTALL) {
397 ///////////////////////////////////////////////////////////////////////////////////////
398 // Launch VM
399 ///////////////////////////////////////////////////////////////////////////////////////
400                 stage('Spawn Remote VM') {
401                     println('Launching new VM')
402                     output = sh(returnStdout: true, script: """#!/bin/sh -e
403                         for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
404                         openstack server create --flavor ${OPENSTACK_OSM_FLAVOR} \
405                                                 --image ${OPENSTACK_BASE_IMAGE} \
406                                                 --key-name CICD \
407                                                 --property build_url="${BUILD_URL}" \
408                                                 --nic net-id=osm-ext \
409                                                 ${containerName}
410                     """).trim()
411
412                     server_id = get_value('id', output)
413
414                     if (server_id == null) {
415                         println('VM launch output: ')
416                         println(output)
417                         throw new Exception('VM Launch failed')
418                     }
419                     println("Target VM is ${server_id}, waiting for IP address to be assigned")
420
421                     IP_ADDRESS = ''
422
423                     while (IP_ADDRESS == '') {
424                         output = sh(returnStdout: true, script: """#!/bin/sh -e
425                             for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
426                             openstack server show ${server_id}
427                         """).trim()
428                         IP_ADDRESS = get_value('addresses', output)
429                     }
430                     IP_ADDRESS = IP_ADDRESS.split('=')[1]
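                    // Assumed format: 'addresses' comes back as 'osm-ext=172.21.248.10', so taking
                    // split('=')[1] leaves the bare IP. A single NIC on the osm-ext network is assumed;
                    // additional networks or addresses would need extra parsing here.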
431                     println("Waiting for VM at ${IP_ADDRESS} to be reachable")
432
433                     alive = false
434                     timeout(time: 1, unit: 'MINUTES') {
435                         while (!alive) {
436                             output = sh(
437                                 returnStatus: true,
438                                 script: "ssh -T -i ${SSH_KEY} " +
439                                     "-o StrictHostKeyChecking=no " +
440                                     "-o UserKnownHostsFile=/dev/null " +
441                                     "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
442                             alive = (output == 0)
443                         }
444                     }
445                     println('VM is ready and accepting ssh connections')
446
447                     //////////////////////////////////////////////////////////////////////////////////////////////
448                     println('Applying sshd config workaround for Ubuntu 22.04 and old jsch client in Jenkins...')
449
450                     sh( returnStatus: true,
451                         script: "ssh -T -i ${SSH_KEY} " +
452                             "-o StrictHostKeyChecking=no " +
453                             "-o UserKnownHostsFile=/dev/null " +
454                             "ubuntu@${IP_ADDRESS} " +
455                             "'echo HostKeyAlgorithms +ssh-rsa | sudo tee -a /etc/ssh/sshd_config'")
456                     sh( returnStatus: true,
457                         script: "ssh -T -i ${SSH_KEY} " +
458                             "-o StrictHostKeyChecking=no " +
459                             "-o UserKnownHostsFile=/dev/null " +
460                             "ubuntu@${IP_ADDRESS} " +
461                             "'echo PubkeyAcceptedKeyTypes +ssh-rsa | sudo tee -a /etc/ssh/sshd_config'")
462                     sh( returnStatus: true,
463                         script: "ssh -T -i ${SSH_KEY} " +
464                             "-o StrictHostKeyChecking=no " +
465                             "-o UserKnownHostsFile=/dev/null " +
466                             "ubuntu@${IP_ADDRESS} " +
467                             "'sudo systemctl restart sshd'")
468                     //////////////////////////////////////////////////////////////////////////////////////////////
469
470                 } // stage("Spawn Remote VM")
471
472 ///////////////////////////////////////////////////////////////////////////////////////
473 // Checks before installation
474 ///////////////////////////////////////////////////////////////////////////////////////
475                 stage('Checks before installation') {
476                     remote = [
477                         name: containerName,
478                         host: IP_ADDRESS,
479                         user: 'ubuntu',
480                         identityFile: SSH_KEY,
481                         allowAnyHosts: true,
482                         logLevel: 'INFO',
483                         pty: true
484                     ]
485
486                     // Ensure the VM is ready
487                     sshCommand remote: remote, command: 'cloud-init status --wait'
488                     // Force time sync to avoid clock drift and invalid certificates
489                     sshCommand remote: remote, command: 'sudo apt-get -y update'
490                     sshCommand remote: remote, command: 'sudo apt-get -y install chrony'
491                     sshCommand remote: remote, command: 'sudo service chrony stop'
492                     sshCommand remote: remote, command: 'sudo chronyd -vq'
493                     sshCommand remote: remote, command: 'sudo service chrony start'
494
495                  } // stage("Checks before installation")
496 ///////////////////////////////////////////////////////////////////////////////////////
497 // Installation
498 ///////////////////////////////////////////////////////////////////////////////////////
499                 stage('Install') {
500                     commit_id = ''
501                     repo_distro = ''
502                     repo_key_name = ''
503                     release = ''
504
505                     if (params.COMMIT_ID) {
506                         commit_id = "-b ${params.COMMIT_ID}"
507                     }
508                     if (params.REPO_DISTRO) {
509                         repo_distro = "-r ${params.REPO_DISTRO}"
510                     }
511                     if (params.REPO_KEY_NAME) {
512                         repo_key_name = "-k ${params.REPO_KEY_NAME}"
513                     }
514                     if (params.RELEASE) {
515                         release = "-R ${params.RELEASE}"
516                     }
517                     if (params.REPOSITORY_BASE) {
518                         repo_base_url = "-u ${params.REPOSITORY_BASE}"
519                     } else {
520                         repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
521                     }
522
523                     remote = [
524                         name: containerName,
525                         host: IP_ADDRESS,
526                         user: 'ubuntu',
527                         identityFile: SSH_KEY,
528                         allowAnyHosts: true,
529                         logLevel: 'INFO',
530                         pty: true
531                     ]
532
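                    // Fetch the standalone installer and prepend /snap/bin to PATH at the top of
                    // ~/.bashrc, so snap-installed tools are found even in the non-interactive ssh
                    // sessions used by the following steps.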
533                     sshCommand remote: remote, command: '''
534                         wget https://osm-download.etsi.org/ftp/osm-13.0-thirteen/install_osm.sh
535                         chmod +x ./install_osm.sh
536                         sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
537                     '''
538
539                     Map gitlabCredentialsMap = [$class: 'UsernamePasswordMultiBinding',
540                                                 credentialsId: 'gitlab-registry',
541                                                 usernameVariable: 'USERNAME',
542                                                 passwordVariable: 'PASSWORD']
543                     if (useCharmedInstaller) {
544                         // Use the local registry proxy for Docker Hub
545                         sshCommand remote: remote, command: '''
546                             sudo snap install microk8s --classic --channel=1.19/stable
547                             sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" \
548                             /var/snap/microk8s/current/args/containerd-template.toml
549                             sudo systemctl restart snap.microk8s.daemon-containerd.service
550                             sudo snap alias microk8s.kubectl kubectl
551                         '''
552
553                         withCredentials([gitlabCredentialsMap]) {
554                             sshCommand remote: remote, command: """
555                                 ./install_osm.sh -y \
556                                     ${repo_base_url} \
557                                     ${repo_key_name} \
558                                     ${release} -r unstable \
559                                     --charmed  \
560                                     --registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
561                                     --tag ${containerName}
562                             """
563                         }
564                         prometheusHostname = "prometheus.${IP_ADDRESS}.nip.io"
565                         prometheusPort = 80
566                         osmHostname = "nbi.${IP_ADDRESS}.nip.io:443"
567                     } else {
568                         // Run the default (Kubernetes) installer, specifying the internal Docker registry and Docker proxy
569                         osm_installation_options = ""
570                         if (params.TRY_OLD_SERVICE_ASSURANCE) {
571                             osm_installation_options = "--old-sa"
572                         }
573                         withCredentials([gitlabCredentialsMap]) {
574                             sshCommand remote: remote, command: """
575                                 ./install_osm.sh -y \
576                                     ${repo_base_url} \
577                                     ${repo_key_name} \
578                                     ${release} -r unstable \
579                                     -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
580                                     -p ${INTERNAL_DOCKER_PROXY} \
581                                     -t ${containerName} \
582                                     ${osm_installation_options}
583                             """
584                         }
585                         prometheusHostname = IP_ADDRESS
586                         prometheusPort = 9091
587                         osmHostname = IP_ADDRESS
588                     }
589                 } // stage("Install")
590 ///////////////////////////////////////////////////////////////////////////////////////
591 // Health check of installed OSM in remote vm
592 ///////////////////////////////////////////////////////////////////////////////////////
593                 stage('OSM Health') {
594                     // if this point is reached, logs should be archived
595                     ARCHIVE_LOGS_FLAG = true
596                     stackName = 'osm'
597                     sshCommand remote: remote, command: """
598                         /usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
599                     """
600                 } // stage("OSM Health")
601             } // if ( params.DO_INSTALL )
602
603
604 ///////////////////////////////////////////////////////////////////////////////////////
605 // Execute Robot tests
606 ///////////////////////////////////////////////////////////////////////////////////////
607             stage_archive = false
608             if ( params.DO_ROBOT ) {
609                 try {
610                     stage('System Integration Test') {
611                         if (useCharmedInstaller) {
612                             tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
613                             sh(script: "touch ${tempdir}/hosts")
614                             hostfile = "${tempdir}/hosts"
615                             sh """cat << EOF > ${hostfile}
616 127.0.0.1           localhost
617 ${remote.host}      prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
618 EOF"""
619                         } else {
620                             hostfile = null
621                         }
622
623                         jujuPassword = sshCommand remote: remote, command: '''
624                             echo `juju gui 2>&1 | grep password | cut -d: -f2`
625                         '''
626
627                         run_robot_systest(
628                             containerName,
629                             params.ROBOT_TAG_NAME,
630                             osmHostname,
631                             prometheusHostname,
632                             prometheusPort,
633                             params.ROBOT_VIM,
634                             params.ROBOT_PORT_MAPPING_VIM,
635                             params.KUBECONFIG,
636                             params.CLOUDS,
637                             hostfile,
638                             jujuPassword,
639                             SSH_KEY,
640                             params.ROBOT_PASS_THRESHOLD,
641                             params.ROBOT_UNSTABLE_THRESHOLD
642                         )
643                     } // stage("System Integration Test")
644                 } finally {
645                     stage('After System Integration test') {
646                         if (currentBuild.result != 'FAILURE') {
647                             stage_archive = keep_artifacts
648                         } else {
649                             println('System test failed, throwing error')
650                             error = new Exception('System test failed')
651                             currentBuild.result = 'FAILURE'
652                             throw error
653                         }
654                     }
655                 }
656             } // if ( params.DO_ROBOT )
657
658             if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive) {
659                 stage('Archive') {
660                     // Archive the tested repo
661                     dir("${RELEASE_DIR}") {
662                         ci_helper.archive(params.ARTIFACTORY_SERVER, RELEASE, GERRIT_BRANCH, 'tested')
663                     }
664                     if (params.DO_DOCKERPUSH) {
665                         stage('Publish to Dockerhub') {
666                             parallelSteps = [:]
667                             for (buildStep in containerList) {
668                                 def module = buildStep
669                                 def moduleName = buildStep.toLowerCase()
670                                 def dockerTag = params.DOCKER_TAG
671                                 def moduleTag = containerName
672
673                                 parallelSteps[module] = {
674                                     dir("$module") {
675                                         sh("docker pull ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}")
676                                         sh("""docker tag ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag} \
677                                            opensourcemano/${moduleName}:${dockerTag}""")
678                                         sh "docker push opensourcemano/${moduleName}:${dockerTag}"
679                                     }
680                                 }
681                             }
682                             parallel parallelSteps
683                         }
684                         stage('Snap promotion') {
685                             withCredentials([string(credentialsId: 'Snapstore', variable: 'SNAPCRAFT_STORE_CREDENTIALS')]) {
686                                 snaps = ['osmclient']
687                                 for (snap in snaps) {
688                                     channel = 'latest/'
689                                     if (BRANCH_NAME.startsWith('v')) {
690                                         channel = BRANCH_NAME.substring(1) + '/'
691                                     } else if (BRANCH_NAME != 'master') {
692                                         channel += '/' + BRANCH_NAME.replaceAll('/', '-')
693                                     }
694                                     track = channel + 'edge\\*'
695                                     edge_rev = sh(returnStdout: true,
696                                         script: "snapcraft revisions $snap | " +
697                                         "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
698                                     track = channel + 'beta\\*'
699                                     beta_rev = sh(returnStdout: true,
700                                         script: "snapcraft revisions $snap | " +
701                                         "grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
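                                    // Assumed 'snapcraft revisions' output: rows like
                                    //     1234   2023-05-01   amd64   13.0.0   latest/edge*
                                    // are matched by the grep on the channel track, and awk keeps the
                                    // revision number from the first column.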
702
703                                     print "Edge: $edge_rev, Beta: $beta_rev"
704
705                                     if (edge_rev != beta_rev) {
706                                         print "Promoting $edge_rev to beta in place of $beta_rev"
707                                         beta_track = channel + 'beta'
708                                         sh "snapcraft release $snap $edge_rev $beta_track"
709                                     }
710                                 }
711                             }
712                         } // stage('Snap promotion')
713                         stage('Charm promotion') {
714                             charms = [
715                                 'osm', // bundle
716                                 'osm-ha', // bundle
717                                 'osm-grafana',
718                                 'osm-mariadb',
719                                 'mongodb-exporter-k8s',
720                                 'mysqld-exporter-k8s',
721                                 'osm-lcm',
722                                 'osm-mon',
723                                 'osm-nbi',
724                                 'osm-ng-ui',
725                                 'osm-pol',
726                                 'osm-ro',
727                                 'osm-prometheus',
728                                 'osm-update-db-operator',
729                                 'osm-vca-integrator',
730                             ]
731                             for (charm in charms) {
732
733                                 channel = 'latest'
734                                 if (BRANCH_NAME.startsWith('v')) {
735                                     channel = BRANCH_NAME.substring(1)
736                                 } else if (BRANCH_NAME != 'master') {
737                                     channel += '/' + BRANCH_NAME.replaceAll('/', '-')
738                                 }
739
740                                 withCredentials([string(credentialsId: 'Charmstore', variable: 'CHARMCRAFT_AUTH')]) {
741                                     sh "charmcraft status $charm --format json > ${charm}.json"
742                                     isCharm = sh(returnStdout: true, script: "grep architecture ${charm}.json | wc -l").trim() as int
743                                     resourceArgument = ""
744                                     if (isCharm) {
745                                         jqScriptEdge = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .version'|head -1"
746                                         jqScriptBeta = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/beta\")| .version'|head -1"
747                                         index=0
748                                         while (index < 5) {
749                                             resourceNameScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .resources[$index].name'|head -1"
750                                             resourceRevsScript = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"20.04\") | .releases[] | select(.channel==\"$channel/edge/merged\")| .resources[$index].revision'|head -1"
751                                             resourceName = sh(returnStdout: true, script: resourceNameScript).trim()
752                                             resourceRevs = sh(returnStdout: true, script: resourceRevsScript).trim()
753                                             if (resourceName != "null") {
754                                                 resourceArgument += " --resource ${resourceName}:${resourceRevs}"
755                                             } else {
756                                                 break
757                                             }
758                                             index ++
759                                         }
760                                     } else {
761                                         jqScriptEdge = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[].releases[] | select(.channel==\"$channel/edge/merged\")| .version'|head -1"
762                                         jqScriptBeta = "cat ${charm}.json | jq -r '.[] | select(.track==\"$channel\") | .mappings[].releases[] | select(.channel==\"$channel/beta\")| .version'|head -1"
763                                     }
764                                     // edge/merged is used in place of /edge as 10.1.0 LTS uses latest/edge
765                                     edge_rev = sh(returnStdout: true, script: jqScriptEdge).trim()
766                                     beta_rev = sh(returnStdout: true, script: jqScriptBeta).trim()
767                                     try { edge_rev = edge_rev as int } catch (NumberFormatException nfe) {edge_rev = 0}
768                                     try { beta_rev = beta_rev as int } catch (NumberFormatException nfe) {beta_rev = 0}
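                                    // For illustration (assumed, abbreviated 'charmcraft status --format json'
                                    // output), the jq filters above walk a structure shaped like:
                                    //     [ { "track": "latest",
                                    //         "mappings": [ { "base": { "architecture": "amd64", "channel": "20.04" },
                                    //                         "releases": [ { "channel": "latest/edge/merged",
                                    //                                         "version": "123",
                                    //                                         "resources": [ { "name": "image", "revision": 4 } ] } ] } ] } ]
                                    // and reduce it to the edge/merged and beta revision numbers compared below.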
769
770                                     print "Edge: $edge_rev, Beta: $beta_rev $resourceArgument"
771
772                                     if (edge_rev > beta_rev) {
773                                         print "Promoting $edge_rev to beta in place of $beta_rev"
774                                         beta_track = channel + 'beta'
775                                         sh "charmcraft release ${charm} --revision=${edge_rev}  ${resourceArgument} --channel=${channel}/beta"
776                                     }
777
778                                 }
779                             }
780                         } // stage('Charm promotion')
781                     } // if (params.DO_DOCKERPUSH)
782                 } // stage('Archive')
783             } // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
784         } // dir(OSM_DEVOPS)
785     } finally {
786         // stage('Debug') {
787         //     sleep 900
788         // }
789         stage('Archive Container Logs') {
790             if ( ARCHIVE_LOGS_FLAG ) {
791                 try {
792                     // Archive logs
793                     remote = [
794                         name: containerName,
795                         host: IP_ADDRESS,
796                         user: 'ubuntu',
797                         identityFile: SSH_KEY,
798                         allowAnyHosts: true,
799                         logLevel: 'INFO',
800                         pty: true
801                     ]
802                     println('Archiving container logs')
803                     archive_logs(remote)
804                 } catch (Exception e) {
805                     println('Error fetching logs: '+ e.getMessage())
806                 }
807             } // end if ( ARCHIVE_LOGS_FLAG )
808         }
809         stage('Cleanup') {
810             if ( params.DO_INSTALL && server_id != null) {
811                 delete_vm = true
812                 if (error && params.SAVE_CONTAINER_ON_FAIL ) {
813                     delete_vm = false
814                 }
815                 if (!error && params.SAVE_CONTAINER_ON_PASS ) {
816                     delete_vm = false
817                 }
818
819                 if ( delete_vm ) {
820                     if (server_id != null) {
821                         println("Deleting VM: $server_id")
822                         sh """#!/bin/sh -e
823                             for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
824                             openstack server delete ${server_id}
825                         """
826                     }
827                 } else {
828                     println("Saved VM $server_id in ETSI VIM")
829                 }
830             }
831             if ( http_server_name != null ) {
832                 sh "docker stop ${http_server_name} || true"
833                 sh "docker rm ${http_server_name} || true"
834             }
835
836             if ( devopstempdir != null ) {
837                 sh "rm -rf ${devopstempdir}"
838             }
839         }
840     }
841 }