Add option in Jenkins stage 3 to test old Service Assurance (SA)
[osm/devops.git] jenkins/ci-pipelines/ci_stage_3.groovy
index fb5120d..bc08891 100644
@@ -31,7 +31,8 @@ properties([
         string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
         string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
         string(defaultValue: 'testing-daily', description: '', name: 'DOCKER_TAG'),
-        string(defaultValue: 'ubuntu20.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
+        string(defaultValue: 'ubuntu22.04', description: '', name: 'OPENSTACK_BASE_IMAGE'),
+        booleanParam(defaultValue: false, description: '', name: 'TRY_OLD_SERVICE_ASSURANCE'),
         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_FAIL'),
         booleanParam(defaultValue: false, description: '', name: 'SAVE_CONTAINER_ON_PASS'),
         booleanParam(defaultValue: true, description: '', name: 'SAVE_ARTIFACTS_ON_SMOKE_SUCCESS'),
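Note: the new TRY_OLD_SERVICE_ASSURANCE parameter defaults to false, so existing runs keep the default service-assurance stack unless the flag is set explicitly. A minimal sketch of how an upstream pipeline could enable it (the job name 'osm-stage_3' is a placeholder for illustration, not taken from this patch):

    // Illustrative only: trigger this pipeline with the new flag enabled
    build job: 'osm-stage_3', parameters: [
        booleanParam(name: 'TRY_OLD_SERVICE_ASSURANCE', value: true),
        string(name: 'OPENSTACK_BASE_IMAGE', value: 'ubuntu22.04')
    ]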
@@ -124,13 +125,13 @@ void run_robot_systest(String tagName,
 
 void archive_logs(Map remote) {
 
-    sshCommand remote: remote, command: '''mkdir -p logs'''
+    sshCommand remote: remote, command: '''mkdir -p logs/dags'''
     if (useCharmedInstaller) {
         sshCommand remote: remote, command: '''
-            for container in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
-                logfile=`echo $container | cut -d- -f1`
+            for pod in `kubectl get pods -n osm | grep -v operator | grep -v NAME| awk '{print $1}'`; do
+                logfile=`echo $pod | cut -d- -f1`
                 echo "Extracting log for $logfile"
-                kubectl logs -n osm $container --timestamps=true 2>&1 > logs/$logfile.log
+                kubectl logs -n osm $pod --timestamps=true 2>&1 > logs/$logfile.log
             done
         '''
     } else {
@@ -148,12 +149,17 @@ void archive_logs(Map remote) {
                 > logs/$statefulset.log
             done
         '''
+        sshCommand remote: remote, command: '''
+            schedulerPod="$(kubectl get pods -n osm | grep airflow-scheduler| awk '{print $1; exit}')"; \
+            echo "Extracting logs from Airflow DAGs from pod ${schedulerPod}"; \
+            kubectl cp -n osm ${schedulerPod}:/opt/airflow/logs/scheduler/latest/dags logs/dags -c scheduler
+        '''
     }
 
     sh 'rm -rf logs'
     sshCommand remote: remote, command: '''ls -al logs'''
     sshGet remote: remote, from: 'logs', into: '.', override: true
-    sh 'cp logs/* .'
+    sh 'cp logs/*.log logs/dags/*.log .'
     archiveArtifacts artifacts: '*.log'
 }
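Note: archive_logs() now also copies the Airflow scheduler's per-DAG logs into logs/dags before the directory is fetched with sshGet, and the final cp flattens both logs/*.log and logs/dags/*.log so archiveArtifacts still matches everything with the '*.log' pattern. A rough manual equivalent of the new extraction step, reusing the pod pattern and path from the snippet above:

    # Sketch: pull Airflow DAG logs from the scheduler pod by hand
    schedulerPod="$(kubectl get pods -n osm | grep airflow-scheduler | awk '{print $1; exit}')"
    mkdir -p logs/dags
    kubectl cp -n osm "${schedulerPod}:/opt/airflow/logs/scheduler/latest/dags" logs/dags -c scheduler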
 
@@ -437,6 +443,30 @@ node("${params.NODE}") {
                         }
                     }
                     println('VM is ready and accepting ssh connections')
+
+                    //////////////////////////////////////////////////////////////////////////////////////////////
+                    println('Applying sshd config workaround for Ubuntu 22.04 and old jsch client in Jenkins...')
+
+                    sh( returnStatus: true,
+                        script: "ssh -T -i ${SSH_KEY} " +
+                            "-o StrictHostKeyChecking=no " +
+                            "-o UserKnownHostsFile=/dev/null " +
+                            "ubuntu@${IP_ADDRESS} " +
+                            "'echo HostKeyAlgorithms +ssh-rsa | sudo tee -a /etc/ssh/sshd_config'")
+                    sh( returnStatus: true,
+                        script: "ssh -T -i ${SSH_KEY} " +
+                            "-o StrictHostKeyChecking=no " +
+                            "-o UserKnownHostsFile=/dev/null " +
+                            "ubuntu@${IP_ADDRESS} " +
+                            "'echo PubkeyAcceptedKeyTypes +ssh-rsa | sudo tee -a /etc/ssh/sshd_config'")
+                    sh( returnStatus: true,
+                        script: "ssh -T -i ${SSH_KEY} " +
+                            "-o StrictHostKeyChecking=no " +
+                            "-o UserKnownHostsFile=/dev/null " +
+                            "ubuntu@${IP_ADDRESS} " +
+                            "'sudo systemctl restart sshd'")
+                    //////////////////////////////////////////////////////////////////////////////////////////////
+
                 } // stage("Spawn Remote VM")
 
 ///////////////////////////////////////////////////////////////////////////////////////
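Note: the workaround above is needed because the sshCommand/sshGet steps used later in this pipeline go through an old JSch-based client (as the println says), while OpenSSH on Ubuntu 22.04 disables ssh-rsa signatures by default. The three ssh calls simply append the following two lines to /etc/ssh/sshd_config on the freshly spawned VM and then restart sshd:

    HostKeyAlgorithms +ssh-rsa
    PubkeyAcceptedKeyTypes +ssh-rsa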
@@ -456,8 +486,8 @@ node("${params.NODE}") {
                     // Ensure the VM is ready
                     sshCommand remote: remote, command: 'cloud-init status --wait'
                     // Force time sync to avoid clock drift and invalid certificates
-                    sshCommand remote: remote, command: 'sudo apt-get update'
-                    sshCommand remote: remote, command: 'sudo apt-get install -y chrony'
+                    sshCommand remote: remote, command: 'sudo apt-get -y update'
+                    sshCommand remote: remote, command: 'sudo apt-get -y install chrony'
                     sshCommand remote: remote, command: 'sudo service chrony stop'
                     sshCommand remote: remote, command: 'sudo chronyd -vq'
                     sshCommand remote: remote, command: 'sudo service chrony start'
@@ -536,6 +566,10 @@ node("${params.NODE}") {
                         osmHostname = "nbi.${IP_ADDRESS}.nip.io:443"
                     } else {
                         // Run -k8s installer here specifying internal docker registry and docker proxy
+                        osm_installation_options = ""
+                        if (params.TRY_OLD_SERVICE_ASSURANCE) {
+                            osm_installation_options = "--old-sa"
+                        }
                         withCredentials([gitlabCredentialsMap]) {
                             sshCommand remote: remote, command: """
                                 ./install_osm.sh -y \
@@ -544,7 +578,8 @@ node("${params.NODE}") {
                                     ${release} -r unstable \
                                     -d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
                                     -p ${INTERNAL_DOCKER_PROXY} \
-                                    -t ${containerName}
+                                    -t ${containerName} \
+                                    ${osm_installation_options}
                             """
                         }
                         prometheusHostname = IP_ADDRESS
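Note: with TRY_OLD_SERVICE_ASSURANCE enabled, the remote installer invocation above resolves to roughly the following (release, registry and proxy values elided; shown only to illustrate where the new option lands):

    ./install_osm.sh -y \
        ... \
        -t <containerName> \
        --old-sa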
@@ -690,6 +725,7 @@ EOF"""
                                 'osm-pol',
                                 'osm-ro',
                                 'osm-prometheus',
+                                'osm-update-db-operator',
                                 'osm-vca-integrator',
                             ]
                             for (charm in charms) {
@@ -747,7 +783,10 @@ EOF"""
             } // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
         } // dir(OSM_DEVOPS)
     } finally {
-       stage('Archive Container Logs') {
+        // stage('Debug') {
+        //     sleep 900
+        // }
+        stage('Archive Container Logs') {
             if ( ARCHIVE_LOGS_FLAG ) {
                 try {
                     // Archive logs