string(defaultValue: 'release', description: '', name: 'RELEASE'),
string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NAME'),
string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
- string(defaultValue: '', description: '', name: 'UPSTREAM_JOB_NUMBER'),
string(defaultValue: 'OSMETSI', description: '', name: 'GPG_KEY_NAME'),
string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
string(defaultValue: 'osm-stage_4', description: '', name: 'DOWNSTREAM_STAGE_NAME'),
booleanParam(defaultValue: false, description: '', name: 'SAVE_ARTIFACTS_OVERRIDE'),
string(defaultValue: '/home/jenkins/hive/openstack-etsi.rc', description: '', name: 'HIVE_VIM_1'),
booleanParam(defaultValue: true, description: '', name: 'DO_ROBOT'),
- string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options', name: 'ROBOT_TAG_NAME'),
+ string(defaultValue: 'sanity', description: 'sanity/regression/daily are the common options',
+ name: 'ROBOT_TAG_NAME'),
string(defaultValue: '/home/jenkins/hive/robot-systest.cfg', description: '', name: 'ROBOT_VIM'),
- string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml', description: 'Port mapping file for SDN assist in ETSI VIM', name: 'ROBOT_PORT_MAPPING_VIM'),
+ string(defaultValue: '/home/jenkins/hive/port-mapping-etsi-vim.yaml',
+ description: 'Port mapping file for SDN assist in ETSI VIM',
+ name: 'ROBOT_PORT_MAPPING_VIM'),
string(defaultValue: '/home/jenkins/hive/kubeconfig.yaml', description: '', name: 'KUBECONFIG'),
string(defaultValue: '/home/jenkins/hive/clouds.yaml', description: '', name: 'CLOUDS'),
string(defaultValue: 'Default', description: '', name: 'INSTALLER'),
- string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed', name: 'ROBOT_PASS_THRESHOLD'),
- string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable (if lower, it will be failed)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
+ string(defaultValue: '100.0', description: '% passed Robot tests to mark the build as passed',
+ name: 'ROBOT_PASS_THRESHOLD'),
+ string(defaultValue: '80.0', description: '% passed Robot tests to mark the build as unstable ' +
+ '(if lower, the build will fail)', name: 'ROBOT_UNSTABLE_THRESHOLD'),
])
])
-
////////////////////////////////////////////////////////////////////////////////////////
// Helper Functions
////////////////////////////////////////////////////////////////////////////////////////
String hostfile=null,
String jujuPassword=null,
String osmRSAfile=null,
- String pass_th='0.0',
- String unstable_th='0.0') {
+ String passThreshold='0.0',
+ String unstableThreshold='0.0') {
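+ // Runs the Robot system-test container against the target OSM deployment and
+ // publishes the resulting reports through the RobotPublisher step below.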
tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
- if ( !envfile ) {
+ String environmentFile = ''
+ if (envfile) {
+ environmentFile = envfile
+ } else {
sh(script: "touch ${tempdir}/env")
- envfile="${tempdir}/env"
+ environmentFile = "${tempdir}/env"
}
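+ // Build the optional docker arguments only for the values that were actually provided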
- PROMETHEUS_PORT_VAR = ""
- if ( prometheusPort != null) {
- PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT="+prometheusPort
+ PROMETHEUS_PORT_VAR = ''
+ if (prometheusPort != null) {
+ PROMETHEUS_PORT_VAR = "--env PROMETHEUS_PORT=${prometheusPort}"
}
- hostfilemount=""
- if ( hostfile ) {
- hostfilemount="-v "+hostfile+":/etc/hosts"
+ hostfilemount = ''
+ if (hostfile) {
+ hostfilemount = "-v ${hostfile}:/etc/hosts"
}
- JUJU_PASSWORD_VAR = ""
- if ( jujuPassword != null) {
- JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD="+jujuPassword
+ JUJU_PASSWORD_VAR = ''
+ if (jujuPassword != null) {
+ JUJU_PASSWORD_VAR = "--env JUJU_PASSWORD=${jujuPassword}"
}
try {
- sh "docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${envfile} -v ${clouds}:/etc/openstack/clouds.yaml -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} -c -t ${testName}"
+ sh("""docker run --env OSM_HOSTNAME=${osmHostname} --env PROMETHEUS_HOSTNAME=${prometheusHostname} \
+ ${PROMETHEUS_PORT_VAR} ${JUJU_PASSWORD_VAR} --env-file ${environmentFile} \
+ -v ${clouds}:/etc/openstack/clouds.yaml \
+ -v ${osmRSAfile}:/root/osm_id_rsa -v ${kubeconfig}:/root/.kube/config -v ${tempdir}:/robot-systest/reports \
+ -v ${portmappingfile}:/root/port-mapping.yaml ${hostfilemount} opensourcemano/tests:${tagName} \
+ -c -t ${testName}""")
} finally {
- sh "cp ${tempdir}/* ."
- outputDirectory = sh(returnStdout: true, script: "pwd").trim()
- println ("Present Directory is : ${outputDirectory}")
+ sh("cp ${tempdir}/* .")
+ outputDirectory = sh(returnStdout: true, script: 'pwd').trim()
+ println("Present Directory is : ${outputDirectory}")
step([
$class : 'RobotPublisher',
outputPath : "${outputDirectory}",
- outputFileName : "*.xml",
+ outputFileName : '*.xml',
disableArchiveOutput : false,
- reportFileName : "report.html",
- logFileName : "log.html",
- passThreshold : pass_th,
- unstableThreshold: unstable_th,
- otherFiles : "*.png",
+ reportFileName : 'report.html',
+ logFileName : 'log.html',
+ passThreshold : passThreshold,
+ unstableThreshold: unstableThreshold,
+ otherFiles : '*.png',
])
}
}
-def archive_logs(remote) {
+void archive_logs(Map remote) {
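+// Fetch the OSM logs from the remote VM and archive them with the build (*.log)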
sshCommand remote: remote, command: '''mkdir -p logs'''
if (useCharmedInstaller) {
sshCommand remote: remote, command: '''
for deployment in `kubectl -n osm get deployments | grep -v operator | grep -v NAME| awk '{print $1}'`; do
echo "Extracting log for $deployment"
- kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 > logs/$deployment.log
+ kubectl -n osm logs deployments/$deployment --timestamps=true --all-containers 2>&1 \
+ > logs/$deployment.log
done
'''
sshCommand remote: remote, command: '''
for statefulset in `kubectl -n osm get statefulsets | grep -v operator | grep -v NAME| awk '{print $1}'`; do
echo "Extracting log for $statefulset"
- kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 > logs/$statefulset.log
+ kubectl -n osm logs statefulsets/$statefulset --timestamps=true --all-containers 2>&1 \
+ > logs/$statefulset.log
done
'''
}
- sh "rm -rf logs"
+ sh 'rm -rf logs'
sshCommand remote: remote, command: '''ls -al logs'''
sshGet remote: remote, from: 'logs', into: '.', override: true
- sh "cp logs/* ."
+ sh 'cp logs/* .'
archiveArtifacts artifacts: '*.log'
}
-def get_value(key, output) {
+String get_value(String key, String output) {
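+ // Parse OpenStack CLI table output ("| key | value |") and return the value for the given key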
for (String line : output.split( '\n' )) {
data = line.split( '\\|' )
if (data.length > 1) {
INTERNAL_DOCKER_REGISTRY = 'osm.etsi.org:5050/devops/cicd/'
INTERNAL_DOCKER_PROXY = 'http://172.21.1.1:5000'
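+// Local apt proxy/cache, passed to the docker builds below as a build argument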
+ APT_PROXY = 'http://172.21.1.1:3142'
SSH_KEY = '~/hive/cicd_rsa'
sh 'env'
- tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./,"")
+ tag_or_branch = params.GERRIT_BRANCH.replaceAll(/\./, '')
- stage("Checkout") {
+ stage('Checkout') {
checkout scm
}
- ci_helper = load "jenkins/ci-pipelines/ci_helper.groovy"
+ ci_helper = load 'jenkins/ci-pipelines/ci_helper.groovy'
- def upstream_main_job = params.UPSTREAM_SUFFIX
+ def upstreamMainJob = params.UPSTREAM_SUFFIX
// upstream jobs always use merged artifacts
- upstream_main_job += '-merge'
- container_name_prefix = "osm-${tag_or_branch}"
- container_name = "${container_name_prefix}"
+ upstreamMainJob += '-merge'
+ containerNamePrefix = "osm-${tag_or_branch}"
+ containerName = "${containerNamePrefix}"
keep_artifacts = false
if ( JOB_NAME.contains('merge') ) {
- container_name += "-merge"
+ containerName += '-merge'
// On a merge job, we keep artifacts on smoke success
keep_artifacts = params.SAVE_ARTIFACTS_ON_SMOKE_SUCCESS
}
- container_name += "-${BUILD_NUMBER}"
+ containerName += "-${BUILD_NUMBER}"
server_id = null
http_server_name = null
devopstempdir = null
- useCharmedInstaller = params.INSTALLER.equalsIgnoreCase("charmed")
+ useCharmedInstaller = params.INSTALLER.equalsIgnoreCase('charmed')
try {
builtModules = [:]
///////////////////////////////////////////////////////////////////////////////////////
// Fetch stage 2 .deb artifacts
///////////////////////////////////////////////////////////////////////////////////////
- stage("Copy Artifacts") {
+ stage('Copy Artifacts') {
// cleanup any previous repo
sh 'rm -rf repo'
- dir("repo") {
+ dir('repo') {
packageList = []
dir("${RELEASE}") {
RELEASE_DIR = sh(returnStdout:true, script: 'pwd').trim()
// check if an upstream artifact based on specific build number has been requested
- // This is the case of a merge build and the upstream merge build is not yet complete (it is not deemed
- // a successful build yet). The upstream job is calling this downstream job (with the its build artifiact)
- def upstreamComponent=""
- if ( params.UPSTREAM_JOB_NAME ) {
+ // This is the case of a merge build and the upstream merge build is not yet complete
+ // (it is not deemed a successful build yet). The upstream job is calling this downstream
+ // job (with its build artifact)
+ def upstreamComponent = ''
+ if (params.UPSTREAM_JOB_NAME) {
println("Fetching upstream job artifact from ${params.UPSTREAM_JOB_NAME}")
lock('Artifactory') {
step ([$class: 'CopyArtifact',
def buildNumber = ci_helper.get_env_value('build.env','BUILD_NUMBER')
dir("$upstreamComponent") {
// the upstream job name contains suffix with the project. Need this stripped off
- def project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
- def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
+ project_without_branch = params.UPSTREAM_JOB_NAME.split('/')[0]
+ packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
upstreamComponent,
GERRIT_BRANCH,
"${project_without_branch} :: ${GERRIT_BRANCH}",
}
parallelSteps = [:]
- def list = ["RO", "osmclient", "IM", "devops", "MON", "N2VC", "NBI", "common", "LCM", "POL", "NG-UI", "PLA", "tests"]
- if (upstreamComponent.length()>0) {
- println("Skipping upstream fetch of "+upstreamComponent)
+ list = ['RO', 'osmclient', 'IM', 'devops', 'MON', 'N2VC', 'NBI',
+ 'common', 'LCM', 'POL', 'NG-UI', 'PLA', 'tests']
+ if (upstreamComponent.length() > 0) {
+ println("Skipping upstream fetch of ${upstreamComponent}")
list.remove(upstreamComponent)
}
for (buildStep in list) {
parallelSteps[component] = {
dir("$component") {
println("Fetching artifact for ${component}")
- step ([$class: 'CopyArtifact',
- projectName: "${component}${upstream_main_job}/${GERRIT_BRANCH}"])
+ step([$class: 'CopyArtifact',
+ projectName: "${component}${upstreamMainJob}/${GERRIT_BRANCH}"])
- // grab the archives from the stage_2 builds (ie. this will be the artifacts stored based on a merge)
- def packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
+ // grab the archives from the stage_2 builds
+ // (i.e. these will be the artifacts stored based on a merge)
+ packages = ci_helper.get_archive(params.ARTIFACTORY_SERVER,
component,
GERRIT_BRANCH,
- "${component}${upstream_main_job} :: ${GERRIT_BRANCH}",
- ci_helper.get_env_value('build.env','BUILD_NUMBER'))
+ "${component}${upstreamMainJob} :: ${GERRIT_BRANCH}",
+ ci_helper.get_env_value('build.env', 'BUILD_NUMBER'))
packageList.addAll(packages)
println("Fetched ${component}: ${packages}")
- sh "rm -rf dists"
+ sh 'rm -rf dists'
}
}
}
///////////////////////////////////////////////////////////////////////////////////////
// Create Devops APT repository
///////////////////////////////////////////////////////////////////////////////////////
- sh "mkdir -p pool"
- for (component in [ "devops", "IM", "osmclient" ]) {
+ sh 'mkdir -p pool'
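+ // Sign each component's .deb packages and generate the per-component Packages index for the local repo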
+ for (component in [ 'devops', 'IM', 'osmclient' ]) {
sh "ls -al ${component}/pool/"
sh "cp -r ${component}/pool/* pool/"
sh "dpkg-sig --sign builder -k ${GPG_KEY_NAME} pool/${component}/*"
sh "mkdir -p dists/${params.REPO_DISTRO}/${component}/binary-amd64/"
- sh "apt-ftparchive packages pool/${component} > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
+ sh("""apt-ftparchive packages pool/${component} \
+ > dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages""")
sh "gzip -9fk dists/${params.REPO_DISTRO}/${component}/binary-amd64/Packages"
}
// create and sign the release file
sh "apt-ftparchive release dists/${params.REPO_DISTRO} > dists/${params.REPO_DISTRO}/Release"
- sh "gpg --yes -abs -u ${GPG_KEY_NAME} -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release"
+ sh("""gpg --yes -abs -u ${GPG_KEY_NAME} \
+ -o dists/${params.REPO_DISTRO}/Release.gpg dists/${params.REPO_DISTRO}/Release""")
// copy the public key into the release folder
// this pulls the key from the home dir of the current user (jenkins)
}
// start an apache server to serve up the packages
- http_server_name = "${container_name}-apache"
+ http_server_name = "${containerName}-apache"
pwd = sh(returnStdout:true, script: 'pwd').trim()
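+ // Grab a free TCP port (bind to port 0) and serve the newly built repo over HTTP on it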
- repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()\');', returnStdout: true).trim()
- repo_base_url = ci_helper.start_http_server(pwd,http_server_name,repo_port)
- NODE_IP_ADDRESS=sh(returnStdout: true, script:
+ repo_port = sh(script: 'echo $(python -c \'import socket; s=socket.socket(); s.bind(("", 0));' +
+ 'print(s.getsockname()[1]); s.close()\');',
+ returnStdout: true).trim()
+ repo_base_url = ci_helper.start_http_server(pwd, http_server_name, repo_port)
+ NODE_IP_ADDRESS = sh(returnStdout: true, script:
"echo ${SSH_CONNECTION} | awk '{print \$3}'").trim()
}
// Unpack devops package into temporary location so that we use it from upstream if it was part of a patch
- osm_devops_dpkg = sh(returnStdout: true, script: "find ./repo/release/pool/ -name osm-devops*.deb").trim()
- devopstempdir = sh(returnStdout: true, script: "mktemp -d").trim()
+ osm_devops_dpkg = sh(returnStdout: true, script: 'find ./repo/release/pool/ -name osm-devops*.deb').trim()
+ devopstempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
println("Extracting local devops package ${osm_devops_dpkg} into ${devopstempdir} for docker build step")
sh "dpkg -x ${osm_devops_dpkg} ${devopstempdir}"
- OSM_DEVOPS="${devopstempdir}/usr/share/osm-devops"
+ OSM_DEVOPS = "${devopstempdir}/usr/share/osm-devops"
// Convert URLs from stage 2 packages to arguments that can be passed to docker build
for (remotePath in packageList) {
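+ // <name>_<version>_<arch>.deb -> builtModules['<name>'] = <remote package URL>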
- packageName=remotePath.substring(remotePath.lastIndexOf('/')+1)
- packageName=packageName.substring(0,packageName.indexOf('_'))
- builtModules[packageName]=remotePath
+ packageName = remotePath[remotePath.lastIndexOf('/') + 1 .. -1]
+ packageName = packageName[0 .. packageName.indexOf('_') - 1]
+ builtModules[packageName] = remotePath
}
}
// Build docker containers
///////////////////////////////////////////////////////////////////////////////////////
dir(OSM_DEVOPS) {
- def remote = [:]
+ Map remote = [:]
error = null
if ( params.DO_BUILD ) {
withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
sh "docker login ${INTERNAL_DOCKER_REGISTRY} -u ${USERNAME} -p ${PASSWORD}"
}
- datetime = sh(returnStdout: true, script: "date +%Y-%m-%d:%H:%M:%S").trim()
+ datetime = sh(returnStdout: true, script: 'date +%Y-%m-%d:%H:%M:%S').trim()
moduleBuildArgs = " --build-arg CACHE_DATE=${datetime}"
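+ // Pass each stage-2 package URL to the docker builds as a <MODULE>_URL build argument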
for (packageName in builtModules.keySet()) {
- envName=packageName.replaceAll("-","_").toUpperCase()+"_URL"
+ envName = packageName.replaceAll('-', '_').toUpperCase() + '_URL'
moduleBuildArgs += " --build-arg ${envName}=" + builtModules[packageName]
}
- dir ("docker") {
- stage("Build") {
+ dir('docker') {
+ stage('Build') {
containerList = sh(returnStdout: true, script:
"find . -name Dockerfile -printf '%h\\n' | sed 's|\\./||'")
- containerList=Arrays.asList(containerList.split("\n"))
+ containerList = Arrays.asList(containerList.split('\n'))
print(containerList)
parallelSteps = [:]
for (buildStep in containerList) {
def module = buildStep
def moduleName = buildStep.toLowerCase()
- def moduleTag = container_name
+ def moduleTag = containerName
parallelSteps[module] = {
dir("$module") {
- sh "docker build -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} ."
+ sh("""docker build --build-arg APT_PROXY=${APT_PROXY} \
+ -t opensourcemano/${moduleName}:${moduleTag} ${moduleBuildArgs} .""")
println("Tagging ${moduleName}:${moduleTag}")
- sh "docker tag opensourcemano/${moduleName}:${moduleTag} ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
- sh "docker push ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}"
+ sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
+ ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
+ sh("""docker push \
+ ${INTERNAL_DOCKER_REGISTRY}opensourcemano/${moduleName}:${moduleTag}""")
}
}
}
parallel parallelSteps
}
}
- } // if ( params.DO_BUILD )
+ } // if (params.DO_BUILD)
- if ( params.DO_INSTALL ) {
+ if (params.DO_INSTALL) {
///////////////////////////////////////////////////////////////////////////////////////
// Launch VM
///////////////////////////////////////////////////////////////////////////////////////
- stage("Spawn Remote VM") {
- println("Launching new VM")
- output=sh(returnStdout: true, script: """#!/bin/sh -e
+ stage('Spawn Remote VM') {
+ println('Launching new VM')
+ output = sh(returnStdout: true, script: """#!/bin/sh -e
for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
openstack server create --flavor osm.sanity \
--image ${OPENSTACK_BASE_IMAGE} \
--key-name CICD \
--property build_url="${BUILD_URL}" \
--nic net-id=osm-ext \
- ${container_name}
+ ${containerName}
""").trim()
server_id = get_value('id', output)
if (server_id == null) {
- println("VM launch output: ")
+ println('VM launch output: ')
println(output)
- throw new Exception("VM Launch failed")
+ throw new Exception('VM Launch failed')
}
println("Target VM is ${server_id}, waiting for IP address to be assigned")
- IP_ADDRESS = ""
+ IP_ADDRESS = ''
- while (IP_ADDRESS == "") {
- output=sh(returnStdout: true, script: """#!/bin/sh -e
+ while (IP_ADDRESS == '') {
+ output = sh(returnStdout: true, script: """#!/bin/sh -e
for line in `grep OS ~/hive/robot-systest.cfg | grep -v OS_CLOUD` ; do export \$line ; done
openstack server show ${server_id}
""").trim()
println("Waiting for VM at ${IP_ADDRESS} to be reachable")
alive = false
- while (! alive) {
- output=sh(returnStdout: true, script: "sleep 1 ; nc -zv ${IP_ADDRESS} 22 2>&1 || true").trim()
- println("output is [$output]")
- alive = output.contains("succeeded")
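+ // Keep retrying ssh until it answers; the surrounding timeout aborts the stage after one minute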
+ timeout(time: 1, unit: 'MINUTES') {
+ while (!alive) {
+ output = sh(
+ returnStatus: true,
+ script: "ssh -T -i ${SSH_KEY} " +
+ "-o StrictHostKeyChecking=no " +
+ "-o UserKnownHostsFile=/dev/null " +
+ "-o ConnectTimeout=5 ubuntu@${IP_ADDRESS} 'echo Alive'")
+ alive = (output == 0)
+ }
}
- println("VM is ready and accepting ssh connections")
+ println('VM is ready and accepting ssh connections')
} // stage("Spawn Remote VM")
///////////////////////////////////////////////////////////////////////////////////////
+// Checks before installation
+///////////////////////////////////////////////////////////////////////////////////////
+ stage('Checks before installation') {
+ remote = [
+ name: containerName,
+ host: IP_ADDRESS,
+ user: 'ubuntu',
+ identityFile: SSH_KEY,
+ allowAnyHosts: true,
+ logLevel: 'INFO',
+ pty: true
+ ]
+
+ // Force time sync to avoid clock drift and invalid certificates
+ sshCommand remote: remote, command: 'sudo apt-get update'
+ sshCommand remote: remote, command: 'sudo apt-get install -y ntp'
+ sshCommand remote: remote, command: 'sudo service ntp stop'
+ sshCommand remote: remote, command: 'sudo ntpd -gq'
+ sshCommand remote: remote, command: 'sudo service ntp start'
+
+ } // stage("Checks before installation")
+///////////////////////////////////////////////////////////////////////////////////////
// Installation
///////////////////////////////////////////////////////////////////////////////////////
- stage("Install") {
+ stage('Install') {
commit_id = ''
repo_distro = ''
repo_key_name = ''
release = ''
- if ( params.COMMIT_ID )
- {
+ if (params.COMMIT_ID) {
commit_id = "-b ${params.COMMIT_ID}"
}
-
- if ( params.REPO_DISTRO )
- {
+ if (params.REPO_DISTRO) {
repo_distro = "-r ${params.REPO_DISTRO}"
}
-
- if ( params.REPO_KEY_NAME )
- {
+ if (params.REPO_KEY_NAME) {
repo_key_name = "-k ${params.REPO_KEY_NAME}"
}
-
- if ( params.RELEASE )
- {
+ if (params.RELEASE) {
release = "-R ${params.RELEASE}"
}
-
- if ( params.REPOSITORY_BASE )
- {
+ if (params.REPOSITORY_BASE) {
repo_base_url = "-u ${params.REPOSITORY_BASE}"
- }
- else
- {
+ } else {
repo_base_url = "-u http://${NODE_IP_ADDRESS}:${repo_port}"
}
- remote.name = container_name
- remote.host = IP_ADDRESS
- remote.user = 'ubuntu'
- remote.identityFile = SSH_KEY
- remote.allowAnyHosts = true
- remote.logLevel = 'INFO'
- remote.pty = true
-
- // Force time sync to avoid clock drift and invalid certificates
- sshCommand remote: remote, command: """
- sudo apt update
- sudo apt install -y ntp
- sudo service ntp stop
- sudo ntpd -gq
- sudo service ntp start
- """
-
- sshCommand remote: remote, command: """
+ remote = [
+ name: containerName,
+ host: IP_ADDRESS,
+ user: 'ubuntu',
+ identityFile: SSH_KEY,
+ allowAnyHosts: true,
+ logLevel: 'INFO',
+ pty: true
+ ]
+
+ sshCommand remote: remote, command: '''
wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh
chmod +x ./install_osm.sh
- sed -i '1 i\\export PATH=/snap/bin:\${PATH}' ~/.bashrc
- """
-
- if ( useCharmedInstaller ) {
+ sed -i '1 i\\export PATH=/snap/bin:\$PATH' ~/.bashrc
+ '''
+
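+ // The same GitLab registry credentials are used by both the charmed and the -k8s installer paths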
+ Map gitlabCredentialsMap = [$class: 'UsernamePasswordMultiBinding',
+ credentialsId: 'gitlab-registry',
+ usernameVariable: 'USERNAME',
+ passwordVariable: 'PASSWORD']
+ if (useCharmedInstaller) {
// Use local proxy for docker hub
sshCommand remote: remote, command: '''
sudo snap install microk8s --classic --channel=1.19/stable
- sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" /var/snap/microk8s/current/args/containerd-template.toml
+ sudo sed -i "s|https://registry-1.docker.io|http://172.21.1.1:5000|" \
+ /var/snap/microk8s/current/args/containerd-template.toml
sudo systemctl restart snap.microk8s.daemon-containerd.service
sudo snap alias microk8s.kubectl kubectl
'''
- withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
- usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
+ withCredentials([gitlabCredentialsMap]) {
sshCommand remote: remote, command: """
./install_osm.sh -y \
${repo_base_url} \
${release} -r unstable \
--charmed \
--registry ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
- --tag ${container_name}
+ --tag ${containerName}
"""
}
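+ // nip.io resolves <name>.<ip>.nip.io to <ip>, so the charmed endpoints need no extra DNS setup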
- prometheusHostname = "prometheus."+IP_ADDRESS+".nip.io"
+ prometheusHostname = "prometheus.${IP_ADDRESS}.nip.io"
prometheusPort = 80
- osmHostname = "nbi."+IP_ADDRESS+".nip.io:443"
+ osmHostname = "nbi.${IP_ADDRESS}.nip.io:443"
} else {
// Run -k8s installer here specifying internal docker registry and docker proxy
- withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'gitlab-registry',
- usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
+ withCredentials([gitlabCredentialsMap]) {
sshCommand remote: remote, command: """
./install_osm.sh -y \
${repo_base_url} \
${release} -r unstable \
-d ${USERNAME}:${PASSWORD}@${INTERNAL_DOCKER_REGISTRY} \
-p ${INTERNAL_DOCKER_PROXY} \
- -t ${container_name}
+ -t ${containerName}
"""
}
prometheusHostname = IP_ADDRESS
///////////////////////////////////////////////////////////////////////////////////////
// Health check of installed OSM in remote vm
///////////////////////////////////////////////////////////////////////////////////////
- stage("OSM Health") {
- stackName = "osm"
+ stage('OSM Health') {
+ stackName = 'osm'
sshCommand remote: remote, command: """
/usr/share/osm-devops/installers/osm_health.sh -k -s ${stackName}
"""
stage_archive = false
if ( params.DO_ROBOT ) {
try {
- stage("System Integration Test") {
- if ( useCharmedInstaller ) {
- tempdir = sh(returnStdout: true, script: "mktemp -d").trim()
+ stage('System Integration Test') {
+ if (useCharmedInstaller) {
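+ // Charmed installs expose NBI and Prometheus behind nip.io names; map them in a
+ // hosts file so the test container can resolve them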
+ tempdir = sh(returnStdout: true, script: 'mktemp -d').trim()
sh(script: "touch ${tempdir}/hosts")
- hostfile="${tempdir}/hosts"
+ hostfile = "${tempdir}/hosts"
sh """cat << EOF > ${hostfile}
127.0.0.1 localhost
${remote.host} prometheus.${remote.host}.nip.io nbi.${remote.host}.nip.io
EOF"""
} else {
- hostfile=null
+ hostfile = null
}
- jujuPassword=sshCommand remote: remote, command: """
+ jujuPassword = sshCommand remote: remote, command: '''
echo `juju gui 2>&1 | grep password | cut -d: -f2`
- """
+ '''
run_robot_systest(
- container_name,
+ containerName,
params.ROBOT_TAG_NAME,
osmHostname,
prometheusHostname,
)
} // stage("System Integration Test")
} finally {
- stage("Archive Container Logs") {
+ stage('Archive Container Logs') {
// Archive logs to containers_logs.txt
archive_logs(remote)
- if ( ! currentBuild.result.equals('FAILURE') ) {
+ if (currentBuild.result != 'FAILURE') {
stage_archive = keep_artifacts
} else {
- println ("Systest test failed, throwing error")
- error = new Exception("Systest test failed")
+ println('Systest test failed, throwing error')
+ error = new Exception('Systest test failed')
currentBuild.result = 'FAILURE'
throw error
}
}
} // if ( params.DO_ROBOT )
- if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive ) {
- stage("Archive") {
- sh "echo ${container_name} > build_version.txt"
- archiveArtifacts artifacts: "build_version.txt", fingerprint: true
+ if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive) {
+ stage('Archive') {
+ sh "echo ${containerName} > build_version.txt"
+ archiveArtifacts artifacts: 'build_version.txt', fingerprint: true
// Archive the tested repo
dir("${RELEASE_DIR}") {
- ci_helper.archive(params.ARTIFACTORY_SERVER,RELEASE,GERRIT_BRANCH,'tested')
+ ci_helper.archive(params.ARTIFACTORY_SERVER, RELEASE, GERRIT_BRANCH, 'tested')
}
- if ( params.DO_DOCKERPUSH ) {
- stage("Publish to Dockerhub") {
+ if (params.DO_DOCKERPUSH) {
+ stage('Publish to Dockerhub') {
parallelSteps = [:]
for (buildStep in containerList) {
def module = buildStep
def moduleName = buildStep.toLowerCase()
def dockerTag = params.DOCKER_TAG
- def moduleTag = container_name
+ def moduleTag = containerName
parallelSteps[module] = {
dir("$module") {
- sh "docker tag opensourcemano/${moduleName}:${moduleTag} opensourcemano/${moduleName}:${dockerTag}"
+ sh("""docker tag opensourcemano/${moduleName}:${moduleTag} \
+ opensourcemano/${moduleName}:${dockerTag}""")
sh "docker push opensourcemano/${moduleName}:${dockerTag}"
}
}
parallel parallelSteps
}
- stage("Snap promotion") {
- def snaps = ["osmclient"]
- sh "snapcraft login --with ~/.snapcraft/config"
+ stage('Snap promotion') {
+ snaps = ['osmclient']
+ sh 'snapcraft login --with ~/.snapcraft/config'
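+ // For each snap, promote the latest edge revision to beta when the beta channel is behind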
for (snap in snaps) {
- channel="latest/"
- if (BRANCH_NAME.startsWith("v")) {
- channel=BRANCH_NAME.substring(1)+"/"
- } else if (BRANCH_NAME!="master") {
- channel+="/"+BRANCH_NAME.replaceAll('/','-')
+ channel = 'latest/'
+ if (BRANCH_NAME.startsWith('v')) {
+ channel = BRANCH_NAME.substring(1) + '/'
+ } else if (BRANCH_NAME != 'master') {
+ channel += '/' + BRANCH_NAME.replaceAll('/', '-')
}
- track=channel+"edge\\*"
- edge_rev=sh(returnStdout: true,
+ track = channel + 'edge\\*'
+ edge_rev = sh(returnStdout: true,
script: "snapcraft revisions $snap | " +
"grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
print "edge rev is $edge_rev"
- track=channel+"beta\\*"
- beta_rev=sh(returnStdout: true,
+ track = channel + 'beta\\*'
+ beta_rev = sh(returnStdout: true,
script: "snapcraft revisions $snap | " +
"grep \"$track\" | tail -1 | awk '{print \$1}'").trim()
print "beta rev is $beta_rev"
- if ( edge_rev != beta_rev ) {
+ if (edge_rev != beta_rev) {
print "Promoting $edge_rev to beta in place of $beta_rev"
- beta_track=channel+"beta"
+ beta_track = channel + 'beta'
sh "snapcraft release $snap $edge_rev $beta_track"
}
}
- } // stage("Snap promotion")
- } // if ( params.DO_DOCKERPUSH )
- } // stage("Archive")
- } // if ( params.SAVE_ARTIFACTS_OVERRIDE || stage_archive )
+ } // stage('Snap promotion')
+ } // if (params.DO_DOCKERPUSH)
+ } // stage('Archive')
+ } // if (params.SAVE_ARTIFACTS_OVERRIDE || stage_archive)
} // dir(OSM_DEVOPS)
} finally {
if ( params.DO_INSTALL && server_id != null) {