X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=installers%2Ffull_install_osm.sh;h=89a0ba660d9b3ce1286207369c8946a9ecef761e;hb=bc0d6073931b1639a905030ad356ae340c9597f8;hp=2f124011b85cffa0c6151e2b9d44e0432f2d071f;hpb=c119fb304ecd4d1085fa14b5f2ef288d4713a379;p=osm%2Fdevops.git diff --git a/installers/full_install_osm.sh b/installers/full_install_osm.sh index 2f124011..79fb0772 100755 --- a/installers/full_install_osm.sh +++ b/installers/full_install_osm.sh @@ -1,5 +1,4 @@ #!/bin/bash -# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,11 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# function usage(){ + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function echo -e "usage: $0 [OPTIONS]" echo -e "Install OSM from binaries or source code (by default, from binaries)" echo -e " OPTIONS" + echo -e " -h / --help: print this help" + echo -e " -y: do not prompt for confirmation, assumes yes" echo -e " -r : use specified repository name for osm packages" echo -e " -R : use specified release for osm binaries (deb packages, lxd images, ...)" echo -e " -u : use specified repository url for osm packages" @@ -26,787 +29,940 @@ function usage(){ echo -e " -b v2.0 (v2.0 branch)" echo -e " -b tags/v1.1.0 (a specific tag)" echo -e " ..." - echo -e " --vimemu: additionally deploy the VIM emulator as a docker container" - echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging" - echo -e " --pm_stack: additionally deploy a Prometheus+Grafana stack for performance monitoring (PM)" - echo -e " -o : do not install OSM, but ONLY one of the addons (vimemu, elk_stack, pm_stack) (assumes OSM is already installed)" + echo -e " -a : use this apt proxy url when downloading apt packages (air-gapped installation)" + echo -e " -s or user defined stack name when installed using swarm or namespace when installed using k8s, default is osm" + echo -e " -H use specific juju host controller IP" + echo -e " -S use VCA/juju secret key" + echo -e " -P use VCA/juju public key file" + echo -e " -A use VCA/juju API proxy" + echo -e " --pla: install the PLA module for placement support" + echo -e " --ng-sa: install Airflow and Pushgateway to get VNF and NS status (experimental)" + echo -e " -m : install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)" + echo -e " -o : ONLY (un)installs one of the addons (k8s_monitor, ng-sa)" + echo -e " -O : Install OSM to an OpenStack infrastructure. is required. 
If a is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
+ echo -e " -N : Public network name required to setup OSM to OpenStack"
+ echo -e " -f : Public SSH key to use to deploy OSM to OpenStack"
+ echo -e " -F : Cloud-Init userdata file to deploy OSM to OpenStack"
echo -e " -D use local devops installation path"
+ echo -e " -w Location to store runtime installation"
+ echo -e " -t specify osm docker tag (default is latest)"
+ echo -e " -l: LXD cloud yaml file"
+ echo -e " -L: LXD credentials yaml file"
+ echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
+ echo -e " -d use docker registry URL instead of dockerhub"
+ echo -e " -p set docker proxy URL as part of docker CE configuration"
+ echo -e " -T specify docker tag for the modules specified with option -m"
+ echo -e " --debug: debug mode"
+ echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
+ echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
+ echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
+ echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
+ echo -e " --nojuju: do not install juju, assumes it is already installed"
+ echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
+ echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
+ echo -e " --nohostclient: do not install the osmclient"
echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
echo -e " --source: install OSM from source code using the latest stable tag"
- echo -e " --lxdimages: download lxd images from OSM repository instead of creating them from scratch"
- echo -e " -l : use specified repository url for lxd images"
- echo -e " -p : use specified repository path for lxd images"
- echo -e " --soui: install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)"
echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
-# echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
- echo -e " --nat: install only NAT rules"
- echo -e " --noconfigure: DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules"
-# echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
+ echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
+ echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
+ echo -e " --volume: create a VM volume when installing to OpenStack"
echo -e " --showopts: print chosen options and exit (only for debugging)"
- echo -e " -y: do not prompt for confirmation, assumes yes"
- echo -e " -h / --help: print this help"
+ echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
+ echo -e " [--bundle ]: Specify with which bundle to deploy OSM with charms (--charmed option)"
+ echo -e " [--k8s ]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
+ echo -e " [--vca ]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
+
echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)" + echo -e " [--lxd ]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)" + echo -e " [--lxd-cred ]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)" + echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)" + echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)" + echo -e " [--ha]: Installs High Availability bundle. (--charmed option)" + echo -e " [--tag]: Docker image tag. (--charmed option)" + echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)" + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function +} + +# takes a juju/accounts.yaml file and returns the password specific +# for a controller. I wrote this using only bash tools to minimize +# additions of other packages +function parse_juju_password { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + password_file="${HOME}/.local/share/juju/accounts.yaml" + local controller_name=$1 + local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file | + awk -F$fs -v controller=$controller_name '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) {if (i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; for (i=0; i" fi - echo -e "\nDeleting imported lxd images if they exist" - lxc image show osm-ro &>/dev/null && lxc image delete osm-ro - lxc image show osm-vca &>/dev/null && lxc image delete osm-vca - lxc image show osm-soui &>/dev/null && lxc image delete osm-soui + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function return 0 } -function remove_stack() { - stack=$1 - if $(sg docker -c "docker stack ps ${stack}"); then - echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}" - COUNTER=0 - result=1 - while [ ${COUNTER} -lt 30 ]; do - result=$(sg docker -c "docker stack ps ${stack}" | wc -l) - #echo "Dockers running: $result" - if [ "${result}" == "0" ]; then - break +function docker_login() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + echo "Docker login" + [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}" + sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD} --password-stdin" + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function +} + +function generate_docker_images() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + echo "Pulling and generating docker images" + [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login + + echo "Pulling docker images" + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then + sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image" + sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then + sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then + sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image" + fi + 
+ if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then + sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image" + sg docker -c "docker pull kiwigrid/k8s-sidecar:${KIWIGRID_K8S_SIDECAR_TAG}" || FATAL "cannot get kiwigrid k8s-sidecar docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then + sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then + sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image" + fi + + if [ -n "$PULL_IMAGES" ]; then + echo "Pulling OSM docker images" + for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do + module_lower=${module,,} + if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then + continue fi - let COUNTER=COUNTER+1 - sleep 1 + module_tag="${OSM_DOCKER_TAG}" + if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then + module_tag="${MODULE_DOCKER_TAG}" + fi + echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image" + sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image" done - if [ "${result}" == "0" ]; then - echo "All dockers of the stack ${stack} were removed" - else - FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it." + else + _build_from=$COMMIT_ID + [ -z "$_build_from" ] && _build_from="latest" + echo "OSM Docker images generated from $_build_from" + LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")" + trap 'rm -rf "${LWTEMPDIR}"' EXIT + for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then + module_lower=${module,,} + if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then + continue + fi + git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module + git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID} + sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image" + fi + done + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then + BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY") + BUILD_ARGS+=(--build-arg RELEASE="$RELEASE") + BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY") + BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE") + sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ." 
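# Illustrative sketch (not part of the diff itself): BUILD_ARGS in the osmclient build above is
# a bash array that accumulates --build-arg options before being expanded into the docker build
# command. The values below are placeholders; the real ones come from the installer options.
BUILD_ARGS=()
BUILD_ARGS+=(--build-arg REPOSITORY="stable")
BUILD_ARGS+=(--build-arg RELEASE="ReleaseTEN")
docker build -t example/osmclient "${BUILD_ARGS[@]}" -f docker/osmclient .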
fi - sleep 5 + echo "Finished generation of docker images" fi -} -#Uninstall lightweight OSM: remove dockers -function uninstall_lightweight() { - echo -e "\nUninstalling lightweight OSM" - remove_stack osm - echo "Now osm docker images and volumes will be deleted" - newgrp docker << EONG - docker image rm osm/ro - docker image rm osm/lcm - docker image rm osm/light-ui - docker image rm osm/nbi - docker image rm osm/mon - docker image rm osm/pm - docker volume rm osm_mon_db - docker volume rm osm_mongo_db - docker volume rm osm_osm_packages - docker volume rm osm_ro_db -EONG - return 0 + echo "Finished pulling and generating docker images" + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } -#Configure NAT rules, based on the current IP addresses of containers -function nat(){ - echo -e "\nChecking required packages: iptables-persistent" - dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \ - sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent - echo -e "\nConfiguring NAT rules" - echo -e " Required root privileges" - sudo $OSM_DEVOPS/installers/nat_osm +function cmp_overwrite() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + file1="$1" + file2="$2" + if ! $(cmp "${file1}" "${file2}" >/dev/null 2>&1); then + if [ -f "${file2}" ]; then + ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2} + else + cp -b ${file1} ${file2} + fi + fi + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } -function FATAL(){ - echo "FATAL error: Cannot install OSM due to \"$1\"" - exit 1 +function generate_k8s_manifest_files() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + #Kubernetes resources + sudo cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } -#Update RO, SO and UI: -function update(){ - echo -e "\nUpdating components" - - echo -e " Updating RO" - CONTAINER="RO" - MDG="RO" - INSTALL_FOLDER="/opt/openmano" - echo -e " Fetching the repo" - lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all - BRANCH="" - BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'` - [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'" - CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1` - CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD` - echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)" - # COMMIT_ID either was previously set with -b option, or is an empty string - CHECKOUT_ID=$COMMIT_ID - [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS" - [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH" - if [[ $CHECKOUT_ID == "tags/"* ]]; then - REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID` - else - REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID` +function generate_docker_env_files() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + echo "Doing a backup of existing env files" + sudo cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~} + sudo cp $OSM_DOCKER_WORK_DIR/keystone.env{,~} + sudo cp $OSM_DOCKER_WORK_DIR/lcm.env{,~} + sudo cp $OSM_DOCKER_WORK_DIR/mon.env{,~} + sudo cp $OSM_DOCKER_WORK_DIR/nbi.env{,~} + sudo cp $OSM_DOCKER_WORK_DIR/pol.env{,~} + 
sudo cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~} + sudo cp $OSM_DOCKER_WORK_DIR/ro.env{,~} + if [ -n "${INSTALL_NGSA}" ]; then + sudo cp $OSM_DOCKER_WORK_DIR/ngsa.env{,~} fi - echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)" - if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then - echo " Nothing to be done." + + echo "Generating docker env files" + # LCM + if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then + echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env else - echo " Update required." - lxc exec $CONTAINER -- service osm-ro stop - lxc exec $CONTAINER -- git -C /opt/openmano stash - lxc exec $CONTAINER -- git -C /opt/openmano pull --rebase - lxc exec $CONTAINER -- git -C /opt/openmano checkout $CHECKOUT_ID - lxc exec $CONTAINER -- git -C /opt/openmano stash pop - lxc exec $CONTAINER -- /opt/openmano/database_utils/migrate_mano_db.sh - lxc exec $CONTAINER -- service osm-ro start + sudo sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env fi - echo - - echo -e " Updating SO and UI" - CONTAINER="SO-ub" - MDG="SO" - INSTALL_FOLDER="" # To be filled in - echo -e " Fetching the repo" - lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all - BRANCH="" - BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'` - [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'" - CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1` - CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD` - echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)" - # COMMIT_ID either was previously set with -b option, or is an empty string - CHECKOUT_ID=$COMMIT_ID - [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS" - [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH" - if [[ $CHECKOUT_ID == "tags/"* ]]; then - REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID` + + if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env else - REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID` + sudo sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env fi - echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)" - if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then - echo " Nothing to be done." + + if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env else - echo " Update required." - # Instructions to be added - # lxc exec SO-ub -- ... 
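# Illustrative sketch (not part of the diff itself): the added env-file lines in this hunk follow
# one idempotent pattern: append the variable with "tee -a" when it is missing; otherwise rewrite
# it in place with "sed -i". A hypothetical helper capturing the same idea (set_env_var is not a
# function of the installer) could look like:
function set_env_var() {    # usage: set_env_var <file> <name> <value>
    local file=$1 name=$2 value=$3
    if ! grep -Fq "${name}" "${file}"; then
        echo "${name}=${value}" | sudo tee -a "${file}"
    else
        sudo sed -i "s|${name}.*|${name}=${value}|g" "${file}"
    fi
}
# e.g. set_env_var "$OSM_DOCKER_WORK_DIR/lcm.env" OSMLCM_VCA_HOST "$OSM_VCA_HOST"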
+ sudo sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env fi - echo - echo -e "Updating MON Container" - CONTAINER="MON" - MDG="MON" - INSTALL_FOLDER="/root/MON" - echo -e " Fetching the repo" - lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all - BRANCH="" - BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'` - [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'" - CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1` - CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD` - echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)" - # COMMIT_ID either was previously set with -b option, or is an empty string - CHECKOUT_ID=$COMMIT_ID - [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS" - [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH" - if [[ $CHECKOUT_ID == "tags/"* ]]; then - REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID` + + if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env else - REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID` + sudo sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if [ -n "$OSM_VCA_APIPROXY" ]; then + if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + sudo sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + fi + + if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env fi - echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)" - if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then - echo " Nothing to be done." + + if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env else - echo " Update required." + sudo sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env fi - echo -} -function so_is_up() { - if [ -n "$1" ]; then - SO_IP=$1 + if ! 
grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env else - SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'` + sudo sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env fi - time=0 - step=5 - timelength=300 - while [ $time -le $timelength ] - do - if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \ - -H 'accept: application/vnd.yang.data+json' \ - -H 'authorization: Basic YWRtaW46YWRtaW4=' \ - -H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]] - then - echo "RW.Restconf running....SO is up" - return 0 + if [ -n "${OSM_BEHIND_PROXY}" ]; then + if ! grep -Fq "HTTP_PROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "HTTP_PROXY=${HTTP_PROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + sudo sed -i "s|HTTP_PROXY.*|HTTP_PROXY=${HTTP_PROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + if ! grep -Fq "HTTPS_PROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "HTTPS_PROXY=${HTTPS_PROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + sudo sed -i "s|HTTPS_PROXY.*|HTTPS_PROXY=${HTTPS_PROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + if ! grep -Fq "NO_PROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "NO_PROXY=${NO_PROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + sudo sed -i "s|NO_PROXY.*|NO_PROXY=${NO_PROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env fi + fi - sleep $step - echo -n "." - time=$((time+step)) - done + # RO + MYSQL_ROOT_PASSWORD=$(generate_secret) + if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then + echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro-db.env + fi + if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then + echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro.env + fi + if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then + echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ro.env + fi - FATAL "OSM Failed to startup. SO failed to startup" -} + # Keystone + KEYSTONE_DB_PASSWORD=$(generate_secret) + SERVICE_PASSWORD=$(generate_secret) + if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then + echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone-db.env + fi + if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then + echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone.env + echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env + echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env + fi -function vca_is_up() { - if [[ `lxc exec VCA -- juju status | grep "osm" | wc -l` -eq 1 ]]; then - echo "VCA is up and running" - return 0 + # NBI + if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then + echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/nbi.env + echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/nbi.env fi - FATAL "OSM Failed to startup. VCA failed to startup" -} + # MON + if [ ! 
-f $OSM_DOCKER_WORK_DIR/mon.env ]; then + echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env + echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env + echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env + fi -function mon_is_up() { - if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then - echo "MON is up and running" - return 0 + if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OS_NOTIFIER_URI=http://${OSM_DEFAULT_IP}:8662" |sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + sudo sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$OSM_DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env fi - FATAL "OSM Failed to startup. MON failed to startup" -} + if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + sudo sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env + fi -function ro_is_up() { - if [ -n "$1" ]; then - RO_IP=$1 + if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env else - RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'` + sudo sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env fi - time=0 - step=2 - timelength=20 - while [ $time -le $timelength ]; do - if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then - echo "RO is up and running" - return 0 - fi - sleep $step - echo -n "." - time=$((time+step)) - done - FATAL "OSM Failed to startup. RO failed to startup" -} + if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + sudo sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + # POL + if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then + echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | sudo tee -a $OSM_DOCKER_WORK_DIR/pol.env + fi -function configure_RO(){ - . $OSM_DEVOPS/installers/export_ips - echo -e " Configuring RO" - lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg - lxc exec RO -- service osm-ro restart + # NG-SA + if [ -n "${INSTALL_NGSA}" ] && [ ! 
-f $OSM_DOCKER_WORK_DIR/ngsa.env ]; then + echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ngsa.env + fi - ro_is_up + echo "Finished generation of docker env files" + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function +} - lxc exec RO -- openmano tenant-delete -f osm >/dev/null - lxc exec RO -- openmano tenant-create osm > /dev/null - lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc - lxc exec RO -- sed -i '$ i export OPENMANO_TENANT=osm' .bashrc - lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc' +#creates secrets from env files which will be used by containers +function kube_secrets(){ + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + kubectl create ns $OSM_STACK_NAME + kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env + kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env + kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env + kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env + kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env + kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env + kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env + if [ -n "${INSTALL_NGSA}" ]; then + kubectl create secret generic ngsa-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ngsa.env + fi + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } -function configure_VCA(){ - echo -e " Configuring VCA" - JUJU_PASSWD=`date +%s | sha256sum | base64 | head -c 32` - echo -e "$JUJU_PASSWD\n$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password +#deploys osm pods and services +function deploy_osm_services() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } -function configure_SOUI(){ - . 
$OSM_DEVOPS/installers/export_ips - JUJU_CONTROLLER_IP=`lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}'` - RO_TENANT_ID=`lxc exec RO -- openmano tenant-list osm |awk '{print $1}'` - - echo -e " Configuring MON" - #Information to be added about SO socket for logging - - echo -e " Configuring SO" - sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP - sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP - sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local - sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local - # make journaling persistent - lxc exec SO-ub -- mkdir -p /var/log/journal - lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal - lxc exec SO-ub -- systemctl restart systemd-journald - - echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad - - lxc exec SO-ub -- systemctl restart launchpad - - so_is_up $SO_CONTAINER_IP - - #delete existing config agent (could be there on reconfigure) - curl -k --request DELETE \ - --url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \ - --header 'accept: application/vnd.yang.data+json' \ - --header 'authorization: Basic YWRtaW46YWRtaW4=' \ - --header 'cache-control: no-cache' \ - --header 'content-type: application/vnd.yang.data+json' &> /dev/null - - result=$(curl -k --request POST \ - --url https://$SO_CONTAINER_IP:8008/api/config/config-agent \ - --header 'accept: application/vnd.yang.data+json' \ - --header 'authorization: Basic YWRtaW46YWRtaW4=' \ - --header 'cache-control: no-cache' \ - --header 'content-type: application/vnd.yang.data+json' \ - --data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}') - [[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result" - - #R1/R2 config line - #result=$(curl -k --request PUT \ - # --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \ - # --header 'accept: application/vnd.yang.data+json' \ - # --header 'authorization: Basic YWRtaW46YWRtaW4=' \ - # --header 'cache-control: no-cache' \ - # --header 'content-type: application/vnd.yang.data+json' \ - # --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }') - - result=$(curl -k --request PUT \ - --url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \ - --header 'accept: application/vnd.yang.data+json' \ - --header 'authorization: Basic YWRtaW46YWRtaW4=' \ - --header 'cache-control: no-cache' \ - --header 'content-type: application/vnd.yang.data+json' \ - --data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}') - [[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result" - - result=$(curl -k --request PATCH \ - --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \ - --header 'accept: application/vnd.yang.data+json' \ - --header 'authorization: Basic YWRtaW46YWRtaW4=' \ - --header 'cache-control: no-cache' \ - --header 'content-type: application/vnd.yang.data+json' \ - --data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }') - [[ $result =~ .*success.* ]] || FATAL "Failed 
redirect-uri configuration: $result" - - result=$(curl -k --request PATCH \ - --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \ - --header 'accept: application/vnd.yang.data+json' \ - --header 'authorization: Basic YWRtaW46YWRtaW4=' \ - --header 'cache-control: no-cache' \ - --header 'content-type: application/vnd.yang.data+json' \ - --data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }') - [[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result" - - lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg < tenant:osm, logs to be sent to SO -# VCA -> juju-password -# SO -> route to Juju Controller, add RO account, add VCA account -function configure(){ - #Configure components - echo -e "\nConfiguring components" - configure_RO - configure_VCA - configure_SOUI +function deploy_osm_pla_service() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + # corresponding to deploy_osm_services + kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } -function install_lxd() { - sudo apt-get update - sudo apt-get install -y lxd - newgrp lxd - lxd init --auto - lxd waitready - lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false - DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}') - DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}') - lxc profile device set default eth0 mtu $DEFAULT_MTU - #sudo systemctl stop lxd-bridge - #sudo systemctl --system daemon-reload - #sudo systemctl enable lxd-bridge - #sudo systemctl start lxd-bridge +function install_osm_ngsa_service() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + $OSM_DEVOPS/installers/install_ngsa.sh -d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} ${DEBUG_INSTALL} || \ + FATAL_TRACK install_osm_ngsa_service "install_ngsa.sh failed" + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } -function ask_user(){ - # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. 
Case insensitive - # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed - # Return: true(0) if user type 'yes'; false (1) if user type 'no' - read -e -p "$1" USER_CONFIRMATION - while true ; do - [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0 - [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1 - [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0 - [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1 - read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION +function parse_yaml() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + TAG=$1 + shift + services=$@ + for module in $services; do + if [ "$module" == "pla" ]; then + if [ -n "$INSTALL_PLA" ]; then + echo "Updating K8s manifest file from opensourcemano\/pla:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/pla:${TAG}" + sudo sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml + fi + else + image=${module} + if [ "$module" == "ng-prometheus" ]; then + image="prometheus" + fi + echo "Updating K8s manifest file from opensourcemano\/${image}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${image}:${TAG}" + sudo sed -i "s#opensourcemano/${image}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${image}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml + fi done + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } -function launch_container_from_lxd(){ - export OSM_MDG=$1 - OSM_load_config - export OSM_BASE_IMAGE=$2 - if ! container_exists $OSM_BUILD_CONTAINER; then - CONTAINER_OPTS="" - [[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true" - [[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true" - create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS - wait_container_up $OSM_BUILD_CONTAINER +function update_manifest_files() { + osm_services="nbi lcm ro pol mon ng-ui keystone pla prometheus ng-prometheus" + list_of_services="" + for module in $osm_services; do + module_upper="${module^^}" + if ! echo $TO_REBUILD | grep -q $module_upper ; then + list_of_services="$list_of_services $module" + fi + done + if [ ! 
"$OSM_DOCKER_TAG" == "13" ]; then + parse_yaml $OSM_DOCKER_TAG $list_of_services fi + if [ -n "$MODULE_DOCKER_TAG" ]; then + parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild + fi + # The manifest for prometheus is prometheus.yaml or ng-prometheus.yaml, depending on the installation option + if [ -n "$INSTALL_NGSA" ]; then + sudo rm -f ${OSM_K8S_WORK_DIR}/prometheus.yaml + else + sudo rm -f ${OSM_K8S_WORK_DIR}/ng-prometheus.yaml + fi + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } -function install_osmclient(){ - CLIENT_RELEASE=${RELEASE#"-R "} - CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg" - CLIENT_REPOSITORY=${REPOSITORY#"-r "} - [ -z "$REPOSITORY_BASE" ] && REPOSITORY_BASE="-u https://osm-download.etsi.org/repository/osm/debian" - CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "} - key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY - curl $key_location | sudo apt-key add - - sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient" - sudo apt-get update - sudo apt-get install -y python-pip - sudo -H pip install pip==9.0.3 - sudo -H pip install python-magic - sudo apt-get install -y python-osmclient - #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc - #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc - #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc - [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'` - [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'` - [ -n "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=127.0.0.1 - echo -e "\nOSM client installed" - echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:" - echo " export OSM_HOSTNAME=${OSM_HOSTNAME}" - [ -n "$INSTALL_LIGHTWEIGHT" ] && echo " export OSM_SOL005=True" - [ -z "$INSTALL_LIGHTWEIGHT" ] && echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}" - return 0 +function namespace_vol() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + # List of services with a volume mounted in path /var/lib/osm + osm_services="mysql" + for osm in $osm_services; do + if [ -f "$OSM_K8S_WORK_DIR/$osm.yaml" ] ; then + sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml + fi + done + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } -function install_from_lxdimages(){ - LXD_RELEASE=${RELEASE#"-R "} - if [ -n "$LXD_REPOSITORY_PATH" ]; then - LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH" +function add_local_k8scluster() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + /usr/bin/osm --all-projects vim-create \ + --name _system-osm-vim \ + --account_type dummy \ + --auth_url http://dummy \ + --user osm --password osm --tenant osm \ + --description "dummy" \ + --config '{management_network_name: mgmt}' + /usr/bin/osm --all-projects k8scluster-add \ + --creds ${HOME}/.kube/config \ + --vim _system-osm-vim \ + --k8s-nets '{"net1": null}' \ + --version '1.15' \ + --description "OSM Internal Cluster" \ + _system-osm-k8s + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function +} + +function configure_apt_proxy() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + OSM_APT_PROXY=$1 + OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt" + echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}" + if [ ! 
-f ${OSM_APT_PROXY_FILE} ]; then + sudo bash -c "cat < ${OSM_APT_PROXY} +Acquire::http { Proxy \"${OSM_APT_PROXY}\"; } +EOF" else - LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")" - trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT + sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE} fi - echo -e "\nDeleting previous lxd images if they exist" - lxc image show osm-ro &>/dev/null && lxc image delete osm-ro - lxc image show osm-vca &>/dev/null && lxc image delete osm-vca - lxc image show osm-soui &>/dev/null && lxc image delete osm-soui - echo -e "\nImporting osm-ro" - [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz - lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro - rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz - echo -e "\nImporting osm-vca" - [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz - lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca - rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz - echo -e "\nImporting osm-soui" - [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz - lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui - rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz - launch_container_from_lxd RO osm-ro - ro_is_up && track RO - launch_container_from_lxd VCA osm-vca - vca_is_up && track VCA - launch_container_from_lxd MON osm-mon - mon_is_up && track MON - launch_container_from_lxd SO osm-soui - #so_is_up && track SOUI - track SOUI + sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}" + track prereq apt_proxy_configured_ok + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } -function install_docker_ce() { - # installs and configures Docker CE - echo "Installing Docker CE ..." - sudo apt-get -qq update - sudo apt-get install -y apt-transport-https ca-certificates software-properties-common - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - - sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - sudo apt-get -qq update - sudo apt-get install -y docker-ce - echo "Adding user to group 'docker'" - sudo groupadd -f docker - sudo usermod -aG docker $USER - sleep 2 - sudo service docker restart - echo "... restarted Docker service" - sg docker -c "docker version" || FATAL "Docker installation failed" - echo "... Docker CE installation done" - return 0 -} +function ask_proceed() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function -function install_docker_compose() { - # installs and configures docker-compose - echo "Installing Docker Compose ..." - sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose - sudo chmod +x /usr/local/bin/docker-compose - echo "... Docker Compose installation done" -} + [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following + 1. Install and configure LXD + 2. Install juju + 3. Install docker CE + 4. Disable swap space + 5. Install and initialize Kubernetes + as pre-requirements. + Do you want to proceed (Y/n)? " y && echo "Cancelled!" 
&& exit 1
+
+ [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
+}
+
+function check_osm_behind_proxy() {
+ [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
+
+ export OSM_BEHIND_PROXY=""
+ export OSM_PROXY_ENV_VARIABLES=""
+ [ -n "${http_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "http_proxy=${http_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} http_proxy"
+ [ -n "${https_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${https_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} https_proxy"
+ [ -n "${HTTP_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTP_PROXY=${HTTP_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTP_PROXY"
+ [ -n "${HTTPS_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTPS_PROXY=${HTTPS_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTPS_PROXY"
+ [ -n "${no_proxy}" ] && echo "no_proxy=${no_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} no_proxy"
+ [ -n "${NO_PROXY}" ] && echo "NO_PROXY=${NO_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} NO_PROXY"
+
+ echo "OSM_BEHIND_PROXY=${OSM_BEHIND_PROXY}"
+ echo "OSM_PROXY_ENV_VARIABLES=${OSM_PROXY_ENV_VARIABLES}"
+
+ if [ -n "${OSM_BEHIND_PROXY}" ]; then
+ [ -z "$ASSUME_YES" ] && ! ask_user "
+The following env variables have been found for the current user:
+${OSM_PROXY_ENV_VARIABLES}.
+
+This suggests that this machine is behind a proxy and a special configuration is required.
+The installer will install Docker CE, LXD and Juju to work behind a proxy using those
+env variables.
+
+Take into account that the installer uses apt, curl, wget, docker, lxd, juju and snap.
+Depending on the program, the env variables to work behind a proxy might be different
+(e.g. http_proxy vs HTTP_PROXY).
+
+For that reason, it is strongly recommended that at least http_proxy, https_proxy, HTTP_PROXY
+and HTTPS_PROXY are defined.
+
+Finally, some of the programs (apt, snap) are run as sudoer, requiring that those env
+variables are also set for the root user. If you are not sure whether those variables
+are configured for the root user, you can stop the installation now.
+
+Do you want to proceed with the installation (Y/n)? " y && echo "Cancelled!" && exit 1
+ else
+ echo "This machine is not behind a proxy"
+ fi
+
+ [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
+}
+
+function find_devops_folder() {
+ if [ -z "$OSM_DEVOPS" ]; then
+ if [ -n "$TEST_INSTALLER" ]; then
+ echo -e "\nUsing local devops repo for OSM installation"
+ OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
+ else
+ echo -e "\nCreating temporary dir for OSM installation"
+ OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
+ trap 'rm -rf "$OSM_DEVOPS"' EXIT
+ git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
+ fi
+ fi
+}
+
+function install_osm() {
+ [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
+
+ trap ctrl_c INT
+
+ # TODO: move this under start
+ [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
+
+ check_osm_behind_proxy
+ track checks proxy_ok
+
+ check_packages "git wget curl tar snapd"
+
+ sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
+
+ find_devops_folder
+
+ # TODO: the use of stacks comes from docker-compose. We should probably remove it.
+ [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
+
+ track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
+
+ track checks checkingroot_ok
+ [ "$USER" == "root" ] && FATAL "You are running the installer as root. 
The installer is prepared to be executed as a normal user with sudo privileges." + track checks noroot_ok + + ask_proceed + track checks proceed_ok + + echo "Installing OSM" + + echo "Determining IP address of the interface with the default route" + [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}') + [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}') + [ -z "$OSM_DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0" + OSM_DEFAULT_IP=`ip -o -4 a s ${OSM_DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'` + [ -z "$OSM_DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route" + + # configure apt proxy + [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL + + # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to + if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then + LXD_INSTALL_OPTS="-D ${OSM_DEVOPS} -i ${OSM_DEFAULT_IF} ${DEBUG_INSTALL}" + [ -n "${OSM_BEHIND_PROXY}" ] && LXD_INSTALL_OPTS="${LXD_INSTALL_OPTS} -P" + $OSM_DEVOPS/installers/install_lxd.sh ${LXD_INSTALL_OPTS} || FATAL_TRACK lxd "install_lxd.sh failed" fi - echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |sudo tee ${OSM_DEVOPS}/installers/docker/mon.env - cmp_overwrite ${DEVOPS}/installers/docker/mon.env /etc/osm/docker/mon.env - echo "Finished generation of docker env files" -} -function deploy_lightweight() { - echo "Deploying lightweight build" - if [ "${DEFAULT_MTU}" != "1500" ]; then - DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s` - DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'` - sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge" + track prereq prereqok_ok + + if [ ! 
-n "$INSTALL_NODOCKER" ]; then + DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}" + [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}" + [ -n "${OSM_BEHIND_PROXY}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -P" + $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed" fi - sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}" - sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} netOSM" - remove_stack osm - sg docker -c "docker stack deploy -c /etc/osm/docker/docker-compose.yaml osm" - #docker-compose -f /etc/osm/docker/docker-compose.yaml up -d - echo "Finished deployment of lightweight build" -} -function deploy_elk() { - sudo mkdir -p /etc/osm/docker/osm_elk - sudo cp ${DEVOPS}/installers/docker/osm_elk/* /etc/osm/docker/osm_elk - remove_stack osm_elk - echo "Deploying ELK stack" - sg docker -c "docker stack deploy -c /etc/osm/docker/osm_elk/docker-compose.yml osm_elk" - echo "Waiting for ELK stack to be up and running" - time=0 - step=2 - timelength=20 - elk_is_up=1 - while [ $time -le $timelength ]; do - if [[ $(curl -XGET http://127.0.0.1:5601/status -I | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then - elk_is_up=0 - break - fi - sleep $step - time=$((time+step)) - done - if [ $elk_is_up -eq 0 ]; then - echo "ELK is up and running. Trying to create index pattern..." - #Create index pattern - curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ - "http://127.0.0.1:5601/api/saved_objects/index-pattern/logstash-*" \ - -d"{\"attributes\":{\"title\":\"logstash-*\",\"timeFieldName\":\"@timestamp\"}}" - #Make it the default index - curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ - "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \ - -d"{\"value\":\"logstash-*\"}" - else - echo "Cannot connect to Kibana to create index pattern." - echo "Once Kibana is running, you can use the following instructions to create index pattern:" - echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ - "http://127.0.0.1:5601/api/saved_objects/index-pattern/logstash-*" \ - -d"{\"attributes\":{\"title\":\"logstash-*\",\"timeFieldName\":\"@timestamp\"}}"' - echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ - "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \ - -d"{\"value\":\"logstash-*\"}"' + track docker_ce docker_ce_ok + + echo "Creating folders for installation" + [ ! -d "$OSM_DOCKER_WORK_DIR" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR + [ ! 
-d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla + sudo cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml + + $OSM_DEVOPS/installers/install_kubeadm_cluster.sh -i ${OSM_DEFAULT_IP} -d ${OSM_DOCKER_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \ + FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed" + track k8scluster k8scluster_ok + + JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_STACK_NAME} -i ${OSM_DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}" + [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}" + [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}" + [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}" + [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}" + [ -n "${OSM_BEHIND_PROXY}" ] && JUJU_OPTS="${JUJU_OPTS} -P" + $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed" + set_vca_variables + track juju juju_ok + + if [ -z "$OSM_DATABASE_COMMONKEY" ]; then + OSM_DATABASE_COMMONKEY=$(generate_secret) + [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret" fi - echo "Finished deployment of ELK stack" - return 0 -} -function deploy_perfmon() { - echo "Generating osm/kafka-exporter docker image" - sg docker -c "docker build ${OSM_DEVOPS}/installers/docker/osm_metrics/kafka-exporter -f ${OSM_DEVOPS}/installers/docker/osm_metrics/kafka-exporter/Dockerfile -t osm/kafka-exporter --no-cache" || FATAL "cannot build kafka-exporter docker image" - echo "Finished generation of osm/kafka-exporter docker image" - sudo mkdir -p /etc/osm/docker/osm_metrics - sudo cp ${DEVOPS}/installers/docker/osm_metrics/*.yml /etc/osm/docker/osm_metrics - sudo cp ${DEVOPS}/installers/docker/osm_metrics/*.json /etc/osm/docker/osm_metrics - remove_stack osm_metrics - echo "Deploying PM stack (Kafka exporter + Prometheus + Grafana)" - sg docker -c "docker stack deploy -c /etc/osm/docker/osm_metrics/docker-compose.yml osm_metrics" - echo "Finished deployment of PM stack" - return 0 -} + # Deploy OSM services + [ -z "$DOCKER_NOBUILD" ] && generate_docker_images + track docker_images docker_images_ok -function install_lightweight() { - [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges." - echo "Installing lightweight build of OSM" - LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")" - trap 'rm -rf "${LWTEMPDIR}"' EXIT - DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'` - DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'` - DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}') - need_packages_lw="lxd" - echo -e "Checking required packages: $need_packages_lw" - dpkg -l $need_packages_lw &>/dev/null \ - || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ - || sudo apt-get update \ - || FATAL "failed to run apt-get update" - dpkg -l $need_packages_lw &>/dev/null \ - || ! echo -e "Installing $need_packages_lw requires root privileges." 
\ - || sudo apt-get install -y $need_packages_lw \ - || FATAL "failed to install $need_packages_lw" - install_juju - track juju - install_docker_ce - track docker_ce - #install_docker_compose - generate_docker_images - track docker_build + generate_k8s_manifest_files + track osm_files manifest_files_ok generate_docker_env_files - deploy_lightweight - track docker_deploy - [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu - [ -n "$INSTALL_ELK" ] && deploy_elk && track elk - [ -n "$INSTALL_PERFMON" ] && deploy_perfmon && track perfmon - install_osmclient - track osmclient - wget -q -O- https://osm-download.etsi.org/ftp/osm-4.0-four/README2.txt &> /dev/null + track osm_files env_files_ok + + deploy_charmed_services + track deploy_osm deploy_charmed_services_ok + kube_secrets + track deploy_osm kube_secrets_ok + update_manifest_files + track deploy_osm update_manifest_files_ok + namespace_vol + track deploy_osm namespace_vol_ok + deploy_osm_services + track deploy_osm deploy_osm_services_k8s_ok + if [ -n "$INSTALL_PLA" ]; then + # optional PLA install + deploy_osm_pla_service + track deploy_osm deploy_osm_pla_ok + fi + if [ -n "$INSTALL_K8S_MONITOR" ]; then + # install OSM MONITORING + install_k8s_monitoring + track deploy_osm install_k8s_monitoring_ok + fi + if [ -n "$INSTALL_NGSA" ]; then + # optional PLA install + install_osm_ngsa_service + track deploy_osm install_osm_ngsa_ok + fi + + [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient + track osmclient osmclient_ok + + echo -e "Checking OSM health state..." + $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \ + (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \ + echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \ + track healthchecks osm_unhealthy didnotconverge) + track healthchecks after_healthcheck_ok + + add_local_k8scluster + track final_ops add_local_k8scluster_ok + + wget -q -O- https://osm-download.etsi.org/ftp/osm-13.0-thirteen/README2.txt &> /dev/null track end + sudo find /etc/osm + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function return 0 } -function install_vimemu() { - echo "\nInstalling vim-emu" - EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")" - trap 'rm -rf "${EMUTEMPDIR}"' EXIT - # clone vim-emu repository (attention: branch is currently master only) - echo "Cloning vim-emu repository ..." - git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR - # build vim-emu docker - echo "Building vim-emu Docker container..." - sudo docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/ || FATAL "cannot build vim-emu-img docker image" - # start vim-emu container as daemon - echo "Starting vim-emu Docker container 'vim-emu' ..." 
- if [ -n "$INSTALL_LIGHTWEIGHT" ]; then - # in lightweight mode, the emulator needs to be attached to netOSM - sudo docker run --name vim-emu -t -d --rm --privileged --pid='host' --network=netOSM -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py +function install_to_openstack() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + + if [ -z "$2" ]; then + FATAL "OpenStack installer requires a valid external network name" + fi + + # Install Pip for Python3 + sudo apt install -y python3-pip python3-venv + sudo -H LC_ALL=C python3 -m pip install -U pip + + # Create a venv to avoid conflicts with the host installation + python3 -m venv $OPENSTACK_PYTHON_VENV + + source $OPENSTACK_PYTHON_VENV/bin/activate + + # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train + python -m pip install -U wheel + python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11" + + # Install the Openstack cloud module (ansible>=2.10) + ansible-galaxy collection install openstack.cloud + + export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg" + + OSM_INSTALLER_ARGS="${REPO_ARGS[@]}" + + ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME" + + if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then + ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE" + fi + + if [ -n "$OPENSTACK_USERDATA_FILE" ]; then + ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE" + fi + + # Execute the Ansible playbook based on openrc or clouds.yaml + if [ -e "$1" ]; then + . $1 + ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \ + $OSM_DEVOPS/installers/openstack/site.yml else - # classic build mode - sudo docker run --name vim-emu -t -d --rm --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py + ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \ + -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml fi - echo "Waiting for 'vim-emu' container to start ..." - sleep 5 - export VIMEMU_HOSTNAME=$(sudo docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu) - echo "vim-emu running at ${VIMEMU_HOSTNAME} ..." 
- # print vim-emu connection info - echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:" - echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}" - echo -e "To add the emulated VIM to OSM you should do:" - echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack" + + # Exit from venv + deactivate + + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function + return 0 +} + +function install_k8s_monitoring() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + # install OSM monitoring + sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh + sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed" + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } function dump_vars(){ + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + echo "APT_PROXY_URL=$APT_PROXY_URL" echo "DEVELOP=$DEVELOP" + echo "DEBUG_INSTALL=$DEBUG_INSTALL" + echo "DOCKER_NOBUILD=$DOCKER_NOBUILD" + echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL" + echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL" + echo "DOCKER_USER=$DOCKER_USER" + echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES" echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE" - echo "UNINSTALL=$UNINSTALL" - echo "NAT=$NAT" - echo "UPDATE=$UPDATE" - echo "RECONFIGURE=$RECONFIGURE" - echo "TEST_INSTALLER=$TEST_INSTALLER" - echo "INSTALL_VIMEMU=$INSTALL_VIMEMU" - echo "INSTALL_LXD=$INSTALL_LXD" - echo "INSTALL_FROM_LXDIMAGES=$INSTALL_FROM_LXDIMAGES" - echo "LXD_REPOSITORY_BASE=$LXD_REPOSITORY_BASE" - echo "LXD_REPOSITORY_PATH=$LXD_REPOSITORY_PATH" + echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR" echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT" + echo "INSTALL_LXD=$INSTALL_LXD" + echo "INSTALL_NGSA=$INSTALL_NGSA" + echo "INSTALL_NODOCKER=$INSTALL_NODOCKER" + echo "INSTALL_NOJUJU=$INSTALL_NOJUJU" + echo "INSTALL_NOLXD=$INSTALL_NOLXD" echo "INSTALL_ONLY=$INSTALL_ONLY" - echo "INSTALL_ELK=$INSTALL_ELK" - echo "INSTALL_PERFMON=$INSTALL_PERFMON" + echo "INSTALL_PLA=$INSTALL_PLA" + echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK" + echo "INSTALL_VIMEMU=$INSTALL_VIMEMU" + echo "NO_HOST_PORTS=$NO_HOST_PORTS" + echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME" + echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD" + echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME" + echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE" + echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE" + echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME" + echo "OSM_DEVOPS=$OSM_DEVOPS" + echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG" + echo "OSM_DOCKER_WORK_DIR=$OSM_DOCKER_WORK_DIR" + echo "OSM_HELM_WORK_DIR=$OSM_HELM_WORK_DIR" + echo "OSM_K8S_WORK_DIR=$OSM_K8S_WORK_DIR" + echo "OSM_STACK_NAME=$OSM_STACK_NAME" + echo "OSM_VCA_HOST=$OSM_VCA_HOST" + echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY" + echo "OSM_VCA_SECRET=$OSM_VCA_SECRET" + echo "OSM_WORK_DIR=$OSM_WORK_DIR" + echo "PULL_IMAGES=$PULL_IMAGES" + echo "RECONFIGURE=$RECONFIGURE" echo "RELEASE=$RELEASE" echo "REPOSITORY=$REPOSITORY" echo "REPOSITORY_BASE=$REPOSITORY_BASE" echo "REPOSITORY_KEY=$REPOSITORY_KEY" - echo "NOCONFIGURE=$NOCONFIGURE" echo "SHOWOPTS=$SHOWOPTS" + echo "TEST_INSTALLER=$TEST_INSTALLER" + echo "TO_REBUILD=$TO_REBUILD" + echo "UNINSTALL=$UNINSTALL" + echo "UPDATE=$UPDATE" echo "Install from specific refspec (-b): $COMMIT_ID" + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } -function 
track(){ - ctime=`date +%s` - duration=$((ctime - SESSION_ID)) - url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}" - #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}" - event_name="bin" - [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc" - [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd" - [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw" - event_name="${event_name}_$1" - url="${url}&event=${event_name}&ce_duration=${duration}" - wget -q -O /dev/null $url +function parse_docker_registry_url() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}') + DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}') + DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}') + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function +} + +function ctrl_c() { + [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function + echo "** Trapped CTRL-C" + FATAL "User stopped the installation" + [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function } UNINSTALL="" DEVELOP="" -NAT="" UPDATE="" RECONFIGURE="" TEST_INSTALLER="" @@ -814,83 +970,230 @@ INSTALL_LXD="" SHOWOPTS="" COMMIT_ID="" ASSUME_YES="" +APT_PROXY_URL="" INSTALL_FROM_SOURCE="" -RELEASE="-R ReleaseTHREE" -REPOSITORY="-r stable" +DEBUG_INSTALL="" +RELEASE="ReleaseTEN" +REPOSITORY="stable" +INSTALL_K8S_MONITOR="" +INSTALL_NGSA="" +INSTALL_PLA="" INSTALL_VIMEMU="" -INSTALL_FROM_LXDIMAGES="" LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd" LXD_REPOSITORY_PATH="" INSTALL_LIGHTWEIGHT="y" +INSTALL_TO_OPENSTACK="" +OPENSTACK_OPENRC_FILE_OR_CLOUD="" +OPENSTACK_PUBLIC_NET_NAME="" +OPENSTACK_ATTACH_VOLUME="false" +OPENSTACK_SSH_KEY_FILE="" +OPENSTACK_USERDATA_FILE="" +OPENSTACK_VM_NAME="server-osm" +OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm" INSTALL_ONLY="" -INSTALL_ELK="" -INSTALL_PERFMON="" -NOCONFIGURE="" -RELEASE_DAILY="" -SESSION_ID=`date +%s` +TO_REBUILD="" +INSTALL_NOLXD="" +INSTALL_NODOCKER="" +INSTALL_NOJUJU="" +INSTALL_NOHOSTCLIENT="" +INSTALL_CACHELXDIMAGES="" OSM_DEVOPS= - -while getopts ":hy-:b:r:k:u:R:l:p:D:o:" o; do +OSM_VCA_HOST= +OSM_VCA_SECRET= +OSM_VCA_PUBKEY= +OSM_VCA_CLOUDNAME="localhost" +OSM_VCA_K8S_CLOUDNAME="k8scloud" +OSM_STACK_NAME=osm +NO_HOST_PORTS="" +DOCKER_NOBUILD="" +REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg" +REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian" +OSM_WORK_DIR="/etc/osm" +OSM_DOCKER_WORK_DIR="${OSM_WORK_DIR}/docker" +OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods" +OSM_HELM_WORK_DIR="${OSM_WORK_DIR}/helm" +OSM_HOST_VOL="/var/lib/osm" +OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}" +OSM_DOCKER_TAG=latest +DOCKER_USER=opensourcemano +PULL_IMAGES="y" +KAFKA_TAG=2.11-1.0.2 +KIWIGRID_K8S_SIDECAR_TAG="1.15.6" +PROMETHEUS_TAG=v2.28.1 +GRAFANA_TAG=8.1.1 +PROMETHEUS_NODE_EXPORTER_TAG=0.18.1 +PROMETHEUS_CADVISOR_TAG=latest +KEYSTONEDB_TAG=10 +OSM_DATABASE_COMMONKEY= +ELASTIC_VERSION=6.4.2 +ELASTIC_CURATOR_VERSION=5.5.4 +POD_NETWORK_CIDR=10.244.0.0/16 +K8S_MANIFEST_DIR="/etc/kubernetes/manifests" +RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' +DOCKER_REGISTRY_URL= +DOCKER_PROXY_URL= +MODULE_DOCKER_TAG= +OSM_INSTALLATION_TYPE="Default" + +while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do case "${o}" in - h) - usage && exit 0 + a) + 
APT_PROXY_URL=${OPTARG} ;; b) COMMIT_ID=${OPTARG} + PULL_IMAGES="" ;; r) - REPOSITORY="-r ${OPTARG}" - ;; - R) - RELEASE="-R ${OPTARG}" + REPOSITORY="${OPTARG}" + REPO_ARGS+=(-r "$REPOSITORY") ;; k) - REPOSITORY_KEY="-k ${OPTARG}" + REPOSITORY_KEY="${OPTARG}" + REPO_ARGS+=(-k "$REPOSITORY_KEY") ;; u) - REPOSITORY_BASE="-u ${OPTARG}" + REPOSITORY_BASE="${OPTARG}" + REPO_ARGS+=(-u "$REPOSITORY_BASE") ;; - l) - LXD_REPOSITORY_BASE="${OPTARG}" - ;; - p) - LXD_REPOSITORY_PATH="${OPTARG}" + R) + RELEASE="${OPTARG}" + REPO_ARGS+=(-R "$RELEASE") ;; D) OSM_DEVOPS="${OPTARG}" ;; o) INSTALL_ONLY="y" - [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue - [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue - [ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue + [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue + [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue + ;; + O) + INSTALL_TO_OPENSTACK="y" + if [ -n "${OPTARG}" ]; then + OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}" + else + echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2 + usage && exit 1 + fi + ;; + f) + OPENSTACK_SSH_KEY_FILE="${OPTARG}" + ;; + F) + OPENSTACK_USERDATA_FILE="${OPTARG}" + ;; + N) + OPENSTACK_PUBLIC_NET_NAME="${OPTARG}" + ;; + m) + [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue + [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue + [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue + [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue + [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue + [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue + [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue + [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue + [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue + [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue + [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue + [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue + [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue + [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue + [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue + ;; + H) + OSM_VCA_HOST="${OPTARG}" + ;; + S) + OSM_VCA_SECRET="${OPTARG}" + ;; + s) + OSM_STACK_NAME="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. 
Regex used for validation is $RE_CHECK" && exit 0 + ;; + t) + OSM_DOCKER_TAG="${OPTARG}" + REPO_ARGS+=(-t "$OSM_DOCKER_TAG") + ;; + U) + DOCKER_USER="${OPTARG}" + ;; + P) + OSM_VCA_PUBKEY=$(cat ${OPTARG}) + ;; + A) + OSM_VCA_APIPROXY="${OPTARG}" + ;; + l) + LXD_CLOUD_FILE="${OPTARG}" + ;; + L) + LXD_CRED_FILE="${OPTARG}" + ;; + K) + CONTROLLER_NAME="${OPTARG}" + ;; + d) + DOCKER_REGISTRY_URL="${OPTARG}" + ;; + p) + DOCKER_PROXY_URL="${OPTARG}" + ;; + T) + MODULE_DOCKER_TAG="${OPTARG}" ;; -) [ "${OPTARG}" == "help" ] && usage && exit 0 - [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && continue + [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue + [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue - [ "${OPTARG}" == "nat" ] && NAT="y" && continue [ "${OPTARG}" == "update" ] && UPDATE="y" && continue [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue - [ "${OPTARG}" == "lxdimages" ] && INSTALL_FROM_LXDIMAGES="y" && continue - [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue - [ "${OPTARG}" == "soui" ] && INSTALL_LIGHTWEIGHT="" && RELEASE="-R ReleaseTHREE" && REPOSITORY="-r stable" && continue - [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue - [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue - [ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue - [ "${OPTARG}" == "noconfigure" ] && NOCONFIGURE="y" && continue + [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue + [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue - [ "${OPTARG}" == "daily" ] && RELEASE_DAILY="y" && continue + [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue + [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="--nojuju" && continue + [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue + [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue + [ "${OPTARG}" == "pullimages" ] && continue + [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue + [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue + [ "${OPTARG}" == "bundle" ] && continue + [ "${OPTARG}" == "k8s" ] && continue + [ "${OPTARG}" == "lxd" ] && continue + [ "${OPTARG}" == "lxd-cred" ] && continue + [ "${OPTARG}" == "microstack" ] && continue + [ "${OPTARG}" == "overlay" ] && continue + [ "${OPTARG}" == "only-vca" ] && continue + [ "${OPTARG}" == "small-profile" ] && continue + [ "${OPTARG}" == "vca" ] && continue + [ "${OPTARG}" == "ha" ] && continue + [ "${OPTARG}" == "tag" ] && continue + [ "${OPTARG}" == "registry" ] && continue + [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue + [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue + [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue + [ "${OPTARG}" == "nocachelxdimages" ] && continue + [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue echo -e "Invalid option: '--$OPTARG'\n" >&2 usage && exit 1 ;; + :) + echo "Option -$OPTARG requires an argument" >&2 + usage && exit 1 + ;; \?) 
echo -e "Invalid option: '-$OPTARG'\n" >&2 usage && exit 1 ;; + h) + usage && exit 0 + ;; y) ASSUME_YES="y" ;; @@ -900,138 +1203,58 @@ while getopts ":hy-:b:r:k:u:R:l:p:D:o:" o; do esac done -[ -n "$INSTALL_FROM_LXDIMAGES" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --lxd can only be used with --soui" -[ -n "$NAT" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --nat can only be used with --soui" -[ -n "$NOCONFIGURE" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --noconfigure can only be used with --soui" - -if [ -n "$SHOWOPTS" ]; then - dump_vars - exit 0 -fi - -[ -n "$RELEASE_DAILY" ] && echo -e "\nInstalling from daily build repo" && RELEASE="-R ReleaseTHREE-daily" && REPOSITORY="-r testing" && COMMIT_ID="master" - -# if develop, we force master -[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master" +source $OSM_DEVOPS/common/all_funcs -# forcing source from master removed. Now only install from source when explicit -# [ -n "$COMMIT_ID" ] && [ "$COMMIT_ID" == "master" ] && INSTALL_FROM_SOURCE="y" +[ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on +[ -n "$SHOWOPTS" ] && dump_vars && exit 0 -if [ -z "$OSM_DEVOPS" ]; then - if [ -n "$TEST_INSTALLER" ]; then - echo -e "\nUsing local devops repo for OSM installation" - TEMPDIR="$(dirname $(realpath $(dirname $0)))" +# Uninstall if "--uninstall" +if [ -n "$UNINSTALL" ]; then + if [ -n "$CHARMED" ]; then + ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@" || \ + FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed" else - echo -e "\nCreating temporary dir for OSM installation" - TEMPDIR="$(mktemp -d -q --tmpdir "installosm.XXXXXX")" - trap 'rm -rf "$TEMPDIR"' EXIT + ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \ + FATAL_TRACK community_uninstall "uninstall_osm.sh failed" fi + echo -e "\nDONE" + exit 0 fi -need_packages="git jq wget curl tar" -echo -e "Checking required packages: $need_packages" -dpkg -l $need_packages &>/dev/null \ - || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ - || sudo apt-get update \ - || FATAL "failed to run apt-get update" -dpkg -l $need_packages &>/dev/null \ - || ! echo -e "Installing $need_packages requires root privileges." \ - || sudo apt-get install -y $need_packages \ - || FATAL "failed to install $need_packages" - -if [ -z "$OSM_DEVOPS" ]; then - if [ -z "$TEST_INSTALLER" ]; then - echo -e "\nCloning devops repo temporarily" - git clone https://osm.etsi.org/gerrit/osm/devops.git $TEMPDIR - RC_CLONE=$? - fi - - echo -e "\nGuessing the current stable release" - LATEST_STABLE_DEVOPS=`git -C $TEMPDIR tag -l v[0-9].* | sort -V | tail -n1` - [ -z "$COMMIT_ID" ] && [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0 - echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS" - [ -z "$COMMIT_ID" ] && [ -n "$LATEST_STABLE_DEVOPS" ] && COMMIT_ID="tags/$LATEST_STABLE_DEVOPS" - - if [ -n "$RELEASE_DAILY" ]; then - echo "Using master/HEAD devops" - git -C $TEMPDIR checkout master - elif [ -z "$TEST_INSTALLER" ]; then - git -C $TEMPDIR checkout tags/$LATEST_STABLE_DEVOPS - fi - OSM_DEVOPS=$TEMPDIR -fi - -OSM_JENKINS="$OSM_DEVOPS/jenkins" -. 
$OSM_JENKINS/common/all_funcs - -[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0 -[ -n "$UNINSTALL" ] && uninstall && echo -e "\nDONE" && exit 0 -[ -n "$NAT" ] && nat && echo -e "\nDONE" && exit 0 -[ -n "$UPDATE" ] && update && echo -e "\nDONE" && exit 0 -[ -n "$RECONFIGURE" ] && configure && echo -e "\nDONE" && exit 0 -[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk -[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon -[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu -[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0 - -#Installation starts here -wget -q -O- https://osm-download.etsi.org/ftp/osm-4.0-four/README.txt &> /dev/null -track start - -[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0 -echo -e "\nInstalling OSM from refspec: $COMMIT_ID" -if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then - ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1 +# Charmed installation +if [ -n "$CHARMED" ]; then + sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works" + export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)" + track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none + ${OSM_DEVOPS}/installers/charmed_install.sh --tag $OSM_DOCKER_TAG "$@" || \ + FATAL_TRACK charmed_install "charmed_install.sh failed" + wget -q -O- https://osm-download.etsi.org/ftp/osm-13.0-thirteen/README2.txt &> /dev/null + track end installation_type $OSM_INSTALLATION_TYPE + echo -e "\nDONE" + exit 0 fi -echo -e "Checking required packages: lxd" -lxd --version &>/dev/null || FATAL "lxd not present, exiting." -[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd - -# use local devops for containers -export OSM_USE_LOCAL_DEVOPS=true -if [ -n "$INSTALL_FROM_SOURCE" ]; then #install from source - echo -e "\nCreating the containers and building from source ..." - $OSM_DEVOPS/jenkins/host/start_build RO --notest checkout $COMMIT_ID || FATAL "RO container build failed (refspec: '$COMMIT_ID')" - ro_is_up && track RO - $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA container build failed" - vca_is_up && track VCA - $OSM_DEVOPS/jenkins/host/start_build MON || FATAL "MON install failed" - mon_is_up && track MON - $OSM_DEVOPS/jenkins/host/start_build SO checkout $COMMIT_ID || FATAL "SO container build failed (refspec: '$COMMIT_ID')" - $OSM_DEVOPS/jenkins/host/start_build UI checkout $COMMIT_ID || FATAL "UI container build failed (refspec: '$COMMIT_ID')" - #so_is_up && track SOUI - track SOUI -elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then #install from LXD images stored in OSM repo - echo -e "\nInstalling from lxd images ..." - install_from_lxdimages -else #install from binaries - echo -e "\nCreating the containers and installing from binaries ..." 
- $OSM_DEVOPS/jenkins/host/install RO $REPOSITORY $RELEASE $REPOSITORY_KEY $REPOSITORY_BASE || FATAL "RO install failed" - ro_is_up && track RO - $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA install failed" - vca_is_up && track VCA - $OSM_DEVOPS/jenkins/host/install MON || FATAL "MON build failed" - mon_is_up && track MON - $OSM_DEVOPS/jenkins/host/install SO $REPOSITORY $RELEASE $REPOSITORY_KEY $REPOSITORY_BASE || FATAL "SO install failed" - $OSM_DEVOPS/jenkins/host/install UI $REPOSITORY $RELEASE $REPOSITORY_KEY $REPOSITORY_BASE || FATAL "UI install failed" - #so_is_up && track SOUI - track SOUI +# Installation to Openstack +if [ -n "$INSTALL_TO_OPENSTACK" ]; then + install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME + echo -e "\nDONE" + exit 0 fi -#Install iptables-persistent and configure NAT rules -[ -z "$NOCONFIGURE" ] && nat - -#Configure components -[ -z "$NOCONFIGURE" ] && configure +# Community_installer -#Install osmclient -[ -z "$NOCONFIGURE" ] && install_osmclient - -#Install vim-emu (optional) -[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu +[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options" +[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option" +# if develop, we force master +[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master" +OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}" +[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring +[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_NGSA" ] && install_osm_ngsa_service +[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0 -wget -q -O- https://osm-download.etsi.org/ftp/osm-4.0-four/README2.txt &> /dev/null -track end +#Installation starts here +wget -q -O- https://osm-download.etsi.org/ftp/osm-13.0-thirteen/README.txt &> /dev/null +export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)" +install_osm echo -e "\nDONE" - +exit 0
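For reference, the parse_docker_registry_url() helper introduced in this patch derives the registry user, password and host purely from awk string splitting on '@' and ':'. Below is a minimal standalone sketch of that splitting, assuming a credentialed -d argument of the form user:password@host:port; the user, password, host and port values are made-up examples, not values taken from this patch.

    #!/bin/bash
    # Sketch of the awk splitting used by parse_docker_registry_url().
    # All concrete values here are hypothetical examples.
    DOCKER_REGISTRY_URL="osmuser:osmpass@registry.example.local:5000"
    # Everything before '@', split again on ':' -> user and password
    DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
    DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
    # Everything after '@' -> registry host (and optional port)
    DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
    echo "user=$DOCKER_REGISTRY_USER password=$DOCKER_REGISTRY_PASSWORD registry=$DOCKER_REGISTRY_URL"
    # Prints: user=osmuser password=osmpass registry=registry.example.local:5000

Note that this split keeps everything after '@' as the registry URL, so a -d value without a user:password@ prefix would leave DOCKER_REGISTRY_URL empty; the sketch assumes credentials are present in the argument.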