echo -e " --pla: install the PLA module for placement support"
echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (LW-UI, NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
- echo -e " -O <openrc file/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
+ echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
+ echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
+ echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
echo -e " -D <devops path> use local devops installation path"
echo -e " -w <work dir> Location to store runtime installation"
echo -e " -t <docker tag> specify osm docker tag (default is latest)"
if [ -z "$DEFAULT_IP" ]; then
DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
- [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
+ [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
[ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
[ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
function remove_helm() {
if [ "$(helm ls -q)" == "" ] ; then
sudo helm reset --force
- kubectl delete --namespace kube-system serviceaccount tiller
- kubectl delete clusterrolebinding tiller-cluster-rule
sudo rm /usr/local/bin/helm
rm -rf $HOME/.helm
fi
[ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
fi
remove_crontab_job
+
+ # Cleanup Openstack installer venv
+ if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
+ rm -r $OPENSTACK_PYTHON_VENV
+ fi
+
[ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
echo "Some docker images will be kept in case they are used by other docker stacks"
echo "To remove them, just run 'docker image prune' in a terminal"
sudo -H LC_ALL=C python3 -m pip install -U pip
sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
sudo apt-get install -y python3-osm-im python3-osmclient
+ if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
+ python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
+ fi
+ if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
+ sudo apt-get install -y libcurl4-openssl-dev libssl-dev
+ python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
+ fi
#sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
#echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
#echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
echo "... restarted Docker service"
if [ -n "${DOCKER_PROXY_URL}" ]; then
echo "Configuring docker proxy ..."
- if [ -f daemon.json ]; then
- if grep -q registry-mirrors daemon.json; then
- sudo sed -Ei 's/("registry-mirrors".*\[)(.*)\]/\1\2, \"'"${DOCKER_PROXY_URL}"'\"\]/' daemon.json
+ if [ -f /etc/docker/daemon.json ]; then
+ if grep -q registry-mirrors /etc/docker/daemon.json; then
+ sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
else
- sudo sed -i 's/{/{\n "registry-mirrors": [\"'"${DOCKER_PROXY_URL}"'\"]",/' daemon.json
+ sudo sed -i "s|{|{\n \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
fi
else
- sudo cat << EOF > daemon.json
+ sudo bash -c "cat << EOF > /etc/docker/daemon.json
{
- "registry-mirrors": ["${DOCKER_PROXY_URL}"]
+ \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
}
-EOF
+EOF"
fi
- sudo systemctl daemon-reload
+ sudo systemctl daemon-reload
sudo service docker restart
echo "... restarted Docker service again"
fi
# MON
if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
+ echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
fi
sudo apt-get update
echo "Installing Kubernetes Packages ..."
sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
+ sudo apt-mark hold kubelet kubeadm kubectl
}
#initializes kubernetes control plane
}
function install_k8s_storageclass() {
- kubectl apply -f https://openebs.github.io/charts/openebs-operator-1.6.0.yaml
- local storageclass_timeout=300
+ echo "Installing OpenEBS"
+ kubectl create ns openebs
+ helm repo add openebs https://openebs.github.io/charts
+ helm repo update
+ helm install --namespace openebs openebs openebs/openebs --version 1.12.0
+ helm ls -n openebs
+ local storageclass_timeout=400
local counter=0
+ local storageclass_ready=""
echo "Waiting for storageclass"
while (( counter < storageclass_timeout ))
do
if [ $? -eq 0 ] ; then
echo "Storageclass available"
+ storageclass_ready="y"
break
else
counter=$((counter + 15))
sleep 15
fi
done
+ [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}
kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
-#Install helm and tiller
+# Install the Helm v3 client. Helm v3 is tillerless, so the whole Tiller
+# serviceaccount/clusterrolebinding/deployment bootstrap below is removed.
function install_helm() {
helm > /dev/null 2>&1
if [ $? != 0 ] ; then
# Helm is not installed. Install helm
echo "Helm is not installed, installing ..."
+ # Download the pinned Helm v3 release tarball and install the binary
+ # system-wide (same install path as the old v2 flow).
- curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
- tar -zxvf helm-v2.15.2.tar.gz
+ curl https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz --output helm-v3.6.3.tar.gz
+ tar -zxvf helm-v3.6.3.tar.gz
sudo mv linux-amd64/helm /usr/local/bin/helm
rm -r linux-amd64
- rm helm-v2.15.2.tar.gz
- fi
-
- # Checking if tiller has being configured
- kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
- if [ $? == 1 ] ; then
- # tiller account for kubernetes
- kubectl --namespace kube-system create serviceaccount tiller
- kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
- # HELM initialization
- helm init --service-account tiller
-
- # Wait for Tiller to be up and running. If timeout expires, continue installing
- tiller_timeout=120;
- counter=0;
- tiller_status=""
- while (( counter < tiller_timeout ))
- do
- tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2'}`
- ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
- counter=$((counter + 5))
- sleep 5
- done
- [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
+ rm helm-v3.6.3.tar.gz
+ # Helm v3 ships with no repos configured by default; register the
+ # stable charts repo so later "helm install" calls can resolve charts.
+ helm repo add stable https://charts.helm.sh/stable
+ helm repo update
fi
}
if [ "$module" == "pla" ]; then
if [ -n "$INSTALL_PLA" ]; then
echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
- $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/${DOCKER_REGISTRY_URL}${DOCKER_USER}\/\/pla:${OSM_DOCKER_TAG}/g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
+ $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
fi
else
echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
- $WORKDIR_SUDO sed -i "s/opensourcemano\/${module}:.*/${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}/g" ${OSM_K8S_WORK_DIR}/${module}.yaml
+ $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
fi
done
}
function update_manifest_files() {
- osm_services="nbi lcm ro pol mon light-ui ng-ui keystone pla"
+ if [ -n "$NGUI" ]; then
+ osm_services="nbi lcm ro pol mon ng-ui keystone pla"
+ else
+ osm_services="nbi lcm ro pol mon light-ui keystone pla"
+ fi
list_of_services=""
for module in $osm_services; do
module_upper="${module^^}"
fi
done
list_of_services_to_rebuild=$(echo ${TO_REBUILD,,} |sed "s/lw-ui/light-ui/g")
- if [ ! "$OSM_DOCKER_TAG" == "8" ]; then
+ if [ ! "$OSM_DOCKER_TAG" == "9" ]; then
parse_yaml $OSM_DOCKER_TAG $list_of_services
fi
if [ -n "$MODULE_DOCKER_TAG" ]; then
DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
[ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
[ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
- DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
+ DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
[ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
remove_k8s_namespace $OSM_STACK_NAME
deploy_cni_provider
taint_master_node
+ install_helm
+ track install_helm
install_k8s_storageclass
track k8s_storageclass
install_k8s_metallb
track deploy_osm_pla
fi
track deploy_osm_services_k8s
- install_helm
- track install_helm
if [ -n "$INSTALL_K8S_MONITOR" ]; then
# install OSM MONITORING
install_k8s_monitoring
fi
# Install Pip for Python3
- $WORKDIR_SUDO apt install -y python3-pip
+ $WORKDIR_SUDO apt install -y python3-pip python3-venv
$WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip
- # Install Ansible, OpenStack client and SDK
- $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"
+ # Create a venv to avoid conflicts with the host installation
+ python3 -m venv $OPENSTACK_PYTHON_VENV
+
+ source $OPENSTACK_PYTHON_VENV/bin/activate
+
+ # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
+ python -m pip install -U wheel
+ python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
+
+ # Install the Openstack cloud module (ansible>=2.10)
+ ansible-galaxy collection install openstack.cloud
export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
+ ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
+
+ if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
+ ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
+ fi
+
+ if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
+ ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
+ fi
+
# Execute the Ansible playbook based on openrc or clouds.yaml
if [ -e "$1" ]; then
. $1
- ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
- -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
+ ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
+ $OSM_DEVOPS/installers/openstack/site.yml
else
- ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
- -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
+ ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
+ -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
fi
+ # Exit from venv
+ deactivate
+
return 0
}
echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
+ echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE"
+ echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE"
+ echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME"
echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
echo "TO_REBUILD=$TO_REBUILD"
echo "INSTALL_NOLXD=$INSTALL_NOLXD"
DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
}
-JUJU_AGENT_VERSION=2.8.6
+JUJU_AGENT_VERSION=2.8.8
UNINSTALL=""
DEVELOP=""
UPDATE=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
+OPENSTACK_SSH_KEY_FILE=""
+OPENSTACK_USERDATA_FILE=""
+OPENSTACK_VM_NAME="server-osm"
+OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
DOCKER_PROXY_URL=
MODULE_DOCKER_TAG=
-while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:-: hy" o; do
+while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
case "${o}" in
b)
COMMIT_ID=${OPTARG}
REPO_ARGS+=(-r "$REPOSITORY")
;;
c)
- [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && continue
+ [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
[ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
usage && exit 1
;;
n)
- [ "${OPTARG}" == "lwui" ] && NGUI="" && continue
+ [ "${OPTARG}" == "lwui" ] && NGUI="" && REPO_ARGS+=(-n "${OPTARG}") && continue
[ "${OPTARG}" == "ngui" ] && continue
echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
usage && exit 1
usage && exit 1
fi
;;
+ f)
+ OPENSTACK_SSH_KEY_FILE="${OPTARG}"
+ ;;
+ F)
+ OPENSTACK_USERDATA_FILE="${OPTARG}"
+ ;;
N)
OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
;;
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
track end
echo -e "\nDONE"
-
-