3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
# NOTE(review): body of the installer's usage/help function. The enclosing
# "function usage() {" header and its closing "}" are missing from this
# extraction, and several statements are split mid-line (e.g. "DEBUG beginning
# of" / "function" is one statement). Restore from upstream before executing.
# Purpose: print one help line per supported CLI option of the OSM installer.
# Globals read: DEBUG_INSTALL (when set, DEBUG trace messages are emitted).
17 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
18 echo -e "usage: $0 [OPTIONS]"
21 echo -e " -h / --help: print this help"
22 echo -e " -y: do not prompt for confirmation, assumes yes"
23 echo -e " -r <repo>: use specified repository name for osm packages"
24 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
25 echo -e " -u <repo base>: use specified repository url for osm packages"
26 echo -e " -k <repo key>: use specified repository public key url"
27 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
28 echo -e " -b master (main dev branch)"
29 echo -e " -b v2.0 (v2.0 branch)"
30 echo -e " -b tags/v1.1.0 (a specific tag)"
32 echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
33 echo -e " -c <kubernetes engine>: use a specific kubernetes engine (options: kubeadm, k3s, microk8s), default is kubeadm"
34 echo -e " -s <namespace> namespace when installed using k8s, default is osm"
35 echo -e " -H <VCA host> use specific juju host controller IP"
36 echo -e " -S <VCA secret> use VCA/juju secret key"
37 echo -e " -P <VCA pubkey> use VCA/juju public key file"
38 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " --old-sa: install old Service Assurance framework (MON, POL); do not install Airflow and Pushgateway"
41 echo -e " --ng-sa: install new Service Assurance framework (Airflow, AlertManager and Pushgateway) (enabled by default)"
42 echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
43 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor, ng-sa)"
44 echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
45 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
46 echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
47 echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
48 echo -e " -D <devops path> use local devops installation path"
49 echo -e " -w <work dir> Location to store runtime installation"
50 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
51 echo -e " -l: LXD cloud yaml file"
52 echo -e " -L: LXD credentials yaml file"
53 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
54 echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
55 echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
56 echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
57 echo -e " --debug: debug mode"
58 echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
59 echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
60 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
61 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
62 echo -e " --nojuju: do not juju, assumes already installed"
63 echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
64 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
65 echo -e " --nohostclient: do not install the osmclient"
66 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
67 echo -e " --source: install OSM from source code using the latest stable tag"
68 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
69 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
70 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
71 echo -e " --volume: create a VM volume when installing to OpenStack"
72 echo -e " --showopts: print chosen options and exit (only for debugging)"
73 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
74 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
75 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
76 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
77 echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
78 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
79 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
80 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
81 echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
82 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
83 echo -e " [--tag]: Docker image tag. (--charmed option)"
84 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
85 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
# NOTE(review): extracts the password for a given juju controller from
# ~/.local/share/juju/accounts.yaml using only sed + awk (no yq/jq dependency).
# $1 = controller name; the matching password value is printed to stdout.
# The sed pipeline flattens the YAML into FS(0x1C)-separated indent/key/value
# triples; the awk program tracks nesting depth to rebuild the key path.
# NOTE(review): several original lines are missing from this extraction (the
# awk vname[] assignment, the print statement, closing braces and the
# function's closing "}"), and many statements are split mid-line. Restore
# from upstream before executing.
88 # takes a juju/accounts.yaml file and returns the password specific
89 # for a controller. I wrote this using only bash tools to minimize
90 # additions of other packages
91 function parse_juju_password
{
92 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
93 password_file
="${HOME}/.local/share/juju/accounts.yaml"
94 local controller_name
=$1
95 local s
='[[:space:]]*' w
='[a-zA-Z0-9_-]*' fs
=$
(echo @|
tr @
'\034')
96 sed -ne "s|^\($s\):|\1|" \
97 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
98 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
99 awk -F$fs -v controller
=$controller_name '{
100 indent = length($1)/2;
102 for (i in vname) {if (i > indent) {delete vname[i]}}
103 if (length($3) > 0) {
104 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
105 if (match(vn,controller) && match($2,"password")) {
110 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
# NOTE(review): resolves the VCA (juju controller) connection variables:
# OSM_VCA_CLOUDNAME, OSM_VCA_HOST (controller api-endpoint IP), OSM_VCA_SECRET
# (admin password via parse_juju_password), OSM_VCA_PUBKEY (juju SSH pubkey)
# and OSM_VCA_CACERT (base64 CA cert via jq). Each lookup is skipped when the
# variable was already supplied on the command line; failures call FATAL.
# NOTE(review): the closing "fi" lines of the four if-blocks, the function's
# closing "}" and the "end of function" trace are missing from this
# extraction, and most statements are split mid-line. Restore from upstream
# before executing.
113 function set_vca_variables
() {
114 OSM_VCA_CLOUDNAME
="lxd-cloud"
115 [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME
="localhost"
116 if [ -z "$OSM_VCA_HOST" ]; then
117 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`sg lxd -c "juju show-controller $OSM_NAMESPACE"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
118 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
119 [ -z "$OSM_VCA_HOST" ] && FATAL
"Cannot obtain juju controller IP address"
121 if [ -z "$OSM_VCA_SECRET" ]; then
122 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$OSM_NAMESPACE)
123 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$CONTROLLER_NAME)
124 [ -z "$OSM_VCA_SECRET" ] && FATAL
"Cannot obtain juju secret"
126 if [ -z "$OSM_VCA_PUBKEY" ]; then
127 OSM_VCA_PUBKEY
=$
(cat $HOME/.local
/share
/juju
/ssh
/juju_id_rsa.pub
)
128 [ -z "$OSM_VCA_PUBKEY" ] && FATAL
"Cannot obtain juju public key"
130 if [ -z "$OSM_VCA_CACERT" ]; then
131 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$OSM_NAMESPACE '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
132 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
133 [ -z "$OSM_VCA_CACERT" ] && FATAL
"Cannot obtain juju CA certificate"
#######################################
# Generate a random 32-character alphanumeric secret.
# Globals:   DEBUG_INSTALL (read) - when non-empty, DEBUG trace lines are emitted
# Outputs:   writes the secret to stdout (no trailing newline)
# Note: reconstructed from an extraction where statements were split across
# lines and the closing "}" was missing; logic is unchanged.
#######################################
function generate_secret() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # head -c 32 terminates the pipeline early; the resulting SIGPIPE on
    # the upstream head/tr processes is expected and harmless.
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# NOTE(review): verifies each package named in $1 (NEEDED_PACKAGES) is
# installed, and apt-get installs any that are missing (FATAL on failure).
# NOTE(review): this extraction is missing the lines that set
# NEEDED_PACKAGES from $1, the dpkg/apt query whose status "$?" is tested,
# the apt-cache update command, the closing "fi"/"done" and the function's
# closing "}". Restore from upstream before executing.
143 function check_packages
() {
145 echo -e "Checking required packages: ${NEEDED_PACKAGES}"
146 for PACKAGE
in ${NEEDED_PACKAGES} ; do
148 if [ $?
-ne 0 ]; then
149 echo -e "Package ${PACKAGE} is not installed."
150 echo -e "Updating apt-cache ..."
152 echo -e "Installing ${PACKAGE} ..."
153 sudo apt-get
install -y ${PACKAGE} || FATAL
"failed to install ${PACKAGE}"
156 echo -e "Required packages are present: ${NEEDED_PACKAGES}"
# NOTE(review): body of the ask_user function; its "function ask_user(){"
# header line, the retry loop construct and the closing "}" are missing from
# this extraction. Prompts with $1, applies default answer $2 on empty input,
# and loops until a y/yes/n/no answer is given.
160 # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
161 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
162 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
163 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
164 read -e -p "$1" USER_CONFIRMATION
166 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
167 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
168 [ "${USER_CONFIRMATION,,}" == "yes" ] ||
[ "${USER_CONFIRMATION,,}" == "y" ] && return 0
169 [ "${USER_CONFIRMATION,,}" == "no" ] ||
[ "${USER_CONFIRMATION,,}" == "n" ] && return 1
170 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
172 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
# NOTE(review): installs the OSM client on the host: adds the ETSI OSM apt
# repository and its key, installs python3-osm-im / python3-osmclient, then
# pip-installs their requirements files when present, and finally prints how
# to point OSM_HOSTNAME at a non-local NBI.
# Globals read: RELEASE, REPOSITORY, REPOSITORY_BASE (each stripped of its
# CLI-flag prefix), OSM_DEFAULT_IP, DEBUG_INSTALL.
# NOTE(review): the two closing "fi" lines and the function's closing "}" are
# missing from this extraction, and most statements are split mid-line (path
# and option fragments on separate lines). Restore from upstream before
# executing. Also note apt-key is deprecated on modern Ubuntu — consider
# migrating to /etc/apt/keyrings with signed-by; TODO confirm target distro.
175 function install_osmclient
(){
176 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
177 CLIENT_RELEASE
=${RELEASE#"-R "}
178 CLIENT_REPOSITORY_KEY
="OSM%20ETSI%20Release%20Key.gpg"
179 CLIENT_REPOSITORY
=${REPOSITORY#"-r "}
180 CLIENT_REPOSITORY_BASE
=${REPOSITORY_BASE#"-u "}
181 key_location
=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
182 curl
$key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE
=1 apt-key add
-
183 sudo add-apt-repository
-y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
184 sudo apt-get
-y update
185 sudo DEBIAN_FRONTEND
=noninteractive apt-get
install -y python3-pip
186 sudo
-H LC_ALL
=C python3
-m pip
install -U pip
187 sudo DEBIAN_FRONTEND
=noninteractive apt-get
install -y python3-osm-im python3-osmclient
188 if [ -f /usr
/lib
/python
3/dist-packages
/osm_im
/requirements.txt
]; then
189 python3
-m pip
install -r /usr
/lib
/python
3/dist-packages
/osm_im
/requirements.txt
191 if [ -f /usr
/lib
/python
3/dist-packages
/osmclient
/requirements.txt
]; then
192 sudo DEBIAN_FRONTEND
=noninteractive apt-get
install -y libmagic1
193 python3
-m pip
install -r /usr
/lib
/python
3/dist-packages
/osmclient
/requirements.txt
195 echo -e "\nOSM client installed"
196 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
197 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
198 echo " export OSM_HOSTNAME=nbi.${OSM_DEFAULT_IP}.nip.io"
199 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
# NOTE(review): logs in to the configured docker registry (inside an "sg
# docker" group shell) using DOCKER_REGISTRY_USER / DOCKER_REGISTRY_PASSWORD
# parsed earlier from the registry URL. Original line 205 and the closing
# "}" are missing from this extraction.
# NOTE(review): the visible command passes BOTH "-p <password>" and
# "--password-stdin" to docker login — these options are mutually exclusive
# in the docker CLI, and "-p" exposes the password in the process list.
# Likely the password should be piped on stdin instead; verify against
# upstream before running.
203 function docker_login
() {
204 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
206 [ -z "${DEBUG_INSTALL}" ] || DEBUG
"Docker registry user: ${DOCKER_REGISTRY_USER}"
207 sg docker
-c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD} --password-stdin"
208 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
# NOTE(review): builds the OSM docker images from source. For each module in
# the list (skipping PLA unless INSTALL_PLA is set, and honoring TO_REBUILD
# filtering) it clones the module repo from OSM gerrit into a temp dir,
# checks out COMMIT_ID and docker-builds ${DOCKER_USER}/<module>. Afterwards
# it optionally builds the osmclient image with repository build-args.
# NOTE(review): original lines 223-224 (end of the PLA skip branch), 228-229
# ("fi"/"done" of the module loop), 236 ("fi" of the osmclient branch) and
# the function's closing "}" are missing from this extraction, and most
# statements are split mid-line. Restore from upstream before executing.
211 function generate_docker_images
() {
212 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
213 echo "Generating docker images"
214 _build_from
=$COMMIT_ID
215 [ -z "$_build_from" ] && _build_from
="latest"
216 echo "OSM Docker images generated from $_build_from"
217 LWTEMPDIR
="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX
")"
218 trap 'rm -rf "${LWTEMPDIR}"' EXIT
219 for module
in MON POL NBI KEYSTONE RO LCM NG-UI PLA
; do
220 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q ${module} ; then
221 module_lower
=${module,,}
222 if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
225 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/$module
226 git
-C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
227 sg docker
-c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
230 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q osmclient
; then
231 BUILD_ARGS
+=(--build-arg REPOSITORY
="$REPOSITORY")
232 BUILD_ARGS
+=(--build-arg RELEASE
="$RELEASE")
233 BUILD_ARGS
+=(--build-arg REPOSITORY_KEY
="$REPOSITORY_KEY")
234 BUILD_ARGS
+=(--build-arg REPOSITORY_BASE
="$REPOSITORY_BASE")
235 sg docker
-c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
237 echo "Finished generation of docker images"
238 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
# NOTE(review): copies $1 over $2 when the two files differ, asking the user
# before overwriting an existing destination. The local assignments of file1
# and file2 (original lines 243-244), the "else"/"fi" pair and the closing
# "}" are missing from this extraction.
# NOTE(review): "if ! $(cmp ...)" is a bug pattern — it executes cmp's
# (empty) stdout as a command instead of testing cmp's exit status; it should
# be "if ! cmp ...". Left as-is here because surrounding lines are missing;
# fix together with restoring the block from upstream.
241 function cmp_overwrite
() {
242 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
245 if ! $
(cmp "${file1}" "${file2}" >/dev
/null
2>&1); then
246 if [ -f "${file2}" ]; then
247 ask_user
"The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
249 cp -b ${file1} ${file2}
252 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
# NOTE(review): deploys the OSM services via the OSM helm chart. Builds
# OSM_HELM_OPTS incrementally (--set flags for image repository/tag, ingress
# hostnames, VCA connection data when juju is enabled, old-SA toggle, HTTP
# proxy settings), optionally prepends an osm-values.yaml values file, then
# runs "helm upgrade --install" and saves the effective values.
# NOTE(review): this extraction is missing the heredoc body/terminator for
# osm-values.yaml (original lines 264, 266-268), several "fi" closers
# (282, 288, 299, 302, 304-306, 309) and the function's closing "}", and many
# assignments are split mid-string. Restore from upstream before executing.
255 #deploys osm pods and services
256 function deploy_osm_services() {
257 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
258 # helm is already installed as part of install_kubeadm_cluster.sh
260 # Generate helm values to be passed with -f osm-values.yaml
261 sudo mkdir -p ${OSM_HELM_WORK_DIR}
262 if [ -n "${INSTALL_JUJU}" ]; then
263 sudo bash -c "cat << EOF > ${OSM_HELM_WORK_DIR}/osm-values.yaml
265 pubkey: \"${OSM_VCA_PUBKEY}\"
269 # Generate helm values to be passed with --set
271 # OSM_HELM_OPTS="${OSM_HELM_OPTS} --set nbi.useOsmSecret
=false
"
272 # OSM_HELM_OPTS="${OSM_HELM_OPTS} --set mysql.dbHostPath
=${OSM_NAMESPACE_VOL}" # not needed as mysql is now bitnami helm chart
274 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.image.repositoryBase=${DOCKER_REGISTRY_URL}${DOCKER_USER}"
275 [ ! "$OSM_DOCKER_TAG" == "testing-daily
" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set-string global.image.tag
=${OSM_DOCKER_TAG}"
276 [ ! "$OSM_DOCKER_TAG" == "testing-daily
" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set prometheus.server.sidecarContainers.prometheus-config-sidecar.image=${DOCKER_REGISTRY_URL}${DOCKER_USER}/prometheus:${OSM_DOCKER_TAG}"
278 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.hostname
=${OSM_DEFAULT_IP}.nip.io
"
279 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set grafana.ingress.hosts={grafana.${OSM_DEFAULT_IP}.nip.io}"
280 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set prometheus.server.ingress.hosts={prometheus.${OSM_DEFAULT_IP}.nip.io}"
281 # OSM_HELM_OPTS="${OSM_HELM_OPTS} --set prometheus.alertmanager.ingress.hosts={alertmanager.${OSM_DEFAULT_IP}.nip.io}"
283 if [ -n "${INSTALL_JUJU}" ]; then
284 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.enabled
=true
"
285 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.
host=${OSM_VCA_HOST}"
286 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.secret
=${OSM_VCA_SECRET}"
287 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.cacert
=${OSM_VCA_CACERT}"
289 [ -n "$OSM_VCA_APIPROXY" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set lcm.config.OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}"
291 [ -n "${INSTALL_NGSA}" ] || OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.oldServiceAssurance
=true
"
292 if [ -n "${OSM_BEHIND_PROXY}" ]; then
293 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.behindHttpProxy
=true
"
294 [ -n "${HTTP_PROXY}" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.HTTP_PROXY=\"${HTTP_PROXY}\""
295 [ -n "${HTTPS_PROXY}" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.HTTPS_PROXY=\"${HTTPS_PROXY}\""
296 if [ -n "${NO_PROXY}" ]; then
297 if [[ ! "${NO_PROXY}" =~ .*".svc
".* ]]; then
298 NO_PROXY="${NO_PROXY},.svc
"
300 if [[ ! "${NO_PROXY}" =~ .*".cluster.
local".* ]]; then
301 NO_PROXY="${NO_PROXY},.cluster.
local"
303 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.NO_PROXY
=\"${NO_PROXY//,/\,}\""
307 if [ -n "${INSTALL_JUJU}" ]; then
308 OSM_HELM_OPTS="-f ${OSM_HELM_WORK_DIR}/osm-values.yaml
${OSM_HELM_OPTS}"
310 echo "helm upgrade
--install -n $OSM_NAMESPACE --create-namespace $OSM_NAMESPACE $OSM_DEVOPS/installers
/helm
/osm
${OSM_HELM_OPTS}"
311 helm upgrade --install -n $OSM_NAMESPACE --create-namespace $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm ${OSM_HELM_OPTS}
312 # Override existing values.yaml with the final values.yaml used to install OSM
313 helm -n $OSM_NAMESPACE get values $OSM_NAMESPACE | sudo tee -a ${OSM_HELM_WORK_DIR}/osm-values.yaml
314 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
#######################################
# Deploy charmed services: create the OSM juju model on the VCA K8s cloud
# and deploy the MongoDB charm into it.
# Globals:   OSM_NAMESPACE, OSM_VCA_K8S_CLOUDNAME (read),
#            DEBUG_INSTALL (read) - when non-empty, DEBUG trace lines are emitted
# Note: reconstructed from an extraction missing the closing "}"; the juju
# commands themselves are unchanged.
#######################################
function deploy_charmed_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    juju add-model $OSM_NAMESPACE $OSM_VCA_K8S_CLOUDNAME
    juju deploy ch:mongodb-k8s -m $OSM_NAMESPACE --channel latest/stable
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Deploy MongoDB by delegating to devops' install_mongodb.sh helper script.
# Globals:   OSM_HELM_WORK_DIR, OSM_DEVOPS, OSM_DOCKER_TAG, DOCKER_USER,
#            DEBUG_INSTALL, DOCKER_REGISTRY_URL (all read)
# On failure the FATAL_TRACK helper aborts the installation and reports it.
# Note: reconstructed from an extraction with a split string literal and a
# missing closing "}"; logic is unchanged.
#######################################
function deploy_mongodb() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    MONGO_OPTS="-d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} -U ${DOCKER_USER} ${DEBUG_INSTALL}"
    # only pass -r when a private registry was configured
    [ -n "${DOCKER_REGISTRY_URL}" ] && MONGO_OPTS="${MONGO_OPTS} -r ${DOCKER_REGISTRY_URL}"
    $OSM_DEVOPS/installers/install_mongodb.sh ${MONGO_OPTS} || \
        FATAL_TRACK install_osm_mongodb_service "install_mongodb.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Install the New Generation Service Assurance (NGSA) stack by delegating to
# devops' install_ngsa.sh helper script.
# Globals:   OSM_DEFAULT_IP, OSM_HELM_WORK_DIR, OSM_DEVOPS, OSM_DOCKER_TAG,
#            DOCKER_USER, DEBUG_INSTALL, DOCKER_REGISTRY_URL (all read)
# On failure the FATAL_TRACK helper aborts the installation and reports it.
# Note: reconstructed from an extraction with a split string literal and a
# missing closing "}"; logic is unchanged.
#######################################
function install_osm_ngsa_service() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    NGSA_OPTS="-i ${OSM_DEFAULT_IP} -d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} -U ${DOCKER_USER} ${DEBUG_INSTALL}"
    # only pass -r when a private registry was configured
    [ -n "${DOCKER_REGISTRY_URL}" ] && NGSA_OPTS="${NGSA_OPTS} -r ${DOCKER_REGISTRY_URL}"
    $OSM_DEVOPS/installers/install_ngsa.sh ${NGSA_OPTS} || \
        FATAL_TRACK install_osm_ngsa_service "install_ngsa.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# NOTE(review): registers the local kubernetes cluster in OSM: creates a
# dummy "_system-osm-vim" VIM account against the NBI at
# nbi.<default-ip>.nip.io:443, then adds the host's ~/.kube/config cluster as
# "_system-osm-k8s" attached to that VIM.
# NOTE(review): original lines 359 and 361 (likely the k8s version flag and
# the cluster --name argument) plus the closing "}" are missing from this
# extraction, and several strings are split mid-line. Restore from upstream
# before executing.
344 function add_local_k8scluster() {
345 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
346 # OSM_HOSTNAME=$(kubectl get --namespace osm -o jsonpath="{.spec.rules
[0].
host}" ingress nbi-ingress)
347 OSM_HOSTNAME="nbi.
${OSM_DEFAULT_IP}.nip.io
:443"
348 /usr/bin/osm --hostname ${OSM_HOSTNAME} --all-projects vim-create \
349 --name _system-osm-vim \
350 --account_type dummy \
351 --auth_url http://dummy \
352 --user osm --password osm --tenant osm \
353 --description "dummy
" \
354 --config '{management_network_name: mgmt}'
355 /usr/bin/osm --hostname ${OSM_HOSTNAME} --all-projects k8scluster-add \
356 --creds ${HOME}/.kube/config \
357 --vim _system-osm-vim \
358 --k8s-nets '{"net1
": null}' \
360 --description "OSM Internal Cluster
" \
362 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
# NOTE(review): writes (or updates) /etc/apt/apt.conf.d/osm-apt so apt uses
# the proxy URL passed in, then runs "apt-get update" to validate the
# configuration. The assignment of OSM_APT_PROXY from $1 (original line 367),
# the heredoc terminator (373), the "else" (374), the "fi" (376) and the
# closing "}" are missing from this extraction.
# NOTE(review): the visible redirect "cat <<EOF > ${OSM_APT_PROXY}" targets
# the proxy URL variable, not ${OSM_APT_PROXY_FILE} — this looks like a
# wrong-variable bug (the file should be the redirect target); confirm
# against upstream before fixing, since the adjacent missing lines may
# change the picture.
365 function configure_apt_proxy() {
366 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
368 OSM_APT_PROXY_FILE="/etc
/apt
/apt.conf.d
/osm-apt
"
369 echo "Configuring apt proxy
in file ${OSM_APT_PROXY_FILE}"
370 if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
371 sudo bash -c "cat <<EOF > ${OSM_APT_PROXY}
372 Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
375 sudo sed -i "s|Proxy.
*|Proxy
\"${OSM_APT_PROXY}\"; }|
" ${OSM_APT_PROXY_FILE}
377 sudo apt-get update || FATAL "Configured apt proxy
, but couldn
't run 'apt-get update
'. Check ${OSM_APT_PROXY_FILE}"
378 track prereq apt_proxy_configured_ok
379 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
# NOTE(review): unless ASSUME_YES is set, shows a multi-line summary of the
# actions the installer will take and aborts (exit 1) if the user does not
# confirm via ask_user with default 'y'. Original lines 384, 387-388, 391 and
# 393 (additional summary bullet lines / blank lines) and the closing "}"
# are missing from this extraction; the prompt string is split across lines.
382 function ask_proceed() {
383 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
385 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
386 1. Install and configure LXD
389 4. Disable swap space
390 5. Install and initialize Kubernetes
392 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
394 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
# NOTE(review): detects whether the host sits behind an HTTP proxy by
# inspecting the http(s)_proxy / HTTP(S)_PROXY / no_proxy / NO_PROXY env
# variables; exports OSM_BEHIND_PROXY and the list OSM_PROXY_ENV_VARIABLES,
# then (unless ASSUME_YES) warns the user and asks for confirmation.
# NOTE(review): the "else" branch opener, the closing "fi" lines and the
# function's closing "}" (original lines 399, 408, 411, 416, 419-420, 424,
# 427, 431, 433, 435-436) are missing from this extraction, and the ask_user
# prompt text is split across many lines.
# NOTE(review): line 405 echoes the label "https_proxy=" for the value of
# ${HTTPS_PROXY} (uppercase variable) — probably a copy-paste typo in the
# label; harmless but worth fixing upstream.
397 function check_osm_behind_proxy() {
398 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
400 export OSM_BEHIND_PROXY=""
401 export OSM_PROXY_ENV_VARIABLES=""
402 [ -n "${http_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "http_proxy=${http_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} http_proxy"
403 [ -n "${https_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${https_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} https_proxy"
404 [ -n "${HTTP_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTP_PROXY=${HTTP_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTP_PROXY"
405 [ -n "${HTTPS_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${HTTPS_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTPS_PROXY"
406 [ -n "${no_proxy}" ] && echo "no_proxy=${no_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} no_proxy"
407 [ -n "${NO_PROXY}" ] && echo "NO_PROXY=${NO_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} NO_PROXY"
409 echo "OSM_BEHIND_PROXY=${OSM_BEHIND_PROXY}"
410 echo "OSM_PROXY_ENV_VARIABLES=${OSM_PROXY_ENV_VARIABLES}"
412 if [ -n "${OSM_BEHIND_PROXY}" ]; then
413 [ -z "$ASSUME_YES" ] && ! ask_user "
414 The following env variables have been found for the current user:
415 ${OSM_PROXY_ENV_VARIABLES}.
417 This suggests that this machine is behind a proxy and a special configuration is required.
418 The installer will install Docker CE, LXD and Juju to work behind a proxy using those
421 Take into account that the installer uses apt, curl, wget, docker, lxd, juju and snap.
422 Depending on the program, the env variables to work behind a proxy might be different
423 (e.g. http_proxy vs HTTP_PROXY).
425 For that reason, it is strongly recommended that at least http_proxy, https_proxy, HTTP_PROXY
426 and HTTPS_PROXY are defined.
428 Finally, some of the programs (apt, snap) those programs are run as sudoer, requiring that
429 those env variables are also set for root user. If you are not sure whether those variables
430 are configured for the root user, you can stop the installation now.
432 Do you want to proceed with the installation (Y/n)? " y && echo "Cancelled!" && exit 1
434 echo "This machine is not behind a proxy"
437 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
# NOTE(review): resolves OSM_DEVOPS when not provided: with TEST_INSTALLER it
# uses the repo directory containing this script; otherwise it clones the
# devops repo from OSM gerrit into a temp dir that is removed on EXIT.
# The "else" line (original 445), the closing "fi"/"fi" pair (450-451) and
# the function's closing "}" are missing from this extraction. Restore from
# upstream before executing.
440 function find_devops_folder() {
441 if [ -z "$OSM_DEVOPS" ]; then
442 if [ -n "$TEST_INSTALLER" ]; then
443 echo -e "\nUsing local devops repo for OSM installation"
444 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
446 echo -e "\nCreating temporary dir for OSM installation"
447 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
448 trap 'rm -rf "$OSM_DEVOPS"' EXIT
449 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
# NOTE(review): top-level orchestration of a full OSM install: proxy check,
# package prerequisites, root-user guard, default interface/IP discovery,
# optional apt proxy, optional LXD, Docker CE + helm client, kubeadm cluster
# and addons, optional juju, image build, mongodb + OSM helm deployment,
# optional k8s monitoring and NGSA, osmclient, health check, local cluster
# registration, and "track" telemetry checkpoints along the way.
# NOTE(review): many original lines are missing from this extraction (e.g.
# calls such as find_devops_folder/ask_proceed/deploy_mongodb/
# deploy_osm_services/add_local_k8scluster, multiple "fi" closers, and the
# function's closing "}"), and several awk programs are split mid-string —
# the visible "track" checkpoints reference steps whose call lines were
# dropped. Restore from upstream before executing; do not run as-is.
454 function install_osm() {
455 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
460 check_osm_behind_proxy
461 check_packages "git wget curl tar snapd"
462 if [ -n "${INSTALL_JUJU}" ]; then
463 sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
467 track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none os_info $os_info none none
469 track checks checkingroot_ok
470 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
471 track checks noroot_ok
473 track checks proceed_ok
475 echo "Installing OSM"
477 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
479 echo "Determining IP address of the interface with the default route"
480 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(ip route list|awk '$1=="default" {print
$5; exit}')
481 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(route -n |awk '$1~
/^
0.0.0.0/ {print
$8; exit}')
482 [ -z "$OSM_DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
483 OSM_DEFAULT_IP=`ip -o -4 a s ${OSM_DEFAULT_IF} |awk '{split($4,a
,"/"); print a
[1]; exit}'`
484 [ -z "$OSM_DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
486 # configure apt proxy
487 [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL
489 # if lxd is requested, we will install it
490 if [ -n "$INSTALL_LXD" ]; then
491 LXD_INSTALL_OPTS="-D ${OSM_DEVOPS} -i ${OSM_DEFAULT_IF} ${DEBUG_INSTALL}"
492 [ -n "${OSM_BEHIND_PROXY}" ] && LXD_INSTALL_OPTS="${LXD_INSTALL_OPTS} -P"
493 $OSM_DEVOPS/installers/install_lxd.sh ${LXD_INSTALL_OPTS} || FATAL_TRACK lxd "install_lxd.sh failed"
496 track prereq prereqok_ok
498 if [ -n "$INSTALL_DOCKER" ] || [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
499 if [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
500 echo "Kubeadm requires docker, so docker will be installed."
502 DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
503 [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
504 [ -n "${OSM_BEHIND_PROXY}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -P"
505 $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed"
507 track docker_ce docker_ce_ok
509 $OSM_DEVOPS/installers/install_helm_client.sh -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
510 FATAL_TRACK k8scluster "install_helm_client.sh failed"
511 track helm_client install_helm_client_ok
513 if [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
514 KUBEADM_INSTALL_OPTS="-d ${OSM_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
515 $OSM_DEVOPS/installers/install_kubeadm_cluster.sh ${KUBEADM_INSTALL_OPTS} || \
516 FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed"
517 K8SCLUSTER_ADDONS_INSTALL_OPTS="-i ${OSM_DEFAULT_IP} -d ${OSM_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
518 $OSM_DEVOPS/installers/install_cluster_addons.sh ${K8SCLUSTER_ADDONS_INSTALL_OPTS} || \
519 FATAL_TRACK k8scluster "install_cluster_addons.sh failed"
521 track k8scluster k8scluster_ok
523 if [ -n "${INSTALL_JUJU}" ]; then
524 JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_NAMESPACE} -i ${OSM_DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_CACHELXDIMAGES}"
525 [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
526 [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
527 [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
528 [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
529 [ -n "${OSM_BEHIND_PROXY}" ] && JUJU_OPTS="${JUJU_OPTS} -P"
530 $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
535 # Deploy OSM services
536 [ -z "$DOCKER_NOBUILD" ] && [ -z "$PULL_IMAGES" ] && generate_docker_images
537 track docker_images docker_images_ok
540 track deploy_osm deploy_mongodb_ok
542 track deploy_osm deploy_osm_services_k8s_ok
543 if [ -n "$INSTALL_K8S_MONITOR" ]; then
544 # install OSM MONITORING
545 install_k8s_monitoring
546 track deploy_osm install_k8s_monitoring_ok
548 if [ -n "$INSTALL_NGSA" ]; then
549 # optional NGSA install
550 install_osm_ngsa_service
551 track deploy_osm install_osm_ngsa_ok
554 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
555 track osmclient osmclient_ok
557 echo -e "Checking OSM health state..."
558 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_NAMESPACE} -k || \
559 (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
560 echo -e "Check OSM status with: kubectl -n ${OSM_NAMESPACE} get all" && \
561 track healthchecks osm_unhealthy didnotconverge)
562 track healthchecks after_healthcheck_ok
565 track final_ops add_local_k8scluster_ok
567 # if lxd is requested, iptables firewall is updated to work with both docker and LXD
568 if [ -n "$INSTALL_LXD" ]; then
569 arrange_docker_default_network_policy
572 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README2.txt &> /dev/null
575 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
# NOTE(review): deploys OSM onto an OpenStack VM via Ansible: installs
# pip/venv, creates a venv with the OpenStack client/SDK and ansible 2.10,
# installs the openstack.cloud collection, then runs the site.yml playbook —
# one branch for an openrc file ($1 sourced), one for a clouds.yaml cloud
# name (-e cloud_name=$1). $2 = external network, $3 = setup_volume flag.
# NOTE(review): the argument-validation "if" around the FATAL line, the
# openrc-vs-cloud branch conditionals, the venv deactivate/cleanup and the
# closing "}" (original lines 581-582, 584-585, 617-618, 621, 624-628) are
# missing from this extraction. Restore from upstream before executing.
579 function install_to_openstack() {
580 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
583 FATAL "OpenStack installer requires a valid external network name"
586 # Install Pip for Python3
587 sudo apt install -y python3-pip python3-venv
588 sudo -H LC_ALL=C python3 -m pip install -U pip
590 # Create a venv to avoid conflicts with the host installation
591 python3 -m venv $OPENSTACK_PYTHON_VENV
593 source $OPENSTACK_PYTHON_VENV/bin/activate
595 # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
596 python -m pip install -U wheel
597 python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
599 # Install the Openstack cloud module (ansible>=2.10)
600 ansible-galaxy collection install openstack.cloud
602 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
604 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
606 ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
608 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
609 ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
612 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
613 ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
616 # Execute the Ansible playbook based on openrc or clouds.yaml
619 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
620 $OSM_DEVOPS/installers/openstack/site.yml
622 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
623 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
629 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
#######################################
# Allow docker and LXD to coexist on one host: insert an ACCEPT rule at the
# top of the DOCKER-USER iptables chain (docker's hook for user rules) and
# persist the v4/v6 rulesets to /etc/iptables so they survive reboots.
# Note: reconstructed from an extraction missing the closing "}"; the
# commands themselves are unchanged.
#######################################
function arrange_docker_default_network_policy() {
    echo -e "Fixing firewall so docker and LXD can share the same host without affecting each other."
    sudo iptables -I DOCKER-USER -j ACCEPT
    sudo iptables-save | sudo tee /etc/iptables/rules.v4
    sudo ip6tables-save | sudo tee /etc/iptables/rules.v6
}
#######################################
# Install the OSM kubernetes monitoring stack by running the devops helper
# script; aborts via FATAL_TRACK on failure.
# Globals:   OSM_DEVOPS (read), DEBUG_INSTALL (read)
# Note: reconstructed from an extraction missing the closing "}"; the
# commands themselves are unchanged.
#######################################
function install_k8s_monitoring() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # install OSM monitoring
    sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Print every installer configuration variable, one NAME=value line per
# variable, for --showopts and debugging. Read-only: echoes globals set by
# option parsing; writes nothing.
# Note: reconstructed from an extraction missing only the closing "}"; every
# echo line is reproduced byte-for-byte.
#######################################
function dump_vars(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "APT_PROXY_URL=$APT_PROXY_URL"
    echo "K8S_CLUSTER_ENGINE=$K8S_CLUSTER_ENGINE"
    echo "DEVELOP=$DEVELOP"
    echo "DEBUG_INSTALL=$DEBUG_INSTALL"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "INSTALL_JUJU=$INSTALL_JUJU"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_NGSA=$INSTALL_NGSA"
    echo "INSTALL_DOCKER=$INSTALL_DOCKER"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE"
    echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE"
    echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "OSM_HELM_WORK_DIR=$OSM_HELM_WORK_DIR"
    echo "OSM_NAMESPACE=$OSM_NAMESPACE"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "Install from specific refspec (-b): $COMMIT_ID"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Split DOCKER_REGISTRY_URL of the form "user:password@registry[:port]" into
# its components:
#   DOCKER_REGISTRY_USER     - text before the first ':' in the credentials
#   DOCKER_REGISTRY_PASSWORD - text after that ':' (empty if none given)
#   DOCKER_REGISTRY_URL      - rewritten to hold only the part after the '@'
# The awk programs were previously garbled across several lines (newlines
# injected inside the program text), which broke the parsing entirely.
function parse_docker_registry_url() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # a[] splits credentials from host on '@'; b[] splits user from password on ':'
    DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
    DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
    # Must be assigned last, since the two lines above still read the full URL
    DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
708 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
709 echo "** Trapped CTRL-C"
710 FATAL "User stopped the installation"
711 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
# ---------------------------------------------------------------------------
# Default values for the installer configuration; any of these may be
# overridden by the command-line options parsed below.
# ---------------------------------------------------------------------------
K8S_CLUSTER_ENGINE="kubeadm"
INSTALL_FROM_SOURCE=""
RELEASE="testing-daily"
INSTALL_K8S_MONITOR=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_NOHOSTCLIENT=""
INSTALL_CACHELXDIMAGES=""
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
OSM_WORK_DIR="/etc/osm"
OSM_HELM_WORK_DIR="${OSM_WORK_DIR}/helm"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_NAMESPACE}"
OSM_DOCKER_TAG="testing-daily"
DOCKER_USER=opensourcemano
KIWIGRID_K8S_SIDECAR_TAG="1.15.6"
PROMETHEUS_TAG=v2.28.1
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# DNS-label (RFC 1123) pattern used to validate the -s <namespace> argument:
# lowercase alphanumerics, optional inner hyphens, no leading/trailing hyphen.
# FIX: the literal was previously broken across several lines, embedding
# newlines in the pattern and making every namespace fail validation.
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
OSM_INSTALLATION_TYPE="Default"
# ---------------------------------------------------------------------------
# Command-line option parsing.
# NOTE(review): the enclosing 'case "${o}" in' labels are not visible in this
# excerpt; the fragments below are the bodies of the individual option
# handlers. Comments indicate which flag each fragment appears to implement,
# based on the usage() text — confirm against the full file.
# ---------------------------------------------------------------------------
while getopts ":a:b:c:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
    # -a <apt proxy url>: apt proxy for air-gapped installations
    APT_PROXY_URL=${OPTARG}
    # -c <kubernetes engine>: only kubeadm, k3s and microk8s are accepted
    K8S_CLUSTER_ENGINE=${OPTARG}
    [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ] && continue
    [ "${K8S_CLUSTER_ENGINE}" == "k3s" ] && continue
    [ "${K8S_CLUSTER_ENGINE}" == "microk8s" ] && continue
    echo -e "Invalid argument for -c : ' ${K8S_CLUSTER_ENGINE}'\n" >&2
    # -r <repo>: repository name, forwarded to sub-installers via REPO_ARGS
    REPOSITORY="${OPTARG}"
    REPO_ARGS+=(-r "$REPOSITORY")
    # -k <repo key>: repository public key URL
    REPOSITORY_KEY="${OPTARG}"
    REPO_ARGS+=(-k "$REPOSITORY_KEY")
    # -u <repo base>: repository base URL
    REPOSITORY_BASE="${OPTARG}"
    REPO_ARGS+=(-u "$REPOSITORY_BASE")
    # -R <release>: release to install
    REPO_ARGS+=(-R "$RELEASE")
    # -D <devops path>: location of the devops checkout
    OSM_DEVOPS="${OPTARG}"
    # -o <component>: install only the named component
    [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
    [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
    # -O <openrc|cloud>: install OSM to an OpenStack infrastructure
    INSTALL_TO_OPENSTACK="y"
    if [ -n "${OPTARG}" ]; then
        OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
        echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
    # OpenStack ssh key / userdata / public network options
    OPENSTACK_SSH_KEY_FILE="${OPTARG}"
    OPENSTACK_USERDATA_FILE="${OPTARG}"
    OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
    # -m <module>: accumulate modules to rebuild from local source
    [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
    [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
    [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
    [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
    [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
    [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
    [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
    [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
    [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
    [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
    [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
    [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
    [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
    [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
    [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
    # -H <VCA host>: juju controller IP
    OSM_VCA_HOST="${OPTARG}"
    # -S <VCA secret>
    OSM_VCA_SECRET="${OPTARG}"
    # -s <namespace>: validated against RE_CHECK (RFC-1123 label)
    OSM_NAMESPACE="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
    # -t <tag>: docker tag for OSM images
    OSM_DOCKER_TAG="${OPTARG}"
    REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
    # -d <docker user>
    DOCKER_USER="${OPTARG}"
    # -P <VCA pubkey file>: read the public key contents from the given file
    OSM_VCA_PUBKEY=$(cat ${OPTARG})
    # -A <VCA apiproxy>
    OSM_VCA_APIPROXY="${OPTARG}"
    # LXD cloud/credential files and controller name for external juju
    LXD_CLOUD_FILE="${OPTARG}"
    LXD_CRED_FILE="${OPTARG}"
    CONTROLLER_NAME="${OPTARG}"
    # docker registry / proxy / per-module tag
    DOCKER_REGISTRY_URL="${OPTARG}"
    DOCKER_PROXY_URL="${OPTARG}"
    MODULE_DOCKER_TAG="${OPTARG}"
    # --<long-option> handlers (getopts '-:' trick: OPTARG holds the word)
    [ "${OPTARG}" == "help" ] && usage && exit 0
    [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
    [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
    [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
    [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
    [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
    [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
    [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
    [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
    [ "${OPTARG}" == "lxd" ] && INSTALL_LXD="y" && continue
    [ "${OPTARG}" == "nolxd" ] && INSTALL_LXD="" && continue
    [ "${OPTARG}" == "docker" ] && INSTALL_DOCKER="y" && continue
    [ "${OPTARG}" == "nodocker" ] && INSTALL_DOCKER="" && continue
    [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
    [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
    [ "${OPTARG}" == "juju" ] && INSTALL_JUJU="y" && continue
    [ "${OPTARG}" == "nojuju" ] && INSTALL_JUJU="" && continue
    [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
    [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
    [ "${OPTARG}" == "pullimages" ] && continue
    [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
    [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
    # Options below are accepted (for the charmed installer) but ignored here
    [ "${OPTARG}" == "bundle" ] && continue
    [ "${OPTARG}" == "k8s" ] && continue
    [ "${OPTARG}" == "lxd-cred" ] && continue
    [ "${OPTARG}" == "microstack" ] && continue
    [ "${OPTARG}" == "overlay" ] && continue
    [ "${OPTARG}" == "only-vca" ] && continue
    [ "${OPTARG}" == "small-profile" ] && continue
    [ "${OPTARG}" == "vca" ] && continue
    [ "${OPTARG}" == "ha" ] && continue
    [ "${OPTARG}" == "tag" ] && continue
    [ "${OPTARG}" == "registry" ] && continue
    [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
    [ "${OPTARG}" == "old-sa" ] && INSTALL_NGSA="" && continue
    [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
    [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
    [ "${OPTARG}" == "nocachelxdimages" ] && continue
    [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
    echo -e "Invalid option: '--$OPTARG'\n" >&2
    # ':' — a flag was given without its required argument
    echo "Option -$OPTARG requires an argument" >&2
    # '?' — unknown short option
    echo -e "Invalid option: '-$OPTARG'\n" >&2
# Load the shared helper functions (DEBUG, FATAL, FATAL_TRACK, TRACK, ...)
source $OSM_DEVOPS/common/all_funcs
[ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
# --showopts: print the effective configuration and exit without installing
[ -n "$SHOWOPTS" ] && dump_vars && exit 0
# Uninstall if "--uninstall"
if [ -n "$UNINSTALL" ]; then
    if [ -n "$CHARMED" ]; then
        # NOTE(review): $DOCKER_TAG is not assigned anywhere in this excerpt;
        # verify it should not be $OSM_DOCKER_TAG.
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@" || \
        FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
        # (community installation) delegate to the generic uninstaller
        ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
        FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
# Installation starts here
# Get README and create OSM_TRACK_INSTALLATION_ID
# The wget is a telemetry ping marking the start of an installation.
wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README.txt &> /dev/null
# Unique id (epoch + 16 random alphanumerics) used to correlate TRACK events
export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
# Get OS info to be tracked
# Distro and release are extracted from lsb_release output, e.g.
# "Distributor ID:<TAB>Ubuntu" -> field 3, "Release:<TAB>22.04" -> field 2.
# FIX: both awk programs were garbled across lines (a bare "$3"/"$2" on its
# own awk line is not a valid action), yielding wrong/empty values.
os_distro=$(lsb_release -i 2>/dev/null | awk '{print $3}')
os_release=$(lsb_release -r 2>/dev/null | awk '{print $2}')
os_info="${os_distro}_${os_release}"
# Replace any remaining spaces so the value is safe to embed in a URL/tag
os_info="${os_info// /_}"
# Select the installation flavour: Charmed (Juju-based), install-to-OpenStack,
# or the default community (k8s) installer.
if [ -n "$CHARMED" ]; then
    # Charmed installation
    # jq is required by the charmed installer for JSON processing
    sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
    ${OSM_DEVOPS}/installers/charmed_install.sh --tag $OSM_DOCKER_TAG "$@" || \
    FATAL_TRACK charmed_install "charmed_install.sh failed"
    # Telemetry ping marking a finished charmed installation
    wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README2.txt &> /dev/null
elif [ -n "$INSTALL_TO_OPENSTACK" ]; then
    # Installation to Openstack
    install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
    # Community_installer
    # Check incompatible options
    [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
    [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
    # Special cases go first
    # if develop, we force master
    [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
    # --install-only mode: run just the requested add-ons, then exit
    [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
    [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_NGSA" ] && install_osm_ngsa_service
    [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
    # This is where installation starts