Add namespace option for mongodb, ngsa and cluster monitoring installation
osm/devops.git: installers/full_install_osm.sh
1 #!/bin/bash
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15
16 function usage(){
17 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
18 echo -e "usage: $0 [OPTIONS]"
19 echo -e "Install OSM"
20 echo -e " OPTIONS"
21 echo -e " -h / --help: print this help"
22 echo -e " -y: do not prompt for confirmation, assumes yes"
23 echo -e " -r <repo>: use specified repository name for osm packages"
24 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
25 echo -e " -u <repo base>: use specified repository url for osm packages"
26 echo -e " -k <repo key>: use specified repository public key url"
27 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
28 echo -e " -b master (main dev branch)"
29 echo -e " -b v2.0 (v2.0 branch)"
30 echo -e " -b tags/v1.1.0 (a specific tag)"
31 echo -e " ..."
32 echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
33 echo -e " -c <kubernetes engine>: use a specific kubernetes engine (options: kubeadm, k3s, microk8s), default is kubeadm"
34 echo -e " -s <namespace> namespace when installed using k8s, default is osm"
35 echo -e " -H <VCA host> use specific juju host controller IP"
36 echo -e " -S <VCA secret> use VCA/juju secret key"
37 echo -e " -P <VCA pubkey> use VCA/juju public key file"
38 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " --old-sa: install old Service Assurance framework (MON, POL); do not install Airflow and Pushgateway"
41 echo -e " --ng-sa: install new Service Assurance framework (Airflow, AlertManager and Pushgateway) (enabled by default)"
42 echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
43 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor, ng-sa)"
44 echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
45 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
46 echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
47 echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
48 echo -e " -D <devops path> use local devops installation path"
49 echo -e " -w <work dir> Location to store runtime installation"
50 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
51 echo -e " -l: LXD cloud yaml file"
52 echo -e " -L: LXD credentials yaml file"
53 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
54 echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
55 echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
56 echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
57 echo -e " --debug: debug mode"
58 echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
59 echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
60 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
61 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
62 echo -e " --nojuju: do not install juju, assumes it is already installed"
63 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
64 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
65 echo -e " --nohostclient: do not install the osmclient"
66 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
67 echo -e " --source: install OSM from source code using the latest stable tag"
68 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
69 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
70 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
71 echo -e " --volume: create a VM volume when installing to OpenStack"
72 echo -e " --showopts: print chosen options and exit (only for debugging)"
73 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
74 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
75 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
76 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
77 echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
78 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
79 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
80 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
81 echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
82 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
83 echo -e " [--tag]: Docker image tag. (--charmed option)"
84 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
85 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
86 }
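# Example (illustrative, hypothetical values): unattended install of the testing-daily build
# into a dedicated namespace on a k3s cluster:
#   ./full_install_osm.sh -y -c k3s -s osm-dev -t testing-daily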
87
88 # takes a juju/accounts.yaml file and returns the password specific
89 # for a controller. I wrote this using only bash tools to minimize
90 # additions of other packages
91 function parse_juju_password {
92 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
93 password_file="${HOME}/.local/share/juju/accounts.yaml"
94 local controller_name=$1
95 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
96 sed -ne "s|^\($s\):|\1|" \
97 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
98 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
99 awk -F$fs -v controller=$controller_name '{
100 indent = length($1)/2;
101 vname[indent] = $2;
102 for (i in vname) {if (i > indent) {delete vname[i]}}
103 if (length($3) > 0) {
104 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
105 if (match(vn,controller) && match($2,"password")) {
106 printf("%s",$3);
107 }
108 }
109 }'
110 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
111 }
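# Example (illustrative): retrieve the password stored for a controller named "osm":
#   OSM_VCA_SECRET=$(parse_juju_password osm)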
112
113 function set_vca_variables() {
114 OSM_VCA_CLOUDNAME="lxd-cloud"
115 [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
116 if [ -z "$OSM_VCA_HOST" ]; then
117 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_NAMESPACE"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
118 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
119 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
120 fi
121 if [ -z "$OSM_VCA_SECRET" ]; then
122 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_NAMESPACE)
123 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
124 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
125 fi
126 if [ -z "$OSM_VCA_PUBKEY" ]; then
127 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
128 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
129 fi
130 if [ -z "$OSM_VCA_CACERT" ]; then
131 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_NAMESPACE '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
132 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
133 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
134 fi
135 }
136
137 function generate_secret() {
138 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
139 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
140 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
141 }
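# Example (illustrative, hypothetical variable name): capture a fresh 32-character secret:
#   MY_SECRET=$(generate_secret)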
142
143 function check_packages() {
144 NEEDED_PACKAGES="$1"
145 echo -e "Checking required packages: ${NEEDED_PACKAGES}"
146 for PACKAGE in ${NEEDED_PACKAGES} ; do
147 dpkg -L ${PACKAGE}
148 if [ $? -ne 0 ]; then
149 echo -e "Package ${PACKAGE} is not installed."
150 echo -e "Updating apt-cache ..."
151 sudo apt-get update
152 echo -e "Installing ${PACKAGE} ..."
153 sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
154 fi
155 done
156 echo -e "Required packages are present: ${NEEDED_PACKAGES}"
157 }
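# Example (illustrative): make sure basic tools are present before cloning repositories:
#   check_packages "git wget curl"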
158
159 function ask_user(){
160 # Ask the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive.
161 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
162 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
163 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
164 read -e -p "$1" USER_CONFIRMATION
165 while true ; do
166 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
167 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
168 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
169 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
170 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
171 done
172 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
173 }
174
175 function install_osmclient(){
176 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
177 CLIENT_RELEASE=${RELEASE#"-R "}
178 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
179 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
180 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
181 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
182 curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
183 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
184 sudo apt-get -y update
185 sudo DEBIAN_FRONTEND=noninteractive apt-get install -y python3-pip
186 sudo -H LC_ALL=C python3 -m pip install -U pip
187 sudo DEBIAN_FRONTEND=noninteractive apt-get install -y python3-osm-im python3-osmclient
188 if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
189 python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
190 fi
191 if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
192 sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libmagic1
193 python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
194 fi
195 echo -e "\nOSM client installed"
196 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
197 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
198 echo " export OSM_HOSTNAME=nbi.${OSM_DEFAULT_IP}.nip.io"
199 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
200 return 0
201 }
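# Example (illustrative, hypothetical IP): point the client at a remote OSM instance instead of localhost:
#   export OSM_HOSTNAME=nbi.172.21.248.66.nip.io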
202
203 function docker_login() {
204 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
205 echo "Docker login"
206 [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
207 echo "${DOCKER_REGISTRY_PASSWORD}" | sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
208 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
209 }
210
211 function generate_docker_images() {
212 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
213 echo "Generating docker images"
214 _build_from=$COMMIT_ID
215 [ -z "$_build_from" ] && _build_from="latest"
216 echo "OSM Docker images generated from $_build_from"
217 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
218 trap 'rm -rf "${LWTEMPDIR}"' EXIT
219 for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
220 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
221 module_lower=${module,,}
222 if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
223 continue
224 fi
225 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
226 git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
227 sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
228 fi
229 done
230 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
231 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
232 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
233 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
234 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
235 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
236 fi
237 echo "Finished generation of docker images"
238 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
239 }
240
241 function cmp_overwrite() {
242 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
243 file1="$1"
244 file2="$2"
245 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
246 if [ -f "${file2}" ]; then
247 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
248 else
249 cp -b ${file1} ${file2}
250 fi
251 fi
252 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
253 }
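# Example (illustrative, hypothetical paths): install a generated file only if it differs from the
# existing one, prompting before overwriting:
#   cmp_overwrite ${TEMPDIR}/osm_config.yaml /etc/osm/osm_config.yaml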
254
255 #deploys osm pods and services
256 function deploy_osm_services() {
257 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
258 # Generate helm values to be passed with -f osm-values.yaml
259 sudo mkdir -p ${OSM_HELM_WORK_DIR}
260 if [ -n "${INSTALL_JUJU}" ]; then
261 sudo bash -c "cat << EOF > ${OSM_HELM_WORK_DIR}/osm-values.yaml
262 vca:
263 pubkey: \"${OSM_VCA_PUBKEY}\"
264 EOF"
265 fi
266
267 # Generate helm values to be passed with --set
268 OSM_HELM_OPTS=""
269 # OSM_HELM_OPTS="${OSM_HELM_OPTS} --set nbi.useOsmSecret=false"
270 # OSM_HELM_OPTS="${OSM_HELM_OPTS} --set mysql.dbHostPath=${OSM_NAMESPACE_VOL}" # not needed as mysql is now bitnami helm chart
271
272 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.image.repositoryBase=${DOCKER_REGISTRY_URL}${DOCKER_USER}"
273 [ ! "$OSM_DOCKER_TAG" == "testing-daily" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set-string global.image.tag=${OSM_DOCKER_TAG}"
274 [ ! "$OSM_DOCKER_TAG" == "testing-daily" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set prometheus.server.sidecarContainers.prometheus-config-sidecar.image=${DOCKER_REGISTRY_URL}${DOCKER_USER}/prometheus:${OSM_DOCKER_TAG}"
275
276 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.hostname=${OSM_DEFAULT_IP}.nip.io"
277 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set grafana.ingress.hosts={grafana.${OSM_DEFAULT_IP}.nip.io}"
278 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set prometheus.server.ingress.hosts={prometheus.${OSM_DEFAULT_IP}.nip.io}"
279 # OSM_HELM_OPTS="${OSM_HELM_OPTS} --set prometheus.alertmanager.ingress.hosts={alertmanager.${OSM_DEFAULT_IP}.nip.io}"
280
281 if [ -n "${INSTALL_JUJU}" ]; then
282 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.enabled=true"
283 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.host=${OSM_VCA_HOST}"
284 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.secret=${OSM_VCA_SECRET}"
285 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.cacert=${OSM_VCA_CACERT}"
286 fi
287 [ -n "$OSM_VCA_APIPROXY" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set lcm.config.OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}"
288
289 [ -n "${INSTALL_NGSA}" ] || OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.oldServiceAssurance=true"
290 if [ -n "${OSM_BEHIND_PROXY}" ]; then
291 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.behindHttpProxy=true"
292 [ -n "${HTTP_PROXY}" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.HTTP_PROXY=\"${HTTP_PROXY}\""
293 [ -n "${HTTPS_PROXY}" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.HTTPS_PROXY=\"${HTTPS_PROXY}\""
294 if [ -n "${NO_PROXY}" ]; then
295 if [[ ! "${NO_PROXY}" =~ .*".svc".* ]]; then
296 NO_PROXY="${NO_PROXY},.svc"
297 fi
298 if [[ ! "${NO_PROXY}" =~ .*".cluster.local".* ]]; then
299 NO_PROXY="${NO_PROXY},.cluster.local"
300 fi
301 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.NO_PROXY=\"${NO_PROXY//,/\,}\""
302 fi
303 fi
304
305 if [ -n "${INSTALL_JUJU}" ]; then
306 OSM_HELM_OPTS="-f ${OSM_HELM_WORK_DIR}/osm-values.yaml ${OSM_HELM_OPTS}"
307 fi
308 echo "helm upgrade --install -n $OSM_NAMESPACE --create-namespace $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm ${OSM_HELM_OPTS}"
309 helm upgrade --install -n $OSM_NAMESPACE --create-namespace $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm ${OSM_HELM_OPTS}
310 # Save the final values used to install OSM by appending them to osm-values.yaml
311 helm -n $OSM_NAMESPACE get values $OSM_NAMESPACE | sudo tee -a ${OSM_HELM_WORK_DIR}/osm-values.yaml
312 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
313 }
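# Example (illustrative): the chart values actually applied to the default "osm" release can be reviewed with:
#   helm -n osm get values osm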
314
315 #deploy charmed services
316 function deploy_charmed_services() {
317 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
318 juju add-model $OSM_NAMESPACE $OSM_VCA_K8S_CLOUDNAME
319 juju deploy ch:mongodb-k8s -m $OSM_NAMESPACE --channel latest/stable
320 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
321 }
322
323 #deploy mongodb
324 function deploy_mongodb() {
325 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
326 MONGO_OPTS="-d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -s ${OSM_NAMESPACE} -t ${OSM_DOCKER_TAG} -U ${DOCKER_USER} ${DEBUG_INSTALL}"
327 [ -n "${DOCKER_REGISTRY_URL}" ] && MONGO_OPTS="${MONGO_OPTS} -r ${DOCKER_REGISTRY_URL}"
328 $OSM_DEVOPS/installers/install_mongodb.sh ${MONGO_OPTS} || \
329 FATAL_TRACK install_osm_mongodb_service "install_mongodb.sh failed"
330 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
331 }
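# Example (illustrative): the helper can also be run directly to (re)deploy MongoDB into a given
# namespace, mirroring the options built above:
#   $OSM_DEVOPS/installers/install_mongodb.sh -d /etc/osm/helm -D $OSM_DEVOPS -s osm -t testing-daily -U opensourcemano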
332
333 function install_osm_ngsa_service() {
334 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
335 NGSA_OPTS="-i ${OSM_DEFAULT_IP} -d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -s ${OSM_NAMESPACE} -t ${OSM_DOCKER_TAG} -U ${DOCKER_USER} ${DEBUG_INSTALL}"
336 [ -n "${DOCKER_REGISTRY_URL}" ] && NGSA_OPTS="${NGSA_OPTS} -r ${DOCKER_REGISTRY_URL}"
337 $OSM_DEVOPS/installers/install_ngsa.sh ${NGSA_OPTS} || \
338 FATAL_TRACK install_osm_ngsa_service "install_ngsa.sh failed"
339 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
340 }
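# Example (illustrative): the NGSA stack alone can be (un)installed into a specific namespace with:
#   ./full_install_osm.sh -o ng-sa -s osm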
341
342 function add_local_k8scluster() {
343 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
344 # OSM_HOSTNAME=$(kubectl get --namespace osm -o jsonpath="{.spec.rules[0].host}" ingress nbi-ingress)
345 OSM_HOSTNAME="nbi.${OSM_DEFAULT_IP}.nip.io:443"
346 /usr/bin/osm --hostname ${OSM_HOSTNAME} --all-projects vim-create \
347 --name _system-osm-vim \
348 --account_type dummy \
349 --auth_url http://dummy \
350 --user osm --password osm --tenant osm \
351 --description "dummy" \
352 --config '{management_network_name: mgmt}'
353 /usr/bin/osm --hostname ${OSM_HOSTNAME} --all-projects k8scluster-add \
354 --creds ${HOME}/.kube/config \
355 --vim _system-osm-vim \
356 --k8s-nets '{"net1": null}' \
357 --version '1.29' \
358 --description "OSM Internal Cluster" \
359 _system-osm-k8s
360 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
361 }
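# Example (illustrative): after registration, the internal cluster should appear in the client listing:
#   osm --all-projects k8scluster-list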
362
363 function configure_apt_proxy() {
364 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
365 OSM_APT_PROXY=$1
366 OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
367 echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
368 if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
369 sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
370 Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
371 EOF"
372 else
373 sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
374 fi
375 sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
376 track prereq apt_proxy_configured_ok
377 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
378 }
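# Example (illustrative, hypothetical proxy): with -a http://proxy.example.com:3128 the resulting
# /etc/apt/apt.conf.d/osm-apt file contains:
#   Acquire::http { Proxy "http://proxy.example.com:3128"; }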
379
380 function ask_proceed() {
381 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
382
383 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
384 1. Install and configure LXD
385 2. Install juju
386 3. Install docker CE
387 4. Disable swap space
388 5. Install and initialize Kubernetes
389 as pre-requirements.
390 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
391
392 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
393 }
394
395 function check_osm_behind_proxy() {
396 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
397
398 export OSM_BEHIND_PROXY=""
399 export OSM_PROXY_ENV_VARIABLES=""
400 [ -n "${http_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "http_proxy=${http_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} http_proxy"
401 [ -n "${https_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${https_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} https_proxy"
402 [ -n "${HTTP_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTP_PROXY=${HTTP_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTP_PROXY"
403 [ -n "${HTTPS_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${HTTPS_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTPS_PROXY"
404 [ -n "${no_proxy}" ] && echo "no_proxy=${no_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} no_proxy"
405 [ -n "${NO_PROXY}" ] && echo "NO_PROXY=${NO_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} NO_PROXY"
406
407 echo "OSM_BEHIND_PROXY=${OSM_BEHIND_PROXY}"
408 echo "OSM_PROXY_ENV_VARIABLES=${OSM_PROXY_ENV_VARIABLES}"
409
410 if [ -n "${OSM_BEHIND_PROXY}" ]; then
411 [ -z "$ASSUME_YES" ] && ! ask_user "
412 The following env variables have been found for the current user:
413 ${OSM_PROXY_ENV_VARIABLES}.
414
415 This suggests that this machine is behind a proxy and a special configuration is required.
416 The installer will install Docker CE, LXD and Juju to work behind a proxy using those
417 env variables.
418
419 Take into account that the installer uses apt, curl, wget, docker, lxd, juju and snap.
420 Depending on the program, the env variables to work behind a proxy might be different
421 (e.g. http_proxy vs HTTP_PROXY).
422
423 For that reason, it is strongly recommended that at least http_proxy, https_proxy, HTTP_PROXY
424 and HTTPS_PROXY are defined.
425
426 Finally, some of those programs (apt, snap) are run as sudoer, requiring that
427 those env variables are also set for root user. If you are not sure whether those variables
428 are configured for the root user, you can stop the installation now.
429
430 Do you want to proceed with the installation (Y/n)? " y && echo "Cancelled!" && exit 1
431 else
432 echo "This machine is not behind a proxy"
433 fi
434
435 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
436 }
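# Example (illustrative, hypothetical proxy): variables typically exported (for the user and for root)
# before launching the installer on a proxied network:
#   export http_proxy=http://proxy.example.com:3128 https_proxy=http://proxy.example.com:3128
#   export HTTP_PROXY=$http_proxy HTTPS_PROXY=$https_proxy no_proxy=localhost,127.0.0.1,.svc,.cluster.local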
437
438 function find_devops_folder() {
439 if [ -z "$OSM_DEVOPS" ]; then
440 if [ -n "$TEST_INSTALLER" ]; then
441 echo -e "\nUsing local devops repo for OSM installation"
442 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
443 else
444 echo -e "\nCreating temporary dir for OSM installation"
445 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
446 trap 'rm -rf "$OSM_DEVOPS"' EXIT
447 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
448 fi
449 fi
450 }
451
452 function install_osm() {
453 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
454
455 trap ctrl_c INT
456
457
458 check_osm_behind_proxy
459 check_packages "git wget curl tar snapd"
460 if [ -n "${INSTALL_JUJU}" ]; then
461 sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
462 fi
463 find_devops_folder
464
465 track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none os_info $os_info none none
466
467 track checks checkingroot_ok
468 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
469 track checks noroot_ok
470 ask_proceed
471 track checks proceed_ok
472
473 echo "Installing OSM"
474
475 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
476
477 echo "Determining IP address of the interface with the default route"
478 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
479 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
480 [ -z "$OSM_DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
481 OSM_DEFAULT_IP=`ip -o -4 a s ${OSM_DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
482 [ -z "$OSM_DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
483
484 # configure apt proxy
485 [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL
486
487 # if lxd is requested, we will install it
488 if [ -n "$INSTALL_LXD" ]; then
489 LXD_INSTALL_OPTS="-D ${OSM_DEVOPS} -i ${OSM_DEFAULT_IF} ${DEBUG_INSTALL}"
490 [ -n "${OSM_BEHIND_PROXY}" ] && LXD_INSTALL_OPTS="${LXD_INSTALL_OPTS} -P"
491 $OSM_DEVOPS/installers/install_lxd.sh ${LXD_INSTALL_OPTS} || FATAL_TRACK lxd "install_lxd.sh failed"
492 fi
493
494 track prereq prereqok_ok
495
496 if [ -n "$INSTALL_DOCKER" ] || [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
497 if [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
498 echo "Kubeadm requires docker, so docker will be installed."
499 fi
500 DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
501 [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
502 [ -n "${OSM_BEHIND_PROXY}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -P"
503 $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed"
504 fi
505 track docker_ce docker_ce_ok
506
507 $OSM_DEVOPS/installers/install_helm_client.sh -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
508 FATAL_TRACK k8scluster "install_helm_client.sh failed"
509 track helm_client install_helm_client_ok
510
511 if [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
512 KUBEADM_INSTALL_OPTS="-d ${OSM_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
513 $OSM_DEVOPS/installers/install_kubeadm_cluster.sh ${KUBEADM_INSTALL_OPTS} || \
514 FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed"
515 K8SCLUSTER_ADDONS_INSTALL_OPTS="-i ${OSM_DEFAULT_IP} -d ${OSM_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
516 $OSM_DEVOPS/installers/install_cluster_addons.sh ${K8SCLUSTER_ADDONS_INSTALL_OPTS} || \
517 FATAL_TRACK k8scluster "install_cluster_addons.sh failed"
518 fi
519 track k8scluster k8scluster_ok
520
521 if [ -n "${INSTALL_JUJU}" ]; then
522 JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_NAMESPACE} -i ${OSM_DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_CACHELXDIMAGES}"
523 [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
524 [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
525 [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
526 [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
527 [ -n "${OSM_BEHIND_PROXY}" ] && JUJU_OPTS="${JUJU_OPTS} -P"
528 $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
529 set_vca_variables
530 fi
531 track juju juju_ok
532
533 # Deploy OSM services
534 [ -z "$DOCKER_NOBUILD" ] && [ -z "$PULL_IMAGES" ] && generate_docker_images
535 track docker_images docker_images_ok
536
537 deploy_mongodb
538 track deploy_osm deploy_mongodb_ok
539 deploy_osm_services
540 track deploy_osm deploy_osm_services_k8s_ok
541 if [ -n "$INSTALL_K8S_MONITOR" ]; then
542 # install OSM MONITORING
543 install_k8s_monitoring
544 track deploy_osm install_k8s_monitoring_ok
545 fi
546 if [ -n "$INSTALL_NGSA" ]; then
547 # optional NGSA install
548 install_osm_ngsa_service
549 track deploy_osm install_osm_ngsa_ok
550 fi
551
552 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
553 track osmclient osmclient_ok
554
555 echo -e "Checking OSM health state..."
556 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_NAMESPACE} -k || \
557 (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
558 echo -e "Check OSM status with: kubectl -n ${OSM_NAMESPACE} get all" && \
559 track healthchecks osm_unhealthy didnotconverge)
560 track healthchecks after_healthcheck_ok
561
562 add_local_k8scluster
563 track final_ops add_local_k8scluster_ok
564
565 # if lxd is requested, iptables firewall is updated to work with both docker and LXD
566 if [ -n "$INSTALL_LXD" ]; then
567 arrange_docker_default_network_policy
568 fi
569
570 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README2.txt &> /dev/null
571 track end
572 sudo find /etc/osm
573 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
574 return 0
575 }
576
577 function install_to_openstack() {
578 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
579
580 if [ -z "$2" ]; then
581 FATAL "OpenStack installer requires a valid external network name"
582 fi
583
584 # Install Pip for Python3
585 sudo apt install -y python3-pip python3-venv
586 sudo -H LC_ALL=C python3 -m pip install -U pip
587
588 # Create a venv to avoid conflicts with the host installation
589 python3 -m venv $OPENSTACK_PYTHON_VENV
590
591 source $OPENSTACK_PYTHON_VENV/bin/activate
592
593 # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
594 python -m pip install -U wheel
595 python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
596
597 # Install the Openstack cloud module (ansible>=2.10)
598 ansible-galaxy collection install openstack.cloud
599
600 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
601
602 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
603
604 ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
605
606 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
607 ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
608 fi
609
610 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
611 ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
612 fi
613
614 # Execute the Ansible playbook based on openrc or clouds.yaml
615 if [ -e "$1" ]; then
616 . $1
617 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
618 $OSM_DEVOPS/installers/openstack/site.yml
619 else
620 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
621 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
622 fi
623
624 # Exit from venv
625 deactivate
626
627 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
628 return 0
629 }
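# Example (illustrative, hypothetical names): install OSM on OpenStack from an openrc file, attaching
# the VM to the "public1" network and creating an extra volume:
#   ./full_install_osm.sh -O ~/openrc -N public1 --volume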
630
631 function arrange_docker_default_network_policy() {
632 echo -e "Fixing firewall so docker and LXD can share the same host without affecting each other."
633 sudo iptables -I DOCKER-USER -j ACCEPT
634 sudo iptables-save | sudo tee /etc/iptables/rules.v4
635 sudo ip6tables-save | sudo tee /etc/iptables/rules.v6
636 }
637
638 function install_k8s_monitoring() {
639 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
640 # install OSM monitoring
641 sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh -o ${OSM_NAMESPACE} || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
642 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
643 }
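# Example (illustrative): the monitoring stack alone can be (un)installed into a specific namespace with:
#   ./full_install_osm.sh -o k8s_monitor -s osm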
644
645 function dump_vars(){
646 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
647 echo "APT_PROXY_URL=$APT_PROXY_URL"
648 echo "K8S_CLUSTER_ENGINE=$K8S_CLUSTER_ENGINE"
649 echo "DEVELOP=$DEVELOP"
650 echo "DEBUG_INSTALL=$DEBUG_INSTALL"
651 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
652 echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
653 echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
654 echo "DOCKER_USER=$DOCKER_USER"
655 echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
656 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
657 echo "INSTALL_JUJU=$INSTALL_JUJU"
658 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
659 echo "INSTALL_LXD=$INSTALL_LXD"
660 echo "INSTALL_NGSA=$INSTALL_NGSA"
661 echo "INSTALL_DOCKER=$INSTALL_DOCKER"
662 echo "INSTALL_ONLY=$INSTALL_ONLY"
663 echo "INSTALL_PLA=$INSTALL_PLA"
664 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
665 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
666 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
667 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
668 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
669 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
670 echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE"
671 echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE"
672 echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME"
673 echo "OSM_DEVOPS=$OSM_DEVOPS"
674 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
675 echo "OSM_HELM_WORK_DIR=$OSM_HELM_WORK_DIR"
676 echo "OSM_NAMESPACE=$OSM_NAMESPACE"
677 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
678 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
679 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
680 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
681 echo "PULL_IMAGES=$PULL_IMAGES"
682 echo "RECONFIGURE=$RECONFIGURE"
683 echo "RELEASE=$RELEASE"
684 echo "REPOSITORY=$REPOSITORY"
685 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
686 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
687 echo "SHOWOPTS=$SHOWOPTS"
688 echo "TEST_INSTALLER=$TEST_INSTALLER"
689 echo "TO_REBUILD=$TO_REBUILD"
690 echo "UNINSTALL=$UNINSTALL"
691 echo "UPDATE=$UPDATE"
692 echo "Install from specific refspec (-b): $COMMIT_ID"
693 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
694 }
695
696 function parse_docker_registry_url() {
697 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
698 DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
699 DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
700 DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
701 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
702 }
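# Example (illustrative, hypothetical registry): for -d user:pass@registry.example.com:5000 this yields
#   DOCKER_REGISTRY_USER=user, DOCKER_REGISTRY_PASSWORD=pass, DOCKER_REGISTRY_URL=registry.example.com:5000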
703
704 function ctrl_c() {
705 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
706 echo "** Trapped CTRL-C"
707 FATAL "User stopped the installation"
708 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
709 }
710
711 UNINSTALL=""
712 DEVELOP=""
713 UPDATE=""
714 RECONFIGURE=""
715 TEST_INSTALLER=""
716 INSTALL_LXD=""
717 SHOWOPTS=""
718 COMMIT_ID=""
719 ASSUME_YES=""
720 APT_PROXY_URL=""
721 K8S_CLUSTER_ENGINE="kubeadm"
722 INSTALL_FROM_SOURCE=""
723 DEBUG_INSTALL=""
724 RELEASE="testing-daily"
725 REPOSITORY="testing"
726 INSTALL_K8S_MONITOR=""
727 INSTALL_NGSA="y"
728 INSTALL_PLA=""
729 INSTALL_VIMEMU=""
730 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
731 LXD_REPOSITORY_PATH=""
732 INSTALL_TO_OPENSTACK=""
733 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
734 OPENSTACK_PUBLIC_NET_NAME=""
735 OPENSTACK_ATTACH_VOLUME="false"
736 OPENSTACK_SSH_KEY_FILE=""
737 OPENSTACK_USERDATA_FILE=""
738 OPENSTACK_VM_NAME="server-osm"
739 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
740 INSTALL_ONLY=""
741 TO_REBUILD=""
742 INSTALL_DOCKER=""
743 INSTALL_JUJU=""
744 INSTALL_NOHOSTCLIENT=""
745 INSTALL_CACHELXDIMAGES=""
746 OSM_DEVOPS=
747 OSM_VCA_HOST=
748 OSM_VCA_SECRET=
749 OSM_VCA_PUBKEY=
750 OSM_VCA_CLOUDNAME="localhost"
751 OSM_VCA_K8S_CLOUDNAME="k8scloud"
752 OSM_NAMESPACE=osm
753 NO_HOST_PORTS=""
754 DOCKER_NOBUILD=""
755 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
756 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
757 OSM_WORK_DIR="/etc/osm"
758 OSM_HELM_WORK_DIR="${OSM_WORK_DIR}/helm"
759 OSM_HOST_VOL="/var/lib/osm"
760 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_NAMESPACE}"
761 OSM_DOCKER_TAG="testing-daily"
762 DOCKER_USER=opensourcemano
763 PULL_IMAGES="y"
764 KAFKA_TAG=2.11-1.0.2
765 KIWIGRID_K8S_SIDECAR_TAG="1.15.6"
766 PROMETHEUS_TAG=v2.28.1
767 GRAFANA_TAG=8.1.1
768 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
769 PROMETHEUS_CADVISOR_TAG=latest
770 KEYSTONEDB_TAG=10
771 OSM_DATABASE_COMMONKEY=
772 ELASTIC_VERSION=6.4.2
773 ELASTIC_CURATOR_VERSION=5.5.4
774 POD_NETWORK_CIDR=10.244.0.0/16
775 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
776 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
777 DOCKER_REGISTRY_URL=
778 DOCKER_PROXY_URL=
779 MODULE_DOCKER_TAG=
780 OSM_INSTALLATION_TYPE="Default"
781
782 while getopts ":a:b:c:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
783 case "${o}" in
784 a)
785 APT_PROXY_URL=${OPTARG}
786 ;;
787 b)
788 COMMIT_ID=${OPTARG}
789 PULL_IMAGES=""
790 ;;
791 c)
792 K8S_CLUSTER_ENGINE=${OPTARG}
793 [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ] && continue
794 [ "${K8S_CLUSTER_ENGINE}" == "k3s" ] && continue
795 [ "${K8S_CLUSTER_ENGINE}" == "microk8s" ] && continue
796 echo -e "Invalid argument for -c : ' ${K8S_CLUSTER_ENGINE}'\n" >&2
797 usage && exit 1
798 ;;
799 r)
800 REPOSITORY="${OPTARG}"
801 REPO_ARGS+=(-r "$REPOSITORY")
802 ;;
803 k)
804 REPOSITORY_KEY="${OPTARG}"
805 REPO_ARGS+=(-k "$REPOSITORY_KEY")
806 ;;
807 u)
808 REPOSITORY_BASE="${OPTARG}"
809 REPO_ARGS+=(-u "$REPOSITORY_BASE")
810 ;;
811 R)
812 RELEASE="${OPTARG}"
813 REPO_ARGS+=(-R "$RELEASE")
814 ;;
815 D)
816 OSM_DEVOPS="${OPTARG}"
817 ;;
818 o)
819 INSTALL_ONLY="y"
820 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
821 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
822 ;;
823 O)
824 INSTALL_TO_OPENSTACK="y"
825 if [ -n "${OPTARG}" ]; then
826 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
827 else
828 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
829 usage && exit 1
830 fi
831 ;;
832 f)
833 OPENSTACK_SSH_KEY_FILE="${OPTARG}"
834 ;;
835 F)
836 OPENSTACK_USERDATA_FILE="${OPTARG}"
837 ;;
838 N)
839 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
840 ;;
841 m)
842 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
843 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
844 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
845 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
846 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
847 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
848 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
849 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
850 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
851 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
852 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
853 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
854 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
855 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
856 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
857 ;;
858 H)
859 OSM_VCA_HOST="${OPTARG}"
860 ;;
861 S)
862 OSM_VCA_SECRET="${OPTARG}"
863 ;;
864 s)
865 OSM_NAMESPACE="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 1
866 ;;
867 t)
868 OSM_DOCKER_TAG="${OPTARG}"
869 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
870 ;;
871 U)
872 DOCKER_USER="${OPTARG}"
873 ;;
874 P)
875 OSM_VCA_PUBKEY=$(cat ${OPTARG})
876 ;;
877 A)
878 OSM_VCA_APIPROXY="${OPTARG}"
879 ;;
880 l)
881 LXD_CLOUD_FILE="${OPTARG}"
882 ;;
883 L)
884 LXD_CRED_FILE="${OPTARG}"
885 ;;
886 K)
887 CONTROLLER_NAME="${OPTARG}"
888 ;;
889 d)
890 DOCKER_REGISTRY_URL="${OPTARG}"
891 ;;
892 p)
893 DOCKER_PROXY_URL="${OPTARG}"
894 ;;
895 T)
896 MODULE_DOCKER_TAG="${OPTARG}"
897 ;;
898 -)
899 [ "${OPTARG}" == "help" ] && usage && exit 0
900 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
901 [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
902 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
903 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
904 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
905 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
906 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
907 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
908 [ "${OPTARG}" == "lxd" ] && INSTALL_LXD="y" && continue
909 [ "${OPTARG}" == "nolxd" ] && INSTALL_LXD="" && continue
910 [ "${OPTARG}" == "docker" ] && INSTALL_DOCKER="y" && continue
911 [ "${OPTARG}" == "nodocker" ] && INSTALL_DOCKER="" && continue
912 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
913 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
914 [ "${OPTARG}" == "juju" ] && INSTALL_JUJU="y" && continue
915 [ "${OPTARG}" == "nojuju" ] && INSTALL_JUJU="" && continue
916 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
917 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
918 [ "${OPTARG}" == "pullimages" ] && continue
919 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
920 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
921 [ "${OPTARG}" == "bundle" ] && continue
922 [ "${OPTARG}" == "k8s" ] && continue
923 [ "${OPTARG}" == "lxd-cred" ] && continue
924 [ "${OPTARG}" == "microstack" ] && continue
925 [ "${OPTARG}" == "overlay" ] && continue
926 [ "${OPTARG}" == "only-vca" ] && continue
927 [ "${OPTARG}" == "small-profile" ] && continue
928 [ "${OPTARG}" == "vca" ] && continue
929 [ "${OPTARG}" == "ha" ] && continue
930 [ "${OPTARG}" == "tag" ] && continue
931 [ "${OPTARG}" == "registry" ] && continue
932 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
933 [ "${OPTARG}" == "old-sa" ] && INSTALL_NGSA="" && continue
934 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
935 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
936 [ "${OPTARG}" == "nocachelxdimages" ] && continue
937 [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
938 echo -e "Invalid option: '--$OPTARG'\n" >&2
939 usage && exit 1
940 ;;
941 :)
942 echo "Option -$OPTARG requires an argument" >&2
943 usage && exit 1
944 ;;
945 \?)
946 echo -e "Invalid option: '-$OPTARG'\n" >&2
947 usage && exit 1
948 ;;
949 h)
950 usage && exit 0
951 ;;
952 y)
953 ASSUME_YES="y"
954 ;;
955 *)
956 usage && exit 1
957 ;;
958 esac
959 done
960
961 source $OSM_DEVOPS/common/all_funcs
962
963 [ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
964 [ -n "$SHOWOPTS" ] && dump_vars && exit 0
965
966 # Uninstall if "--uninstall"
967 if [ -n "$UNINSTALL" ]; then
968 if [ -n "$CHARMED" ]; then
969 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $OSM_DOCKER_TAG "$@" || \
970 FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
971 else
972 ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
973 FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
974 fi
975 echo -e "\nDONE"
976 exit 0
977 fi
978
979 # Installation starts here
980
981 # Get README and create OSM_TRACK_INSTALLATION_ID
982 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README.txt &> /dev/null
983 export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
984
985 # Get OS info to be tracked
986 os_distro=$(lsb_release -i 2>/dev/null | awk '{print $3}')
987 echo $os_distro
988 os_release=$(lsb_release -r 2>/dev/null | awk '{print $2}')
989 echo $os_release
990 os_info="${os_distro}_${os_release}"
991 os_info="${os_info// /_}"
992
993 if [ -n "$CHARMED" ]; then
994 # Charmed installation
995 sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
996 ${OSM_DEVOPS}/installers/charmed_install.sh --tag $OSM_DOCKER_TAG "$@" || \
997 FATAL_TRACK charmed_install "charmed_install.sh failed"
998 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README2.txt &> /dev/null
999 echo -e "\nDONE"
1000 exit 0
1001 elif [ -n "$INSTALL_TO_OPENSTACK" ]; then
1002 # Installation to Openstack
1003 install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
1004 echo -e "\nDONE"
1005 exit 0
1006 else
1007 # Community_installer
1008 # Check incompatible options
1009 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1010 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1011 # Special cases go first
1012 # if develop, we force master
1013 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1014 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1015 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_NGSA" ] && install_osm_ngsa_service
1016 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1017 # This is where installation starts
1018 install_osm
1019 echo -e "\nDONE"
1020 exit 0
1021 fi