Remove use of INSTALL_LIGHTWEIGHT in full_install_osm.sh
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15
16 function usage(){
17 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
18 echo -e "usage: $0 [OPTIONS]"
19 echo -e "Install OSM"
20 echo -e " OPTIONS"
21 echo -e " -h / --help: print this help"
22 echo -e " -y: do not prompt for confirmation, assumes yes"
23 echo -e " -r <repo>: use specified repository name for osm packages"
24 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
25 echo -e " -u <repo base>: use specified repository url for osm packages"
26 echo -e " -k <repo key>: use specified repository public key url"
27 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
28 echo -e " -b master (main dev branch)"
29 echo -e " -b v2.0 (v2.0 branch)"
30 echo -e " -b tags/v1.1.0 (a specific tag)"
31 echo -e " ..."
32 echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
33 echo -e " -c <kubernetes engine>: use a specific kubernetes engine (options: kubeadm, k3s, microk8s), default is kubeadm"
34 echo -e " -s <namespace> namespace when installed using k8s, default is osm"
35 echo -e " -H <VCA host> use specific juju host controller IP"
36 echo -e " -S <VCA secret> use VCA/juju secret key"
37 echo -e " -P <VCA pubkey> use VCA/juju public key file"
38 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " --old-sa: install old Service Assurance framework (MON, POL); do not install Airflow and Pushgateway"
41 echo -e " --ng-sa: install new Service Assurance framework (Airflow, AlertManager and Pushgateway) (enabled by default)"
42 echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
43 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor, ng-sa)"
44 echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
45 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
46 echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
47 echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
48 echo -e " -D <devops path> use local devops installation path"
49 echo -e " -w <work dir> Location to store runtime installation"
50 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
51 echo -e " -l: LXD cloud yaml file"
52 echo -e " -L: LXD credentials yaml file"
53 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
54 echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
55 echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
56 echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
57 echo -e " --debug: debug mode"
58 echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
59 echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
60 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
61 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
62 echo -e " --nojuju: do not juju, assumes already installed"
63 echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
64 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
65 echo -e " --nohostclient: do not install the osmclient"
66 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
67 echo -e " --source: install OSM from source code using the latest stable tag"
68 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
69 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
70 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
71 echo -e " --volume: create a VM volume when installing to OpenStack"
72 echo -e " --showopts: print chosen options and exit (only for debugging)"
73 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
74 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
75 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
76 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
77 echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
78 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
79 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
80 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
81 echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
82 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
83 echo -e " [--tag]: Docker image tag. (--charmed option)"
84 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
85 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
86 }
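# Illustrative invocations based on the options documented above (a sketch; adapt to your environment):
#   ./full_install_osm.sh -y                               # unattended install with defaults (kubeadm, NG-SA)
#   ./full_install_osm.sh -y -c k3s -t testing-daily       # use k3s as kubernetes engine and a specific docker tag
#   ./full_install_osm.sh --charmed --k8s ~/.kube/config   # charmed installation on an existing kubernetes
#   ./full_install_osm.sh --uninstall                      # remove a previous installation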
87
88 # Takes a juju accounts.yaml file and returns the password for a
89 # specific controller. Written using only bash tools to avoid adding
90 # dependencies on other packages.
91 function parse_juju_password {
92 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
93 password_file="${HOME}/.local/share/juju/accounts.yaml"
94 local controller_name=$1
95 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
96 sed -ne "s|^\($s\):|\1|" \
97 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
98 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
99 awk -F$fs -v controller=$controller_name '{
100 indent = length($1)/2;
101 vname[indent] = $2;
102 for (i in vname) {if (i > indent) {delete vname[i]}}
103 if (length($3) > 0) {
104 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
105 if (match(vn,controller) && match($2,"password")) {
106 printf("%s",$3);
107 }
108 }
109 }'
110 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
111 }
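# Minimal sketch of the accounts.yaml layout this parser assumes (values are placeholders):
#   controllers:
#     osm:
#       user: admin
#       password: 0123456789abcdef
# so that "parse_juju_password osm" would print "0123456789abcdef".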
112
113 function set_vca_variables() {
114 OSM_VCA_CLOUDNAME="lxd-cloud"
115 [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
116 if [ -z "$OSM_VCA_HOST" ]; then
117 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_NAMESPACE"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
118 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
119 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
120 fi
121 if [ -z "$OSM_VCA_SECRET" ]; then
122 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_NAMESPACE)
123 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
124 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
125 fi
126 if [ -z "$OSM_VCA_PUBKEY" ]; then
127 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
128 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
129 fi
130 if [ -z "$OSM_VCA_CACERT" ]; then
131 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_NAMESPACE '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
132 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
133 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
134 fi
135 }
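# For reference, the line this function extracts from 'juju show-controller' looks roughly like
# (illustrative output; the IP address is a placeholder):
#   api-endpoints: ['192.168.0.10:17070']
# from which OSM_VCA_HOST would be set to 192.168.0.10.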
136
137 function generate_secret() {
138 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
139 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
140 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
141 }
142
143 function check_packages() {
144 NEEDED_PACKAGES="$1"
145 echo -e "Checking required packages: ${NEEDED_PACKAGES}"
146 for PACKAGE in ${NEEDED_PACKAGES} ; do
147 dpkg -L ${PACKAGE}
148 if [ $? -ne 0 ]; then
149 echo -e "Package ${PACKAGE} is not installed."
150 echo -e "Updating apt-cache ..."
151 sudo apt-get update
152 echo -e "Installing ${PACKAGE} ..."
153 sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
154 fi
155 done
156 echo -e "Required packages are present: ${NEEDED_PACKAGES}"
157 }
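# Example use (as done later in this script): check_packages "git wget curl tar snapd"
# installs whichever of those packages 'dpkg -L' reports as missing.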
158
159 function ask_user(){
160 # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
161 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
162 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
163 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
164 read -e -p "$1" USER_CONFIRMATION
165 while true ; do
166 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
167 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
168 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
169 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
170 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
171 done
172 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
173 }
174
175 function install_osmclient(){
176 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
177 CLIENT_RELEASE=${RELEASE#"-R "}
178 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
179 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
180 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
181 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
182 curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
183 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
184 sudo apt-get -y update
185 sudo DEBIAN_FRONTEND=noninteractive apt-get install -y python3-pip
186 sudo -H LC_ALL=C python3 -m pip install -U pip
187 sudo DEBIAN_FRONTEND=noninteractive apt-get install -y python3-osm-im python3-osmclient
188 if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
189 python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
190 fi
191 if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
192 sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libmagic1
193 python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
194 fi
195 echo -e "\nOSM client installed"
196 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
197 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
198 echo " export OSM_HOSTNAME=https://nbi.${OSM_DEFAULT_IP}.nip.io"
199 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
200 return 0
201 }
202
203 function docker_login() {
204 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
205 echo "Docker login"
206 [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
207 echo "${DOCKER_REGISTRY_PASSWORD}" | sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
208 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
209 }
210
211 function generate_docker_images() {
212 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
213 echo "Generating docker images"
214 _build_from=$COMMIT_ID
215 [ -z "$_build_from" ] && _build_from="latest"
216 echo "OSM Docker images generated from $_build_from"
217 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
218 trap 'rm -rf "${LWTEMPDIR}"' EXIT
219 for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
220 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
221 module_lower=${module,,}
222 if [ "$module" == "PLA" ] && [ -z "$INSTALL_PLA" ]; then
223 continue
224 fi
225 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
226 git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
227 sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
228 fi
229 done
230 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
231 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
232 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
233 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
234 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
235 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
236 fi
237 echo "Finished generation of docker images"
238 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
239 }
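# Illustrative way to exercise this build path (options documented in usage above):
#   ./full_install_osm.sh -b master -m LCM -m RO   # build only the LCM and RO images from the master branch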
240
241 function cmp_overwrite() {
242 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
243 file1="$1"
244 file2="$2"
245 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
246 if [ -f "${file2}" ]; then
247 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
248 else
249 cp -b ${file1} ${file2}
250 fi
251 fi
252 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
253 }
254
255 #deploys osm pods and services
256 function deploy_osm_services() {
257 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
258 # helm is already installed as part of install_kubeadm_cluster.sh
259
260 # Generate helm values to be passed with -f osm-values.yaml
261 sudo mkdir -p ${OSM_HELM_WORK_DIR}
262 if [ -n "${INSTALL_JUJU}" ]; then
263 sudo bash -c "cat << EOF > ${OSM_HELM_WORK_DIR}/osm-values.yaml
264 vca:
265   pubkey: \"${OSM_VCA_PUBKEY}\"
266 EOF"
267 fi
268
269 # Generate helm values to be passed with --set
270 OSM_HELM_OPTS=""
271 # OSM_HELM_OPTS="${OSM_HELM_OPTS} --set nbi.useOsmSecret=false"
272 # OSM_HELM_OPTS="${OSM_HELM_OPTS} --set mysql.dbHostPath=${OSM_NAMESPACE_VOL}" # not needed as mysql is now bitnami helm chart
273
274 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.image.repositoryBase=${DOCKER_REGISTRY_URL}${DOCKER_USER}"
275 [ ! "$OSM_DOCKER_TAG" == "testing-daily" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set-string global.image.tag=${OSM_DOCKER_TAG}"
276 [ ! "$OSM_DOCKER_TAG" == "testing-daily" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set prometheus.server.sidecarContainers.prometheus-config-sidecar.image=${DOCKER_REGISTRY_URL}${DOCKER_USER}/prometheus:${OSM_DOCKER_TAG}"
277
278 if [ -n "${INSTALL_JUJU}" ]; then
279 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.enabled=true"
280 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.host=${OSM_VCA_HOST}"
281 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.secret=${OSM_VCA_SECRET}"
282 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.cacert=${OSM_VCA_CACERT}"
283 fi
284 [ -n "$OSM_VCA_APIPROXY" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set lcm.config.OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}"
285 [ -n "${INSTALL_NGSA}" ] || OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.oldServiceAssurance=true"
286 if [ -n "${OSM_BEHIND_PROXY}" ]; then
287 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.behindHttpProxy=true"
288 [ -n "${HTTP_PROXY}" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.HTTP_PROXY=\"${HTTP_PROXY}\""
289 [ -n "${HTTPS_PROXY}" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.HTTPS_PROXY=\"${HTTPS_PROXY}\""
290 if [ -n "${NO_PROXY}" ]; then
291 if [[ ! "${NO_PROXY}" =~ .*".svc".* ]]; then
292 NO_PROXY="${NO_PROXY},.svc"
293 fi
294 if [[ ! "${NO_PROXY}" =~ .*".cluster.local".* ]]; then
295 NO_PROXY="${NO_PROXY},.cluster.local"
296 fi
297 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.NO_PROXY=\"${NO_PROXY//,/\,}\""
298 fi
299 fi
300
301 if [ -n "${INSTALL_JUJU}" ]; then
302 OSM_HELM_OPTS="-f ${OSM_HELM_WORK_DIR}/osm-values.yaml ${OSM_HELM_OPTS}"
303 fi
304 echo "helm upgrade --install -n $OSM_NAMESPACE --create-namespace $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm ${OSM_HELM_OPTS}"
305 helm upgrade --install -n $OSM_NAMESPACE --create-namespace $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm ${OSM_HELM_OPTS}
306 # Append the final values used to install OSM to osm-values.yaml for later reference
307 helm -n $OSM_NAMESPACE get values $OSM_NAMESPACE | sudo tee -a ${OSM_HELM_WORK_DIR}/osm-values.yaml
308 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
309 }
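# A quick way to verify the deployment once helm returns (illustrative; the namespace defaults to "osm"):
#   helm -n osm status osm
#   kubectl -n osm get pods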
310
311 #deploy charmed services
312 function deploy_charmed_services() {
313 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
314 juju add-model $OSM_NAMESPACE $OSM_VCA_K8S_CLOUDNAME
315 juju deploy ch:mongodb-k8s -m $OSM_NAMESPACE --channel latest/stable
316 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
317 }
318
319 #deploy mongodb
320 function deploy_mongodb() {
321 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
322 MONGO_OPTS="-d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} -U ${DOCKER_USER} ${DEBUG_INSTALL}"
323 [ -n "${DOCKER_REGISTRY_URL}" ] && MONGO_OPTS="${MONGO_OPTS} -r ${DOCKER_REGISTRY_URL}"
324 $OSM_DEVOPS/installers/install_mongodb.sh ${MONGO_OPTS} || \
325 FATAL_TRACK install_osm_mongodb_service "install_mongodb.sh failed"
326 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
327 }
328
329 function install_osm_ngsa_service() {
330 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
331 NGSA_OPTS="-d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} -U ${DOCKER_USER} ${DEBUG_INSTALL}"
332 [ -n "${DOCKER_REGISTRY_URL}" ] && NGSA_OPTS="${NGSA_OPTS} -r ${DOCKER_REGISTRY_URL}"
333 $OSM_DEVOPS/installers/install_ngsa.sh ${NGSA_OPTS} || \
334 FATAL_TRACK install_osm_ngsa_service "install_ngsa.sh failed"
335 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
336 }
337
338 function add_local_k8scluster() {
339 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
340 /usr/bin/osm --all-projects vim-create \
341 --name _system-osm-vim \
342 --account_type dummy \
343 --auth_url http://dummy \
344 --user osm --password osm --tenant osm \
345 --description "dummy" \
346 --config '{management_network_name: mgmt}'
347 /usr/bin/osm --all-projects k8scluster-add \
348 --creds ${HOME}/.kube/config \
349 --vim _system-osm-vim \
350 --k8s-nets '{"net1": null}' \
351 --version '1.15' \
352 --description "OSM Internal Cluster" \
353 _system-osm-k8s
354 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
355 }
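# Illustrative check that the dummy VIM and the internal cluster were registered:
#   osm --all-projects vim-list
#   osm --all-projects k8scluster-list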
356
357 function configure_apt_proxy() {
358 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
359 OSM_APT_PROXY=$1
360 OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
361 echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
362 if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
363 sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
364 Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
365 EOF"
366 else
367 sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
368 fi
369 sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
370 track prereq apt_proxy_configured_ok
371 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
372 }
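# After this function, ${OSM_APT_PROXY_FILE} is expected to contain a single stanza such as
# (the proxy URL is a placeholder):
#   Acquire::http { Proxy "http://proxy.example.com:3128"; }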
373
374 function ask_proceed() {
375 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
376
377 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
378 1. Install and configure LXD
379 2. Install juju
380 3. Install docker CE
381 4. Disable swap space
382 5. Install and initialize Kubernetes
383 as pre-requirements.
384 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
385
386 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
387 }
388
389 function check_osm_behind_proxy() {
390 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
391
392 export OSM_BEHIND_PROXY=""
393 export OSM_PROXY_ENV_VARIABLES=""
394 [ -n "${http_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "http_proxy=${http_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} http_proxy"
395 [ -n "${https_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${https_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} https_proxy"
396 [ -n "${HTTP_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTP_PROXY=${HTTP_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTP_PROXY"
397 [ -n "${HTTPS_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${HTTPS_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTPS_PROXY"
398 [ -n "${no_proxy}" ] && echo "no_proxy=${no_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} no_proxy"
399 [ -n "${NO_PROXY}" ] && echo "NO_PROXY=${NO_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} NO_PROXY"
400
401 echo "OSM_BEHIND_PROXY=${OSM_BEHIND_PROXY}"
402 echo "OSM_PROXY_ENV_VARIABLES=${OSM_PROXY_ENV_VARIABLES}"
403
404 if [ -n "${OSM_BEHIND_PROXY}" ]; then
405 [ -z "$ASSUME_YES" ] && ! ask_user "
406 The following env variables have been found for the current user:
407 ${OSM_PROXY_ENV_VARIABLES}.
408
409 This suggests that this machine is behind a proxy and a special configuration is required.
410 The installer will install Docker CE, LXD and Juju to work behind a proxy using those
411 env variables.
412
413 Take into account that the installer uses apt, curl, wget, docker, lxd, juju and snap.
414 Depending on the program, the env variables to work behind a proxy might be different
415 (e.g. http_proxy vs HTTP_PROXY).
416
417 For that reason, it is strongly recommended that at least http_proxy, https_proxy, HTTP_PROXY
418 and HTTPS_PROXY are defined.
419
420 Finally, some of the programs (apt, snap) are run with sudo, requiring that
421 those env variables are also set for the root user. If you are not sure whether those variables
422 are configured for the root user, you can stop the installation now.
423
424 Do you want to proceed with the installation (Y/n)? " y && echo "Cancelled!" && exit 1
425 else
426 echo "This machine is not behind a proxy"
427 fi
428
429 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
430 }
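# A minimal proxy environment for a host behind a proxy might look like this (values are
# placeholders; as explained above, export them for the root user too):
#   export http_proxy=http://proxy.example.com:3128
#   export https_proxy=http://proxy.example.com:3128
#   export HTTP_PROXY=$http_proxy HTTPS_PROXY=$https_proxy
#   export no_proxy=localhost,127.0.0.1,.svc,.cluster.local NO_PROXY=$no_proxy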
431
432 function find_devops_folder() {
433 if [ -z "$OSM_DEVOPS" ]; then
434 if [ -n "$TEST_INSTALLER" ]; then
435 echo -e "\nUsing local devops repo for OSM installation"
436 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
437 else
438 echo -e "\nCreating temporary dir for OSM installation"
439 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
440 trap 'rm -rf "$OSM_DEVOPS"' EXIT
441 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
442 fi
443 fi
444 }
445
446 function install_osm() {
447 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
448
449 trap ctrl_c INT
450
451
452 check_osm_behind_proxy
453 check_packages "git wget curl tar snapd"
454 if [ -n "${INSTALL_JUJU}" ]; then
455 sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
456 fi
457 find_devops_folder
458
459 track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none os_info $os_info none none
460
461 track checks checkingroot_ok
462 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
463 track checks noroot_ok
464 ask_proceed
465 track checks proceed_ok
466
467 echo "Installing OSM"
468
469 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
470
471 echo "Determining IP address of the interface with the default route"
472 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
473 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
474 [ -z "$OSM_DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
475 OSM_DEFAULT_IP=`ip -o -4 a s ${OSM_DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
476 [ -z "$OSM_DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
477
478 # configure apt proxy
479 [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL
480
481 # if lxd is requested, we will install it
482 if [ -n "$INSTALL_LXD" ]; then
483 LXD_INSTALL_OPTS="-D ${OSM_DEVOPS} -i ${OSM_DEFAULT_IF} ${DEBUG_INSTALL}"
484 [ -n "${OSM_BEHIND_PROXY}" ] && LXD_INSTALL_OPTS="${LXD_INSTALL_OPTS} -P"
485 $OSM_DEVOPS/installers/install_lxd.sh ${LXD_INSTALL_OPTS} || FATAL_TRACK lxd "install_lxd.sh failed"
486 fi
487
488 track prereq prereqok_ok
489
490 if [ -n "$INSTALL_DOCKER" ] || [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
491 if [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
492 echo "Kubeadm requires docker, so docker will be installed."
493 fi
494 DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
495 [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
496 [ -n "${OSM_BEHIND_PROXY}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -P"
497 $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed"
498 fi
499 track docker_ce docker_ce_ok
500
501 $OSM_DEVOPS/installers/install_helm_client.sh -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
502 FATAL_TRACK k8scluster "install_helm_client.sh failed"
503 track helm_client install_helm_client_ok
504
505 if [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
506 KUBEADM_INSTALL_OPTS="-d ${OSM_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
507 $OSM_DEVOPS/installers/install_kubeadm_cluster.sh ${KUBEADM_INSTALL_OPTS} || \
508 FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed"
509 K8SCLUSTER_ADDONS_INSTALL_OPTS="-i ${OSM_DEFAULT_IP} -d ${OSM_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
510 $OSM_DEVOPS/installers/install_cluster_addons.sh ${K8SCLUSTER_ADDONS_INSTALL_OPTS} || \
511 FATAL_TRACK k8scluster "install_cluster_addons.sh failed"
512 fi
513 track k8scluster k8scluster_ok
514
515 if [ -n "${INSTALL_JUJU}" ]; then
516 JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_NAMESPACE} -i ${OSM_DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_CACHELXDIMAGES}"
517 [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
518 [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
519 [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
520 [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
521 [ -n "${OSM_BEHIND_PROXY}" ] && JUJU_OPTS="${JUJU_OPTS} -P"
522 $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
523 set_vca_variables
524 fi
525 track juju juju_ok
526
527 # Deploy OSM services
528 [ -z "$DOCKER_NOBUILD" ] && [ -z "$PULL_IMAGES" ] && generate_docker_images
529 track docker_images docker_images_ok
530
531 deploy_mongodb
532 track deploy_osm deploy_mongodb_ok
533 deploy_osm_services
534 track deploy_osm deploy_osm_services_k8s_ok
535 if [ -n "$INSTALL_K8S_MONITOR" ]; then
536 # install OSM MONITORING
537 install_k8s_monitoring
538 track deploy_osm install_k8s_monitoring_ok
539 fi
540 if [ -n "$INSTALL_NGSA" ]; then
541 # optional NGSA install
542 install_osm_ngsa_service
543 track deploy_osm install_osm_ngsa_ok
544 fi
545
546 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
547 track osmclient osmclient_ok
548
549 echo -e "Checking OSM health state..."
550 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_NAMESPACE} -k || \
551 (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
552 echo -e "Check OSM status with: kubectl -n ${OSM_NAMESPACE} get all" && \
553 track healthchecks osm_unhealthy didnotconverge)
554 track healthchecks after_healthcheck_ok
555
556 add_local_k8scluster
557 track final_ops add_local_k8scluster_ok
558
559 # if lxd is requested, iptables firewall is updated to work with both docker and LXD
560 if [ -n "$INSTALL_LXD" ]; then
561 arrange_docker_default_network_policy
562 fi
563
564 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README2.txt &> /dev/null
565 track end
566 sudo find /etc/osm
567 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
568 return 0
569 }
570
571 function install_to_openstack() {
572 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
573
574 if [ -z "$2" ]; then
575 FATAL "OpenStack installer requires a valid external network name"
576 fi
577
578 # Install Pip for Python3
579 sudo apt install -y python3-pip python3-venv
580 sudo -H LC_ALL=C python3 -m pip install -U pip
581
582 # Create a venv to avoid conflicts with the host installation
583 python3 -m venv $OPENSTACK_PYTHON_VENV
584
585 source $OPENSTACK_PYTHON_VENV/bin/activate
586
587 # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
588 python -m pip install -U wheel
589 python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
590
591 # Install the Openstack cloud module (ansible>=2.10)
592 ansible-galaxy collection install openstack.cloud
593
594 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
595
596 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
597
598 ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
599
600 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
601 ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
602 fi
603
604 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
605 ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
606 fi
607
608 # Execute the Ansible playbook based on openrc or clouds.yaml
609 if [ -e "$1" ]; then
610 . $1
611 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
612 $OSM_DEVOPS/installers/openstack/site.yml
613 else
614 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
615 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
616 fi
617
618 # Exit from venv
619 deactivate
620
621 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
622 return 0
623 }
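# Illustrative OpenStack installations (file names and network are placeholders):
#   ./full_install_osm.sh -O ~/openrc.sh -N public --volume
#   ./full_install_osm.sh -O mycloud -N public -f ~/.ssh/id_rsa.pub   # 'mycloud' taken from clouds.yaml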
624
625 function arrange_docker_default_network_policy() {
626 echo -e "Fixing firewall so docker and LXD can share the same host without affecting each other."
627 sudo iptables -I DOCKER-USER -j ACCEPT
628 sudo iptables-save | sudo tee /etc/iptables/rules.v4
629 sudo ip6tables-save | sudo tee /etc/iptables/rules.v6
630 }
631
632 function install_k8s_monitoring() {
633 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
634 # install OSM monitoring
635 sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh
636 sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
637 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
638 }
639
640 function dump_vars(){
641 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
642 echo "APT_PROXY_URL=$APT_PROXY_URL"
643 echo "K8S_CLUSTER_ENGINE=$K8S_CLUSTER_ENGINE"
644 echo "DEVELOP=$DEVELOP"
645 echo "DEBUG_INSTALL=$DEBUG_INSTALL"
646 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
647 echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
648 echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
649 echo "DOCKER_USER=$DOCKER_USER"
650 echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
651 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
652 echo "INSTALL_JUJU=$INSTALL_JUJU"
653 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
654 echo "INSTALL_LXD=$INSTALL_LXD"
655 echo "INSTALL_NGSA=$INSTALL_NGSA"
656 echo "INSTALL_DOCKER=$INSTALL_DOCKER"
657 echo "INSTALL_ONLY=$INSTALL_ONLY"
658 echo "INSTALL_PLA=$INSTALL_PLA"
659 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
660 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
661 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
662 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
663 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
664 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
665 echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE"
666 echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE"
667 echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME"
668 echo "OSM_DEVOPS=$OSM_DEVOPS"
669 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
670 echo "OSM_HELM_WORK_DIR=$OSM_HELM_WORK_DIR"
671 echo "OSM_NAMESPACE=$OSM_NAMESPACE"
672 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
673 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
674 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
675 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
676 echo "PULL_IMAGES=$PULL_IMAGES"
677 echo "RECONFIGURE=$RECONFIGURE"
678 echo "RELEASE=$RELEASE"
679 echo "REPOSITORY=$REPOSITORY"
680 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
681 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
682 echo "SHOWOPTS=$SHOWOPTS"
683 echo "TEST_INSTALLER=$TEST_INSTALLER"
684 echo "TO_REBUILD=$TO_REBUILD"
685 echo "UNINSTALL=$UNINSTALL"
686 echo "UPDATE=$UPDATE"
687 echo "Install from specific refspec (-b): $COMMIT_ID"
688 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
689 }
690
691 function parse_docker_registry_url() {
692 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
693 DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
694 DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
695 DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
696 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
697 }
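# Illustrative parsing (registry and credentials are placeholders):
#   DOCKER_REGISTRY_URL="user:pass@registry.example.com:5000" is split into
#   DOCKER_REGISTRY_USER=user, DOCKER_REGISTRY_PASSWORD=pass and DOCKER_REGISTRY_URL=registry.example.com:5000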
698
699 function ctrl_c() {
700 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
701 echo "** Trapped CTRL-C"
702 FATAL "User stopped the installation"
703 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
704 }
705
706 UNINSTALL=""
707 DEVELOP=""
708 UPDATE=""
709 RECONFIGURE=""
710 TEST_INSTALLER=""
711 INSTALL_LXD=""
712 SHOWOPTS=""
713 COMMIT_ID=""
714 ASSUME_YES=""
715 APT_PROXY_URL=""
716 K8S_CLUSTER_ENGINE="kubeadm"
717 INSTALL_FROM_SOURCE=""
718 DEBUG_INSTALL=""
719 RELEASE="ReleaseTEN"
720 REPOSITORY="stable"
721 INSTALL_K8S_MONITOR=""
722 INSTALL_NGSA="y"
723 INSTALL_PLA=""
724 INSTALL_VIMEMU=""
725 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
726 LXD_REPOSITORY_PATH=""
727 INSTALL_TO_OPENSTACK=""
728 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
729 OPENSTACK_PUBLIC_NET_NAME=""
730 OPENSTACK_ATTACH_VOLUME="false"
731 OPENSTACK_SSH_KEY_FILE=""
732 OPENSTACK_USERDATA_FILE=""
733 OPENSTACK_VM_NAME="server-osm"
734 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
735 INSTALL_ONLY=""
736 TO_REBUILD=""
737 INSTALL_DOCKER=""
738 INSTALL_JUJU=""
739 INSTALL_NOHOSTCLIENT=""
740 INSTALL_CACHELXDIMAGES=""
741 OSM_DEVOPS=
742 OSM_VCA_HOST=
743 OSM_VCA_SECRET=
744 OSM_VCA_PUBKEY=
745 OSM_VCA_CLOUDNAME="localhost"
746 OSM_VCA_K8S_CLOUDNAME="k8scloud"
747 OSM_NAMESPACE=osm
748 NO_HOST_PORTS=""
749 DOCKER_NOBUILD=""
750 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
751 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
752 OSM_WORK_DIR="/etc/osm"
753 OSM_HELM_WORK_DIR="${OSM_WORK_DIR}/helm"
754 OSM_HOST_VOL="/var/lib/osm"
755 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_NAMESPACE}"
756 OSM_DOCKER_TAG="testing-daily"
757 DOCKER_USER=opensourcemano
758 PULL_IMAGES="y"
759 KAFKA_TAG=2.11-1.0.2
760 KIWIGRID_K8S_SIDECAR_TAG="1.15.6"
761 PROMETHEUS_TAG=v2.28.1
762 GRAFANA_TAG=8.1.1
763 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
764 PROMETHEUS_CADVISOR_TAG=latest
765 KEYSTONEDB_TAG=10
766 OSM_DATABASE_COMMONKEY=
767 ELASTIC_VERSION=6.4.2
768 ELASTIC_CURATOR_VERSION=5.5.4
769 POD_NETWORK_CIDR=10.244.0.0/16
770 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
771 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
772 DOCKER_REGISTRY_URL=
773 DOCKER_PROXY_URL=
774 MODULE_DOCKER_TAG=
775 OSM_INSTALLATION_TYPE="Default"
776
777 while getopts ":a:b:c:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
778 case "${o}" in
779 a)
780 APT_PROXY_URL=${OPTARG}
781 ;;
782 b)
783 COMMIT_ID=${OPTARG}
784 PULL_IMAGES=""
785 ;;
786 c)
787 K8S_CLUSTER_ENGINE=${OPTARG}
788 [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ] && continue
789 [ "${K8S_CLUSTER_ENGINE}" == "k3s" ] && continue
790 [ "${K8S_CLUSTER_ENGINE}" == "microk8s" ] && continue
791 echo -e "Invalid argument for -c : ' ${K8S_CLUSTER_ENGINE}'\n" >&2
792 usage && exit 1
793 ;;
794 r)
795 REPOSITORY="${OPTARG}"
796 REPO_ARGS+=(-r "$REPOSITORY")
797 ;;
798 k)
799 REPOSITORY_KEY="${OPTARG}"
800 REPO_ARGS+=(-k "$REPOSITORY_KEY")
801 ;;
802 u)
803 REPOSITORY_BASE="${OPTARG}"
804 REPO_ARGS+=(-u "$REPOSITORY_BASE")
805 ;;
806 R)
807 RELEASE="${OPTARG}"
808 REPO_ARGS+=(-R "$RELEASE")
809 ;;
810 D)
811 OSM_DEVOPS="${OPTARG}"
812 ;;
813 o)
814 INSTALL_ONLY="y"
815 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
816 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
817 ;;
818 O)
819 INSTALL_TO_OPENSTACK="y"
820 if [ -n "${OPTARG}" ]; then
821 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
822 else
823 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
824 usage && exit 1
825 fi
826 ;;
827 f)
828 OPENSTACK_SSH_KEY_FILE="${OPTARG}"
829 ;;
830 F)
831 OPENSTACK_USERDATA_FILE="${OPTARG}"
832 ;;
833 N)
834 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
835 ;;
836 m)
837 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
838 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
839 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
840 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
841 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
842 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
843 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
844 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
845 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
846 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
847 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
848 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
849 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
850 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
851 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
852 ;;
853 H)
854 OSM_VCA_HOST="${OPTARG}"
855 ;;
856 S)
857 OSM_VCA_SECRET="${OPTARG}"
858 ;;
859 s)
860 OSM_NAMESPACE="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
861 ;;
862 t)
863 OSM_DOCKER_TAG="${OPTARG}"
864 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
865 ;;
866 U)
867 DOCKER_USER="${OPTARG}"
868 ;;
869 P)
870 OSM_VCA_PUBKEY=$(cat ${OPTARG})
871 ;;
872 A)
873 OSM_VCA_APIPROXY="${OPTARG}"
874 ;;
875 l)
876 LXD_CLOUD_FILE="${OPTARG}"
877 ;;
878 L)
879 LXD_CRED_FILE="${OPTARG}"
880 ;;
881 K)
882 CONTROLLER_NAME="${OPTARG}"
883 ;;
884 d)
885 DOCKER_REGISTRY_URL="${OPTARG}"
886 ;;
887 p)
888 DOCKER_PROXY_URL="${OPTARG}"
889 ;;
890 T)
891 MODULE_DOCKER_TAG="${OPTARG}"
892 ;;
893 -)
894 [ "${OPTARG}" == "help" ] && usage && exit 0
895 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
896 [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
897 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
898 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
899 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
900 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
901 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
902 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
903 [ "${OPTARG}" == "lxd" ] && INSTALL_LXD="y" && continue
904 [ "${OPTARG}" == "nolxd" ] && INSTALL_LXD="" && continue
905 [ "${OPTARG}" == "docker" ] && INSTALL_DOCKER="y" && continue
906 [ "${OPTARG}" == "nodocker" ] && INSTALL_DOCKER="" && continue
907 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
908 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
909 [ "${OPTARG}" == "juju" ] && INSTALL_JUJU="y" && continue
910 [ "${OPTARG}" == "nojuju" ] && INSTALL_JUJU="" && continue
911 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
912 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
913 [ "${OPTARG}" == "pullimages" ] && continue
914 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
915 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
916 [ "${OPTARG}" == "bundle" ] && continue
917 [ "${OPTARG}" == "k8s" ] && continue
918 [ "${OPTARG}" == "lxd-cred" ] && continue
919 [ "${OPTARG}" == "microstack" ] && continue
920 [ "${OPTARG}" == "overlay" ] && continue
921 [ "${OPTARG}" == "only-vca" ] && continue
922 [ "${OPTARG}" == "small-profile" ] && continue
923 [ "${OPTARG}" == "vca" ] && continue
924 [ "${OPTARG}" == "ha" ] && continue
925 [ "${OPTARG}" == "tag" ] && continue
926 [ "${OPTARG}" == "registry" ] && continue
927 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
928 [ "${OPTARG}" == "old-sa" ] && INSTALL_NGSA="" && continue
929 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
930 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
931 [ "${OPTARG}" == "nocachelxdimages" ] && continue
932 [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
933 echo -e "Invalid option: '--$OPTARG'\n" >&2
934 usage && exit 1
935 ;;
936 :)
937 echo "Option -$OPTARG requires an argument" >&2
938 usage && exit 1
939 ;;
940 \?)
941 echo -e "Invalid option: '-$OPTARG'\n" >&2
942 usage && exit 1
943 ;;
944 h)
945 usage && exit 0
946 ;;
947 y)
948 ASSUME_YES="y"
949 ;;
950 *)
951 usage && exit 1
952 ;;
953 esac
954 done
955
956 source $OSM_DEVOPS/common/all_funcs
957
958 [ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
959 [ -n "$SHOWOPTS" ] && dump_vars && exit 0
960
961 # Uninstall if "--uninstall"
962 if [ -n "$UNINSTALL" ]; then
963 if [ -n "$CHARMED" ]; then
964 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $OSM_DOCKER_TAG "$@" || \
965 FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
966 else
967 ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
968 FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
969 fi
970 echo -e "\nDONE"
971 exit 0
972 fi
973
974 # Installation starts here
975
976 # Get README and create OSM_TRACK_INSTALLATION_ID
977 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README.txt &> /dev/null
978 export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
979
980 # Get OS info to be tracked
981 os_distro=$(lsb_release -i 2>/dev/null | awk '{print $3}')
982 echo $os_distro
983 os_release=$(lsb_release -r 2>/dev/null | awk '{print $2}')
984 echo $os_release
985 os_info="${os_distro}_${os_release}"
986 os_info="${os_info// /_}"
987
988 if [ -n "$CHARMED" ]; then
989 # Charmed installation
990 sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
991 ${OSM_DEVOPS}/installers/charmed_install.sh --tag $OSM_DOCKER_TAG "$@" || \
992 FATAL_TRACK charmed_install "charmed_install.sh failed"
993 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README2.txt &> /dev/null
994 echo -e "\nDONE"
995 exit 0
996 elif [ -n "$INSTALL_TO_OPENSTACK" ]; then
997 # Installation to Openstack
998 install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
999 echo -e "\nDONE"
1000 exit 0
1001 else
1002 # Community_installer
1003 # Check incompatible options
1004 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1005 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1006 # Special cases go first
1007 # if develop, we force master
1008 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1009 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1010 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_NGSA" ] && install_osm_ngsa_service
1011 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1012 # This is where installation starts
1013 install_osm
1014 echo -e "\nDONE"
1015 exit 0
1016 fi