Separate helm client installation from install_kubeadm_cluster
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15
16 function usage(){
17 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
18 echo -e "usage: $0 [OPTIONS]"
19 echo -e "Install OSM"
20 echo -e " OPTIONS"
21 echo -e " -h / --help: print this help"
22 echo -e " -y: do not prompt for confirmation, assumes yes"
23 echo -e " -r <repo>: use specified repository name for osm packages"
24 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
25 echo -e " -u <repo base>: use specified repository url for osm packages"
26 echo -e " -k <repo key>: use specified repository public key url"
27 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
28 echo -e " -b master (main dev branch)"
29 echo -e " -b v2.0 (v2.0 branch)"
30 echo -e " -b tags/v1.1.0 (a specific tag)"
31 echo -e " ..."
32 echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
33 echo -e " -c <kubernetes engine>: use a specific kubernetes engine (options: kubeadm, k3s, microk8s), default is kubeadm"
34 echo -e " -s <namespace> namespace when installed using k8s, default is osm"
35 echo -e " -H <VCA host> use specific juju host controller IP"
36 echo -e " -S <VCA secret> use VCA/juju secret key"
37 echo -e " -P <VCA pubkey> use VCA/juju public key file"
38 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " --old-sa: install old Service Assurance framework (MON, POL); do not install Airflow and Pushgateway"
41 echo -e " --ng-sa: install new Service Assurance framework (Airflow, AlertManager and Pushgateway) (enabled by default)"
42 echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
43 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor, ng-sa)"
44 echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
45 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
46 echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
47 echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
48 echo -e " -D <devops path> use local devops installation path"
49 echo -e " -w <work dir> Location to store runtime installation"
50 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
51 echo -e " -l: LXD cloud yaml file"
52 echo -e " -L: LXD credentials yaml file"
53 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
54 echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
55 echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
56 echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
57 echo -e " --debug: debug mode"
58 echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
59 echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
60 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
61 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
62 echo -e " --nojuju: do not install juju, assumes it is already installed"
63 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
64 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
65 echo -e " --nohostclient: do not install the osmclient"
66 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
67 echo -e " --source: install OSM from source code using the latest stable tag"
68 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
69 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
70 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
71 echo -e " --volume: create a VM volume when installing to OpenStack"
72 echo -e " --showopts: print chosen options and exit (only for debugging)"
73 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
74 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
75 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
76 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
77 echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
78 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
79 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
80 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
81 echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
82 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
83 echo -e " [--tag]: Docker image tag. (--charmed option)"
84 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
85 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
86 }
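# Illustrative invocations (hypothetical values, shown only as examples of the options above):
#   ./full_install_osm.sh -y                     # unattended installation with defaults (kubeadm, NG-SA)
#   ./full_install_osm.sh -y -c k3s -s myosm     # use k3s as kubernetes engine and a custom namespace
#   ./full_install_osm.sh --charmed --registry myuser:mypass@myregistry.example:5000
#   ./full_install_osm.sh --uninstall            # remove an existing installation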
87
88 # takes a juju/accounts.yaml file and returns the password specific
89 # to a given controller. I wrote this using only bash tools to minimize
90 # additions of other packages
91 function parse_juju_password {
92 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
93 password_file="${HOME}/.local/share/juju/accounts.yaml"
94 local controller_name=$1
95 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
96 sed -ne "s|^\($s\):|\1|" \
97 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
98 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
99 awk -F$fs -v controller=$controller_name '{
100 indent = length($1)/2;
101 vname[indent] = $2;
102 for (i in vname) {if (i > indent) {delete vname[i]}}
103 if (length($3) > 0) {
104 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
105 if (match(vn,controller) && match($2,"password")) {
106 printf("%s",$3);
107 }
108 }
109 }'
110 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
111 }
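# Illustrative only: assuming accounts.yaml is laid out roughly as
#   controllers:
#     osm:
#       user: admin
#       password: s3cr3t
# then "parse_juju_password osm" prints "s3cr3t".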
112
113 function set_vca_variables() {
114 OSM_VCA_CLOUDNAME="lxd-cloud"
115 [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
116 if [ -z "$OSM_VCA_HOST" ]; then
117 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_NAMESPACE"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
118 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
119 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
120 fi
121 if [ -z "$OSM_VCA_SECRET" ]; then
122 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_NAMESPACE)
123 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
124 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
125 fi
126 if [ -z "$OSM_VCA_PUBKEY" ]; then
127 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
128 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
129 fi
130 if [ -z "$OSM_VCA_CACERT" ]; then
131 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_NAMESPACE '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
132 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
133 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
134 fi
135 }
136
137 function generate_secret() {
138 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
139 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
140 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
141 }
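# Example (hypothetical caller): the 32-character alphanumeric secret can be captured with
#   MY_SECRET=$(generate_secret)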
142
143 function check_packages() {
144 NEEDED_PACKAGES="$1"
145 echo -e "Checking required packages: ${NEEDED_PACKAGES}"
146 for PACKAGE in ${NEEDED_PACKAGES} ; do
147 dpkg -L ${PACKAGE} &> /dev/null
148 if [ $? -ne 0 ]; then
149 echo -e "Package ${PACKAGE} is not installed."
150 echo -e "Updating apt-cache ..."
151 sudo apt-get update
152 echo -e "Installing ${PACKAGE} ..."
153 sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
154 fi
155 done
156 echo -e "Required packages are present: ${NEEDED_PACKAGES}"
157 }
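# Example usage (as done later in install_osm): check_packages "git wget curl tar snapd"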
158
159 function ask_user(){
160 # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
161 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
162 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
163 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
164 read -e -p "$1" USER_CONFIRMATION
165 while true ; do
166 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
167 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
168 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
169 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
170 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
171 done
172 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
173 }
174
175 function install_osmclient(){
176 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
177 CLIENT_RELEASE=${RELEASE#"-R "}
178 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
179 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
180 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
181 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
182 curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
183 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
184 sudo apt-get -y update
185 sudo DEBIAN_FRONTEND=noninteractive apt-get install -y python3-pip
186 sudo -H LC_ALL=C python3 -m pip install -U pip
187 sudo DEBIAN_FRONTEND=noninteractive apt-get install -y python3-osm-im python3-osmclient
188 if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
189 python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
190 fi
191 if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
192 sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libmagic1
193 python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
194 fi
195 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
196 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
197 echo -e "\nOSM client installed"
198 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
199 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
200 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
201 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
202 else
203 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
204 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
205 echo " export OSM_HOSTNAME=<OSM_host>"
206 fi
207 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
208 return 0
209 }
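# For reference, with the default repository settings the apt source added above resolves
# approximately to:
#   deb [arch=amd64] https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable osmclient IM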
210
211 function docker_login() {
212 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
213 echo "Docker login"
214 [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
215 echo "${DOCKER_REGISTRY_PASSWORD}" | sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
216 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
217 }
218
219 function generate_docker_images() {
220 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
221 echo "Generating docker images"
222 _build_from=$COMMIT_ID
223 [ -z "$_build_from" ] && _build_from="latest"
224 echo "OSM Docker images generated from $_build_from"
225 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
226 trap 'rm -rf "${LWTEMPDIR}"' EXIT
227 for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
228 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
229 module_lower=${module,,}
230 if [ "$module" == "PLA" ] && [ -z "$INSTALL_PLA" ]; then
231 continue
232 fi
233 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
234 git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
235 sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
236 fi
237 done
238 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
239 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
240 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
241 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
242 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
243 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
244 fi
245 echo "Finished generation of docker images"
246 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
247 }
248
249 function cmp_overwrite() {
250 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
251 file1="$1"
252 file2="$2"
253 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
254 if [ -f "${file2}" ]; then
255 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
256 else
257 cp -b ${file1} ${file2}
258 fi
259 fi
260 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
261 }
262
263 #deploys osm pods and services
264 function deploy_osm_services() {
265 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
266 # helm has already been installed by install_helm_client.sh earlier in the installation
267
268 # Generate helm values to be passed with -f osm-values.yaml
269 sudo mkdir -p ${OSM_HELM_WORK_DIR}
270 if [ -n "${INSTALL_JUJU}" ]; then
271 sudo bash -c "cat << EOF > ${OSM_HELM_WORK_DIR}/osm-values.yaml
272 vca:
273 pubkey: \"${OSM_VCA_PUBKEY}\"
274 EOF"
275 fi
276
277 # Generate helm values to be passed with --set
278 OSM_HELM_OPTS=""
279 # OSM_HELM_OPTS="${OSM_HELM_OPTS} --set nbi.useOsmSecret=false"
280 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.image.repositoryBase=${DOCKER_REGISTRY_URL}${DOCKER_USER}"
281 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set mysql.dbHostPath=${OSM_NAMESPACE_VOL}"
282 if [ -n "${INSTALL_JUJU}" ]; then
283 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.enabled=true"
284 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.host=${OSM_VCA_HOST}"
285 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.secret=${OSM_VCA_SECRET}"
286 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.cacert=${OSM_VCA_CACERT}"
287 fi
288 [ -n "$OSM_VCA_APIPROXY" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set lcm.config.OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}"
289 [ ! "$OSM_DOCKER_TAG" == "testing-daily" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set-string global.image.tag=${OSM_DOCKER_TAG}"
290 [ -n "${INSTALL_NGSA}" ] || OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.oldServiceAssurance=true"
291 if [ -n "${OSM_BEHIND_PROXY}" ]; then
292 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.behindHttpProxy=true"
293 [ -n "${HTTP_PROXY}" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.HTTP_PROXY=\"${HTTP_PROXY}\""
294 [ -n "${HTTPS_PROXY}" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.HTTPS_PROXY=\"${HTTPS_PROXY}\""
295 if [ -n "${NO_PROXY}" ]; then
296 if [[ ! "${NO_PROXY}" =~ .*".svc".* ]]; then
297 NO_PROXY="${NO_PROXY},.svc"
298 fi
299 if [[ ! "${NO_PROXY}" =~ .*".cluster.local".* ]]; then
300 NO_PROXY="${NO_PROXY},.cluster.local"
301 fi
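# Commas act as list separators in helm --set values, so they are escaped as "\," below to keep
# NO_PROXY as a single string (illustrative: "10.0.0.0/8,.svc" becomes "10.0.0.0/8\,.svc")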
302 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.NO_PROXY=\"${NO_PROXY//,/\,}\""
303 fi
304 fi
305
306 if [ -n "${INSTALL_JUJU}" ]; then
307 OSM_HELM_OPTS="-f ${OSM_HELM_WORK_DIR}/osm-values.yaml ${OSM_HELM_OPTS}"
308 fi
309 echo "helm install -n $OSM_NAMESPACE --create-namespace $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm ${OSM_HELM_OPTS}"
310 helm install -n $OSM_NAMESPACE --create-namespace $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm ${OSM_HELM_OPTS}
311 # Override existing values.yaml with the final values.yaml used to install OSM
312 helm -n $OSM_NAMESPACE get values $OSM_NAMESPACE | sudo tee ${OSM_HELM_WORK_DIR}/osm-values.yaml
313 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
314 }
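# For reference, with the default variables (namespace "osm", DOCKER_USER "opensourcemano", no juju,
# no proxy) the command printed and run above is roughly:
#   helm install -n osm --create-namespace osm $OSM_DEVOPS/installers/helm/osm \
#     --set global.image.repositoryBase=opensourcemano --set mysql.dbHostPath=/var/lib/osm/osm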
315
316 #deploy charmed services
317 function deploy_charmed_services() {
318 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
319 juju add-model $OSM_NAMESPACE $OSM_VCA_K8S_CLOUDNAME
320 juju deploy ch:mongodb-k8s -m $OSM_NAMESPACE --channel latest/stable
321 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
322 }
323
324 #deploy mongodb
325 function deploy_mongodb() {
326 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
327 MONGO_OPTS="-d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} -U ${DOCKER_USER} ${DEBUG_INSTALL}"
328 [ -n "${DOCKER_REGISTRY_URL}" ] && MONGO_OPTS="${MONGO_OPTS} -r ${DOCKER_REGISTRY_URL}"
329 $OSM_DEVOPS/installers/install_mongodb.sh ${MONGO_OPTS} || \
330 FATAL_TRACK install_osm_mongodb_service "install_mongodb.sh failed"
331 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
332 }
333
334 function install_osm_ngsa_service() {
335 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
336 NGSA_OPTS="-d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} -U ${DOCKER_USER} ${DEBUG_INSTALL}"
337 [ -n "${DOCKER_REGISTRY_URL}" ] && NGSA_OPTS="${NGSA_OPTS} -r ${DOCKER_REGISTRY_URL}"
338 $OSM_DEVOPS/installers/install_ngsa.sh ${NGSA_OPTS} || \
339 FATAL_TRACK install_osm_ngsa_service "install_ngsa.sh failed"
340 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
341 }
342
343 function add_local_k8scluster() {
344 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
345 /usr/bin/osm --all-projects vim-create \
346 --name _system-osm-vim \
347 --account_type dummy \
348 --auth_url http://dummy \
349 --user osm --password osm --tenant osm \
350 --description "dummy" \
351 --config '{management_network_name: mgmt}'
352 /usr/bin/osm --all-projects k8scluster-add \
353 --creds ${HOME}/.kube/config \
354 --vim _system-osm-vim \
355 --k8s-nets '{"net1": null}' \
356 --version '1.15' \
357 --description "OSM Internal Cluster" \
358 _system-osm-k8s
359 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
360 }
361
362 function configure_apt_proxy() {
363 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
364 OSM_APT_PROXY=$1
365 OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
366 echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
367 if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
368 sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
369 Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
370 EOF"
371 else
372 sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
373 fi
374 sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
375 track prereq apt_proxy_configured_ok
376 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
377 }
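# For reference, the generated ${OSM_APT_PROXY_FILE} contains a single line such as
#   Acquire::http { Proxy "http://my-proxy.example:3128"; }
# (the proxy URL shown is only an example)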
378
379 function ask_proceed() {
380 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
381
382 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
383 1. Install and configure LXD
384 2. Install juju
385 3. Install docker CE
386 4. Disable swap space
387 5. Install and initialize Kubernetes
388 as pre-requirements.
389 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
390
391 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
392 }
393
394 function check_osm_behind_proxy() {
395 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
396
397 export OSM_BEHIND_PROXY=""
398 export OSM_PROXY_ENV_VARIABLES=""
399 [ -n "${http_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "http_proxy=${http_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} http_proxy"
400 [ -n "${https_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${https_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} https_proxy"
401 [ -n "${HTTP_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTP_PROXY=${HTTP_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTP_PROXY"
402 [ -n "${HTTPS_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTPS_PROXY=${HTTPS_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTPS_PROXY"
403 [ -n "${no_proxy}" ] && echo "no_proxy=${no_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} no_proxy"
404 [ -n "${NO_PROXY}" ] && echo "NO_PROXY=${NO_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} NO_PROXY"
405
406 echo "OSM_BEHIND_PROXY=${OSM_BEHIND_PROXY}"
407 echo "OSM_PROXY_ENV_VARIABLES=${OSM_PROXY_ENV_VARIABLES}"
408
409 if [ -n "${OSM_BEHIND_PROXY}" ]; then
410 [ -z "$ASSUME_YES" ] && ! ask_user "
411 The following env variables have been found for the current user:
412 ${OSM_PROXY_ENV_VARIABLES}.
413
414 This suggests that this machine is behind a proxy and a special configuration is required.
415 The installer will install Docker CE, LXD and Juju to work behind a proxy using those
416 env variables.
417
418 Take into account that the installer uses apt, curl, wget, docker, lxd, juju and snap.
419 Depending on the program, the env variables to work behind a proxy might be different
420 (e.g. http_proxy vs HTTP_PROXY).
421
422 For that reason, it is strongly recommended that at least http_proxy, https_proxy, HTTP_PROXY
423 and HTTPS_PROXY are defined.
424
425 Finally, some of the programs (apt, snap) are run with sudo, requiring that
426 those env variables are also set for root user. If you are not sure whether those variables
427 are configured for the root user, you can stop the installation now.
428
429 Do you want to proceed with the installation (Y/n)? " y && echo "Cancelled!" && exit 1
430 else
431 echo "This machine is not behind a proxy"
432 fi
433
434 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
435 }
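# Illustrative: when running behind a proxy, the proxy env variables are expected to be exported
# before launching the installer, e.g.:
#   export http_proxy=http://my-proxy.example:3128 https_proxy=http://my-proxy.example:3128
#   export HTTP_PROXY=$http_proxy HTTPS_PROXY=$https_proxy no_proxy=localhost,127.0.0.1 NO_PROXY=$no_proxy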
436
437 function find_devops_folder() {
438 if [ -z "$OSM_DEVOPS" ]; then
439 if [ -n "$TEST_INSTALLER" ]; then
440 echo -e "\nUsing local devops repo for OSM installation"
441 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
442 else
443 echo -e "\nCreating temporary dir for OSM installation"
444 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
445 trap 'rm -rf "$OSM_DEVOPS"' EXIT
446 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
447 fi
448 fi
449 }
450
451 function install_osm() {
452 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
453
454 trap ctrl_c INT
455
456
457 check_osm_behind_proxy
458 check_packages "git wget curl tar snapd"
459 if [ -n "${INSTALL_JUJU}" ]; then
460 sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
461 fi
462 find_devops_folder
463
464 track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none os_info $os_info none none
465
466 track checks checkingroot_ok
467 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
468 track checks noroot_ok
469 ask_proceed
470 track checks proceed_ok
471
472 echo "Installing OSM"
473
474 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
475
476 echo "Determining IP address of the interface with the default route"
477 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
478 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
479 [ -z "$OSM_DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
480 OSM_DEFAULT_IP=`ip -o -4 a s ${OSM_DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
481 [ -z "$OSM_DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
482
483 # configure apt proxy
484 [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL
485
486 # if lxd is requested, we will install it
487 if [ -n "$INSTALL_LXD" ]; then
488 LXD_INSTALL_OPTS="-D ${OSM_DEVOPS} -i ${OSM_DEFAULT_IF} ${DEBUG_INSTALL}"
489 [ -n "${OSM_BEHIND_PROXY}" ] && LXD_INSTALL_OPTS="${LXD_INSTALL_OPTS} -P"
490 $OSM_DEVOPS/installers/install_lxd.sh ${LXD_INSTALL_OPTS} || FATAL_TRACK lxd "install_lxd.sh failed"
491 fi
492
493 track prereq prereqok_ok
494
495 if [ -n "$INSTALL_DOCKER" ] || [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
496 if [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
497 echo "Kubeadm requires docker, so docker will be installed."
498 fi
499 DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
500 [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
501 [ -n "${OSM_BEHIND_PROXY}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -P"
502 $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed"
503 fi
504 track docker_ce docker_ce_ok
505
506 $OSM_DEVOPS/installers/install_helm_client.sh -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
507 FATAL_TRACK k8scluster "install_helm_client.sh failed"
508 track helm_client install_helm_client_ok
509
510 if [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
511 KUBEADM_INSTALL_OPTS="-i ${OSM_DEFAULT_IP} -d ${OSM_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
512 $OSM_DEVOPS/installers/install_kubeadm_cluster.sh ${KUBEADM_INSTALL_OPTS} || \
513 FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed"
514 fi
515 track k8scluster k8scluster_ok
516
517 if [ -n "${INSTALL_JUJU}" ]; then
518 JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_NAMESPACE} -i ${OSM_DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_CACHELXDIMAGES}"
519 [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
520 [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
521 [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
522 [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
523 [ -n "${OSM_BEHIND_PROXY}" ] && JUJU_OPTS="${JUJU_OPTS} -P"
524 $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
525 set_vca_variables
526 fi
527 track juju juju_ok
528
529 # Deploy OSM services
530 [ -z "$DOCKER_NOBUILD" ] && [ -z "$PULL_IMAGES" ] && generate_docker_images
531 track docker_images docker_images_ok
532
533 deploy_mongodb
534 track deploy_osm deploy_mongodb_ok
535 deploy_osm_services
536 track deploy_osm deploy_osm_services_k8s_ok
537 if [ -n "$INSTALL_K8S_MONITOR" ]; then
538 # install OSM MONITORING
539 install_k8s_monitoring
540 track deploy_osm install_k8s_monitoring_ok
541 fi
542 if [ -n "$INSTALL_NGSA" ]; then
543 # optional NGSA install
544 install_osm_ngsa_service
545 track deploy_osm install_osm_ngsa_ok
546 fi
547
548 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
549 track osmclient osmclient_ok
550
551 echo -e "Checking OSM health state..."
552 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_NAMESPACE} -k || \
553 (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
554 echo -e "Check OSM status with: kubectl -n ${OSM_NAMESPACE} get all" && \
555 track healthchecks osm_unhealthy didnotconverge)
556 track healthchecks after_healthcheck_ok
557
558 add_local_k8scluster
559 track final_ops add_local_k8scluster_ok
560
561 # if lxd is requested, iptables firewall is updated to work with both docker and LXD
562 if [ -n "$INSTALL_LXD" ]; then
563 arrange_docker_default_network_policy
564 fi
565
566 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README2.txt &> /dev/null
567 track end
568 sudo find /etc/osm
569 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
570 return 0
571 }
572
573 function install_to_openstack() {
574 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
575
576 if [ -z "$2" ]; then
577 FATAL "OpenStack installer requires a valid external network name"
578 fi
579
580 # Install Pip for Python3
581 sudo apt install -y python3-pip python3-venv
582 sudo -H LC_ALL=C python3 -m pip install -U pip
583
584 # Create a venv to avoid conflicts with the host installation
585 python3 -m venv $OPENSTACK_PYTHON_VENV
586
587 source $OPENSTACK_PYTHON_VENV/bin/activate
588
589 # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
590 python -m pip install -U wheel
591 python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
592
593 # Install the Openstack cloud module (ansible>=2.10)
594 ansible-galaxy collection install openstack.cloud
595
596 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
597
598 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
599
600 ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
601
602 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
603 ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
604 fi
605
606 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
607 ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
608 fi
609
610 # Execute the Ansible playbook based on openrc or clouds.yaml
611 if [ -e "$1" ]; then
612 . $1
613 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
614 $OSM_DEVOPS/installers/openstack/site.yml
615 else
616 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
617 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
618 fi
619
620 # Exit from venv
621 deactivate
622
623 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
624 return 0
625 }
626
627 function arrange_docker_default_network_policy() {
628 echo -e "Fixing firewall so docker and LXD can share the same host without affecting each other."
629 sudo iptables -I DOCKER-USER -j ACCEPT
630 sudo iptables-save | sudo tee /etc/iptables/rules.v4
631 sudo ip6tables-save | sudo tee /etc/iptables/rules.v6
632 }
633
634 function install_k8s_monitoring() {
635 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
636 # install OSM monitoring
637 sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh
638 sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
639 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
640 }
641
642 function dump_vars(){
643 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
644 echo "APT_PROXY_URL=$APT_PROXY_URL"
645 echo "K8S_CLUSTER_ENGINE=$K8S_CLUSTER_ENGINE"
646 echo "DEVELOP=$DEVELOP"
647 echo "DEBUG_INSTALL=$DEBUG_INSTALL"
648 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
649 echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
650 echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
651 echo "DOCKER_USER=$DOCKER_USER"
652 echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
653 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
654 echo "INSTALL_JUJU=$INSTALL_JUJU"
655 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
656 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
657 echo "INSTALL_LXD=$INSTALL_LXD"
658 echo "INSTALL_NGSA=$INSTALL_NGSA"
659 echo "INSTALL_DOCKER=$INSTALL_DOCKER"
660 echo "INSTALL_ONLY=$INSTALL_ONLY"
661 echo "INSTALL_PLA=$INSTALL_PLA"
662 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
663 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
664 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
665 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
666 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
667 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
668 echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE"
669 echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE"
670 echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME"
671 echo "OSM_DEVOPS=$OSM_DEVOPS"
672 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
673 echo "OSM_HELM_WORK_DIR=$OSM_HELM_WORK_DIR"
674 echo "OSM_NAMESPACE=$OSM_NAMESPACE"
675 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
676 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
677 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
678 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
679 echo "PULL_IMAGES=$PULL_IMAGES"
680 echo "RECONFIGURE=$RECONFIGURE"
681 echo "RELEASE=$RELEASE"
682 echo "REPOSITORY=$REPOSITORY"
683 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
684 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
685 echo "SHOWOPTS=$SHOWOPTS"
686 echo "TEST_INSTALLER=$TEST_INSTALLER"
687 echo "TO_REBUILD=$TO_REBUILD"
688 echo "UNINSTALL=$UNINSTALL"
689 echo "UPDATE=$UPDATE"
690 echo "Install from specific refspec (-b): $COMMIT_ID"
691 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
692 }
693
694 function parse_docker_registry_url() {
695 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
696 DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
697 DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
698 DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
699 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
700 }
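# Illustrative: for "-d user1:pass1@myregistry.example:5000" the awk expressions above yield
#   DOCKER_REGISTRY_USER=user1, DOCKER_REGISTRY_PASSWORD=pass1, DOCKER_REGISTRY_URL=myregistry.example:5000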
701
702 function ctrl_c() {
703 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
704 echo "** Trapped CTRL-C"
705 FATAL "User stopped the installation"
706 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
707 }
708
709 UNINSTALL=""
710 DEVELOP=""
711 UPDATE=""
712 RECONFIGURE=""
713 TEST_INSTALLER=""
714 INSTALL_LXD=""
715 SHOWOPTS=""
716 COMMIT_ID=""
717 ASSUME_YES=""
718 APT_PROXY_URL=""
719 K8S_CLUSTER_ENGINE="kubeadm"
720 INSTALL_FROM_SOURCE=""
721 DEBUG_INSTALL=""
722 RELEASE="ReleaseTEN"
723 REPOSITORY="stable"
724 INSTALL_K8S_MONITOR=""
725 INSTALL_NGSA="y"
726 INSTALL_PLA=""
727 INSTALL_VIMEMU=""
728 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
729 LXD_REPOSITORY_PATH=""
730 INSTALL_LIGHTWEIGHT="y"
731 INSTALL_TO_OPENSTACK=""
732 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
733 OPENSTACK_PUBLIC_NET_NAME=""
734 OPENSTACK_ATTACH_VOLUME="false"
735 OPENSTACK_SSH_KEY_FILE=""
736 OPENSTACK_USERDATA_FILE=""
737 OPENSTACK_VM_NAME="server-osm"
738 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
739 INSTALL_ONLY=""
740 TO_REBUILD=""
741 INSTALL_DOCKER=""
742 INSTALL_JUJU=""
743 INSTALL_NOHOSTCLIENT=""
744 INSTALL_CACHELXDIMAGES=""
745 OSM_DEVOPS=
746 OSM_VCA_HOST=
747 OSM_VCA_SECRET=
748 OSM_VCA_PUBKEY=
749 OSM_VCA_CLOUDNAME="localhost"
750 OSM_VCA_K8S_CLOUDNAME="k8scloud"
751 OSM_NAMESPACE=osm
752 NO_HOST_PORTS=""
753 DOCKER_NOBUILD=""
754 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
755 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
756 OSM_WORK_DIR="/etc/osm"
757 OSM_HELM_WORK_DIR="${OSM_WORK_DIR}/helm"
758 OSM_HOST_VOL="/var/lib/osm"
759 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_NAMESPACE}"
760 OSM_DOCKER_TAG="testing-daily"
761 DOCKER_USER=opensourcemano
762 PULL_IMAGES="y"
763 KAFKA_TAG=2.11-1.0.2
764 KIWIGRID_K8S_SIDECAR_TAG="1.15.6"
765 PROMETHEUS_TAG=v2.28.1
766 GRAFANA_TAG=8.1.1
767 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
768 PROMETHEUS_CADVISOR_TAG=latest
769 KEYSTONEDB_TAG=10
770 OSM_DATABASE_COMMONKEY=
771 ELASTIC_VERSION=6.4.2
772 ELASTIC_CURATOR_VERSION=5.5.4
773 POD_NETWORK_CIDR=10.244.0.0/16
774 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
775 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
776 DOCKER_REGISTRY_URL=
777 DOCKER_PROXY_URL=
778 MODULE_DOCKER_TAG=
779 OSM_INSTALLATION_TYPE="Default"
780
781 while getopts ":a:b:c:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-:hy" o; do
782 case "${o}" in
783 a)
784 APT_PROXY_URL=${OPTARG}
785 ;;
786 b)
787 COMMIT_ID=${OPTARG}
788 PULL_IMAGES=""
789 ;;
790 c)
791 K8S_CLUSTER_ENGINE=${OPTARG}
792 [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ] && continue
793 [ "${K8S_CLUSTER_ENGINE}" == "k3s" ] && continue
794 [ "${K8S_CLUSTER_ENGINE}" == "microk8s" ] && continue
795 echo -e "Invalid argument for -c : ' ${K8S_CLUSTER_ENGINE}'\n" >&2
796 usage && exit 1
797 ;;
798 r)
799 REPOSITORY="${OPTARG}"
800 REPO_ARGS+=(-r "$REPOSITORY")
801 ;;
802 k)
803 REPOSITORY_KEY="${OPTARG}"
804 REPO_ARGS+=(-k "$REPOSITORY_KEY")
805 ;;
806 u)
807 REPOSITORY_BASE="${OPTARG}"
808 REPO_ARGS+=(-u "$REPOSITORY_BASE")
809 ;;
810 R)
811 RELEASE="${OPTARG}"
812 REPO_ARGS+=(-R "$RELEASE")
813 ;;
814 D)
815 OSM_DEVOPS="${OPTARG}"
816 ;;
817 o)
818 INSTALL_ONLY="y"
819 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
820 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
821 ;;
822 O)
823 INSTALL_TO_OPENSTACK="y"
824 if [ -n "${OPTARG}" ]; then
825 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
826 else
827 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
828 usage && exit 1
829 fi
830 ;;
831 f)
832 OPENSTACK_SSH_KEY_FILE="${OPTARG}"
833 ;;
834 F)
835 OPENSTACK_USERDATA_FILE="${OPTARG}"
836 ;;
837 N)
838 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
839 ;;
840 m)
841 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
842 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
843 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
844 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
845 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
846 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
847 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
848 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
849 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
850 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
851 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
852 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
853 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
854 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
855 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
856 ;;
857 H)
858 OSM_VCA_HOST="${OPTARG}"
859 ;;
860 S)
861 OSM_VCA_SECRET="${OPTARG}"
862 ;;
863 s)
864 OSM_NAMESPACE="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 1
865 ;;
866 t)
867 OSM_DOCKER_TAG="${OPTARG}"
868 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
869 ;;
870 U)
871 DOCKER_USER="${OPTARG}"
872 ;;
873 P)
874 OSM_VCA_PUBKEY=$(cat ${OPTARG})
875 ;;
876 A)
877 OSM_VCA_APIPROXY="${OPTARG}"
878 ;;
879 l)
880 LXD_CLOUD_FILE="${OPTARG}"
881 ;;
882 L)
883 LXD_CRED_FILE="${OPTARG}"
884 ;;
885 K)
886 CONTROLLER_NAME="${OPTARG}"
887 ;;
888 d)
889 DOCKER_REGISTRY_URL="${OPTARG}"
890 ;;
891 p)
892 DOCKER_PROXY_URL="${OPTARG}"
893 ;;
894 T)
895 MODULE_DOCKER_TAG="${OPTARG}"
896 ;;
897 -)
898 [ "${OPTARG}" == "help" ] && usage && exit 0
899 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
900 [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
901 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
902 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
903 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
904 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
905 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
906 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
907 [ "${OPTARG}" == "lxd" ] && INSTALL_LXD="y" && continue
908 [ "${OPTARG}" == "nolxd" ] && INSTALL_LXD="" && continue
909 [ "${OPTARG}" == "docker" ] && INSTALL_DOCKER="y" && continue
910 [ "${OPTARG}" == "nodocker" ] && INSTALL_DOCKER="" && continue
911 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
912 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
913 [ "${OPTARG}" == "juju" ] && INSTALL_JUJU="y" && continue
914 [ "${OPTARG}" == "nojuju" ] && INSTALL_JUJU="" && continue
915 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
916 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
917 [ "${OPTARG}" == "pullimages" ] && continue
918 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
919 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
920 [ "${OPTARG}" == "bundle" ] && continue
921 [ "${OPTARG}" == "k8s" ] && continue
922 [ "${OPTARG}" == "lxd-cred" ] && continue
923 [ "${OPTARG}" == "microstack" ] && continue
924 [ "${OPTARG}" == "overlay" ] && continue
925 [ "${OPTARG}" == "only-vca" ] && continue
926 [ "${OPTARG}" == "small-profile" ] && continue
927 [ "${OPTARG}" == "vca" ] && continue
928 [ "${OPTARG}" == "ha" ] && continue
929 [ "${OPTARG}" == "tag" ] && continue
930 [ "${OPTARG}" == "registry" ] && continue
931 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
932 [ "${OPTARG}" == "old-sa" ] && INSTALL_NGSA="" && continue
933 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
934 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
935 [ "${OPTARG}" == "nocachelxdimages" ] && continue
936 [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
937 echo -e "Invalid option: '--$OPTARG'\n" >&2
938 usage && exit 1
939 ;;
940 :)
941 echo "Option -$OPTARG requires an argument" >&2
942 usage && exit 1
943 ;;
944 \?)
945 echo -e "Invalid option: '-$OPTARG'\n" >&2
946 usage && exit 1
947 ;;
948 h)
949 usage && exit 0
950 ;;
951 y)
952 ASSUME_YES="y"
953 ;;
954 *)
955 usage && exit 1
956 ;;
957 esac
958 done
959
960 source $OSM_DEVOPS/common/all_funcs
961
962 [ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
963 [ -n "$SHOWOPTS" ] && dump_vars && exit 0
964
965 # Uninstall if "--uninstall"
966 if [ -n "$UNINSTALL" ]; then
967 if [ -n "$CHARMED" ]; then
968 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $OSM_DOCKER_TAG "$@" || \
969 FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
970 else
971 ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
972 FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
973 fi
974 echo -e "\nDONE"
975 exit 0
976 fi
977
978 # Installation starts here
979
980 # Get README and create OSM_TRACK_INSTALLATION_ID
981 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README.txt &> /dev/null
982 export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
983
984 # Get OS info to be tracked
985 os_distro=$(lsb_release -i 2>/dev/null | awk '{print $3}')
986 echo $os_distro
987 os_release=$(lsb_release -r 2>/dev/null | awk '{print $2}')
988 echo $os_release
989 os_info="${os_distro}_${os_release}"
990 os_info="${os_info// /_}"
991
992 if [ -n "$CHARMED" ]; then
993 # Charmed installation
994 sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
995 ${OSM_DEVOPS}/installers/charmed_install.sh --tag $OSM_DOCKER_TAG "$@" || \
996 FATAL_TRACK charmed_install "charmed_install.sh failed"
997 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README2.txt &> /dev/null
998 echo -e "\nDONE"
999 exit 0
1000 elif [ -n "$INSTALL_TO_OPENSTACK" ]; then
1001 # Installation to Openstack
1002 install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
1003 echo -e "\nDONE"
1004 exit 0
1005 else
1006 # Community_installer
1007 # Check incompatible options
1008 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1009 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1010 # Special cases go first
1011 # if develop, we force master
1012 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1013 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1014 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_NGSA" ] && install_osm_ngsa_service
1015 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1016 # This is where installation starts
1017 install_osm
1018 echo -e "\nDONE"
1019 exit 0
1020 fi