New option -c in full_install_osm to allow different K8s cluster engines
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15
16 function usage(){
17 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
18 echo -e "usage: $0 [OPTIONS]"
19 echo -e "Install OSM"
20 echo -e " OPTIONS"
21 echo -e " -h / --help: print this help"
22 echo -e " -y: do not prompt for confirmation, assumes yes"
23 echo -e " -r <repo>: use specified repository name for osm packages"
24 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
25 echo -e " -u <repo base>: use specified repository url for osm packages"
26 echo -e " -k <repo key>: use specified repository public key url"
27 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
28 echo -e " -b master (main dev branch)"
29 echo -e " -b v2.0 (v2.0 branch)"
30 echo -e " -b tags/v1.1.0 (a specific tag)"
31 echo -e " ..."
32 echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
33 echo -e " -c <kubernetes engine>: use a specific kubernetes engine (options: kubeadm, k3s, microk8s), default is kubeadm"
34 echo -e " -s <namespace> namespace when installed using k8s, default is osm"
35 echo -e " -H <VCA host> use specific juju host controller IP"
36 echo -e " -S <VCA secret> use VCA/juju secret key"
37 echo -e " -P <VCA pubkey> use VCA/juju public key file"
38 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " --old-sa: install old Service Assurance framework (MON, POL); do not install Airflow and Pushgateway"
41 echo -e " --ng-sa: install new Service Assurance framework (Airflow, AlertManager and Pushgateway) (enabled by default)"
42 echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
43 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor, ng-sa)"
44 echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
45 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
46 echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
47 echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
48 echo -e " -D <devops path> use local devops installation path"
49 echo -e " -w <work dir> Location to store runtime installation"
50 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
51 echo -e " -l: LXD cloud yaml file"
52 echo -e " -L: LXD credentials yaml file"
53 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
54 echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
55 echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
56 echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
57 echo -e " --debug: debug mode"
58 echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
59 echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
60 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
61 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
62 echo -e " --nojuju: do not install juju, assumes it is already installed"
63 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
64 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
65 echo -e " --nohostclient: do not install the osmclient"
66 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
67 echo -e " --source: install OSM from source code using the latest stable tag"
68 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
69 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
70 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
71 echo -e " --volume: create a VM volume when installing to OpenStack"
72 echo -e " --showopts: print chosen options and exit (only for debugging)"
73 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
74 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
75 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
76 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
77 echo -e " [--small-profile]: Do not install and configure LXD; intended for deployments that use only K8s clouds (--charmed option)"
78 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
79 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
80 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
81 echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
82 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
83 echo -e " [--tag]: Docker image tag. (--charmed option)"
84 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
85 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
86 }
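# Illustrative invocations (hedged examples only; see usage() above for the full option list,
# and note that cluster setup for k3s/microk8s is expected to be handled by separate installer scripts):
#   ./full_install_osm.sh -y                        # default install, kubeadm-based cluster
#   ./full_install_osm.sh -y -c k3s                 # select k3s as the Kubernetes engine
#   ./full_install_osm.sh -y -c microk8s -s myosm   # select microk8s and a custom namespace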
87
88 # takes a juju/accounts.yaml file and returns the password specific
89 # for a controller. I wrote this using only bash tools to minimize
90 # additions of other packages
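# For reference, a (hypothetical) ~/.local/share/juju/accounts.yaml has roughly this shape,
# and the function below prints the password of the controller whose name matches $1:
#   controllers:
#     osm:
#       user: admin
#       password: s3cr3t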
91 function parse_juju_password {
92 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
93 password_file="${HOME}/.local/share/juju/accounts.yaml"
94 local controller_name=$1
95 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
96 sed -ne "s|^\($s\):|\1|" \
97 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
98 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
99 awk -F$fs -v controller=$controller_name '{
100 indent = length($1)/2;
101 vname[indent] = $2;
102 for (i in vname) {if (i > indent) {delete vname[i]}}
103 if (length($3) > 0) {
104 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
105 if (match(vn,controller) && match($2,"password")) {
106 printf("%s",$3);
107 }
108 }
109 }'
110 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
111 }
112
113 function set_vca_variables() {
114 OSM_VCA_CLOUDNAME="lxd-cloud"
115 [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
116 if [ -z "$OSM_VCA_HOST" ]; then
117 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_NAMESPACE"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
118 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
119 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
120 fi
121 if [ -z "$OSM_VCA_SECRET" ]; then
122 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_NAMESPACE)
123 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
124 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
125 fi
126 if [ -z "$OSM_VCA_PUBKEY" ]; then
127 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
128 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
129 fi
130 if [ -z "$OSM_VCA_CACERT" ]; then
131 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_NAMESPACE '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
132 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
133 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
134 fi
135 }
136
137 function generate_secret() {
138 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
139 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
140 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
141 }
142
143 function check_packages() {
144 NEEDED_PACKAGES="$1"
145 echo -e "Checking required packages: ${NEEDED_PACKAGES}"
146 for PACKAGE in ${NEEDED_PACKAGES} ; do
147 dpkg -L ${PACKAGE}
148 if [ $? -ne 0 ]; then
149 echo -e "Package ${PACKAGE} is not installed."
150 echo -e "Updating apt-cache ..."
151 sudo apt-get update
152 echo -e "Installing ${PACKAGE} ..."
153 sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
154 fi
155 done
156 echo -e "Required packages are present: ${NEEDED_PACKAGES}"
157 }
158
159 function ask_user(){
160 # Ask the user and parse the response among 'y', 'yes', 'n' or 'no'. Case insensitive.
161 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
162 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
163 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
164 read -e -p "$1" USER_CONFIRMATION
165 while true ; do
166 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
167 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
168 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
169 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
170 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
171 done
172 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
173 }
174
175 function install_osmclient(){
176 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
177 CLIENT_RELEASE=${RELEASE#"-R "}
178 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
179 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
180 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
181 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
182 curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
183 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
184 sudo apt-get -y update
185 sudo DEBIAN_FRONTEND=noninteractive apt-get install -y python3-pip
186 sudo -H LC_ALL=C python3 -m pip install -U pip
187 sudo DEBIAN_FRONTEND=noninteractive apt-get install -y python3-osm-im python3-osmclient
188 if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
189 python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
190 fi
191 if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
192 sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libmagic1
193 python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
194 fi
195 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
196 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
197 echo -e "\nOSM client installed"
198 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
199 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
200 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
201 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
202 else
203 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
204 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
205 echo " export OSM_HOSTNAME=<OSM_host>"
206 fi
207 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
208 return 0
209 }
210
211 function docker_login() {
212 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
213 echo "Docker login"
214 [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
215 echo "${DOCKER_REGISTRY_PASSWORD}" | sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
216 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
217 }
218
219 function generate_docker_images() {
220 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
221 echo "Generating docker images"
222 _build_from=$COMMIT_ID
223 [ -z "$_build_from" ] && _build_from="latest"
224 echo "OSM Docker images generated from $_build_from"
225 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
226 trap 'rm -rf "${LWTEMPDIR}"' EXIT
227 for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
228 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
229 module_lower=${module,,}
230 if [ "$module" == "PLA" ] && [ -z "$INSTALL_PLA" ]; then
231 continue
232 fi
233 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
234 git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
235 sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
236 fi
237 done
238 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
239 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
240 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
241 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
242 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
243 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
244 fi
245 echo "Finished generation of docker images"
246 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
247 }
248
249 function cmp_overwrite() {
250 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
251 file1="$1"
252 file2="$2"
253 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
254 if [ -f "${file2}" ]; then
255 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
256 else
257 cp -b ${file1} ${file2}
258 fi
259 fi
260 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
261 }
262
263 #deploys osm pods and services
264 function deploy_osm_services() {
265 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
266 # helm is already installed as part of install_kubeadm_cluster.sh
267
268 # Generate helm values to be passed with -f osm-values.yaml
269 sudo mkdir -p ${OSM_HELM_WORK_DIR}
270 if [ -n "${INSTALL_JUJU}" ]; then
271 sudo bash -c "cat << EOF > ${OSM_HELM_WORK_DIR}/osm-values.yaml
272 vca:
273 pubkey: \"${OSM_VCA_PUBKEY}\"
274 EOF"
275 fi
276
277 # Generate helm values to be passed with --set
278 OSM_HELM_OPTS=""
279 # OSM_HELM_OPTS="${OSM_HELM_OPTS} --set nbi.useOsmSecret=false"
280 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.image.repositoryBase=${DOCKER_REGISTRY_URL}${DOCKER_USER}"
281 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set mysql.dbHostPath=${OSM_NAMESPACE_VOL}"
282 if [ -n "${INSTALL_JUJU}" ]; then
283 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.enabled=true"
284 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.host=${OSM_VCA_HOST}"
285 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.secret=${OSM_VCA_SECRET}"
286 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.cacert=${OSM_VCA_CACERT}"
287 fi
288 [ -n "$OSM_VCA_APIPROXY" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set lcm.config.OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}"
289 [ ! "$OSM_DOCKER_TAG" == "testing-daily" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.image.tag=${OSM_DOCKER_TAG}"
290 [ -n "${INSTALL_NGSA}" ] || OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.oldServiceAssurance=true"
291 if [ -n "${OSM_BEHIND_PROXY}" ]; then
292 OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.behindHttpProxy=true"
293 [ -n "${HTTP_PROXY}" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.HTTP_PROXY=${HTTP_PROXY}"
294 [ -n "${HTTPS_PROXY}" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.HTTPS_PROXY=${HTTPS_PROXY}"
295 [ -n "${NO_PROXY}" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.NO_PROXY=${NO_PROXY}"
296 fi
297
298 if [ -n "${INSTALL_JUJU}" ]; then
299 OSM_HELM_OPTS="-f ${OSM_HELM_WORK_DIR}/osm-values.yaml ${OSM_HELM_OPTS}"
300 fi
301 echo "helm install -n $OSM_NAMESPACE --create-namespace $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm ${OSM_HELM_OPTS}"
302 helm install -n $OSM_NAMESPACE --create-namespace $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm ${OSM_HELM_OPTS}
303 # Append the final values used by helm to install OSM to osm-values.yaml
304 helm -n $OSM_NAMESPACE get values $OSM_NAMESPACE | sudo tee -a ${OSM_HELM_WORK_DIR}/osm-values.yaml
305 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
306 }
307
308 #deploy charmed services
309 function deploy_charmed_services() {
310 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
311 juju add-model $OSM_NAMESPACE $OSM_VCA_K8S_CLOUDNAME
312 juju deploy ch:mongodb-k8s -m $OSM_NAMESPACE --channel latest/stable
313 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
314 }
315
316 #deploy mongodb
317 function deploy_mongodb() {
318 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
319 MONGO_OPTS="-d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} -U ${DOCKER_USER} ${DEBUG_INSTALL}"
320 [ -n "${DOCKER_REGISTRY_URL}" ] && MONGO_OPTS="${MONGO_OPTS} -r ${DOCKER_REGISTRY_URL}"
321 $OSM_DEVOPS/installers/install_mongodb.sh ${MONGO_OPTS} || \
322 FATAL_TRACK install_osm_mongodb_service "install_mongodb.sh failed"
323 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
324 }
325
326 function install_osm_ngsa_service() {
327 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
328 NGSA_OPTS="-d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} -U ${DOCKER_USER} ${DEBUG_INSTALL}"
329 [ -n "${DOCKER_REGISTRY_URL}" ] && NGSA_OPTS="${NGSA_OPTS} -r ${DOCKER_REGISTRY_URL}"
330 $OSM_DEVOPS/installers/install_ngsa.sh ${NGSA_OPTS} || \
331 FATAL_TRACK install_osm_ngsa_service "install_ngsa.sh failed"
332 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
333 }
334
335 function add_local_k8scluster() {
336 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
337 /usr/bin/osm --all-projects vim-create \
338 --name _system-osm-vim \
339 --account_type dummy \
340 --auth_url http://dummy \
341 --user osm --password osm --tenant osm \
342 --description "dummy" \
343 --config '{management_network_name: mgmt}'
344 /usr/bin/osm --all-projects k8scluster-add \
345 --creds ${HOME}/.kube/config \
346 --vim _system-osm-vim \
347 --k8s-nets '{"net1": null}' \
348 --version '1.15' \
349 --description "OSM Internal Cluster" \
350 _system-osm-k8s
351 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
352 }
353
354 function configure_apt_proxy() {
355 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
356 OSM_APT_PROXY=$1
357 OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
358 echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
359 if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
360 sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
361 Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
362 EOF"
363 else
364 sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
365 fi
366 sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
367 track prereq apt_proxy_configured_ok
368 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
369 }
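# For example, with a (placeholder) proxy URL of http://proxy.example.com:3128, the generated
# /etc/apt/apt.conf.d/osm-apt would contain a single line like:
#   Acquire::http { Proxy "http://proxy.example.com:3128"; }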
370
371 function ask_proceed() {
372 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
373
374 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
375 1. Install and configure LXD
376 2. Install juju
377 3. Install docker CE
378 4. Disable swap space
379 5. Install and initialize Kubernetes
380 as pre-requirements.
381 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
382
383 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
384 }
385
386 function check_osm_behind_proxy() {
387 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
388
389 export OSM_BEHIND_PROXY=""
390 export OSM_PROXY_ENV_VARIABLES=""
391 [ -n "${http_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "http_proxy=${http_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} http_proxy"
392 [ -n "${https_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${https_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} https_proxy"
393 [ -n "${HTTP_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTP_PROXY=${HTTP_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTP_PROXY"
394 [ -n "${HTTPS_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTPS_PROXY=${HTTPS_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTPS_PROXY"
395 [ -n "${no_proxy}" ] && echo "no_proxy=${no_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} no_proxy"
396 [ -n "${NO_PROXY}" ] && echo "NO_PROXY=${NO_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} NO_PROXY"
397
398 echo "OSM_BEHIND_PROXY=${OSM_BEHIND_PROXY}"
399 echo "OSM_PROXY_ENV_VARIABLES=${OSM_PROXY_ENV_VARIABLES}"
400
401 if [ -n "${OSM_BEHIND_PROXY}" ]; then
402 [ -z "$ASSUME_YES" ] && ! ask_user "
403 The following env variables have been found for the current user:
404 ${OSM_PROXY_ENV_VARIABLES}.
405
406 This suggests that this machine is behind a proxy and a special configuration is required.
407 The installer will install Docker CE, LXD and Juju to work behind a proxy using those
408 env variables.
409
410 Take into account that the installer uses apt, curl, wget, docker, lxd, juju and snap.
411 Depending on the program, the env variables to work behind a proxy might be different
412 (e.g. http_proxy vs HTTP_PROXY).
413
414 For that reason, it is strongly recommended that at least http_proxy, https_proxy, HTTP_PROXY
415 and HTTPS_PROXY are defined.
416
417 Finally, some of the programs (apt, snap) are run with sudo, requiring that those
418 env variables are also set for the root user. If you are not sure whether those variables
419 are configured for the root user, you can stop the installation now.
420
421 Do you want to proceed with the installation (Y/n)? " y && echo "Cancelled!" && exit 1
422 else
423 echo "This machine is not behind a proxy"
424 fi
425
426 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
427 }
428
429 function find_devops_folder() {
430 if [ -z "$OSM_DEVOPS" ]; then
431 if [ -n "$TEST_INSTALLER" ]; then
432 echo -e "\nUsing local devops repo for OSM installation"
433 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
434 else
435 echo -e "\nCreating temporary dir for OSM installation"
436 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
437 trap 'rm -rf "$OSM_DEVOPS"' EXIT
438 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
439 fi
440 fi
441 }
442
443 function install_osm() {
444 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
445
446 trap ctrl_c INT
447
448
449 check_osm_behind_proxy
450 check_packages "git wget curl tar snapd"
451 if [ -n "${INSTALL_JUJU}" ]; then
452 sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
453 fi
454 find_devops_folder
455
456 track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none os_info $os_info none none
457
458 track checks checkingroot_ok
459 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
460 track checks noroot_ok
461 ask_proceed
462 track checks proceed_ok
463
464 echo "Installing OSM"
465
466 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
467
468 echo "Determining IP address of the interface with the default route"
469 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
470 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
471 [ -z "$OSM_DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
472 OSM_DEFAULT_IP=`ip -o -4 a s ${OSM_DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
473 [ -z "$OSM_DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
474
475 # configure apt proxy
476 [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL
477
478 # if lxd is requested, we will install it
479 if [ -n "$INSTALL_LXD" ]; then
480 LXD_INSTALL_OPTS="-D ${OSM_DEVOPS} -i ${OSM_DEFAULT_IF} ${DEBUG_INSTALL}"
481 [ -n "${OSM_BEHIND_PROXY}" ] && LXD_INSTALL_OPTS="${LXD_INSTALL_OPTS} -P"
482 $OSM_DEVOPS/installers/install_lxd.sh ${LXD_INSTALL_OPTS} || FATAL_TRACK lxd "install_lxd.sh failed"
483 fi
484
485 track prereq prereqok_ok
486
487 if [ -n "$INSTALL_DOCKER" ] || [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
488 DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
489 [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
490 [ -n "${OSM_BEHIND_PROXY}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -P"
491 $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed"
492 fi
493
494 track docker_ce docker_ce_ok
495
496 if [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
497 $OSM_DEVOPS/installers/install_kubeadm_cluster.sh -i ${OSM_DEFAULT_IP} -d ${OSM_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
498 FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed"
499 track k8scluster k8scluster_ok
500 fi
501
502 if [ -n "${INSTALL_JUJU}" ]; then
503 JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_NAMESPACE} -i ${OSM_DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_CACHELXDIMAGES}"
504 [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
505 [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
506 [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
507 [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
508 [ -n "${OSM_BEHIND_PROXY}" ] && JUJU_OPTS="${JUJU_OPTS} -P"
509 $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
510 set_vca_variables
511 fi
512 track juju juju_ok
513
514 # Deploy OSM services
515 [ -z "$DOCKER_NOBUILD" ] && [ -z "$PULL_IMAGES" ] && generate_docker_images
516 track docker_images docker_images_ok
517
518 deploy_mongodb
519 track deploy_osm deploy_mongodb_ok
520 deploy_osm_services
521 track deploy_osm deploy_osm_services_k8s_ok
522 if [ -n "$INSTALL_K8S_MONITOR" ]; then
523 # install OSM MONITORING
524 install_k8s_monitoring
525 track deploy_osm install_k8s_monitoring_ok
526 fi
527 if [ -n "$INSTALL_NGSA" ]; then
528 # optional NGSA install
529 install_osm_ngsa_service
530 track deploy_osm install_osm_ngsa_ok
531 fi
532
533 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
534 track osmclient osmclient_ok
535
536 echo -e "Checking OSM health state..."
537 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_NAMESPACE} -k || \
538 (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
539 echo -e "Check OSM status with: kubectl -n ${OSM_NAMESPACE} get all" && \
540 track healthchecks osm_unhealthy didnotconverge)
541 track healthchecks after_healthcheck_ok
542
543 add_local_k8scluster
544 track final_ops add_local_k8scluster_ok
545
546 # if lxd is requested, iptables firewall is updated to work with both docker and LXD
547 if [ -n "$INSTALL_LXD" ]; then
548 arrange_docker_default_network_policy
549 fi
550
551 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README2.txt &> /dev/null
552 track end
553 sudo find /etc/osm
554 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
555 return 0
556 }
557
558 function install_to_openstack() {
559 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
560
561 if [ -z "$2" ]; then
562 FATAL "OpenStack installer requires a valid external network name"
563 fi
564
565 # Install Pip for Python3
566 sudo apt install -y python3-pip python3-venv
567 sudo -H LC_ALL=C python3 -m pip install -U pip
568
569 # Create a venv to avoid conflicts with the host installation
570 python3 -m venv $OPENSTACK_PYTHON_VENV
571
572 source $OPENSTACK_PYTHON_VENV/bin/activate
573
574 # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
575 python -m pip install -U wheel
576 python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
577
578 # Install the Openstack cloud module (ansible>=2.10)
579 ansible-galaxy collection install openstack.cloud
580
581 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
582
583 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
584
585 ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
586
587 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
588 ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
589 fi
590
591 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
592 ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
593 fi
594
595 # Execute the Ansible playbook based on openrc or clouds.yaml
596 if [ -e "$1" ]; then
597 . $1
598 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
599 $OSM_DEVOPS/installers/openstack/site.yml
600 else
601 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
602 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
603 fi
604
605 # Exit from venv
606 deactivate
607
608 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
609 return 0
610 }
611
612 function arrange_docker_default_network_policy() {
613 echo -e "Fixing firewall so docker and LXD can share the same host without affecting each other."
614 sudo iptables -I DOCKER-USER -j ACCEPT
615 sudo iptables-save | sudo tee /etc/iptables/rules.v4
616 sudo ip6tables-save | sudo tee /etc/iptables/rules.v6
617 }
618
619 function install_k8s_monitoring() {
620 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
621 # install OSM monitoring
622 sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh
623 sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
624 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
625 }
626
627 function dump_vars(){
628 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
629 echo "APT_PROXY_URL=$APT_PROXY_URL"
630 echo "K8S_CLUSTER_ENGINE=$K8S_CLUSTER_ENGINE"
631 echo "DEVELOP=$DEVELOP"
632 echo "DEBUG_INSTALL=$DEBUG_INSTALL"
633 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
634 echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
635 echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
636 echo "DOCKER_USER=$DOCKER_USER"
637 echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
638 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
639 echo "INSTALL_JUJU=$INSTALL_JUJU"
640 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
641 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
642 echo "INSTALL_LXD=$INSTALL_LXD"
643 echo "INSTALL_NGSA=$INSTALL_NGSA"
644 echo "INSTALL_DOCKER=$INSTALL_DOCKER"
645 echo "INSTALL_ONLY=$INSTALL_ONLY"
646 echo "INSTALL_PLA=$INSTALL_PLA"
647 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
648 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
649 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
650 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
651 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
652 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
653 echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE"
654 echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE"
655 echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME"
656 echo "OSM_DEVOPS=$OSM_DEVOPS"
657 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
658 echo "OSM_HELM_WORK_DIR=$OSM_HELM_WORK_DIR"
659 echo "OSM_NAMESPACE=$OSM_NAMESPACE"
660 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
661 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
662 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
663 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
664 echo "PULL_IMAGES=$PULL_IMAGES"
665 echo "RECONFIGURE=$RECONFIGURE"
666 echo "RELEASE=$RELEASE"
667 echo "REPOSITORY=$REPOSITORY"
668 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
669 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
670 echo "SHOWOPTS=$SHOWOPTS"
671 echo "TEST_INSTALLER=$TEST_INSTALLER"
672 echo "TO_REBUILD=$TO_REBUILD"
673 echo "UNINSTALL=$UNINSTALL"
674 echo "UPDATE=$UPDATE"
675 echo "Install from specific refspec (-b): $COMMIT_ID"
676 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
677 }
678
679 function parse_docker_registry_url() {
680 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
681 DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
682 DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
683 DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
684 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
685 }
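# For example, a (hypothetical) -d value of "user:pass@registry.example.com:5000" would yield
#   DOCKER_REGISTRY_USER=user
#   DOCKER_REGISTRY_PASSWORD=pass
#   DOCKER_REGISTRY_URL=registry.example.com:5000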
686
687 function ctrl_c() {
688 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
689 echo "** Trapped CTRL-C"
690 FATAL "User stopped the installation"
691 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
692 }
693
694 UNINSTALL=""
695 DEVELOP=""
696 UPDATE=""
697 RECONFIGURE=""
698 TEST_INSTALLER=""
699 INSTALL_LXD=""
700 SHOWOPTS=""
701 COMMIT_ID=""
702 ASSUME_YES=""
703 APT_PROXY_URL=""
704 K8S_CLUSTER_ENGINE="kubeadm"
705 INSTALL_FROM_SOURCE=""
706 DEBUG_INSTALL=""
707 RELEASE="ReleaseTEN"
708 REPOSITORY="stable"
709 INSTALL_K8S_MONITOR=""
710 INSTALL_NGSA="y"
711 INSTALL_PLA=""
712 INSTALL_VIMEMU=""
713 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
714 LXD_REPOSITORY_PATH=""
715 INSTALL_LIGHTWEIGHT="y"
716 INSTALL_TO_OPENSTACK=""
717 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
718 OPENSTACK_PUBLIC_NET_NAME=""
719 OPENSTACK_ATTACH_VOLUME="false"
720 OPENSTACK_SSH_KEY_FILE=""
721 OPENSTACK_USERDATA_FILE=""
722 OPENSTACK_VM_NAME="server-osm"
723 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
724 INSTALL_ONLY=""
725 TO_REBUILD=""
726 INSTALL_DOCKER="y"
727 INSTALL_JUJU=""
728 INSTALL_NOHOSTCLIENT=""
729 INSTALL_CACHELXDIMAGES=""
730 OSM_DEVOPS=
731 OSM_VCA_HOST=
732 OSM_VCA_SECRET=
733 OSM_VCA_PUBKEY=
734 OSM_VCA_CLOUDNAME="localhost"
735 OSM_VCA_K8S_CLOUDNAME="k8scloud"
736 OSM_NAMESPACE=osm
737 NO_HOST_PORTS=""
738 DOCKER_NOBUILD=""
739 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
740 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
741 OSM_WORK_DIR="/etc/osm"
742 OSM_HELM_WORK_DIR="${OSM_WORK_DIR}/helm"
743 OSM_HOST_VOL="/var/lib/osm"
744 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_NAMESPACE}"
745 OSM_DOCKER_TAG="testing-daily"
746 DOCKER_USER=opensourcemano
747 PULL_IMAGES="y"
748 KAFKA_TAG=2.11-1.0.2
749 KIWIGRID_K8S_SIDECAR_TAG="1.15.6"
750 PROMETHEUS_TAG=v2.28.1
751 GRAFANA_TAG=8.1.1
752 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
753 PROMETHEUS_CADVISOR_TAG=latest
754 KEYSTONEDB_TAG=10
755 OSM_DATABASE_COMMONKEY=
756 ELASTIC_VERSION=6.4.2
757 ELASTIC_CURATOR_VERSION=5.5.4
758 POD_NETWORK_CIDR=10.244.0.0/16
759 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
760 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
761 DOCKER_REGISTRY_URL=
762 DOCKER_PROXY_URL=
763 MODULE_DOCKER_TAG=
764 OSM_INSTALLATION_TYPE="Default"
765
766 while getopts ":a:b:c:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
767 case "${o}" in
768 a)
769 APT_PROXY_URL=${OPTARG}
770 ;;
771 b)
772 COMMIT_ID=${OPTARG}
773 PULL_IMAGES=""
774 ;;
775 c)
776 K8S_CLUSTER_ENGINE=${OPTARG}
777 [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ] && continue
778 [ "${K8S_CLUSTER_ENGINE}" == "k3s" ] && continue
779 [ "${K8S_CLUSTER_ENGINE}" == "microk8s" ] && continue
780 echo -e "Invalid argument for -c: '${K8S_CLUSTER_ENGINE}'\n" >&2
781 usage && exit 1
782 ;;
783 r)
784 REPOSITORY="${OPTARG}"
785 REPO_ARGS+=(-r "$REPOSITORY")
786 ;;
787 k)
788 REPOSITORY_KEY="${OPTARG}"
789 REPO_ARGS+=(-k "$REPOSITORY_KEY")
790 ;;
791 u)
792 REPOSITORY_BASE="${OPTARG}"
793 REPO_ARGS+=(-u "$REPOSITORY_BASE")
794 ;;
795 R)
796 RELEASE="${OPTARG}"
797 REPO_ARGS+=(-R "$RELEASE")
798 ;;
799 D)
800 OSM_DEVOPS="${OPTARG}"
801 ;;
802 o)
803 INSTALL_ONLY="y"
804 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
805 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
806 ;;
807 O)
808 INSTALL_TO_OPENSTACK="y"
809 if [ -n "${OPTARG}" ]; then
810 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
811 else
812 echo -e "Invalid argument for -O: '$OPTARG'\n" >&2
813 usage && exit 1
814 fi
815 ;;
816 f)
817 OPENSTACK_SSH_KEY_FILE="${OPTARG}"
818 ;;
819 F)
820 OPENSTACK_USERDATA_FILE="${OPTARG}"
821 ;;
822 N)
823 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
824 ;;
825 m)
826 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
827 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
828 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
829 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
830 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
831 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
832 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
833 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
834 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
835 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
836 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
837 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
838 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
839 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
840 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
841 ;;
842 H)
843 OSM_VCA_HOST="${OPTARG}"
844 ;;
845 S)
846 OSM_VCA_SECRET="${OPTARG}"
847 ;;
848 s)
849 OSM_NAMESPACE="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 1
850 ;;
851 t)
852 OSM_DOCKER_TAG="${OPTARG}"
853 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
854 ;;
855 U)
856 DOCKER_USER="${OPTARG}"
857 ;;
858 P)
859 OSM_VCA_PUBKEY=$(cat ${OPTARG})
860 ;;
861 A)
862 OSM_VCA_APIPROXY="${OPTARG}"
863 ;;
864 l)
865 LXD_CLOUD_FILE="${OPTARG}"
866 ;;
867 L)
868 LXD_CRED_FILE="${OPTARG}"
869 ;;
870 K)
871 CONTROLLER_NAME="${OPTARG}"
872 ;;
873 d)
874 DOCKER_REGISTRY_URL="${OPTARG}"
875 ;;
876 p)
877 DOCKER_PROXY_URL="${OPTARG}"
878 ;;
879 T)
880 MODULE_DOCKER_TAG="${OPTARG}"
881 ;;
882 -)
883 [ "${OPTARG}" == "help" ] && usage && exit 0
884 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
885 [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
886 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
887 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
888 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
889 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
890 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
891 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
892 [ "${OPTARG}" == "lxd" ] && INSTALL_LXD="y" && continue
893 [ "${OPTARG}" == "nolxd" ] && INSTALL_LXD="" && continue
894 [ "${OPTARG}" == "docker" ] && INSTALL_DOCKER="y" && continue
895 [ "${OPTARG}" == "nodocker" ] && INSTALL_DOCKER="" && continue
896 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
897 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
898 [ "${OPTARG}" == "juju" ] && INSTALL_JUJU="y" && continue
899 [ "${OPTARG}" == "nojuju" ] && INSTALL_JUJU="" && continue
900 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
901 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
902 [ "${OPTARG}" == "pullimages" ] && continue
903 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
904 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
905 [ "${OPTARG}" == "bundle" ] && continue
906 [ "${OPTARG}" == "k8s" ] && continue
907 [ "${OPTARG}" == "lxd-cred" ] && continue
908 [ "${OPTARG}" == "microstack" ] && continue
909 [ "${OPTARG}" == "overlay" ] && continue
910 [ "${OPTARG}" == "only-vca" ] && continue
911 [ "${OPTARG}" == "small-profile" ] && continue
912 [ "${OPTARG}" == "vca" ] && continue
913 [ "${OPTARG}" == "ha" ] && continue
914 [ "${OPTARG}" == "tag" ] && continue
915 [ "${OPTARG}" == "registry" ] && continue
916 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
917 [ "${OPTARG}" == "old-sa" ] && INSTALL_NGSA="" && continue
918 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
919 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
920 [ "${OPTARG}" == "nocachelxdimages" ] && continue
921 [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
922 echo -e "Invalid option: '--$OPTARG'\n" >&2
923 usage && exit 1
924 ;;
925 :)
926 echo "Option -$OPTARG requires an argument" >&2
927 usage && exit 1
928 ;;
929 \?)
930 echo -e "Invalid option: '-$OPTARG'\n" >&2
931 usage && exit 1
932 ;;
933 h)
934 usage && exit 0
935 ;;
936 y)
937 ASSUME_YES="y"
938 ;;
939 *)
940 usage && exit 1
941 ;;
942 esac
943 done
944
945 source $OSM_DEVOPS/common/all_funcs
946
947 [ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
948 [ -n "$SHOWOPTS" ] && dump_vars && exit 0
949
950 # Uninstall if "--uninstall"
951 if [ -n "$UNINSTALL" ]; then
952 if [ -n "$CHARMED" ]; then
953 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $OSM_DOCKER_TAG "$@" || \
954 FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
955 else
956 ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
957 FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
958 fi
959 echo -e "\nDONE"
960 exit 0
961 fi
962
963 # Installation starts here
964
965 # Get README and create OSM_TRACK_INSTALLATION_ID
966 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README.txt &> /dev/null
967 export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
968
969 # Get OS info to be tracked
970 os_distro=$(lsb_release -i 2>/dev/null | awk '{print $3}')
971 echo $os_distro
972 os_release=$(lsb_release -r 2>/dev/null | awk '{print $2}')
973 echo $os_release
974 os_info="${os_distro}_${os_release}"
975 os_info="${os_info// /_}"
976
977 if [ -n "$CHARMED" ]; then
978 # Charmed installation
979 sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
980 ${OSM_DEVOPS}/installers/charmed_install.sh --tag $OSM_DOCKER_TAG "$@" || \
981 FATAL_TRACK charmed_install "charmed_install.sh failed"
982 wget -q -O- https://osm-download.etsi.org/ftp/osm-15.0-fifteen/README2.txt &> /dev/null
983 echo -e "\nDONE"
984 exit 0
985 elif [ -n "$INSTALL_TO_OPENSTACK" ]; then
986 # Installation to Openstack
987 install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
988 echo -e "\nDONE"
989 exit 0
990 else
991 # Community_installer
992 # Check incompatible options
993 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
994 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
995 # Special cases go first
996 # if develop, we force master
997 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
998 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
999 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_NGSA" ] && install_osm_ngsa_service
1000 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1001 # This is where installation starts
1002 install_osm
1003 echo -e "\nDONE"
1004 exit 0
1005 fi