Added ctrl_c function to trap user installation interruptions
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15
16 function usage(){
17 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
18 echo -e "usage: $0 [OPTIONS]"
19 echo -e "Install OSM from binaries or source code (by default, from binaries)"
20 echo -e " OPTIONS"
21 echo -e " -h / --help: print this help"
22 echo -e " -y: do not prompt for confirmation, assumes yes"
23 echo -e " -r <repo>: use specified repository name for osm packages"
24 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
25 echo -e " -u <repo base>: use specified repository url for osm packages"
26 echo -e " -k <repo key>: use specified repository public key url"
27 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
28 echo -e " -b master (main dev branch)"
29 echo -e " -b v2.0 (v2.0 branch)"
30 echo -e " -b tags/v1.1.0 (a specific tag)"
31 echo -e " ..."
32 echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
echo -e " -s <stack name> or <namespace>: user-defined stack name when installed using swarm, or namespace when installed using k8s (default: osm)"
34 echo -e " -H <VCA host> use specific juju host controller IP"
35 echo -e " -S <VCA secret> use VCA/juju secret key"
36 echo -e " -P <VCA pubkey> use VCA/juju public key file"
37 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
38 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
39 echo -e " --pla: install the PLA module for placement support"
echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, osmclient, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, GRAFANA, NONE)"
41 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor)"
42 echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
43 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
44 echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
45 echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
46 echo -e " -D <devops path> use local devops installation path"
47 echo -e " -w <work dir> Location to store runtime installation"
48 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
49 echo -e " -l: LXD cloud yaml file"
50 echo -e " -L: LXD credentials yaml file"
51 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
52 echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
53 echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
54 echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
55 echo -e " --debug: debug mode"
56 echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
57 echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
59 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
echo -e " --nojuju: do not install juju, assumes it is already installed"
echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
62 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
63 echo -e " --nohostclient: do not install the osmclient"
64 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
65 echo -e " --source: install OSM from source code using the latest stable tag"
66 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
67 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
68 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
69 echo -e " --volume: create a VM volume when installing to OpenStack"
70 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
71 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
72 echo -e " --showopts: print chosen options and exit (only for debugging)"
73 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
74 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
75 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
76 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
77 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
78 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
79 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
80 echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
81 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
82 echo -e " [--tag]: Docker image tag. (--charmed option)"
83 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
84 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
85 }
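# Illustrative invocations (assumed typical usage; see the option list above):
#   ./full_install_osm.sh                 install the latest stable release from binaries
#   ./full_install_osm.sh -y -t 11        unattended install using docker tag 11
#   ./full_install_osm.sh -b master       install from source code using the master branch
#   ./full_install_osm.sh --charmed --k8s ~/.kube/config   charmed install on an existing k8s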
86
# takes the juju accounts.yaml file and returns the password for a specific
# controller. Implemented with plain bash/sed/awk only, to avoid pulling in
# extra packages.
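# Illustrative (assumed) shape of ${HOME}/.local/share/juju/accounts.yaml that it walks:
#   controllers:
#     osm:
#       user: admin
#       password: <secret>
# The awk part tracks indentation to rebuild the key path and prints the value whose
# path matches "<controller>...password".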
90 function parse_juju_password {
91 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
92 password_file="${HOME}/.local/share/juju/accounts.yaml"
93 local controller_name=$1
94 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
95 sed -ne "s|^\($s\):|\1|" \
96 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
97 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
98 awk -F$fs -v controller=$controller_name '{
99 indent = length($1)/2;
100 vname[indent] = $2;
101 for (i in vname) {if (i > indent) {delete vname[i]}}
102 if (length($3) > 0) {
103 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
104 if (match(vn,controller) && match($2,"password")) {
105 printf("%s",$3);
106 }
107 }
108 }'
109 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
110 }
111
112 function set_vca_variables() {
113 OSM_VCA_CLOUDNAME="lxd-cloud"
114 [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
115 if [ -z "$OSM_VCA_HOST" ]; then
116 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
117 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
118 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
119 fi
120 if [ -z "$OSM_VCA_SECRET" ]; then
121 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
122 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
123 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
124 fi
125 if [ -z "$OSM_VCA_PUBKEY" ]; then
126 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
127 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
128 fi
129 if [ -z "$OSM_VCA_CACERT" ]; then
130 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
131 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
132 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
133 fi
134 }
135
136 function generate_secret() {
137 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
138 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
139 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
140 }
141
142 function check_packages() {
143 NEEDED_PACKAGES="$1"
144 echo -e "Checking required packages: ${NEEDED_PACKAGES}"
145 for PACKAGE in ${NEEDED_PACKAGES} ; do
146 dpkg -L ${PACKAGE} &>/dev/null
147 if [ $? -ne 0 ]; then
148 echo -e "Package ${PACKAGE} is not installed. Updating apt"
149 sudo apt-get update
150 sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
151 fi
152 done
153 echo -e "Required packages are present: ${NEEDED_PACKAGES}"
154 }
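# Example (as used later in the main flow): check_packages "git wget curl tar"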
155
156 function install_lxd() {
157 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
158 # Apply sysctl production values for optimal performance
159 sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
160 sudo sysctl --system
161
162 # Install LXD snap
163 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
164 sudo snap install lxd --channel $LXD_VERSION/stable
165
166 # Configure LXD
167 sudo usermod -a -G lxd `whoami`
168 cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
169 sg lxd -c "lxd waitready"
170 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
171 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
172 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
173 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
174 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
175 sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
176 #sudo systemctl stop lxd-bridge
177 #sudo systemctl --system daemon-reload
178 #sudo systemctl enable lxd-bridge
179 #sudo systemctl start lxd-bridge
180 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
181 }
182
183 function ask_user(){
184 # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
185 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
186 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
187 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
188 read -e -p "$1" USER_CONFIRMATION
189 while true ; do
190 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
191 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
192 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
193 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
194 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
195 done
196 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
197 }
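# Example: ask_user "Do you want to proceed (Y/n)? " y
# returns 0 for 'y'/'yes' or an empty answer (default 'y'), and 1 for 'n'/'no'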
198
199 function install_osmclient(){
200 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
201 CLIENT_RELEASE=${RELEASE#"-R "}
202 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
203 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
204 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
205 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
206 curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
207 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
208 sudo apt-get update
209 sudo apt-get install -y python3-pip
210 sudo -H LC_ALL=C python3 -m pip install -U pip
211 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
212 sudo apt-get install -y python3-osm-im python3-osmclient
213 if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
214 python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
215 fi
216 if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
217 sudo apt-get install -y libcurl4-openssl-dev libssl-dev
218 python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
219 fi
220 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
221 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
222 echo -e "\nOSM client installed"
223 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
224 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
225 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
226 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
227 else
228 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
229 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
230 echo " export OSM_HOSTNAME=<OSM_host>"
231 fi
232 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
233 return 0
234 }
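# Quick sanity check of the client (illustrative): 'osm version' should print the client
# version, and 'osm ns-list' should reach NBI once OSM_HOSTNAME is set accordingly.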
235
236 function docker_login() {
237 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
238 echo "Docker login"
239 [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
echo "${DOCKER_REGISTRY_PASSWORD}" | sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
241 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
242 }
243
244 function generate_docker_images() {
245 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
246 echo "Pulling and generating docker images"
247 [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login
248
249 echo "Pulling docker images"
250
251 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
252 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
253 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
254 fi
255
256 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
257 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
258 fi
259
260 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
261 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
262 fi
263
264 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
265 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
266 fi
267
268 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
269 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
270 fi
271
272 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
273 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
274 fi
275
276 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
277 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
278 fi
279
280 if [ -n "$PULL_IMAGES" ]; then
281 echo "Pulling OSM docker images"
282 for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
283 module_lower=${module,,}
if [ "$module" == "PLA" ] && [ -z "$INSTALL_PLA" ]; then
285 continue
286 fi
287 module_tag="${OSM_DOCKER_TAG}"
288 if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
289 module_tag="${MODULE_DOCKER_TAG}"
290 fi
291 echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
292 sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
293 done
294 else
295 _build_from=$COMMIT_ID
296 [ -z "$_build_from" ] && _build_from="latest"
297 echo "OSM Docker images generated from $_build_from"
298 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
299 trap 'rm -rf "${LWTEMPDIR}"' EXIT
300 for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
301 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
302 module_lower=${module,,}
if [ "$module" == "PLA" ] && [ -z "$INSTALL_PLA" ]; then
304 continue
305 fi
306 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
307 git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
308 sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
309 fi
310 done
311 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
312 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
313 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
314 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
315 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
316 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
317 fi
318 echo "Finished generation of docker images"
319 fi
320
321 echo "Finished pulling and generating docker images"
322 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
323 }
324
325 function cmp_overwrite() {
326 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
327 file1="$1"
328 file2="$2"
if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
330 if [ -f "${file2}" ]; then
331 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
332 else
333 cp -b ${file1} ${file2}
334 fi
335 fi
336 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
337 }
338
339 function generate_k8s_manifest_files() {
340 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
341 #Kubernetes resources
342 sudo cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
343 sudo rm -f $OSM_K8S_WORK_DIR/mongo.yaml
344 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
345 }
346
347 function generate_prometheus_grafana_files() {
348 #this only works with docker swarm
349 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
350 # Prometheus files
351 sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
352 sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
353
354 # Grafana files
355 sudo mkdir -p $OSM_DOCKER_WORK_DIR/grafana
356 sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
357 sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
358 sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
359 sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
360
361 # Prometheus Exporters files
362 sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
363 sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
364 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
365 }
366
367 function generate_docker_env_files() {
368 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
369 echo "Doing a backup of existing env files"
370 sudo cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
371 sudo cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
372 sudo cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
373 sudo cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
374 sudo cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
375 sudo cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
376 sudo cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
377 sudo cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
378
379 echo "Generating docker env files"
380 # LCM
381 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
382 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
383 fi
384
385 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
386 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
387 else
388 sudo sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
389 fi
390
391 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
392 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
393 else
394 sudo sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
395 fi
396
397 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
398 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
399 else
400 sudo sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
401 fi
402
403 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
404 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
405 else
406 sudo sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
407 fi
408
409 if [ -n "$OSM_VCA_APIPROXY" ]; then
410 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
411 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
412 else
413 sudo sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
414 fi
415 fi
416
417 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
418 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
419 fi
420
421 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
422 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
423 fi
424
425 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
426 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
427 else
428 sudo sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
429 fi
430
431 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
432 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
433 else
434 sudo sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
435 fi
436
437 # RO
438 MYSQL_ROOT_PASSWORD=$(generate_secret)
439 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
440 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro-db.env
441 fi
442 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
443 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro.env
444 fi
445 if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
446 echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ro.env
447 fi
448
449 # Keystone
450 KEYSTONE_DB_PASSWORD=$(generate_secret)
451 SERVICE_PASSWORD=$(generate_secret)
452 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
453 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone-db.env
454 fi
455 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
456 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone.env
457 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
458 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
459 fi
460
461 # NBI
462 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
463 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/nbi.env
464 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/nbi.env
465 fi
466
467 # MON
468 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
469 echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
470 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
471 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
472 fi
473
474 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
475 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
476 else
477 sudo sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
478 fi
479
480 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
481 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
482 else
483 sudo sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
484 fi
485
486 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
487 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
488 else
489 sudo sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
490 fi
491
492 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
493 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
494 else
495 sudo sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
496 fi
497
498
499 # POL
500 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
501 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | sudo tee -a $OSM_DOCKER_WORK_DIR/pol.env
502 fi
503
504 echo "Finished generation of docker env files"
505 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
506 }
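# Illustrative excerpt of the resulting lcm.env (actual values are generated/derived above):
#   OSMLCM_DATABASE_COMMONKEY=<32-char secret>
#   OSMLCM_VCA_HOST=<juju controller IP>
#   OSMLCM_VCA_SECRET=<juju password>
#   OSMLCM_VCA_CLOUD=lxd-cloud
#   OSMLCM_VCA_K8S_CLOUD=k8scloud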
507
508 #creates secrets from env files which will be used by containers
509 function kube_secrets(){
510 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
511 kubectl create ns $OSM_STACK_NAME
512 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
513 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
514 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
515 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
516 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
517 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
518 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
519 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
520 }
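# The created secrets can be inspected afterwards with, e.g.:
#   kubectl -n $OSM_STACK_NAME get secrets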
521
522 #deploys osm pods and services
523 function deploy_osm_services() {
524 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
525 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
526 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
527 }
528
529 #deploy charmed services
530 function deploy_charmed_services() {
531 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
532 juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
533 juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
534 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
535 }
536
537 function deploy_osm_pla_service() {
538 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
539 # corresponding to namespace_vol
540 sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
541 # corresponding to deploy_osm_services
542 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
543 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
544 }
545
546 function parse_yaml() {
547 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
548 TAG=$1
549 shift
550 services=$@
551 for module in $services; do
552 if [ "$module" == "pla" ]; then
553 if [ -n "$INSTALL_PLA" ]; then
554 echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
555 sudo sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
556 fi
557 else
558 echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
559 sudo sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
560 fi
561 done
562 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
563 }
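# Example of the substitution performed (assuming an empty DOCKER_REGISTRY_URL and the
# default DOCKER_USER=opensourcemano):
#   image: opensourcemano/lcm:<old tag>   becomes   image: opensourcemano/lcm:<TAG>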
564
function update_manifest_files() {
[ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
osm_services="nbi lcm ro pol mon ng-ui keystone pla prometheus"
list_of_services=""
# services selected with -m are tagged with the value given with -T (MODULE_DOCKER_TAG)
list_of_services_to_rebuild=""
for module in $osm_services; do
module_upper="${module^^}"
if ! echo $TO_REBUILD | grep -q $module_upper ; then
list_of_services="$list_of_services $module"
else
list_of_services_to_rebuild="$list_of_services_to_rebuild $module"
fi
done
if [ ! "$OSM_DOCKER_TAG" == "11" ]; then
parse_yaml $OSM_DOCKER_TAG $list_of_services
fi
if [ -n "$MODULE_DOCKER_TAG" ]; then
parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
fi
[ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
582
583 function namespace_vol() {
584 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
585 osm_services="nbi lcm ro pol mon kafka mysql prometheus"
586 for osm in $osm_services; do
587 sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
588 done
589 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
590 }
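# Example: with the default OSM_NAMESPACE_VOL=/var/lib/osm/osm, a manifest line
#   path: /var/lib/osm   becomes   path: /var/lib/osm/osm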
591
592 function add_local_k8scluster() {
593 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
594 /usr/bin/osm --all-projects vim-create \
595 --name _system-osm-vim \
596 --account_type dummy \
597 --auth_url http://dummy \
598 --user osm --password osm --tenant osm \
599 --description "dummy" \
600 --config '{management_network_name: mgmt}'
601 /usr/bin/osm --all-projects k8scluster-add \
602 --creds ${HOME}/.kube/config \
603 --vim _system-osm-vim \
604 --k8s-nets '{"net1": null}' \
605 --version '1.15' \
606 --description "OSM Internal Cluster" \
607 _system-osm-k8s
608 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
609 }
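# Once registered, the internal cluster should be listed by (illustrative check):
#   osm k8scluster-list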
610
611 function configure_apt_proxy() {
612 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
613 OSM_APT_PROXY=$1
614 OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
615 echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
616 if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
618 Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
619 EOF"
620 else
621 sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
622 fi
623 sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
624 track prereq apt_proxy_configured_ok
625 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
626 }
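# The resulting ${OSM_APT_PROXY_FILE} holds a single directive, e.g. (example proxy URL):
#   Acquire::http { Proxy "http://proxy.example.com:3128"; }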
627
628 function install_osm() {
629 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
630
631 trap ctrl_c INT
632 track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
633
634 track checks checkingroot_ok
635 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
636 track checks noroot_ok
637
638 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
639 1. Install and configure LXD
640 2. Install juju
641 3. Install docker CE
642 4. Disable swap space
643 5. Install and initialize Kubernetes
644 as pre-requirements.
645 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
646 track checks proceed_ok
647
648 echo "Installing OSM"
649
650 echo "Determining IP address of the interface with the default route"
651 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
652 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
653 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
654 DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
655 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
656
657 # configure apt proxy
658 [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL
659
# if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
661 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
662 check_packages "snapd"
663 install_lxd
664 fi
665
666 track prereq prereqok_ok
667
668 if [ ! -n "$INSTALL_NODOCKER" ]; then
669 DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
670 [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
671 $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed"
672 fi
673
674 track docker_ce docker_ce_ok
675
676 echo "Creating folders for installation"
677 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR
678 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
679 sudo cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
680
681 $OSM_DEVOPS/installers/install_kubeadm_cluster.sh -i ${DEFAULT_IP} -d ${OSM_DOCKER_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
682 FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed"
683 track k8scluster k8scluster_ok
684
685 JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_STACK_NAME} -i ${DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}"
686 [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
687 [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
688 [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
689 [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
690 $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
691 set_vca_variables
692 track juju juju_ok
693
694 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
695 OSM_DATABASE_COMMONKEY=$(generate_secret)
[ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
697 fi
698
699 # Deploy OSM services
700 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
701 track docker_images docker_images_ok
702
703 generate_k8s_manifest_files
704 track osm_files manifest_files_ok
705 generate_docker_env_files
706 track osm_files env_files_ok
707
708 deploy_charmed_services
709 track deploy_osm deploy_charmed_services_ok
710 kube_secrets
711 track deploy_osm kube_secrets_ok
712 update_manifest_files
713 track deploy_osm update_manifest_files_ok
714 namespace_vol
715 track deploy_osm namespace_vol_ok
716 deploy_osm_services
717 track deploy_osm deploy_osm_services_k8s_ok
if [ -n "$INSTALL_PLA" ]; then
719 # optional PLA install
720 deploy_osm_pla_service
721 track deploy_osm deploy_osm_pla_ok
722 fi
723 if [ -n "$INSTALL_K8S_MONITOR" ]; then
724 # install OSM MONITORING
725 install_k8s_monitoring
726 track deploy_osm install_k8s_monitoring_ok
727 fi
728
729 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
730 track osmclient osmclient_ok
731
732 echo -e "Checking OSM health state..."
733 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
734 (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
735 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
736 track healthchecks osm_unhealthy didnotconverge)
737 track healthchecks after_healthcheck_ok
738
739 add_local_k8scluster
740 track final_ops add_local_k8scluster_ok
741
742 wget -q -O- https://osm-download.etsi.org/ftp/osm-11.0-eleven/README2.txt &> /dev/null
743 track end
744 sudo find /etc/osm
745 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
746 return 0
747 }
748
749 function install_to_openstack() {
750 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
751
752 if [ -z "$2" ]; then
753 FATAL "OpenStack installer requires a valid external network name"
754 fi
755
756 # Install Pip for Python3
757 sudo apt install -y python3-pip python3-venv
758 sudo -H LC_ALL=C python3 -m pip install -U pip
759
760 # Create a venv to avoid conflicts with the host installation
761 python3 -m venv $OPENSTACK_PYTHON_VENV
762
763 source $OPENSTACK_PYTHON_VENV/bin/activate
764
765 # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
766 python -m pip install -U wheel
767 python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
768
769 # Install the Openstack cloud module (ansible>=2.10)
770 ansible-galaxy collection install openstack.cloud
771
772 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
773
774 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
775
776 ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
777
778 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
779 ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
780 fi
781
782 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
783 ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
784 fi
785
786 # Execute the Ansible playbook based on openrc or clouds.yaml
787 if [ -e "$1" ]; then
788 . $1
789 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
790 $OSM_DEVOPS/installers/openstack/site.yml
791 else
792 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
793 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
794 fi
795
796 # Exit from venv
797 deactivate
798
799 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
800 return 0
801 }
802
803 function install_k8s_monitoring() {
804 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
805 # install OSM monitoring
806 sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh
807 sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
808 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
809 }
810
811 function dump_vars(){
812 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
813 echo "APT_PROXY_URL=$APT_PROXY_URL"
814 echo "DEVELOP=$DEVELOP"
815 echo "DEBUG_INSTALL=$DEBUG_INSTALL"
816 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
817 echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
818 echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
819 echo "DOCKER_USER=$DOCKER_USER"
820 echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
821 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
822 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
823 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
824 echo "INSTALL_LXD=$INSTALL_LXD"
825 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
826 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
827 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
828 echo "INSTALL_ONLY=$INSTALL_ONLY"
829 echo "INSTALL_PLA=$INSTALL_PLA"
830 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
831 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
832 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
833 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
834 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
835 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
839 echo "OSM_DEVOPS=$OSM_DEVOPS"
840 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
841 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
842 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
843 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
844 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
845 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
846 echo "PULL_IMAGES=$PULL_IMAGES"
847 echo "RECONFIGURE=$RECONFIGURE"
848 echo "RELEASE=$RELEASE"
849 echo "REPOSITORY=$REPOSITORY"
850 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
851 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
852 echo "SHOWOPTS=$SHOWOPTS"
853 echo "TEST_INSTALLER=$TEST_INSTALLER"
854 echo "TO_REBUILD=$TO_REBUILD"
855 echo "UNINSTALL=$UNINSTALL"
856 echo "UPDATE=$UPDATE"
857 echo "Install from specific refspec (-b): $COMMIT_ID"
858 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
859 }
860
861 function parse_docker_registry_url() {
862 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
863 DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
864 DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
865 DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
866 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
867 }
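# Example (illustrative credentials/host): -d user:pass@myregistry.local:5000/ yields
#   DOCKER_REGISTRY_USER=user  DOCKER_REGISTRY_PASSWORD=pass  DOCKER_REGISTRY_URL=myregistry.local:5000/
# (a trailing '/' is assumed so that ${DOCKER_REGISTRY_URL}${DOCKER_USER}/<module> forms a valid image reference)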
868
869 function ctrl_c() {
870 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
871 echo "** Trapped CTRL-C"
872 FATAL "User stopped the installation"
873 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
874 }
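# ctrl_c is installed as the INT handler ('trap ctrl_c INT') at the beginning of
# install_osm(), matching the purpose described in the commit message at the top.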
875
876 LXD_VERSION=4.0
877 JUJU_VERSION=2.9
878 JUJU_AGENT_VERSION=2.9.17
879 UNINSTALL=""
880 DEVELOP=""
881 UPDATE=""
882 RECONFIGURE=""
883 TEST_INSTALLER=""
884 INSTALL_LXD=""
885 SHOWOPTS=""
886 COMMIT_ID=""
887 ASSUME_YES=""
888 APT_PROXY_URL=""
889 INSTALL_FROM_SOURCE=""
890 DEBUG_INSTALL=""
891 RELEASE="ReleaseTEN"
892 REPOSITORY="stable"
893 INSTALL_VIMEMU=""
894 INSTALL_PLA=""
895 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
896 LXD_REPOSITORY_PATH=""
897 INSTALL_LIGHTWEIGHT="y"
898 INSTALL_TO_OPENSTACK=""
899 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
900 OPENSTACK_PUBLIC_NET_NAME=""
901 OPENSTACK_ATTACH_VOLUME="false"
902 OPENSTACK_SSH_KEY_FILE=""
903 OPENSTACK_USERDATA_FILE=""
904 OPENSTACK_VM_NAME="server-osm"
905 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
906 INSTALL_ONLY=""
907 TO_REBUILD=""
908 INSTALL_NOLXD=""
909 INSTALL_NODOCKER=""
910 INSTALL_NOJUJU=""
911 INSTALL_K8S_MONITOR=""
912 INSTALL_NOHOSTCLIENT=""
913 INSTALL_CACHELXDIMAGES=""
914 OSM_DEVOPS=
915 OSM_VCA_HOST=
916 OSM_VCA_SECRET=
917 OSM_VCA_PUBKEY=
918 OSM_VCA_CLOUDNAME="localhost"
919 OSM_VCA_K8S_CLOUDNAME="k8scloud"
920 OSM_STACK_NAME=osm
921 NO_HOST_PORTS=""
922 DOCKER_NOBUILD=""
923 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
924 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
925 OSM_WORK_DIR="/etc/osm"
926 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
927 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
928 OSM_HOST_VOL="/var/lib/osm"
929 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
930 OSM_DOCKER_TAG=latest
931 DOCKER_USER=opensourcemano
932 PULL_IMAGES="y"
933 KAFKA_TAG=2.11-1.0.2
934 PROMETHEUS_TAG=v2.4.3
935 GRAFANA_TAG=latest
936 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
937 PROMETHEUS_CADVISOR_TAG=latest
938 KEYSTONEDB_TAG=10
939 OSM_DATABASE_COMMONKEY=
940 ELASTIC_VERSION=6.4.2
941 ELASTIC_CURATOR_VERSION=5.5.4
942 POD_NETWORK_CIDR=10.244.0.0/16
943 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
944 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
945 DOCKER_REGISTRY_URL=
946 DOCKER_PROXY_URL=
947 MODULE_DOCKER_TAG=
948 OSM_INSTALLATION_TYPE="Default"
949
950 while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
951 case "${o}" in
952 a)
953 APT_PROXY_URL=${OPTARG}
954 ;;
955 b)
956 COMMIT_ID=${OPTARG}
957 PULL_IMAGES=""
958 ;;
959 r)
960 REPOSITORY="${OPTARG}"
961 REPO_ARGS+=(-r "$REPOSITORY")
962 ;;
963 k)
964 REPOSITORY_KEY="${OPTARG}"
965 REPO_ARGS+=(-k "$REPOSITORY_KEY")
966 ;;
967 u)
968 REPOSITORY_BASE="${OPTARG}"
969 REPO_ARGS+=(-u "$REPOSITORY_BASE")
970 ;;
971 R)
972 RELEASE="${OPTARG}"
973 REPO_ARGS+=(-R "$RELEASE")
974 ;;
975 D)
976 OSM_DEVOPS="${OPTARG}"
977 ;;
978 o)
979 INSTALL_ONLY="y"
980 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
981 ;;
982 O)
983 INSTALL_TO_OPENSTACK="y"
984 if [ -n "${OPTARG}" ]; then
985 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
986 else
987 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
988 usage && exit 1
989 fi
990 ;;
991 f)
992 OPENSTACK_SSH_KEY_FILE="${OPTARG}"
993 ;;
994 F)
995 OPENSTACK_USERDATA_FILE="${OPTARG}"
996 ;;
997 N)
998 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
999 ;;
1000 m)
1001 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
1002 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1003 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1004 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1005 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1006 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1007 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1008 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
1009 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1010 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1011 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1012 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1013 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1014 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1015 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1016 ;;
1017 H)
1018 OSM_VCA_HOST="${OPTARG}"
1019 ;;
1020 S)
1021 OSM_VCA_SECRET="${OPTARG}"
1022 ;;
1023 s)
1024 OSM_STACK_NAME="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1025 ;;
1026 t)
1027 OSM_DOCKER_TAG="${OPTARG}"
1028 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
1029 ;;
1030 U)
1031 DOCKER_USER="${OPTARG}"
1032 ;;
1033 P)
1034 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1035 ;;
1036 A)
1037 OSM_VCA_APIPROXY="${OPTARG}"
1038 ;;
1039 l)
1040 LXD_CLOUD_FILE="${OPTARG}"
1041 ;;
1042 L)
1043 LXD_CRED_FILE="${OPTARG}"
1044 ;;
1045 K)
1046 CONTROLLER_NAME="${OPTARG}"
1047 ;;
1048 d)
1049 DOCKER_REGISTRY_URL="${OPTARG}"
1050 ;;
1051 p)
1052 DOCKER_PROXY_URL="${OPTARG}"
1053 ;;
1054 T)
1055 MODULE_DOCKER_TAG="${OPTARG}"
1056 ;;
1057 -)
1058 [ "${OPTARG}" == "help" ] && usage && exit 0
1059 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1060 [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
1061 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1062 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1063 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1064 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1065 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1066 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1067 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1068 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1069 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1070 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1071 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="--nojuju" && continue
1072 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1073 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1074 [ "${OPTARG}" == "pullimages" ] && continue
1075 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1076 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
1077 [ "${OPTARG}" == "bundle" ] && continue
1078 [ "${OPTARG}" == "k8s" ] && continue
1079 [ "${OPTARG}" == "lxd" ] && continue
1080 [ "${OPTARG}" == "lxd-cred" ] && continue
1081 [ "${OPTARG}" == "microstack" ] && continue
1082 [ "${OPTARG}" == "overlay" ] && continue
1083 [ "${OPTARG}" == "only-vca" ] && continue
1084 [ "${OPTARG}" == "vca" ] && continue
1085 [ "${OPTARG}" == "ha" ] && continue
1086 [ "${OPTARG}" == "tag" ] && continue
1087 [ "${OPTARG}" == "registry" ] && continue
1088 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1089 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
1090 [ "${OPTARG}" == "nocachelxdimages" ] && continue
1091 [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
1092 echo -e "Invalid option: '--$OPTARG'\n" >&2
1093 usage && exit 1
1094 ;;
1095 :)
1096 echo "Option -$OPTARG requires an argument" >&2
1097 usage && exit 1
1098 ;;
1099 \?)
1100 echo -e "Invalid option: '-$OPTARG'\n" >&2
1101 usage && exit 1
1102 ;;
1103 h)
1104 usage && exit 0
1105 ;;
1106 y)
1107 ASSUME_YES="y"
1108 ;;
1109 *)
1110 usage && exit 1
1111 ;;
1112 esac
1113 done
1114
1115 source $OSM_DEVOPS/common/all_funcs
1116
1117 [ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
1118 [ -n "$SHOWOPTS" ] && dump_vars && exit 0
1119
1120 # Uninstall if "--uninstall"
1121 if [ -n "$UNINSTALL" ]; then
1122 if [ -n "$CHARMED" ]; then
1123 track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $OSM_DOCKER_TAG "$@" || \
1125 FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
1126 else
1127 ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
1128 FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
1129 fi
1130 echo -e "\nDONE"
1131 exit 0
1132 fi
1133
1134 # Charmed installation
1135 if [ -n "$CHARMED" ]; then
${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $OSM_DOCKER_TAG "$@" || \
1137 FATAL_TRACK charmed_install "charmed_install.sh failed"
1138 echo -e "\nDONE"
1139 exit 0
1140 fi
1141
1142 # Installation to Openstack
1143 if [ -n "$INSTALL_TO_OPENSTACK" ]; then
1144 install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
1145 echo -e "\nDONE"
1146 exit 0
1147 fi
1148
1149 # Community_installer
1150 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
1151 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1152 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1153
1154 # if develop, we force master
1155 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1156
1157 check_packages "git wget curl tar"
1158
1159 sudo snap install jq
1160 if [ -z "$OSM_DEVOPS" ]; then
1161 if [ -n "$TEST_INSTALLER" ]; then
1162 echo -e "\nUsing local devops repo for OSM installation"
1163 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1164 else
1165 echo -e "\nCreating temporary dir for OSM installation"
1166 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1167 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1168
1169 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1170
1171 if [ -z "$COMMIT_ID" ]; then
1172 echo -e "\nGuessing the current stable release"
1173 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1174 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1175
1176 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1177 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1178 else
1179 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1180 fi
1181 git -C $OSM_DEVOPS checkout $COMMIT_ID
1182 fi
1183 fi
1184
1185 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1186 OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1187 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1188 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1189
1190 #Installation starts here
1191 wget -q -O- https://osm-download.etsi.org/ftp/osm-11.0-eleven/README.txt &> /dev/null
1192 export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
1193 install_osm
1194 echo -e "\nDONE"
1195 exit 0