1 #!/bin/bash
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15
16 function usage(){
17 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
18 echo -e "usage: $0 [OPTIONS]"
19 echo -e "Install OSM from binaries or source code (by default, from binaries)"
20 echo -e " OPTIONS"
21 echo -e " -h / --help: print this help"
22 echo -e " -y: do not prompt for confirmation, assumes yes"
23 echo -e " -r <repo>: use specified repository name for osm packages"
24 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
25 echo -e " -u <repo base>: use specified repository url for osm packages"
26 echo -e " -k <repo key>: use specified repository public key url"
27 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
28 echo -e " -b master (main dev branch)"
29 echo -e " -b v2.0 (v2.0 branch)"
30 echo -e " -b tags/v1.1.0 (a specific tag)"
31 echo -e " ..."
32 echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
33 echo -e " -s <stack name> or <namespace>: user-defined stack name when installed using swarm, or namespace when installed using k8s (default: osm)"
34 echo -e " -H <VCA host> use specific juju host controller IP"
35 echo -e " -S <VCA secret> use VCA/juju secret key"
36 echo -e " -P <VCA pubkey> use VCA/juju public key file"
37 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
38 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, GRAFANA, osmclient, NONE)"
41 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor)"
42 echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
43 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
44 echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
45 echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
46 echo -e " -D <devops path> use local devops installation path"
47 echo -e " -w <work dir> Location to store runtime installation"
48 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
49 echo -e " -l: LXD cloud yaml file"
50 echo -e " -L: LXD credentials yaml file"
51 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
52 echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
53 echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
54 echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
55 echo -e " --debug: debug mode"
56 echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
57 echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
58 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
59 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
60 echo -e " --nojuju: do not install juju, assumes it is already installed"
61 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
62 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
63 echo -e " --nohostclient: do not install the osmclient"
64 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
65 echo -e " --source: install OSM from source code using the latest stable tag"
66 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
67 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
68 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
69 echo -e " --volume: create a VM volume when installing to OpenStack"
70 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
71 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
72 echo -e " --showopts: print chosen options and exit (only for debugging)"
73 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
74 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
75 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
76 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
77 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
78 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
79 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
80 echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
81 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
82 echo -e " [--tag]: Docker image tag. (--charmed option)"
83 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
84 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
85 }
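# Illustrative invocations (hedged examples; the values below are placeholders, not
# defaults enforced by this script):
#   ./full_install_osm.sh                      # interactive install from binaries
#   ./full_install_osm.sh -y -t 11             # unattended install using docker tag 11
#   ./full_install_osm.sh --charmed --ha       # charmed HA deployment (see --charmed options above)
#   ./full_install_osm.sh --uninstall          # remove a previous installation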
86
87 # Takes a juju accounts.yaml file and returns the password for a
88 # specific controller. Written using only bash tools to avoid
89 # adding dependencies on other packages.
90 function parse_juju_password {
91 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
92 password_file="${HOME}/.local/share/juju/accounts.yaml"
93 local controller_name=$1
94 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
95 sed -ne "s|^\($s\):|\1|" \
96 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
97 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
98 awk -F$fs -v controller=$controller_name '{
99 indent = length($1)/2;
100 vname[indent] = $2;
101 for (i in vname) {if (i > indent) {delete vname[i]}}
102 if (length($3) > 0) {
103 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
104 if (match(vn,controller) && match($2,"password")) {
105 printf("%s",$3);
106 }
107 }
108 }'
109 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
110 }
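# Illustrative example (assumes a bootstrapped controller named "osm" in accounts.yaml):
#   controllers:
#     osm:
#       user: admin
#       password: 86f3...
# With that file in place, "parse_juju_password osm" prints the password value on stdout.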
111
112 function set_vca_variables() {
113 OSM_VCA_CLOUDNAME="lxd-cloud"
114 [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
115 if [ -z "$OSM_VCA_HOST" ]; then
116 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
117 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
118 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
119 fi
120 if [ -z "$OSM_VCA_SECRET" ]; then
121 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
122 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
123 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
124 fi
125 if [ -z "$OSM_VCA_PUBKEY" ]; then
126 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
127 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
128 fi
129 if [ -z "$OSM_VCA_CACERT" ]; then
130 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
131 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
132 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
133 fi
134 }
135
136 function generate_secret() {
137 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
138 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
139 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
140 }
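# Illustrative usage: OSM_DATABASE_COMMONKEY=$(generate_secret) yields a random
# 32-character alphanumeric string; the same helper is used below for database
# passwords and common keys.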
141
142 function check_packages() {
143 NEEDED_PACKAGES="$1"
144 echo -e "Checking required packages: ${NEEDED_PACKAGES}"
145 for PACKAGE in ${NEEDED_PACKAGES} ; do
146 dpkg -L ${PACKAGE}
147 if [ $? -ne 0 ]; then
148 echo -e "Package ${PACKAGE} is not installed."
149 echo -e "Updating apt-cache ..."
150 sudo apt-get update
151 echo -e "Installing ${PACKAGE} ..."
152 sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
153 fi
154 done
155 echo -e "Required packages are present: ${NEEDED_PACKAGES}"
156 }
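# Illustrative usage (this is how the main flow below invokes it):
#   check_packages "git wget curl tar snapd"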
157
158 function install_lxd() {
159 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
160 # Apply sysctl production values for optimal performance
161 sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
162 sudo sysctl --system
163
164 # Install LXD snap
165 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
166 sudo snap install lxd --channel $LXD_VERSION/stable
167
168 # Configure LXD
169 sudo usermod -a -G lxd `whoami`
170 cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
171 sg lxd -c "lxd waitready"
172 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
173 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
174 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
175 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
176 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
177 sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
178 #sudo systemctl stop lxd-bridge
179 #sudo systemctl --system daemon-reload
180 #sudo systemctl enable lxd-bridge
181 #sudo systemctl start lxd-bridge
182 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
183 }
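# The resulting LXD setup can be inspected manually if needed (illustrative commands,
# not executed by this installer):
#   sg lxd -c "lxc network show lxdbr0"    # bridge configured by the preseed
#   sg lxd -c "lxc profile show default"   # default profile with the adjusted eth0 MTU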
184
185 function ask_user(){
186 # Ask the user a question and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive.
187 # Params: $1 text to ask; $2 default action: 'y' for yes, 'n' for no, other or empty for no default
188 # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
189 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
190 read -e -p "$1" USER_CONFIRMATION
191 while true ; do
192 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
193 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
194 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
195 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
196 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
197 done
198 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
199 }
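# Illustrative usage (default answer 'y' when the user just presses Enter):
#   ask_user "Do you want to proceed (Y/n)? " y || exit 1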
200
201 function install_osmclient(){
202 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
203 CLIENT_RELEASE=${RELEASE#"-R "}
204 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
205 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
206 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
207 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
208 curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
209 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
210 sudo apt-get update
211 sudo apt-get install -y python3-pip
212 sudo -H LC_ALL=C python3 -m pip install -U pip
213 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
214 sudo apt-get install -y python3-osm-im python3-osmclient
215 if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
216 python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
217 fi
218 if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
219 sudo apt-get install -y libcurl4-openssl-dev libssl-dev
220 python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
221 fi
222 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
223 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
224 echo -e "\nOSM client installed"
225 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
226 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
227 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
228 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
229 else
230 echo -e "The OSM client assumes that the OSM host is running on localhost (127.0.0.1)."
231 echo -e "To interact with a different OSM host, configure this env variable in your .bashrc file:"
232 echo " export OSM_HOSTNAME=<OSM_host>"
233 fi
234 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
235 return 0
236 }
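# A quick manual check of the client after installation (illustrative, not run here):
#   osm version        # prints the client version and, if OSM is reachable, the server version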
237
238 function docker_login() {
239 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
240 echo "Docker login"
241 [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
242 sg docker -c "echo \"${DOCKER_REGISTRY_PASSWORD}\" | docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
243 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
244 }
245
246 function generate_docker_images() {
247 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
248 echo "Pulling and generating docker images"
249 [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login
250
251 echo "Pulling docker images"
252
253 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
254 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
255 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
256 fi
257
258 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
259 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
260 fi
261
262 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
263 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
264 fi
265
266 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
267 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
268 fi
269
270 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
271 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
272 fi
273
274 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
275 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
276 fi
277
278 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
279 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
280 fi
281
282 if [ -n "$PULL_IMAGES" ]; then
283 echo "Pulling OSM docker images"
284 for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
285 module_lower=${module,,}
286 if [ "$module" == "PLA" ] && [ -z "$INSTALL_PLA" ]; then
287 continue
288 fi
289 module_tag="${OSM_DOCKER_TAG}"
290 if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
291 module_tag="${MODULE_DOCKER_TAG}"
292 fi
293 echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
294 sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
295 done
296 else
297 _build_from=$COMMIT_ID
298 [ -z "$_build_from" ] && _build_from="latest"
299 echo "OSM Docker images generated from $_build_from"
300 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
301 trap 'rm -rf "${LWTEMPDIR}"' EXIT
302 for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
303 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
304 module_lower=${module,,}
305 if [ "$module" == "PLA" ] && [ -z "$INSTALL_PLA" ]; then
306 continue
307 fi
308 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
309 git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
310 sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
311 fi
312 done
313 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
314 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
315 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
316 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
317 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
318 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
319 fi
320 echo "Finished generation of docker images"
321 fi
322
323 echo "Finished pulling and generating docker images"
324 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
325 }
326
327 function cmp_overwrite() {
328 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
329 file1="$1"
330 file2="$2"
331 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
332 if [ -f "${file2}" ]; then
333 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
334 else
335 cp -b ${file1} ${file2}
336 fi
337 fi
338 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
339 }
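# Illustrative usage (hypothetical pairing of files referenced elsewhere in this script):
#   cmp_overwrite $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
# copies the source file only if it differs, asking before overwriting an existing file
# and keeping a backup of the previous version (cp -b).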
340
341 function generate_k8s_manifest_files() {
342 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
343 #Kubernetes resources
344 sudo cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
345 sudo rm -f $OSM_K8S_WORK_DIR/mongo.yaml
346 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
347 }
348
349 function generate_prometheus_grafana_files() {
350 #this only works with docker swarm
351 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
352 # Prometheus files
353 sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
354 sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
355
356 # Grafana files
357 sudo mkdir -p $OSM_DOCKER_WORK_DIR/grafana
358 sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
359 sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
360 sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
361 sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
362
363 # Prometheus Exporters files
364 sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
365 sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
366 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
367 }
368
369 function generate_docker_env_files() {
370 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
371 echo "Doing a backup of existing env files"
372 sudo cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
373 sudo cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
374 sudo cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
375 sudo cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
376 sudo cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
377 sudo cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
378 sudo cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
379 sudo cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
380
381 echo "Generating docker env files"
382 # LCM
383 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
384 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
385 fi
386
387 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
388 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
389 else
390 sudo sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
391 fi
392
393 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
394 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
395 else
396 sudo sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
397 fi
398
399 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
400 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
401 else
402 sudo sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
403 fi
404
405 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
406 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
407 else
408 sudo sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
409 fi
410
411 if [ -n "$OSM_VCA_APIPROXY" ]; then
412 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
413 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
414 else
415 sudo sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
416 fi
417 fi
418
419 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
420 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
421 fi
422
423 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
424 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
425 fi
426
427 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
428 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
429 else
430 sudo sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
431 fi
432
433 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
434 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
435 else
436 sudo sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
437 fi
438
439 # RO
440 MYSQL_ROOT_PASSWORD=$(generate_secret)
441 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
442 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro-db.env
443 fi
444 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
445 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro.env
446 fi
447 if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
448 echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ro.env
449 fi
450
451 # Keystone
452 KEYSTONE_DB_PASSWORD=$(generate_secret)
453 SERVICE_PASSWORD=$(generate_secret)
454 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
455 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone-db.env
456 fi
457 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
458 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone.env
459 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
460 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
461 fi
462
463 # NBI
464 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
465 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/nbi.env
466 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/nbi.env
467 fi
468
469 # MON
470 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
471 echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
472 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
473 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
474 fi
475
476 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
477 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
478 else
479 sudo sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
480 fi
481
482 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
483 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
484 else
485 sudo sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
486 fi
487
488 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
489 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
490 else
491 sudo sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
492 fi
493
494 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
495 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
496 else
497 sudo sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
498 fi
499
500
501 # POL
502 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
503 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | sudo tee -a $OSM_DOCKER_WORK_DIR/pol.env
504 fi
505
506 echo "Finished generation of docker env files"
507 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
508 }
509
510 #creates secrets from env files which will be used by containers
511 function kube_secrets(){
512 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
513 kubectl create ns $OSM_STACK_NAME
514 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
515 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
516 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
517 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
518 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
519 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
520 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
521 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
522 }
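# The generated secrets can be verified manually (illustrative):
#   kubectl -n $OSM_STACK_NAME get secrets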
523
524 #deploys osm pods and services
525 function deploy_osm_services() {
526 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
527 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
528 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
529 }
530
531 #deploy charmed services
532 function deploy_charmed_services() {
533 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
534 juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
535 juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
536 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
537 }
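# Deployment progress of the charmed services can be followed manually (illustrative):
#   juju status -m $OSM_STACK_NAME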
538
539 function deploy_osm_pla_service() {
540 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
541 # corresponding to namespace_vol
542 sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
543 # corresponding to deploy_osm_services
544 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
545 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
546 }
547
548 function parse_yaml() {
549 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
550 TAG=$1
551 shift
552 services=$@
553 for module in $services; do
554 if [ "$module" == "pla" ]; then
555 if [ -n "$INSTALL_PLA" ]; then
556 echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
557 sudo sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
558 fi
559 else
560 echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
561 sudo sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
562 fi
563 done
564 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
565 }
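# Illustrative example: parse_yaml 11.0.1 "nbi lcm" rewrites the image references in
# ${OSM_K8S_WORK_DIR}/nbi.yaml and lcm.yaml from opensourcemano/<module>:.* to
# ${DOCKER_REGISTRY_URL}${DOCKER_USER}/<module>:11.0.1 (the tag 11.0.1 is just an example).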
566
567 function update_manifest_files() {
568 osm_services="nbi lcm ro pol mon ng-ui keystone pla prometheus"
569 list_of_services=""
570 for module in $osm_services; do
571 module_upper="${module^^}"
572 if ! echo $TO_REBUILD | grep -q $module_upper ; then
573 list_of_services="$list_of_services $module"
574 fi
575 done
576 if [ ! "$OSM_DOCKER_TAG" == "11" ]; then
577 parse_yaml $OSM_DOCKER_TAG $list_of_services
578 fi
579 if [ -n "$MODULE_DOCKER_TAG" ]; then
580 parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
581 fi
582 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
583 }
584
585 function namespace_vol() {
586 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
587 osm_services="nbi lcm ro pol mon kafka mysql prometheus"
588 for osm in $osm_services; do
589 sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
590 done
591 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
592 }
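# Illustrative effect: with the default OSM_STACK_NAME=osm, hostPath entries in the
# listed manifests change from "path: /var/lib/osm" to "path: /var/lib/osm/osm".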
593
594 function add_local_k8scluster() {
595 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
596 /usr/bin/osm --all-projects vim-create \
597 --name _system-osm-vim \
598 --account_type dummy \
599 --auth_url http://dummy \
600 --user osm --password osm --tenant osm \
601 --description "dummy" \
602 --config '{management_network_name: mgmt}'
603 /usr/bin/osm --all-projects k8scluster-add \
604 --creds ${HOME}/.kube/config \
605 --vim _system-osm-vim \
606 --k8s-nets '{"net1": null}' \
607 --version '1.15' \
608 --description "OSM Internal Cluster" \
609 _system-osm-k8s
610 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
611 }
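# The registration can be checked manually once OSM is healthy (illustrative):
#   osm --all-projects vim-list
#   osm --all-projects k8scluster-list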
612
613 function configure_apt_proxy() {
614 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
615 OSM_APT_PROXY=$1
616 OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
617 echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
618 if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
619 sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
620 Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
621 EOF"
622 else
623 sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
624 fi
625 sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
626 track prereq apt_proxy_configured_ok
627 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
628 }
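# Illustrative result: configure_apt_proxy http://10.0.0.1:3128 leaves
# /etc/apt/apt.conf.d/osm-apt containing:
#   Acquire::http { Proxy "http://10.0.0.1:3128"; }
# (the proxy URL above is an example value)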
629
630 function install_osm() {
631 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
632
633 trap ctrl_c INT
634 track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
635
636 track checks checkingroot_ok
637 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
638 track checks noroot_ok
639
640 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
641 1. Install and configure LXD
642 2. Install juju
643 3. Install docker CE
644 4. Disable swap space
645 5. Install and initialize Kubernetes
646 as pre-requirements.
647 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
648 track checks proceed_ok
649
650 echo "Installing OSM"
651
652 echo "Determining IP address of the interface with the default route"
653 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
654 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
655 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
656 DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
657 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
658
659 # configure apt proxy
660 [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL
661
662 # If no VCA host is passed in, we need to install LXD/juju, unless explicitly asked not to
663 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
664 install_lxd
665 fi
666
667 track prereq prereqok_ok
668
669 if [ ! -n "$INSTALL_NODOCKER" ]; then
670 DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
671 [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
672 $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed"
673 fi
674
675 track docker_ce docker_ce_ok
676
677 echo "Creating folders for installation"
678 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR
679 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
680 sudo cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
681
682 $OSM_DEVOPS/installers/install_kubeadm_cluster.sh -i ${DEFAULT_IP} -d ${OSM_DOCKER_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
683 FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed"
684 track k8scluster k8scluster_ok
685
686 JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_STACK_NAME} -i ${DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}"
687 [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
688 [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
689 [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
690 [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
691 $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
692 set_vca_variables
693 track juju juju_ok
694
695 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
696 OSM_DATABASE_COMMONKEY=$(generate_secret)
697 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
698 fi
699
700 # Deploy OSM services
701 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
702 track docker_images docker_images_ok
703
704 generate_k8s_manifest_files
705 track osm_files manifest_files_ok
706 generate_docker_env_files
707 track osm_files env_files_ok
708
709 deploy_charmed_services
710 track deploy_osm deploy_charmed_services_ok
711 kube_secrets
712 track deploy_osm kube_secrets_ok
713 update_manifest_files
714 track deploy_osm update_manifest_files_ok
715 namespace_vol
716 track deploy_osm namespace_vol_ok
717 deploy_osm_services
718 track deploy_osm deploy_osm_services_k8s_ok
719 if [ -n "$INSTALL_PLA" ]; then
720 # optional PLA install
721 deploy_osm_pla_service
722 track deploy_osm deploy_osm_pla_ok
723 fi
724 if [ -n "$INSTALL_K8S_MONITOR" ]; then
725 # install OSM MONITORING
726 install_k8s_monitoring
727 track deploy_osm install_k8s_monitoring_ok
728 fi
729
730 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
731 track osmclient osmclient_ok
732
733 echo -e "Checking OSM health state..."
734 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
735 (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
736 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
737 track healthchecks osm_unhealthy didnotconverge)
738 track healthchecks after_healthcheck_ok
739
740 add_local_k8scluster
741 track final_ops add_local_k8scluster_ok
742
743 wget -q -O- https://osm-download.etsi.org/ftp/osm-11.0-eleven/README2.txt &> /dev/null
744 track end
745 sudo find /etc/osm
746 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
747 return 0
748 }
749
750 function install_to_openstack() {
751 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
752
753 if [ -z "$2" ]; then
754 FATAL "OpenStack installer requires a valid external network name"
755 fi
756
757 # Install Pip for Python3
758 sudo apt install -y python3-pip python3-venv
759 sudo -H LC_ALL=C python3 -m pip install -U pip
760
761 # Create a venv to avoid conflicts with the host installation
762 python3 -m venv $OPENSTACK_PYTHON_VENV
763
764 source $OPENSTACK_PYTHON_VENV/bin/activate
765
766 # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
767 python -m pip install -U wheel
768 python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
769
770 # Install the Openstack cloud module (ansible>=2.10)
771 ansible-galaxy collection install openstack.cloud
772
773 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
774
775 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
776
777 ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
778
779 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
780 ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
781 fi
782
783 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
784 ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
785 fi
786
787 # Execute the Ansible playbook based on openrc or clouds.yaml
788 if [ -e "$1" ]; then
789 . $1
790 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
791 $OSM_DEVOPS/installers/openstack/site.yml
792 else
793 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
794 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
795 fi
796
797 # Exit from venv
798 deactivate
799
800 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
801 return 0
802 }
803
804 function install_k8s_monitoring() {
805 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
806 # install OSM monitoring
807 sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh
808 sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
809 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
810 }
811
812 function dump_vars(){
813 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
814 echo "APT_PROXY_URL=$APT_PROXY_URL"
815 echo "DEVELOP=$DEVELOP"
816 echo "DEBUG_INSTALL=$DEBUG_INSTALL"
817 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
818 echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
819 echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
820 echo "DOCKER_USER=$DOCKER_USER"
821 echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
822 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
823 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
824 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
825 echo "INSTALL_LXD=$INSTALL_LXD"
826 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
827 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
828 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
829 echo "INSTALL_ONLY=$INSTALL_ONLY"
830 echo "INSTALL_PLA=$INSTALL_PLA"
831 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
832 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
833 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
834 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
835 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
836 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
837 echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE"
838 echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE"
839 echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME"
840 echo "OSM_DEVOPS=$OSM_DEVOPS"
841 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
842 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
843 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
844 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
845 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
846 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
847 echo "PULL_IMAGES=$PULL_IMAGES"
848 echo "RECONFIGURE=$RECONFIGURE"
849 echo "RELEASE=$RELEASE"
850 echo "REPOSITORY=$REPOSITORY"
851 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
852 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
853 echo "SHOWOPTS=$SHOWOPTS"
854 echo "TEST_INSTALLER=$TEST_INSTALLER"
855 echo "TO_REBUILD=$TO_REBUILD"
856 echo "UNINSTALL=$UNINSTALL"
857 echo "UPDATE=$UPDATE"
858 echo "Install from specific refspec (-b): $COMMIT_ID"
859 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
860 }
861
862 function parse_docker_registry_url() {
863 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
864 DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
865 DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
866 DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
867 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
868 }
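# Illustrative example: with -d "user1:mypass@myregistry.example.com:5000/" this sets
#   DOCKER_REGISTRY_USER=user1
#   DOCKER_REGISTRY_PASSWORD=mypass
#   DOCKER_REGISTRY_URL=myregistry.example.com:5000/
# (example values; DOCKER_REGISTRY_URL is later prepended verbatim to image names)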
869
870 function ctrl_c() {
871 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
872 echo "** Trapped CTRL-C"
873 FATAL "User stopped the installation"
874 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
875 }
876
877 LXD_VERSION=4.0
878 JUJU_VERSION=2.9
879 JUJU_AGENT_VERSION=2.9.22
880 UNINSTALL=""
881 DEVELOP=""
882 UPDATE=""
883 RECONFIGURE=""
884 TEST_INSTALLER=""
885 INSTALL_LXD=""
886 SHOWOPTS=""
887 COMMIT_ID=""
888 ASSUME_YES=""
889 APT_PROXY_URL=""
890 INSTALL_FROM_SOURCE=""
891 DEBUG_INSTALL=""
892 RELEASE="ReleaseTEN"
893 REPOSITORY="stable"
894 INSTALL_VIMEMU=""
895 INSTALL_PLA=""
896 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
897 LXD_REPOSITORY_PATH=""
898 INSTALL_LIGHTWEIGHT="y"
899 INSTALL_TO_OPENSTACK=""
900 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
901 OPENSTACK_PUBLIC_NET_NAME=""
902 OPENSTACK_ATTACH_VOLUME="false"
903 OPENSTACK_SSH_KEY_FILE=""
904 OPENSTACK_USERDATA_FILE=""
905 OPENSTACK_VM_NAME="server-osm"
906 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
907 INSTALL_ONLY=""
908 TO_REBUILD=""
909 INSTALL_NOLXD=""
910 INSTALL_NODOCKER=""
911 INSTALL_NOJUJU=""
912 INSTALL_K8S_MONITOR=""
913 INSTALL_NOHOSTCLIENT=""
914 INSTALL_CACHELXDIMAGES=""
915 OSM_DEVOPS=
916 OSM_VCA_HOST=
917 OSM_VCA_SECRET=
918 OSM_VCA_PUBKEY=
919 OSM_VCA_CLOUDNAME="localhost"
920 OSM_VCA_K8S_CLOUDNAME="k8scloud"
921 OSM_STACK_NAME=osm
922 NO_HOST_PORTS=""
923 DOCKER_NOBUILD=""
924 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
925 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
926 OSM_WORK_DIR="/etc/osm"
927 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
928 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
929 OSM_HOST_VOL="/var/lib/osm"
930 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
931 OSM_DOCKER_TAG=latest
932 DOCKER_USER=opensourcemano
933 PULL_IMAGES="y"
934 KAFKA_TAG=2.11-1.0.2
935 PROMETHEUS_TAG=v2.4.3
936 GRAFANA_TAG=latest
937 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
938 PROMETHEUS_CADVISOR_TAG=latest
939 KEYSTONEDB_TAG=10
940 OSM_DATABASE_COMMONKEY=
941 ELASTIC_VERSION=6.4.2
942 ELASTIC_CURATOR_VERSION=5.5.4
943 POD_NETWORK_CIDR=10.244.0.0/16
944 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
945 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
946 DOCKER_REGISTRY_URL=
947 DOCKER_PROXY_URL=
948 MODULE_DOCKER_TAG=
949 OSM_INSTALLATION_TYPE="Default"
950
951 while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
952 case "${o}" in
953 a)
954 APT_PROXY_URL=${OPTARG}
955 ;;
956 b)
957 COMMIT_ID=${OPTARG}
958 PULL_IMAGES=""
959 ;;
960 r)
961 REPOSITORY="${OPTARG}"
962 REPO_ARGS+=(-r "$REPOSITORY")
963 ;;
964 k)
965 REPOSITORY_KEY="${OPTARG}"
966 REPO_ARGS+=(-k "$REPOSITORY_KEY")
967 ;;
968 u)
969 REPOSITORY_BASE="${OPTARG}"
970 REPO_ARGS+=(-u "$REPOSITORY_BASE")
971 ;;
972 R)
973 RELEASE="${OPTARG}"
974 REPO_ARGS+=(-R "$RELEASE")
975 ;;
976 D)
977 OSM_DEVOPS="${OPTARG}"
978 ;;
979 o)
980 INSTALL_ONLY="y"
981 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
982 ;;
983 O)
984 INSTALL_TO_OPENSTACK="y"
985 if [ -n "${OPTARG}" ]; then
986 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
987 else
988 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
989 usage && exit 1
990 fi
991 ;;
992 f)
993 OPENSTACK_SSH_KEY_FILE="${OPTARG}"
994 ;;
995 F)
996 OPENSTACK_USERDATA_FILE="${OPTARG}"
997 ;;
998 N)
999 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
1000 ;;
1001 m)
1002 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
1003 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1004 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1005 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1006 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1007 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1008 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1009 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
1010 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1011 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1012 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1013 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1014 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1015 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1016 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1017 ;;
1018 H)
1019 OSM_VCA_HOST="${OPTARG}"
1020 ;;
1021 S)
1022 OSM_VCA_SECRET="${OPTARG}"
1023 ;;
1024 s)
1025 OSM_STACK_NAME="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1026 ;;
1027 t)
1028 OSM_DOCKER_TAG="${OPTARG}"
1029 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
1030 ;;
1031 U)
1032 DOCKER_USER="${OPTARG}"
1033 ;;
1034 P)
1035 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1036 ;;
1037 A)
1038 OSM_VCA_APIPROXY="${OPTARG}"
1039 ;;
1040 l)
1041 LXD_CLOUD_FILE="${OPTARG}"
1042 ;;
1043 L)
1044 LXD_CRED_FILE="${OPTARG}"
1045 ;;
1046 K)
1047 CONTROLLER_NAME="${OPTARG}"
1048 ;;
1049 d)
1050 DOCKER_REGISTRY_URL="${OPTARG}"
1051 ;;
1052 p)
1053 DOCKER_PROXY_URL="${OPTARG}"
1054 ;;
1055 T)
1056 MODULE_DOCKER_TAG="${OPTARG}"
1057 ;;
1058 -)
1059 [ "${OPTARG}" == "help" ] && usage && exit 0
1060 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1061 [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
1062 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1063 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1064 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1065 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1066 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1067 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1068 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1069 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1070 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1071 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1072 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="--nojuju" && continue
1073 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1074 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1075 [ "${OPTARG}" == "pullimages" ] && continue
1076 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1077 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
1078 [ "${OPTARG}" == "bundle" ] && continue
1079 [ "${OPTARG}" == "k8s" ] && continue
1080 [ "${OPTARG}" == "lxd" ] && continue
1081 [ "${OPTARG}" == "lxd-cred" ] && continue
1082 [ "${OPTARG}" == "microstack" ] && continue
1083 [ "${OPTARG}" == "overlay" ] && continue
1084 [ "${OPTARG}" == "only-vca" ] && continue
1085 [ "${OPTARG}" == "vca" ] && continue
1086 [ "${OPTARG}" == "ha" ] && continue
1087 [ "${OPTARG}" == "tag" ] && continue
1088 [ "${OPTARG}" == "registry" ] && continue
1089 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1090 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
1091 [ "${OPTARG}" == "nocachelxdimages" ] && continue
1092 [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
1093 echo -e "Invalid option: '--$OPTARG'\n" >&2
1094 usage && exit 1
1095 ;;
1096 :)
1097 echo "Option -$OPTARG requires an argument" >&2
1098 usage && exit 1
1099 ;;
1100 \?)
1101 echo -e "Invalid option: '-$OPTARG'\n" >&2
1102 usage && exit 1
1103 ;;
1104 h)
1105 usage && exit 0
1106 ;;
1107 y)
1108 ASSUME_YES="y"
1109 ;;
1110 *)
1111 usage && exit 1
1112 ;;
1113 esac
1114 done
1115
1116 source $OSM_DEVOPS/common/all_funcs
1117
1118 [ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
1119 [ -n "$SHOWOPTS" ] && dump_vars && exit 0
1120
1121 # Uninstall if "--uninstall"
1122 if [ -n "$UNINSTALL" ]; then
1123 if [ -n "$CHARMED" ]; then
1124 track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
1125 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $OSM_DOCKER_TAG "$@" || \
1126 FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
1127 else
1128 ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
1129 FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
1130 fi
1131 echo -e "\nDONE"
1132 exit 0
1133 fi
1134
1135 # Charmed installation
1136 if [ -n "$CHARMED" ]; then
1137 ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $OSM_DOCKER_TAG "$@" || \
1138 FATAL_TRACK charmed_install "charmed_install.sh failed"
1139 echo -e "\nDONE"
1140 exit 0
1141 fi
1142
1143 # Installation to Openstack
1144 if [ -n "$INSTALL_TO_OPENSTACK" ]; then
1145 install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
1146 echo -e "\nDONE"
1147 exit 0
1148 fi
1149
1150 # Community_installer
1151 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
1152 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1153 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1154
1155 # if develop, we force master
1156 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1157
1158 check_packages "git wget curl tar snapd"
1159
1160 sudo snap install jq
1161 if [ -z "$OSM_DEVOPS" ]; then
1162 if [ -n "$TEST_INSTALLER" ]; then
1163 echo -e "\nUsing local devops repo for OSM installation"
1164 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1165 else
1166 echo -e "\nCreating temporary dir for OSM installation"
1167 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1168 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1169
1170 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1171
1172 if [ -z "$COMMIT_ID" ]; then
1173 echo -e "\nGuessing the current stable release"
1174 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1175 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1176
1177 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1178 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1179 else
1180 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1181 fi
1182 git -C $OSM_DEVOPS checkout $COMMIT_ID
1183 fi
1184 fi
1185
1186 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1187 OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1188 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1189 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1190
1191 #Installation starts here
1192 wget -q -O- https://osm-download.etsi.org/ftp/osm-11.0-eleven/README.txt &> /dev/null
1193 export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
1194 install_osm
1195 echo -e "\nDONE"
1196 exit 0