Bug 1530: Limit IP addresses
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15
16 function usage(){
17 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
18 echo -e "usage: $0 [OPTIONS]"
19 echo -e "Install OSM from binaries or source code (by default, from binaries)"
20 echo -e " OPTIONS"
21 echo -e " -h / --help: print this help"
22 echo -e " -y: do not prompt for confirmation, assumes yes"
23 echo -e " -r <repo>: use specified repository name for osm packages"
24 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
25 echo -e " -u <repo base>: use specified repository url for osm packages"
26 echo -e " -k <repo key>: use specified repository public key url"
27 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
28 echo -e " -b master (main dev branch)"
29 echo -e " -b v2.0 (v2.0 branch)"
30 echo -e " -b tags/v1.1.0 (a specific tag)"
31 echo -e " ..."
32 echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
33 echo -e " -s <stack name> or <namespace>: user-defined stack name when installed using swarm, or namespace when installed using k8s (default: osm)"
34 echo -e " -H <VCA host>: use specific juju host controller IP"
35 echo -e " -S <VCA secret>: use VCA/juju secret key"
36 echo -e " -P <VCA pubkey>: use VCA/juju public key file"
37 echo -e " -C <VCA cacert>: use VCA/juju CA certificate file"
38 echo -e " -A <VCA apiproxy>: use VCA/juju API proxy"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, GRAFANA, NONE)"
41 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor)"
42 echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
43 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
44 echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
45 echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
46 echo -e " -D <devops path>: use local devops installation path"
47 echo -e " -w <work dir>: location to store runtime installation"
48 echo -e " -t <docker tag>: specify osm docker tag (default is latest)"
49 echo -e " -l: LXD cloud yaml file"
50 echo -e " -L: LXD credentials yaml file"
51 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
52 echo -e " -d <docker registry URL>: use docker registry URL instead of dockerhub"
53 echo -e " -p <docker proxy URL>: set docker proxy URL as part of docker CE configuration"
54 echo -e " -T <docker tag>: specify docker tag for the modules specified with option -m"
55 echo -e " --debug: debug mode"
56 echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
57 echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
58 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
59 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
60 echo -e " --nojuju: do not install juju, assumes it is already installed"
61 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
62 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
63 echo -e " --nohostclient: do not install the osmclient"
64 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
65 echo -e " --source: install OSM from source code using the latest stable tag"
66 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
67 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
68 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
69 echo -e " --volume: create a VM volume when installing to OpenStack"
70 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
71 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
72 echo -e " --showopts: print chosen options and exit (only for debugging)"
73 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
74 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
75 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
76 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
77 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
78 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
79 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
80 echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
81 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
82 echo -e " [--tag]: Docker image tag. (--charmed option)"
83 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
84 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
85 }
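# Illustrative invocations (not part of the script; release, repository and
# kubeconfig values are examples and should be adapted to your environment):
#   ./full_install_osm.sh -y                            # default install from binaries
#   ./full_install_osm.sh -y -R ReleaseTEN -r stable    # pin release and repository
#   ./full_install_osm.sh --charmed --k8s ~/.kube/config
#   ./full_install_osm.sh --uninstall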
86
87 # Takes a juju/accounts.yaml file and returns the password for a given
88 # controller. Written using only bash tools to avoid requiring
89 # additional packages.
90 function parse_juju_password {
91 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
92 password_file="${HOME}/.local/share/juju/accounts.yaml"
93 local controller_name=$1
94 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
95 sed -ne "s|^\($s\):|\1|" \
96 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
97 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
98 awk -F$fs -v controller=$controller_name '{
99 indent = length($1)/2;
100 vname[indent] = $2;
101 for (i in vname) {if (i > indent) {delete vname[i]}}
102 if (length($3) > 0) {
103 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
104 if (match(vn,controller) && match($2,"password")) {
105 printf("%s",$3);
106 }
107 }
108 }'
109 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
110 }
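# Illustrative sketch of the accounts.yaml layout this parser expects (the real
# file is written by juju under ~/.local/share/juju/ and may contain more keys):
#   controllers:
#     osm:
#       user: admin
#       password: 86f2a...
# With such a file, `parse_juju_password osm` prints the password value.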
111
112 function set_vca_variables() {
113 OSM_VCA_CLOUDNAME="lxd-cloud"
114 [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
115 if [ -z "$OSM_VCA_HOST" ]; then
116 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
117 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
118 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
119 fi
120 if [ -z "$OSM_VCA_SECRET" ]; then
121 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
122 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
123 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
124 fi
125 if [ -z "$OSM_VCA_PUBKEY" ]; then
126 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
127 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
128 fi
129 if [ -z "$OSM_VCA_CACERT" ]; then
130 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
131 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
132 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
133 fi
134 }
135
136 function generate_secret() {
137 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
138 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
139 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
140 }
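# Example (illustrative): SECRET=$(generate_secret) returns a random
# 32-character alphanumeric string; it is used below for database and
# service passwords.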
141
142 function check_packages() {
143 NEEDED_PACKAGES="$1"
144 echo -e "Checking required packages: ${NEEDED_PACKAGES}"
145 for PACKAGE in ${NEEDED_PACKAGES} ; do
146 dpkg -L ${PACKAGE} &>/dev/null
147 if [ $? -ne 0 ]; then
148 echo -e "Package ${PACKAGE} is not installed. Updating apt"
149 sudo apt-get update
150 sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
151 fi
152 done
153 echo -e "Required packages are present: ${NEEDED_PACKAGES}"
154 }
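# Example (illustrative): check_packages "git wget curl tar" installs whichever
# of those packages dpkg does not already report as installed.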
155
156 function install_lxd() {
157 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
158 # Apply sysctl production values for optimal performance
159 sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
160 sudo sysctl --system
161
162 # Install LXD snap
163 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
164 sudo snap install lxd --channel $LXD_VERSION/stable
165
166 # Configure LXD
167 sudo usermod -a -G lxd `whoami`
168 cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
169 sg lxd -c "lxd waitready"
170 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
171 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
172 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
173 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
174 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
175 sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
176 #sudo systemctl stop lxd-bridge
177 #sudo systemctl --system daemon-reload
178 #sudo systemctl enable lxd-bridge
179 #sudo systemctl start lxd-bridge
180 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
181 }
182
183 function ask_user(){
184 # Ask the user a question and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive.
185 # Params: $1 text to ask; $2 default action ('y' for yes, 'n' for no; anything else or empty means no default)
186 # Return: true (0) if the user answers 'yes'; false (1) if the user answers 'no'
187 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
188 read -e -p "$1" USER_CONFIRMATION
189 while true ; do
190 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
191 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
192 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
193 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
194 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
195 done
196 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
197 }
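# Example (illustrative):
#   ask_user "Do you want to proceed (Y/n)? " y && echo "Proceeding" || echo "Cancelled"
# An empty answer takes the default passed as the second argument ('y' here).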
198
199 function install_osmclient(){
200 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
201 CLIENT_RELEASE=${RELEASE#"-R "}
202 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
203 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
204 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
205 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
206 curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
207 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
208 sudo apt-get update
209 sudo apt-get install -y python3-pip
210 sudo -H LC_ALL=C python3 -m pip install -U pip
211 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
212 sudo apt-get install -y python3-osm-im python3-osmclient
213 if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
214 python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
215 fi
216 if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
217 sudo apt-get install -y libcurl4-openssl-dev libssl-dev
218 python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
219 fi
220 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
221 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
222 echo -e "\nOSM client installed"
223 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
224 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
225 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
226 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
227 else
228 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
229 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
230 echo " export OSM_HOSTNAME=<OSM_host>"
231 fi
232 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
233 return 0
234 }
235
236 function docker_login() {
237 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
238 echo "Docker login"
239 [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
240 sg docker -c "echo ${DOCKER_REGISTRY_PASSWORD} | docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
241 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
242 }
243
244 function generate_docker_images() {
245 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
246 echo "Pulling and generating docker images"
247 [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login
248
249 echo "Pulling docker images"
250
251 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
252 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
253 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
254 fi
255
256 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
257 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
258 fi
259
260 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
261 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
262 fi
263
264 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
265 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
266 fi
267
268 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
269 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
270 fi
271
272 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
273 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
274 fi
275
276 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
277 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
278 fi
279
280 if [ -n "$PULL_IMAGES" ]; then
281 echo "Pulling OSM docker images"
282 for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
283 module_lower=${module,,}
284 if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
285 continue
286 fi
287 module_tag="${OSM_DOCKER_TAG}"
288 if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
289 module_tag="${MODULE_DOCKER_TAG}"
290 fi
291 echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
292 sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
293 done
294 else
295 _build_from=$COMMIT_ID
296 [ -z "$_build_from" ] && _build_from="latest"
297 echo "OSM Docker images generated from $_build_from"
298 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
299 trap 'rm -rf "${LWTEMPDIR}"' EXIT
300 for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
301 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
302 module_lower=${module,,}
303 if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
304 continue
305 fi
306 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
307 git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
308 sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
309 fi
310 done
311 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
312 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
313 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
314 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
315 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
316 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
317 fi
318 echo "Finished generation of docker images"
319 fi
320
321 echo "Finished pulling and generating docker images"
322 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
323 }
324
325 function cmp_overwrite() {
326 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
327 file1="$1"
328 file2="$2"
329 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
330 if [ -f "${file2}" ]; then
331 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
332 else
333 cp -b ${file1} ${file2}
334 fi
335 fi
336 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
337 }
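# Example (illustrative): cmp_overwrite /tmp/lcm.env /etc/osm/docker/lcm.env
# copies the file only if the destination differs, asking before overwriting an
# existing file; 'cp -b' keeps a backup of the previous version with a '~' suffix.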
338
339 function generate_k8s_manifest_files() {
340 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
341 #Kubernetes resources
342 sudo cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
343 sudo rm -f $OSM_K8S_WORK_DIR/mongo.yaml
344 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
345 }
346
347 function generate_prometheus_grafana_files() {
348 #this only works with docker swarm
349 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
350 # Prometheus files
351 sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
352 sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
353
354 # Grafana files
355 sudo mkdir -p $OSM_DOCKER_WORK_DIR/grafana
356 sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
357 sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
358 sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
359 sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
360
361 # Prometheus Exporters files
362 sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
363 sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
364 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
365 }
366
367 function generate_docker_env_files() {
368 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
369 echo "Doing a backup of existing env files"
370 sudo cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
371 sudo cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
372 sudo cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
373 sudo cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
374 sudo cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
375 sudo cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
376 sudo cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
377 sudo cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
378
379 echo "Generating docker env files"
380 # LCM
381 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
382 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
383 fi
384
385 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
386 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
387 else
388 sudo sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
389 fi
390
391 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
392 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
393 else
394 sudo sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
395 fi
396
397 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
398 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
399 else
400 sudo sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
401 fi
402
403 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
404 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
405 else
406 sudo sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
407 fi
408
409 if [ -n "$OSM_VCA_APIPROXY" ]; then
410 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
411 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
412 else
413 sudo sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
414 fi
415 fi
416
417 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
418 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
419 fi
420
421 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
422 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
423 fi
424
425 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
426 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
427 else
428 sudo sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
429 fi
430
431 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
432 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
433 else
434 sudo sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
435 fi
436
437 # RO
438 MYSQL_ROOT_PASSWORD=$(generate_secret)
439 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
440 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro-db.env
441 fi
442 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
443 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro.env
444 fi
445 if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
446 echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ro.env
447 fi
448
449 # Keystone
450 KEYSTONE_DB_PASSWORD=$(generate_secret)
451 SERVICE_PASSWORD=$(generate_secret)
452 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
453 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone-db.env
454 fi
455 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
456 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone.env
457 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
458 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
459 fi
460
461 # NBI
462 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
463 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/nbi.env
464 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/nbi.env
465 fi
466
467 # MON
468 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
469 echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
470 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
471 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
472 fi
473
474 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
475 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
476 else
477 sudo sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
478 fi
479
480 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
481 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
482 else
483 sudo sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
484 fi
485
486 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
487 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
488 else
489 sudo sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
490 fi
491
492 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
493 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
494 else
495 sudo sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
496 fi
497
498
499 # POL
500 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
501 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | sudo tee -a $OSM_DOCKER_WORK_DIR/pol.env
502 fi
503
504 echo "Finished generation of docker env files"
505 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
506 }
507
508 #creates secrets from env files which will be used by containers
509 function kube_secrets(){
510 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
511 kubectl create ns $OSM_STACK_NAME
512 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
513 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
514 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
515 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
516 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
517 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
518 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
519 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
520 }
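# The created secrets can be verified afterwards (illustrative, assuming the
# default "osm" namespace):
#   kubectl -n osm get secrets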
521
522 #deploys osm pods and services
523 function deploy_osm_services() {
524 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
525 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
526 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
527 }
528
529 #deploy charmed services
530 function deploy_charmed_services() {
531 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
532 juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
533 juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
534 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
535 }
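# The MongoDB charm status can be checked afterwards (illustrative, assuming
# the default "osm" model name):
#   juju status -m osm mongodb-k8s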
536
537 function deploy_osm_pla_service() {
538 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
539 # corresponding to namespace_vol
540 sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
541 # corresponding to deploy_osm_services
542 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
543 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
544 }
545
546 function parse_yaml() {
547 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
548 TAG=$1
549 shift
550 services=$@
551 for module in $services; do
552 if [ "$module" == "pla" ]; then
553 if [ -n "$INSTALL_PLA" ]; then
554 echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
555 sudo sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
556 fi
557 else
558 echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
559 sudo sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
560 fi
561 done
562 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
563 }
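# Illustrative effect of the substitution above, with TAG=11.0.0, the default
# DOCKER_USER (opensourcemano) and no private registry: a manifest line
#   image: opensourcemano/lcm:10.0.0
# becomes
#   image: opensourcemano/lcm:11.0.0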
564
565 function update_manifest_files() {
566 osm_services="nbi lcm ro pol mon ng-ui keystone pla prometheus"
567 list_of_services=""
568 for module in $osm_services; do
569 module_upper="${module^^}"
570 if ! echo $TO_REBUILD | grep -q $module_upper ; then
571 list_of_services="$list_of_services $module"
572 fi
573 done
574 if [ ! "$OSM_DOCKER_TAG" == "11" ]; then
575 parse_yaml $OSM_DOCKER_TAG $list_of_services
576 fi
577 if [ -n "$MODULE_DOCKER_TAG" ] && [ -n "$TO_REBUILD" ]; then
578 list_of_services_to_rebuild="${TO_REBUILD,,}" && parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
579 fi
580 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
581 }
582
583 function namespace_vol() {
584 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
585 osm_services="nbi lcm ro pol mon kafka mysql prometheus"
586 for osm in $osm_services; do
587 sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
588 done
589 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
590 }
591
592 function add_local_k8scluster() {
593 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
594 /usr/bin/osm --all-projects vim-create \
595 --name _system-osm-vim \
596 --account_type dummy \
597 --auth_url http://dummy \
598 --user osm --password osm --tenant osm \
599 --description "dummy" \
600 --config '{management_network_name: mgmt}'
601 /usr/bin/osm --all-projects k8scluster-add \
602 --creds ${HOME}/.kube/config \
603 --vim _system-osm-vim \
604 --k8s-nets '{"net1": null}' \
605 --version '1.15' \
606 --description "OSM Internal Cluster" \
607 _system-osm-k8s
608 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
609 }
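# The registration can be verified afterwards with the osmclient (illustrative):
#   osm k8scluster-list
#   osm vim-list
# which should show _system-osm-k8s and _system-osm-vim respectively.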
610
611 function configure_apt_proxy() {
612 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
613 OSM_APT_PROXY=$1
614 OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
615 echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
616 if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
617 sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
618 Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
619 EOF"
620 else
621 sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
622 fi
623 sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
624 track prereq apt_proxy_configured_ok
625 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
626 }
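# Resulting file content (illustrative, assuming an apt proxy reachable at
# http://10.0.0.1:3142):
#   Acquire::http { Proxy "http://10.0.0.1:3142"; }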
627
628 function install_osm() {
629 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
630 track checks checkingroot_ok
631 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
632 track checks noroot_ok
633
634 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
635 1. Install and configure LXD
636 2. Install juju
637 3. Install docker CE
638 4. Disable swap space
639 5. Install and initialize Kubernetes
640 as pre-requirements.
641 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
642 track checks proceed_ok
643
644 echo "Installing OSM"
645
646 echo "Determining IP address of the interface with the default route"
647 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
648 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
649 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
650 DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
651 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
652
653 # configure apt proxy
654 [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL
655
656 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
657 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
658 check_packages "snapd"
659 install_lxd
660 fi
661
662 track prereq prereqok_ok
663
664 if [ ! -n "$INSTALL_NODOCKER" ]; then
665 DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
666 [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
667 $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed"
668 fi
669
670 track docker_ce docker_ce_ok
671
672 echo "Creating folders for installation"
673 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR
674 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
675 sudo cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
676
677 $OSM_DEVOPS/installers/install_kubeadm_cluster.sh -i ${DEFAULT_IP} -d ${OSM_DOCKER_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
678 FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed"
679 track k8scluster k8scluster_ok
680
681 JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_STACK_NAME} -i ${DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}"
682 [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
683 [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
684 [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
685 [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
686 $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
687 set_vca_variables
688 track juju juju_ok
689
690 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
691 OSM_DATABASE_COMMONKEY=$(generate_secret)
692 [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
693 fi
694
695 # Deploy OSM services
696 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
697 track docker_images docker_images_ok
698
699 generate_k8s_manifest_files
700 track osm_files manifest_files_ok
701 generate_docker_env_files
702 track osm_files env_files_ok
703
704 deploy_charmed_services
705 track deploy_osm deploy_charmed_services_ok
706 kube_secrets
707 track deploy_osm kube_secrets_ok
708 update_manifest_files
709 track deploy_osm update_manifest_files_ok
710 namespace_vol
711 track deploy_osm namespace_vol_ok
712 deploy_osm_services
713 track deploy_osm deploy_osm_services_k8s_ok
714 if [ -n "$INSTALL_PLA" ]; then
715 # optional PLA install
716 deploy_osm_pla_service
717 track deploy_osm deploy_osm_pla_ok
718 fi
719 if [ -n "$INSTALL_K8S_MONITOR" ]; then
720 # install OSM MONITORING
721 install_k8s_monitoring
722 track deploy_osm install_k8s_monitoring_ok
723 fi
724
725 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
726 track osmclient osmclient_ok
727
728 echo -e "Checking OSM health state..."
729 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
730 (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
731 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
732 track healthchecks osm_unhealthy didnotconverge)
733 track healthchecks after_healthcheck_ok
734
735 add_local_k8scluster
736 track final_ops add_local_k8scluster_ok
737
738 wget -q -O- https://osm-download.etsi.org/ftp/osm-11.0-eleven/README2.txt &> /dev/null
739 track end
740 sudo find /etc/osm
741 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
742 return 0
743 }
744
745 function install_to_openstack() {
746 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
747
748 if [ -z "$2" ]; then
749 FATAL "OpenStack installer requires a valid external network name"
750 fi
751
752 # Install Pip for Python3
753 sudo apt install -y python3-pip python3-venv
754 sudo -H LC_ALL=C python3 -m pip install -U pip
755
756 # Create a venv to avoid conflicts with the host installation
757 python3 -m venv $OPENSTACK_PYTHON_VENV
758
759 source $OPENSTACK_PYTHON_VENV/bin/activate
760
761 # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
762 python -m pip install -U wheel
763 python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
764
765 # Install the Openstack cloud module (ansible>=2.10)
766 ansible-galaxy collection install openstack.cloud
767
768 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
769
770 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
771
772 ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
773
774 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
775 ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
776 fi
777
778 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
779 ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
780 fi
781
782 # Execute the Ansible playbook based on openrc or clouds.yaml
783 if [ -e "$1" ]; then
784 . $1
785 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
786 $OSM_DEVOPS/installers/openstack/site.yml
787 else
788 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
789 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
790 fi
791
792 # Exit from venv
793 deactivate
794
795 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
796 return 0
797 }
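# Illustrative invocations of the OpenStack installation path (network, file
# and cloud names are placeholders):
#   ./full_install_osm.sh -O ~/openrc.sh -N public-net
#   ./full_install_osm.sh -O mycloud -N public-net --volume    # entry in clouds.yaml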
798
799 function install_k8s_monitoring() {
800 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
801 # install OSM monitoring
802 sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh
803 sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
804 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
805 }
806
807 function dump_vars(){
808 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
809 echo "APT_PROXY_URL=$APT_PROXY_URL"
810 echo "DEVELOP=$DEVELOP"
811 echo "DEBUG_INSTALL=$DEBUG_INSTALL"
812 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
813 echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
814 echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
815 echo "DOCKER_USER=$DOCKER_USER"
816 echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
817 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
818 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
819 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
820 echo "INSTALL_LXD=$INSTALL_LXD"
821 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
822 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
823 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
824 echo "INSTALL_ONLY=$INSTALL_ONLY"
825 echo "INSTALL_PLA=$INSTALL_PLA"
826 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
827 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
828 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
829 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
830 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
831 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
832 echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
833 echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
834 echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
835 echo "OSM_DEVOPS=$OSM_DEVOPS"
836 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
837 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
838 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
839 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
840 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
841 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
842 echo "PULL_IMAGES=$PULL_IMAGES"
843 echo "RECONFIGURE=$RECONFIGURE"
844 echo "RELEASE=$RELEASE"
845 echo "REPOSITORY=$REPOSITORY"
846 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
847 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
848 echo "SHOWOPTS=$SHOWOPTS"
849 echo "TEST_INSTALLER=$TEST_INSTALLER"
850 echo "TO_REBUILD=$TO_REBUILD"
851 echo "UNINSTALL=$UNINSTALL"
852 echo "UPDATE=$UPDATE"
853 echo "Install from specific refspec (-b): $COMMIT_ID"
854 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
855 }
856
857 function parse_docker_registry_url() {
858 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
859 DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
860 DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
861 DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
862 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
863 }
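# Example split (illustrative): with -d "user:pass@myregistry.local:5000/"
#   DOCKER_REGISTRY_USER=user
#   DOCKER_REGISTRY_PASSWORD=pass
#   DOCKER_REGISTRY_URL=myregistry.local:5000/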
864
865 LXD_VERSION=4.0
866 JUJU_VERSION=2.9
867 JUJU_AGENT_VERSION=2.9.17
868 UNINSTALL=""
869 DEVELOP=""
870 UPDATE=""
871 RECONFIGURE=""
872 TEST_INSTALLER=""
873 INSTALL_LXD=""
874 SHOWOPTS=""
875 COMMIT_ID=""
876 ASSUME_YES=""
877 APT_PROXY_URL=""
878 INSTALL_FROM_SOURCE=""
879 DEBUG_INSTALL=""
880 RELEASE="ReleaseTEN"
881 REPOSITORY="stable"
882 INSTALL_VIMEMU=""
883 INSTALL_PLA=""
884 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
885 LXD_REPOSITORY_PATH=""
886 INSTALL_LIGHTWEIGHT="y"
887 INSTALL_TO_OPENSTACK=""
888 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
889 OPENSTACK_PUBLIC_NET_NAME=""
890 OPENSTACK_ATTACH_VOLUME="false"
891 OPENSTACK_SSH_KEY_FILE=""
892 OPENSTACK_USERDATA_FILE=""
893 OPENSTACK_VM_NAME="server-osm"
894 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
895 INSTALL_ONLY=""
896 TO_REBUILD=""
897 INSTALL_NOLXD=""
898 INSTALL_NODOCKER=""
899 INSTALL_NOJUJU=""
900 INSTALL_K8S_MONITOR=""
901 INSTALL_NOHOSTCLIENT=""
902 INSTALL_CACHELXDIMAGES=""
903 OSM_DEVOPS=
904 OSM_VCA_HOST=
905 OSM_VCA_SECRET=
906 OSM_VCA_PUBKEY=
907 OSM_VCA_CLOUDNAME="localhost"
908 OSM_VCA_K8S_CLOUDNAME="k8scloud"
909 OSM_STACK_NAME=osm
910 NO_HOST_PORTS=""
911 DOCKER_NOBUILD=""
912 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
913 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
914 OSM_WORK_DIR="/etc/osm"
915 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
916 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
917 OSM_HOST_VOL="/var/lib/osm"
918 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
919 OSM_DOCKER_TAG=latest
920 DOCKER_USER=opensourcemano
921 PULL_IMAGES="y"
922 KAFKA_TAG=2.11-1.0.2
923 PROMETHEUS_TAG=v2.4.3
924 GRAFANA_TAG=latest
925 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
926 PROMETHEUS_CADVISOR_TAG=latest
927 KEYSTONEDB_TAG=10
928 OSM_DATABASE_COMMONKEY=
929 ELASTIC_VERSION=6.4.2
930 ELASTIC_CURATOR_VERSION=5.5.4
931 POD_NETWORK_CIDR=10.244.0.0/16
932 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
933 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
934 DOCKER_REGISTRY_URL=
935 DOCKER_PROXY_URL=
936 MODULE_DOCKER_TAG=
937 OSM_INSTALLATION_TYPE="Default"
938
939 while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-:hy" o; do
940 case "${o}" in
941 a)
942 APT_PROXY_URL=${OPTARG}
943 ;;
944 b)
945 COMMIT_ID=${OPTARG}
946 PULL_IMAGES=""
947 ;;
948 r)
949 REPOSITORY="${OPTARG}"
950 REPO_ARGS+=(-r "$REPOSITORY")
951 ;;
952 k)
953 REPOSITORY_KEY="${OPTARG}"
954 REPO_ARGS+=(-k "$REPOSITORY_KEY")
955 ;;
956 u)
957 REPOSITORY_BASE="${OPTARG}"
958 REPO_ARGS+=(-u "$REPOSITORY_BASE")
959 ;;
960 R)
961 RELEASE="${OPTARG}"
962 REPO_ARGS+=(-R "$RELEASE")
963 ;;
964 D)
965 OSM_DEVOPS="${OPTARG}"
966 ;;
967 o)
968 INSTALL_ONLY="y"
969 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
970 ;;
971 O)
972 INSTALL_TO_OPENSTACK="y"
973 if [ -n "${OPTARG}" ]; then
974 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
975 else
976 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
977 usage && exit 1
978 fi
979 ;;
980 f)
981 OPENSTACK_SSH_KEY_FILE="${OPTARG}"
982 ;;
983 F)
984 OPENSTACK_USERDATA_FILE="${OPTARG}"
985 ;;
986 N)
987 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
988 ;;
989 m)
990 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
991 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
992 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
993 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
994 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
995 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
996 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
997 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
998 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
999 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1000 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1001 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1002 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1003 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1004 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1005 ;;
1006 H)
1007 OSM_VCA_HOST="${OPTARG}"
1008 ;;
1009 S)
1010 OSM_VCA_SECRET="${OPTARG}"
1011 ;;
1012 s)
1013 OSM_STACK_NAME="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1014 ;;
1015 t)
1016 OSM_DOCKER_TAG="${OPTARG}"
1017 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
1018 ;;
1019 U)
1020 DOCKER_USER="${OPTARG}"
1021 ;;
1022 P)
1023 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1024 ;;
1025 A)
1026 OSM_VCA_APIPROXY="${OPTARG}"
1027 ;;
1028 l)
1029 LXD_CLOUD_FILE="${OPTARG}"
1030 ;;
1031 L)
1032 LXD_CRED_FILE="${OPTARG}"
1033 ;;
1034 K)
1035 CONTROLLER_NAME="${OPTARG}"
1036 ;;
1037 d)
1038 DOCKER_REGISTRY_URL="${OPTARG}"
1039 ;;
1040 p)
1041 DOCKER_PROXY_URL="${OPTARG}"
1042 ;;
1043 T)
1044 MODULE_DOCKER_TAG="${OPTARG}"
1045 ;;
1046 -)
1047 [ "${OPTARG}" == "help" ] && usage && exit 0
1048 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1049 [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
1050 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1051 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1052 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1053 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1054 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1055 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1056 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1057 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1058 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1059 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1060 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="--nojuju" && continue
1061 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1062 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1063 [ "${OPTARG}" == "pullimages" ] && continue
1064 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1065 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
1066 [ "${OPTARG}" == "bundle" ] && continue
1067 [ "${OPTARG}" == "k8s" ] && continue
1068 [ "${OPTARG}" == "lxd" ] && continue
1069 [ "${OPTARG}" == "lxd-cred" ] && continue
1070 [ "${OPTARG}" == "microstack" ] && continue
1071 [ "${OPTARG}" == "overlay" ] && continue
1072 [ "${OPTARG}" == "only-vca" ] && continue
1073 [ "${OPTARG}" == "vca" ] && continue
1074 [ "${OPTARG}" == "ha" ] && continue
1075 [ "${OPTARG}" == "tag" ] && continue
1076 [ "${OPTARG}" == "registry" ] && continue
1077 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1078 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
1079 [ "${OPTARG}" == "nocachelxdimages" ] && continue
1080 [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
1081 echo -e "Invalid option: '--$OPTARG'\n" >&2
1082 usage && exit 1
1083 ;;
1084 :)
1085 echo "Option -$OPTARG requires an argument" >&2
1086 usage && exit 1
1087 ;;
1088 \?)
1089 echo -e "Invalid option: '-$OPTARG'\n" >&2
1090 usage && exit 1
1091 ;;
1092 h)
1093 usage && exit 0
1094 ;;
1095 y)
1096 ASSUME_YES="y"
1097 ;;
1098 *)
1099 usage && exit 1
1100 ;;
1101 esac
1102 done
1103
1104 source $OSM_DEVOPS/common/all_funcs
1105
1106 [ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
1107 [ -n "$SHOWOPTS" ] && dump_vars && exit 0
1108
1109 # Uninstall if "--uninstall"
1110 if [ -n "$UNINSTALL" ]; then
1111 if [ -n "$CHARMED" ]; then
1112 track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
1113 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $OSM_DOCKER_TAG "$@" || \
1114 FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
1115 else
1116 ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
1117 FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
1118 fi
1119 echo -e "\nDONE"
1120 exit 0
1121 fi
1122
1123 # Charmed installation
1124 if [ -n "$CHARMED" ]; then
1125 ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $OSM_DOCKER_TAG "$@" || \
1126 FATAL_TRACK charmed_install "charmed_install.sh failed"
1127 echo -e "\nDONE"
1128 exit 0
1129 fi
1130
1131 # Installation to Openstack
1132 if [ -n "$INSTALL_TO_OPENSTACK" ]; then
1133 install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
1134 echo -e "\nDONE"
1135 exit 0
1136 fi
1137
1138 # Community installer
1139 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
1140 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1141 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1142
1143 # if develop, we force master
1144 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1145
1146 check_packages "git wget curl tar"
1147
1148 sudo snap install jq
1149 if [ -z "$OSM_DEVOPS" ]; then
1150 if [ -n "$TEST_INSTALLER" ]; then
1151 echo -e "\nUsing local devops repo for OSM installation"
1152 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1153 else
1154 echo -e "\nCreating temporary dir for OSM installation"
1155 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1156 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1157
1158 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1159
1160 if [ -z "$COMMIT_ID" ]; then
1161 echo -e "\nGuessing the current stable release"
1162 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1163 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1164
1165 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1166 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1167 else
1168 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1169 fi
1170 git -C $OSM_DEVOPS checkout $COMMIT_ID
1171 fi
1172 fi
1173
1174 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1175 OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1176 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1177 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1178
1179 #Installation starts here
1180 wget -q -O- https://osm-download.etsi.org/ftp/osm-11.0-eleven/README.txt &> /dev/null
1181 export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
1182
1183 track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
1184
1185 install_osm
1186 echo -e "\nDONE"
1187 exit 0
1188