1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
28 echo -e " ..."
29     echo -e "     -c <orchestrator>: deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30     echo -e "     -s <stack name> or <namespace>: user-defined stack name when installing with swarm, or namespace when installing with k8s (default: osm)"
31 echo -e " -H <VCA host> use specific juju host controller IP"
32 echo -e " -S <VCA secret> use VCA/juju secret key"
33 echo -e " -P <VCA pubkey> use VCA/juju public key file"
34 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
35 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
36 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
37 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
38 echo -e " --pm_stack: additionally deploy a Prometheus+Grafana stack for performance monitoring (PM)"
39 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, KEYSTONE-DB, NONE)"
40 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, pm_stack)"
41 echo -e " -D <devops path> use local devops installation path"
42 echo -e " -w <work dir> Location to store runtime installation"
43 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
44     echo -e "     --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
45 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
46     echo -e "     --nojuju:       do not install juju, assumes it is already installed"
47     echo -e "     --nodockerbuild: do not build docker images (use existing locally cached images)"
48 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
49 echo -e " --nohostclient: do not install the osmclient"
50 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
51 echo -e " --source: install OSM from source code using the latest stable tag"
52 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
53 echo -e " --soui: install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)"
54 echo -e " --lxdimages: (only for Rel THREE with --soui) download lxd images from OSM repository instead of creating them from scratch"
55 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
56 echo -e " -l <lxd_repo>: (only for Rel THREE with --soui) use specified repository url for lxd images"
57 echo -e " -p <path>: (only for Rel THREE with --soui) use specified repository path for lxd images"
58 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
59 echo -e " --nat: (only for Rel THREE with --soui) install only NAT rules"
60 echo -e " --noconfigure: (only for Rel THREE with --soui) DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules"
61 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
62 echo -e " --showopts: print chosen options and exit (only for debugging)"
63 echo -e " -y: do not prompt for confirmation, assumes yes"
64 echo -e " -h / --help: print this help"
65 }
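# Illustrative invocations (flag values here are only examples; see the usage
# text above for the full option list):
#   ./full_install_osm.sh                    # install OSM from binaries with default options
#   ./full_install_osm.sh -c k8s -s myosm    # deploy OSM services over Kubernetes in namespace "myosm"
#   ./full_install_osm.sh --uninstall        # remove a previous OSM installation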
66
67 #Uninstall OSM: remove containers
68 function uninstall(){
69 echo -e "\nUninstalling OSM"
70 if [ $RC_CLONE ] || [ -n "$TEST_INSTALLER" ]; then
71 $OSM_DEVOPS/jenkins/host/clean_container RO
72 $OSM_DEVOPS/jenkins/host/clean_container VCA
73 $OSM_DEVOPS/jenkins/host/clean_container MON
74 $OSM_DEVOPS/jenkins/host/clean_container SO
75 #$OSM_DEVOPS/jenkins/host/clean_container UI
76 else
77 lxc stop RO && lxc delete RO
78 lxc stop VCA && lxc delete VCA
79 lxc stop MON && lxc delete MON
80 lxc stop SO-ub && lxc delete SO-ub
81 fi
82 echo -e "\nDeleting imported lxd images if they exist"
83 lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
84 lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
85 lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
86 return 0
87 }
88
89 # Takes a juju/accounts.yaml file and returns the password specific
90 # to a controller. I wrote this using only bash tools to avoid
91 # depending on additional packages
92 function parse_juju_password {
93 password_file="${HOME}/.local/share/juju/accounts.yaml"
94 local controller_name=$1
95 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
96 sed -ne "s|^\($s\):|\1|" \
97 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
98 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
99 awk -F$fs -v controller=$controller_name '{
100 indent = length($1)/2;
101 vname[indent] = $2;
102 for (i in vname) {if (i > indent) {delete vname[i]}}
103 if (length($3) > 0) {
104 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
105 if (match(vn,controller) && match($2,"password")) {
106 printf("%s",$3);
107 }
108 }
109 }'
110 }
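# Illustrative usage (this is how install_lightweight calls it further below,
# with the default stack/controller name "osm"):
#   OSM_VCA_SECRET=$(parse_juju_password osm)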
111
112 function generate_secret() {
113 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
114 }
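# Illustrative usage: generate_secret prints a random 32-character alphanumeric
# string; it is used below for the Juju, MySQL and Keystone passwords, e.g.:
#   MYSQL_ROOT_PASSWORD=$(generate_secret)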
115
116 function remove_volumes() {
117 if [ -n "$KUBERNETES" ]; then
118 k8_volume=$1
119 echo "Removing ${k8_volume}"
120 $WORKDIR_SUDO rm -rf ${k8_volume}
121 else
122 stack=$1
123 volumes="mongo_db mon_db osm_packages ro_db"
124 for volume in $volumes; do
125 sg docker -c "docker volume rm ${stack}_${volume}"
126 done
127 fi
128 }
129
130 function remove_network() {
131 stack=$1
132 sg docker -c "docker network rm net${stack}"
133 }
134
135 function remove_stack() {
136 stack=$1
137 if sg docker -c "docker stack ps ${stack}" ; then
138 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
139 COUNTER=0
140 result=1
141 while [ ${COUNTER} -lt 30 ]; do
142 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
143 #echo "Dockers running: $result"
144 if [ "${result}" == "0" ]; then
145 break
146 fi
147 let COUNTER=COUNTER+1
148 sleep 1
149 done
150 if [ "${result}" == "0" ]; then
151 echo "All dockers of the stack ${stack} were removed"
152 else
153 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
154 fi
155 sleep 5
156 fi
157 }
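# Illustrative usage (as done by uninstall_lightweight below): remove the ELK
# stack and wait until all its dockers are gone:
#   remove_stack osm_elk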
158
159 #removes osm deployments and services
160 function remove_k8s_namespace() {
161 kubectl delete ns $1
162 }
163
164 #Uninstall lightweight OSM: remove dockers
165 function uninstall_lightweight() {
166 if [ -n "$INSTALL_ONLY" ]; then
167 if [ -n "$INSTALL_ELK" ]; then
168 echo -e "\nUninstalling OSM ELK stack"
169 remove_stack osm_elk
170 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
171 fi
172 if [ -n "$INSTALL_PERFMON" ]; then
173 echo -e "\nUninstalling OSM Performance Monitoring stack"
174 remove_stack osm_metrics
175 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_metrics
176 fi
177 else
178 echo -e "\nUninstalling OSM"
179 if [ -n "$KUBERNETES" ]; then
180 remove_k8s_namespace $OSM_STACK_NAME
181 else
182 remove_stack $OSM_STACK_NAME
183 remove_stack osm_elk
184 remove_stack osm_metrics
185 fi
186 echo "Now osm docker images and volumes will be deleted"
187 newgrp docker << EONG
188 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
189 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
190 docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
191 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
192 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
193 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
194 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
195 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
196 EONG
197
198 if [ -n "$KUBERNETES" ]; then
199 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
200 remove_volumes $OSM_NAMESPACE_VOL
201 else
202 remove_volumes $OSM_STACK_NAME
203 remove_network $OSM_STACK_NAME
204 fi
205 echo "Removing $OSM_DOCKER_WORK_DIR"
206 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
207 sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
208 fi
209 echo "Some docker images will be kept in case they are used by other docker stacks"
210 echo "To remove them, just run 'docker image prune' in a terminal"
211 return 0
212 }
213
214 #Configure NAT rules, based on the current IP addresses of containers
215 function nat(){
216 echo -e "\nChecking required packages: iptables-persistent"
217 dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
218 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent
219 echo -e "\nConfiguring NAT rules"
220 echo -e " Required root privileges"
221 sudo $OSM_DEVOPS/installers/nat_osm
222 }
223
224 function FATAL(){
225 echo "FATAL error: Cannot install OSM due to \"$1\""
226 exit 1
227 }
228
229 #Update RO, SO and UI:
230 function update(){
231 echo -e "\nUpdating components"
232
233 echo -e " Updating RO"
234 CONTAINER="RO"
235 MDG="RO"
236 INSTALL_FOLDER="/opt/openmano"
237 echo -e " Fetching the repo"
238 lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
239 BRANCH=""
240 BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
241 [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
242 CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
243 CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
244 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
245 # COMMIT_ID either was previously set with -b option, or is an empty string
246 CHECKOUT_ID=$COMMIT_ID
247 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
248 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
249 if [[ $CHECKOUT_ID == "tags/"* ]]; then
250 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
251 else
252 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
253 fi
254 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
255 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
256 echo " Nothing to be done."
257 else
258 echo " Update required."
259 lxc exec $CONTAINER -- service osm-ro stop
260 lxc exec $CONTAINER -- git -C /opt/openmano stash
261 lxc exec $CONTAINER -- git -C /opt/openmano pull --rebase
262 lxc exec $CONTAINER -- git -C /opt/openmano checkout $CHECKOUT_ID
263 lxc exec $CONTAINER -- git -C /opt/openmano stash pop
264 lxc exec $CONTAINER -- /opt/openmano/database_utils/migrate_mano_db.sh
265 lxc exec $CONTAINER -- service osm-ro start
266 fi
267 echo
268
269 echo -e " Updating SO and UI"
270 CONTAINER="SO-ub"
271 MDG="SO"
272 INSTALL_FOLDER="" # To be filled in
273 echo -e " Fetching the repo"
274 lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
275 BRANCH=""
276 BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
277 [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
278 CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
279 CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
280 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
281 # COMMIT_ID either was previously set with -b option, or is an empty string
282 CHECKOUT_ID=$COMMIT_ID
283 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
284 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
285 if [[ $CHECKOUT_ID == "tags/"* ]]; then
286 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
287 else
288 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
289 fi
290 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
291 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
292 echo " Nothing to be done."
293 else
294 echo " Update required."
295 # Instructions to be added
296 # lxc exec SO-ub -- ...
297 fi
298 echo
299 echo -e "Updating MON Container"
300 CONTAINER="MON"
301 MDG="MON"
302 INSTALL_FOLDER="/root/MON"
303 echo -e " Fetching the repo"
304 lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
305 BRANCH=""
306 BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
307 [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
308 CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
309 CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
310 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
311 # COMMIT_ID either was previously set with -b option, or is an empty string
312 CHECKOUT_ID=$COMMIT_ID
313 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
314 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
315 if [[ $CHECKOUT_ID == "tags/"* ]]; then
316 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
317 else
318 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
319 fi
320 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
321 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
322 echo " Nothing to be done."
323 else
324 echo " Update required."
325 fi
326 echo
327 }
328
329 function so_is_up() {
330 if [ -n "$1" ]; then
331 SO_IP=$1
332 else
333 SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'`
334 fi
335 time=0
336 step=5
337 timelength=300
338 while [ $time -le $timelength ]
339 do
340 if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \
341 -H 'accept: application/vnd.yang.data+json' \
342 -H 'authorization: Basic YWRtaW46YWRtaW4=' \
343 -H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]]
344 then
345 echo "RW.Restconf running....SO is up"
346 return 0
347 fi
348
349 sleep $step
350 echo -n "."
351 time=$((time+step))
352 done
353
354     FATAL "OSM failed to start up. SO failed to start up"
355 }
356
357 function vca_is_up() {
358 if [[ `lxc exec VCA -- juju status | grep "osm" | wc -l` -eq 1 ]]; then
359 echo "VCA is up and running"
360 return 0
361 fi
362
363     FATAL "OSM failed to start up. VCA failed to start up"
364 }
365
366 function mon_is_up() {
367 if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
368 echo "MON is up and running"
369 return 0
370 fi
371
372     FATAL "OSM failed to start up. MON failed to start up"
373 }
374
375 function ro_is_up() {
376 if [ -n "$1" ]; then
377 RO_IP=$1
378 else
379 RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'`
380 fi
381 time=0
382 step=2
383 timelength=20
384 while [ $time -le $timelength ]; do
385 if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
386 echo "RO is up and running"
387 return 0
388 fi
389 sleep $step
390 echo -n "."
391 time=$((time+step))
392 done
393
394     FATAL "OSM failed to start up. RO failed to start up"
395 }
396
397
398 function configure_RO(){
399 . $OSM_DEVOPS/installers/export_ips
400 echo -e " Configuring RO"
401 lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg
402 lxc exec RO -- service osm-ro restart
403
404 ro_is_up
405
406 lxc exec RO -- openmano tenant-delete -f osm >/dev/null
407 lxc exec RO -- openmano tenant-create osm > /dev/null
408 lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc
409 lxc exec RO -- sed -i '$ i export OPENMANO_TENANT=osm' .bashrc
410 lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc'
411 }
412
413 function configure_VCA(){
414 echo -e " Configuring VCA"
415 JUJU_PASSWD=$(generate_secret)
416 echo -e "$JUJU_PASSWD\n$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password
417 }
418
419 function configure_SOUI(){
420 . $OSM_DEVOPS/installers/export_ips
421 JUJU_CONTROLLER_IP=`lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}'`
422 RO_TENANT_ID=`lxc exec RO -- openmano tenant-list osm |awk '{print $1}'`
423
424 echo -e " Configuring MON"
425 #Information to be added about SO socket for logging
426
427 echo -e " Configuring SO"
428 sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP
429 sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP
430 sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local
431 sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local
432 # make journaling persistent
433 lxc exec SO-ub -- mkdir -p /var/log/journal
434 lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal
435 lxc exec SO-ub -- systemctl restart systemd-journald
436
437 echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad
438
439 lxc exec SO-ub -- systemctl restart launchpad
440
441 so_is_up $SO_CONTAINER_IP
442
443 #delete existing config agent (could be there on reconfigure)
444 curl -k --request DELETE \
445 --url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \
446 --header 'accept: application/vnd.yang.data+json' \
447 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
448 --header 'cache-control: no-cache' \
449 --header 'content-type: application/vnd.yang.data+json' &> /dev/null
450
451 result=$(curl -k --request POST \
452 --url https://$SO_CONTAINER_IP:8008/api/config/config-agent \
453 --header 'accept: application/vnd.yang.data+json' \
454 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
455 --header 'cache-control: no-cache' \
456 --header 'content-type: application/vnd.yang.data+json' \
457 --data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}')
458 [[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result"
459
460 #R1/R2 config line
461 #result=$(curl -k --request PUT \
462 # --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \
463 # --header 'accept: application/vnd.yang.data+json' \
464 # --header 'authorization: Basic YWRtaW46YWRtaW4=' \
465 # --header 'cache-control: no-cache' \
466 # --header 'content-type: application/vnd.yang.data+json' \
467 # --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }')
468
469 result=$(curl -k --request PUT \
470 --url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \
471 --header 'accept: application/vnd.yang.data+json' \
472 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
473 --header 'cache-control: no-cache' \
474 --header 'content-type: application/vnd.yang.data+json' \
475 --data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}')
476 [[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result"
477
478 result=$(curl -k --request PATCH \
479 --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \
480 --header 'accept: application/vnd.yang.data+json' \
481 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
482 --header 'cache-control: no-cache' \
483 --header 'content-type: application/vnd.yang.data+json' \
484 --data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }')
485 [[ $result =~ .*success.* ]] || FATAL "Failed redirect-uri configuration: $result"
486
487 result=$(curl -k --request PATCH \
488 --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \
489 --header 'accept: application/vnd.yang.data+json' \
490 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
491 --header 'cache-control: no-cache' \
492 --header 'content-type: application/vnd.yang.data+json' \
493 --data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }')
494 [[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result"
495
496 lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg <<EOF
497 auto lo:1
498 iface lo:1 inet static
499 address $DEFAULT_IP
500 netmask 255.255.255.255
501 EOF
502 lxc exec SO-ub ifup lo:1
503 }
504
505 #Configure RO, VCA, and SO with the initial configuration:
506 # RO -> tenant:osm, logs to be sent to SO
507 # VCA -> juju-password
508 # SO -> route to Juju Controller, add RO account, add VCA account
509 function configure(){
510 #Configure components
511 echo -e "\nConfiguring components"
512 configure_RO
513 configure_VCA
514 configure_SOUI
515 }
516
517 function install_lxd() {
518 sudo apt-get update
519 sudo apt-get install -y lxd
520 newgrp lxd
521 lxd init --auto
522 lxd waitready
523 lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
524 DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
525 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
526 lxc profile device set default eth0 mtu $DEFAULT_MTU
527 #sudo systemctl stop lxd-bridge
528 #sudo systemctl --system daemon-reload
529 #sudo systemctl enable lxd-bridge
530 #sudo systemctl start lxd-bridge
531 }
532
533 function ask_user(){
534     # Ask the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive.
535     # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no, other or empty for no default
536     # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
537 read -e -p "$1" USER_CONFIRMATION
538 while true ; do
539 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
540 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
541 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
542 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
543 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
544 done
545 }
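# Illustrative usage, defaulting to "yes" when the user just presses Enter:
#   ask_user "Do you want to proceed (Y/n)? " y || exit 1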
546
547 function launch_container_from_lxd(){
548 export OSM_MDG=$1
549 OSM_load_config
550 export OSM_BASE_IMAGE=$2
551 if ! container_exists $OSM_BUILD_CONTAINER; then
552 CONTAINER_OPTS=""
553 [[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true"
554 [[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true"
555 create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS
556 wait_container_up $OSM_BUILD_CONTAINER
557 fi
558 }
559
560 function install_osmclient(){
561 CLIENT_RELEASE=${RELEASE#"-R "}
562 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
563 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
564 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
565 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
566 curl $key_location | sudo apt-key add -
567 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
568 sudo apt-get update
569 sudo apt-get install -y python3-pip
570 sudo -H LC_ALL=C python3 -m pip install -U pip
571 sudo -H LC_ALL=C python3 -m pip install -U python-magic
572 sudo apt-get install -y python3-osm-im python3-osmclient
573 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
574 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
575 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
576 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
577 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
578 echo -e "\nOSM client installed"
579 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
580 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
581 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
582 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
583 else
584 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
585 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
586 echo " export OSM_HOSTNAME=<OSM_host>"
587 fi
588 return 0
589 }
590
591 function install_from_lxdimages(){
592 LXD_RELEASE=${RELEASE#"-R "}
593 if [ -n "$LXD_REPOSITORY_PATH" ]; then
594 LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH"
595 else
596 LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")"
597 trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT
598 fi
599 echo -e "\nDeleting previous lxd images if they exist"
600 lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
601 lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
602 lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
603 echo -e "\nImporting osm-ro"
604 [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz
605 lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro
606 rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz
607 echo -e "\nImporting osm-vca"
608 [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz
609 lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca
610 rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz
611 echo -e "\nImporting osm-soui"
612 [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz
613 lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui
614 rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz
615 launch_container_from_lxd RO osm-ro
616 ro_is_up && track RO
617 launch_container_from_lxd VCA osm-vca
618 vca_is_up && track VCA
619 launch_container_from_lxd MON osm-mon
620 mon_is_up && track MON
621 launch_container_from_lxd SO osm-soui
622 #so_is_up && track SOUI
623 track SOUI
624 }
625
626 function install_docker_ce() {
627 # installs and configures Docker CE
628 echo "Installing Docker CE ..."
629 sudo apt-get -qq update
630 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
631 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
632 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
633 sudo apt-get -qq update
634 sudo apt-get install -y docker-ce
635 echo "Adding user to group 'docker'"
636 sudo groupadd -f docker
637 sudo usermod -aG docker $USER
638 sleep 2
639 sudo service docker restart
640 echo "... restarted Docker service"
641 sg docker -c "docker version" || FATAL "Docker installation failed"
642 echo "... Docker CE installation done"
643 return 0
644 }
645
646 function install_docker_compose() {
647 # installs and configures docker-compose
648 echo "Installing Docker Compose ..."
649 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
650 sudo chmod +x /usr/local/bin/docker-compose
651 echo "... Docker Compose installation done"
652 }
653
654 function install_juju() {
655 echo "Installing juju"
656 sudo snap install juju --classic
657 [ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
658 echo "Finished installation of juju"
659 return 0
660 }
661
662 function juju_createcontroller() {
663 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
664         # Controller not found, create it
665 sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
666 fi
667 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
668 }
669
670 function juju_createproxy() {
671 echo -e "\nChecking required packages: iptables-persistent"
672 dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
673 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent
674
675 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
676 sudo iptables -t nat -A PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
677 sudo netfilter-persistent save
678 fi
679 }
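# The DNAT rule added above can be checked manually (illustrative), e.g.:
#   sudo iptables -t nat -L PREROUTING -n | grep 17070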
680
681 function generate_docker_images() {
682 echo "Pulling and generating docker images"
683 _build_from=$COMMIT_ID
684 [ -z "$_build_from" ] && _build_from="master"
685
686 echo "OSM Docker images generated from $_build_from"
687
688 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
689 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
690 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
691 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
692
693 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
694 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
695 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
696 fi
697
698 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
699 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
700 fi
701
702 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
703 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
704 fi
705
706 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
707 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
708 fi
709
710 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
711 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
712 fi
713
714 if [ -n "$PULL_IMAGES" ]; then
715 sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
716 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
717 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
718 git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
719 sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
720 fi
721
722 if [ -n "$PULL_IMAGES" ]; then
723 sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
724 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
725 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
726 git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
727 sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
728 fi
729
730 if [ -n "$PULL_IMAGES" ]; then
731 sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
732 sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
733 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
734 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
735 git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
736 sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
737 sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
738 fi
739
740 if [ -n "$PULL_IMAGES" ]; then
741 sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
742 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
743 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
744 git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
745 sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
746 fi
747
748 if [ -n "$PULL_IMAGES" ]; then
749 sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
750 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
751 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
752 git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
753 sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
754 fi
755
756 if [ -n "$PULL_IMAGES" ]; then
757 sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
758 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
759 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
760 git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
761 sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
762 fi
763
764 if [ -n "$PULL_IMAGES" ]; then
765 sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
766 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
767 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
768 fi
769 echo "Finished generation of docker images"
770 }
771
772 function cmp_overwrite() {
773 file1="$1"
774 file2="$2"
775     if ! cmp -s "${file1}" "${file2}"; then
776 if [ -f "${file2}" ]; then
777 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
778 else
779 cp -b ${file1} ${file2}
780 fi
781 fi
782 }
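# Illustrative usage (hypothetical files): copy a template into the work dir,
# asking for confirmation if a different version already exists there:
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml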
783
784 function generate_docker_env_files() {
785 echo "Doing a backup of existing env files"
786 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
787 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
788 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
789 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
790 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
791 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
792 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
793 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
794 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
795
796 echo "Generating docker env files"
797 if [ -n "$KUBERNETES" ]; then
798 #Kubernetes resources
799 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
800 else
801 # Docker-compose
802 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
803
804 # Prometheus
805 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml
806 fi
807
808 # LCM
809 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
810 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
811 fi
812
813 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
814 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
815 else
816 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
817 fi
818
819 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
820 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
821 else
822 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
823 fi
824
825 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
826 echo "OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
827 else
828 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"|g" $OSM_DOCKER_WORK_DIR/lcm.env
829 fi
830
831 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
832 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
833 else
834 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
835 fi
836
837 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
838 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
839 else
840 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
841 fi
842
843 # RO
844 MYSQL_ROOT_PASSWORD=$(generate_secret)
845 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
846 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
847 fi
848 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
849 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
850 fi
851
852 # Keystone
853 KEYSTONE_DB_PASSWORD=$(generate_secret)
854 SERVICE_PASSWORD=$(generate_secret)
855 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
856 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
857 fi
858 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
859 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
860 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
861 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
862 fi
863
864 # NBI
865 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
866 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
867 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
868 fi
869
870 # MON
871 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
872 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
873 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
874 fi
875
876 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
877 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
878 else
879 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
880 fi
881
882 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
883 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
884 else
885 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
886 fi
887
888 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
889 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
890 else
891 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
892 fi
893
894 # POL
895 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
896 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
897 fi
898
899 # LW-UI
900 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
901 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
902 fi
903
904 echo "Finished generation of docker env files"
905 }
906
907 function generate_osmclient_script () {
908 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
909 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
910 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
911 }
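# Illustrative usage: the generated wrapper starts an interactive osmclient
# sidecar container attached to the OSM network:
#   $OSM_DOCKER_WORK_DIR/osm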
912
913 #installs kubernetes packages
914 function install_kube() {
915 sudo apt-get update && sudo apt-get install -y apt-transport-https
916 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
917 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
918 sudo apt-get update
919 echo "Installing Kubernetes Packages ..."
920 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
921 }
922
923 #initializes kubernetes control plane
924 function init_kubeadm() {
925 sudo swapoff -a
926 sudo kubeadm init --config $1
927 sleep 5
928 }
929
930 function kube_config_dir() {
931 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
932 mkdir -p $HOME/.kube
933 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
934 sudo chown $(id -u):$(id -g) $HOME/.kube/config
935 }
936
937 #deploys flannel as daemonsets
938 function deploy_cni_provider() {
939 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
940 trap 'rm -rf "${CNI_DIR}"' EXIT
941 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
942 kubectl apply -f $CNI_DIR
943 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
944 }
945
946 #creates secrets from env files which will be used by containers
947 function kube_secrets(){
948 kubectl create ns $OSM_STACK_NAME
949 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
950 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
951 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
952 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
953 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
954 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
955 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
956 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
957 }
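# The secrets created above can be inspected manually (illustrative), e.g.:
#   kubectl get secrets -n $OSM_STACK_NAME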
958
959 #deploys osm pods and services
960 function deploy_osm_services() {
961 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
962 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
963 sleep 5
964 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
965 }
966
967 function parse_yaml() {
968 osm_services="nbi lcm ro pol mon light-ui keystone"
969 TAG=$1
970 for osm in $osm_services; do
971 $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
972 done
973 }
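# Illustrative usage (as done below when a non-latest tag is requested): retag
# every opensourcemano/<module> image reference in the pod manifests:
#   parse_yaml ${OSM_DOCKER_TAG}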
974
975 function namespace_vol() {
976 osm_services="nbi lcm ro pol mon kafka mongo mysql"
977 for osm in $osm_services; do
978 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
979 done
980 }
981
982 function init_docker_swarm() {
983 if [ "${DEFAULT_MTU}" != "1500" ]; then
984 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
985 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
986 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
987 fi
988 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
989 return 0
990 }
991
992 function create_docker_network() {
993 echo "creating network"
994 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
995 echo "creating network DONE"
996 }
997
998 function deploy_lightweight() {
999
1000 echo "Deploying lightweight build"
1001 OSM_NBI_PORT=9999
1002 OSM_RO_PORT=9090
1003 OSM_KEYSTONE_PORT=5000
1004 OSM_UI_PORT=80
1005 OSM_MON_PORT=8662
1006 OSM_PROM_PORT=9090
1007 OSM_PROM_HOSTPORT=9091
1008 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
1009 [ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
1010
1011 if [ -n "$NO_HOST_PORTS" ]; then
1012 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
1013 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
1014 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
1015 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
1016 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
1017 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
1018 [ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
1019 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
1020 else
1021 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
1022 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
1023 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
1024 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
1025 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
1026 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
1027 [ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
1028 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
1029 fi
1030 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
1031 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1032 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1033 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1034 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1035 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1036 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1037
1038 pushd $OSM_DOCKER_WORK_DIR
1039 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
1040 popd
1041
1042 echo "Finished deployment of lightweight build"
1043 }
1044
1045 function deploy_elk() {
1046 echo "Pulling docker images for ELK"
1047 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
1048 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
1049 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
1050 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
1051 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
1052 echo "Finished pulling elk docker images"
1053 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
1054 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
1055 remove_stack osm_elk
1056 echo "Deploying ELK stack"
1057 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
1058 echo "Waiting for ELK stack to be up and running"
1059 time=0
1060 step=5
1061 timelength=40
1062 elk_is_up=1
1063 while [ $time -le $timelength ]; do
1064 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
1065 elk_is_up=0
1066 break
1067 fi
1068 sleep $step
1069 time=$((time+step))
1070 done
1071 if [ $elk_is_up -eq 0 ]; then
1072 echo "ELK is up and running. Trying to create index pattern..."
1073 #Create index pattern
1074 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1075 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1076 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
1077 #Make it the default index
1078 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1079 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1080 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
1081 else
1082 echo "Cannot connect to Kibana to create index pattern."
1083 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
1084 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1085 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1086 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
1087 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1088 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1089 -d"{\"value\":\"filebeat-*\"}"'
1090 fi
1091 echo "Finished deployment of ELK stack"
1092 return 0
1093 }
1094
1095 function deploy_perfmon() {
1096 echo "Pulling docker images for PM (Grafana)"
1097 sg docker -c "docker pull grafana/grafana" || FATAL "cannot get grafana docker image"
1098 echo "Finished pulling PM docker images"
1099 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_metrics
1100 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_metrics/*.yml $OSM_DOCKER_WORK_DIR/osm_metrics
1101 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_metrics/*.json $OSM_DOCKER_WORK_DIR/osm_metrics
1102 remove_stack osm_metrics
1103 echo "Deploying PM stack (Grafana)"
1104 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_metrics/docker-compose.yml osm_metrics"
1105 echo "Finished deployment of PM stack"
1106 return 0
1107 }
1108
1109 function install_lightweight() {
1110 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1111 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1112 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1113 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1114
1115 track checkingroot
1116 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1117 track noroot
1118
1119 if [ -n "$KUBERNETES" ]; then
1120 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1121 1. Install and configure LXD
1122 2. Install juju
1123 3. Install docker CE
1124 4. Disable swap space
1125 5. Install and initialize Kubernetes
1126 as pre-requirements.
1127 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1128
1129 else
1130 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1131 fi
1132 track proceed
1133
1134 echo "Installing lightweight build of OSM"
1135 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1136 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1137 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
1138 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1139 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
1140 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1141 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1142
1143     # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
1144 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
1145 need_packages_lw="lxd snapd"
1146 echo -e "Checking required packages: $need_packages_lw"
1147 dpkg -l $need_packages_lw &>/dev/null \
1148 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1149 || sudo apt-get update \
1150 || FATAL "failed to run apt-get update"
1151 dpkg -l $need_packages_lw &>/dev/null \
1152 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1153 || sudo apt-get install -y $need_packages_lw \
1154 || FATAL "failed to install $need_packages_lw"
1155 fi
1156 track prereqok
1157
1158 [ -z "$INSTALL_NOJUJU" ] && install_juju
1159 track juju_install
1160
1161 if [ -z "$OSM_VCA_HOST" ]; then
1162 juju_createcontroller
1163 OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1164 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
1165 fi
1166 track juju_controller
1167
1168 if [ -z "$OSM_VCA_SECRET" ]; then
1169 OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
1170 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
1171 fi
1172 if [ -z "$OSM_VCA_PUBKEY" ]; then
1173 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
1174 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
1175 fi
1176 if [ -z "$OSM_VCA_APIPROXY" ]; then
1177 OSM_VCA_APIPROXY=$DEFAULT_IP
1178 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
1179 fi
1180 juju_createproxy
1181
1182 if [ -z "$OSM_VCA_CACERT" ]; then
1183         OSM_VCA_CACERT=$(juju controllers --format json | jq -r '.controllers["'$OSM_STACK_NAME'"]["ca-cert"]' | base64 | tr -d \\n)
1184 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
1185 fi
1186 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1187 OSM_DATABASE_COMMONKEY=$(generate_secret)
1188         [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
1189 fi
1190 track juju
1191
1192 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
1193 track docker_ce
1194
1195 #Installs Kubernetes and deploys osm services
1196 if [ -n "$KUBERNETES" ]; then
1197 install_kube
1198 track install_k8s
1199 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1200 kube_config_dir
1201 track init_k8s
1202 else
1203 #install_docker_compose
1204 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1205 track docker_swarm
1206 fi
1207
1208 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1209 track docker_build
1210
1211 generate_docker_env_files
1212
1213 if [ -n "$KUBERNETES" ]; then
1214 #remove old namespace
1215 remove_k8s_namespace $OSM_STACK_NAME
1216 deploy_cni_provider
1217 kube_secrets
1218 [ ! $OSM_DOCKER_TAG == "latest" ] && parse_yaml $OSM_DOCKER_TAG
1219 namespace_vol
1220 deploy_osm_services
1221 track deploy_osm_services_k8s
1222 else
1223 # remove old stack
1224 remove_stack $OSM_STACK_NAME
1225 create_docker_network
1226 deploy_lightweight
1227 generate_osmclient_script
1228 track docker_deploy
1229 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1230 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1231 [ -n "$INSTALL_PERFMON" ] && deploy_perfmon && track perfmon
1232 fi
1233
1234 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1235 track osmclient
1236
1237 wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null
1238 track end
1239 return 0
1240 }
1241
1242 function install_vimemu() {
1243 echo -e "\nInstalling vim-emu"
1244 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1245 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1246 # clone vim-emu repository (attention: branch is currently master only)
1247 echo "Cloning vim-emu repository ..."
1248 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1249 # build vim-emu docker
1250 echo "Building vim-emu Docker container..."
1251
1252 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1253 # start vim-emu container as daemon
1254 echo "Starting vim-emu Docker container 'vim-emu' ..."
1255 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1256 # in lightweight mode, the emulator needs to be attached to netOSM
1257 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1258 else
1259 # classic build mode
1260 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1261 fi
1262 echo "Waiting for 'vim-emu' container to start ..."
1263 sleep 5
1264 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1265 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1266 # print vim-emu connection info
1267 echo -e "\nYou might be interested in adding the following vim-emu env variable to your .bashrc file:"
1268 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1269 echo -e "To add the emulated VIM to OSM, run:"
1270 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1271 }
1272
1273 function dump_vars(){
1274 echo "DEVELOP=$DEVELOP"
1275 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1276 echo "UNINSTALL=$UNINSTALL"
1277 echo "NAT=$NAT"
1278 echo "UPDATE=$UPDATE"
1279 echo "RECONFIGURE=$RECONFIGURE"
1280 echo "TEST_INSTALLER=$TEST_INSTALLER"
1281 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1282 echo "INSTALL_LXD=$INSTALL_LXD"
1283 echo "INSTALL_FROM_LXDIMAGES=$INSTALL_FROM_LXDIMAGES"
1284 echo "LXD_REPOSITORY_BASE=$LXD_REPOSITORY_BASE"
1285 echo "LXD_REPOSITORY_PATH=$LXD_REPOSITORY_PATH"
1286 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1287 echo "INSTALL_ONLY=$INSTALL_ONLY"
1288 echo "INSTALL_ELK=$INSTALL_ELK"
1289 echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1290 echo "TO_REBUILD=$TO_REBUILD"
1291 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1292 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1293 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1294 echo "RELEASE=$RELEASE"
1295 echo "REPOSITORY=$REPOSITORY"
1296 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1297 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1298 echo "NOCONFIGURE=$NOCONFIGURE"
1299 echo "OSM_DEVOPS=$OSM_DEVOPS"
1300 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1301 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1302 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1303 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1304 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1305 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1306 echo "OSM_WORK_DIR=$OSM_STACK_NAME"
1307 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1308 echo "DOCKER_USER=$DOCKER_USER"
1309 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1310 echo "PULL_IMAGES=$PULL_IMAGES"
1311 echo "KUBERNETES=$KUBERNETES"
1312 echo "SHOWOPTS=$SHOWOPTS"
1313 echo "Install from specific refspec (-b): $COMMIT_ID"
1314 }
1315
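# track <milestone>: best-effort anonymous telemetry. SESSION_ID (set at startup) is reused as the
# cookie and as the reference timestamp, the event name is derived from the install type
# (bin/binsrc/lxd/lw) plus the milestone in $1, and the resulting URL is fetched with wget,
# discarding the output.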
1316 function track(){
1317 ctime=`date +%s`
1318 duration=$((ctime - SESSION_ID))
1319 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1320 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1321 event_name="bin"
1322 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1323 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1324 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1325 event_name="${event_name}_$1"
1326 url="${url}&event=${event_name}&ce_duration=${duration}"
1327 wget -q -O /dev/null $url
1328 }
1329
1330 UNINSTALL=""
1331 DEVELOP=""
1332 NAT=""
1333 UPDATE=""
1334 RECONFIGURE=""
1335 TEST_INSTALLER=""
1336 INSTALL_LXD=""
1337 SHOWOPTS=""
1338 COMMIT_ID=""
1339 ASSUME_YES=""
1340 INSTALL_FROM_SOURCE=""
1341 RELEASE="ReleaseSIX"
1342 REPOSITORY="stable"
1343 INSTALL_VIMEMU=""
1344 INSTALL_FROM_LXDIMAGES=""
1345 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1346 LXD_REPOSITORY_PATH=""
1347 INSTALL_LIGHTWEIGHT="y"
1348 INSTALL_ONLY=""
1349 INSTALL_ELK=""
1350 INSTALL_PERFMON=""
1351 TO_REBUILD=""
1352 INSTALL_NOLXD=""
1353 INSTALL_NODOCKER=""
1354 INSTALL_NOJUJU=""
1355 KUBERNETES=""
1356 INSTALL_NOHOSTCLIENT=""
1357 NOCONFIGURE=""
1358 RELEASE_DAILY=""
1359 SESSION_ID=`date +%s`
1360 OSM_DEVOPS=
1361 OSM_VCA_HOST=
1362 OSM_VCA_SECRET=
1363 OSM_VCA_PUBKEY=
1364 OSM_STACK_NAME=osm
1365 NO_HOST_PORTS=""
1366 DOCKER_NOBUILD=""
1367 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1368 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1369 WORKDIR_SUDO=sudo
1370 OSM_WORK_DIR="/etc/osm"
1371 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1372 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1373 OSM_HOST_VOL="/var/lib/osm"
1374 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1375 OSM_DOCKER_TAG=latest
1376 DOCKER_USER=opensourcemano
1377 PULL_IMAGES="y"
1378 KAFKA_TAG=2.11-1.0.2
1379 PROMETHEUS_TAG=v2.4.3
1380 KEYSTONEDB_TAG=10
1381 OSM_DATABASE_COMMONKEY=
1382 ELASTIC_VERSION=6.4.2
1383 ELASTIC_CURATOR_VERSION=5.5.4
1384 POD_NETWORK_CIDR=10.244.0.0/16
1385 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1386 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1387
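# Illustrative invocations (all values hypothetical):
#   ./full_install_osm.sh                            # default: lightweight install on docker swarm
#   ./full_install_osm.sh -c k8s -s osmns -t 6.0.0   # deploy on Kubernetes in namespace "osmns" with tag 6.0.0
#   ./full_install_osm.sh -o elk_stack               # only handle the ELK addon (deploy_elk)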
1388 while getopts ":hy-:b:r:c:k:u:R:l:p:D:o:m:H:S:s:w:t:U:P:A:" o; do
1389 case "${o}" in
1390 h)
1391 usage && exit 0
1392 ;;
1393 b)
1394 COMMIT_ID=${OPTARG}
1395 PULL_IMAGES=""
1396 ;;
1397 r)
1398 REPOSITORY="${OPTARG}"
1399 REPO_ARGS+=(-r "$REPOSITORY")
1400 ;;
1401 c)
1402 [ "${OPTARG}" == "swarm" ] && continue
1403 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1404 echo -e "Invalid argument for -c: '$OPTARG'\n" >&2
1405 usage && exit 1
1406 ;;
1407 R)
1408 RELEASE="${OPTARG}"
1409 REPO_ARGS+=(-R "$RELEASE")
1410 ;;
1411 k)
1412 REPOSITORY_KEY="${OPTARG}"
1413 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1414 ;;
1415 u)
1416 REPOSITORY_BASE="${OPTARG}"
1417 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1418 ;;
1419 U)
1420 DOCKER_USER="${OPTARG}"
1421 ;;
1422 l)
1423 LXD_REPOSITORY_BASE="${OPTARG}"
1424 ;;
1425 p)
1426 LXD_REPOSITORY_PATH="${OPTARG}"
1427 ;;
1428 D)
1429 OSM_DEVOPS="${OPTARG}"
1430 ;;
1431 s)
1432 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 1
1433 ;;
1434 H)
1435 OSM_VCA_HOST="${OPTARG}"
1436 ;;
1437 S)
1438 OSM_VCA_SECRET="${OPTARG}"
1439 ;;
1440 P)
1441 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1442 ;;
1443 A)
1444 OSM_VCA_APIPROXY="${OPTARG}"
1445 ;;
1446 w)
1447 # when specifying workdir, do not use sudo for access
1448 WORKDIR_SUDO=
1449 OSM_WORK_DIR="${OPTARG}"
1450 ;;
1451 t)
1452 OSM_DOCKER_TAG="${OPTARG}"
1453 ;;
1454 o)
1455 INSTALL_ONLY="y"
1456 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1457 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1458 [ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
1459 ;;
1460 m)
1461 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1462 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1463 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1464 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1465 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1466 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1467 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1468 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1469 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1470 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1471 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1472 ;;
1473 -)
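# Long options: getopts only parses short options natively, so "-:" in the option string makes
# every "--foo" arrive here as option "-" with OPTARG set to the long option name, which the
# checks below compare verbatim.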
1474 [ "${OPTARG}" == "help" ] && usage && exit 0
1475 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1476 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1477 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1478 [ "${OPTARG}" == "nat" ] && NAT="y" && continue
1479 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1480 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1481 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1482 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1483 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1484 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1485 [ "${OPTARG}" == "lxdimages" ] && INSTALL_FROM_LXDIMAGES="y" && continue
1486 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1487 [ "${OPTARG}" == "soui" ] && INSTALL_LIGHTWEIGHT="" && RELEASE="-R ReleaseTHREE" && REPOSITORY="-r stable" && continue
1488 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1489 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1490 [ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
1491 [ "${OPTARG}" == "noconfigure" ] && NOCONFIGURE="y" && continue
1492 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1493 [ "${OPTARG}" == "daily" ] && RELEASE_DAILY="y" && continue
1494 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1495 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1496 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1497 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1498 [ "${OPTARG}" == "pullimages" ] && continue
1499 echo -e "Invalid option: '--$OPTARG'\n" >&2
1500 usage && exit 1
1501 ;;
1502 \?)
1503 echo -e "Invalid option: '-$OPTARG'\n" >&2
1504 usage && exit 1
1505 ;;
1506 y)
1507 ASSUME_YES="y"
1508 ;;
1509 *)
1510 usage && exit 1
1511 ;;
1512 esac
1513 done
1514
1515 [ -n "$INSTALL_FROM_LXDIMAGES" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --lxdimages can only be used with --soui"
1516 [ -n "$NAT" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --nat can only be used with --soui"
1517 [ -n "$NOCONFIGURE" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --noconfigure can only be used with --soui"
1518 [ -n "$RELEASE_DAILY" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --daily can only be used with --soui"
1519 [ -n "$INSTALL_NOLXD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nolxd cannot be used with --soui"
1520 [ -n "$INSTALL_NODOCKER" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nodocker cannot be used with --soui"
1521 [ -n "$TO_REBUILD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: -m cannot be used with --soui"
1522 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1523
1524 if [ -n "$SHOWOPTS" ]; then
1525 dump_vars
1526 exit 0
1527 fi
1528
1529 [ -n "$RELEASE_DAILY" ] && echo -e "\nInstalling from daily build repo" && RELEASE="-R ReleaseTHREE-daily" && REPOSITORY="-r testing" && COMMIT_ID="master"
1530
1531 # if develop, we force master
1532 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1533
1534 need_packages="git jq wget curl tar"
1535 echo -e "Checking required packages: $need_packages"
1536 dpkg -l $need_packages &>/dev/null \
1537 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1538 || sudo apt-get update \
1539 || FATAL "failed to run apt-get update"
1540 dpkg -l $need_packages &>/dev/null \
1541 || ! echo -e "Installing $need_packages requires root privileges." \
1542 || sudo apt-get install -y $need_packages \
1543 || FATAL "failed to install $need_packages"
1544
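# Locate the devops repository: use the path given with -D if any, reuse the local checkout
# containing this script when --test is set, and otherwise clone osm/devops into a temporary
# directory and check out either the requested refspec (-b) or the latest stable tag.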
1545 if [ -z "$OSM_DEVOPS" ]; then
1546 if [ -n "$TEST_INSTALLER" ]; then
1547 echo -e "\nUsing local devops repo for OSM installation"
1548 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1549 else
1550 echo -e "\nCreating temporary dir for OSM installation"
1551 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1552 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1553
1554 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1555
1556 if [ -z "$COMMIT_ID" ]; then
1557 echo -e "\nGuessing the current stable release"
1558 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1559 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 1
1560
1561 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1562 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1563 else
1564 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1565 fi
1566 git -C $OSM_DEVOPS checkout $COMMIT_ID
1567 fi
1568 fi
1569
1570 . $OSM_DEVOPS/common/all_funcs
1571
1572 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1573 [ -n "$UNINSTALL" ] && uninstall && echo -e "\nDONE" && exit 0
1574 [ -n "$NAT" ] && nat && echo -e "\nDONE" && exit 0
1575 [ -n "$UPDATE" ] && update && echo -e "\nDONE" && exit 0
1576 [ -n "$RECONFIGURE" ] && configure && echo -e "\nDONE" && exit 0
1577 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1578 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1579 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1580 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1581
1582 #Installation starts here
1583 wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README.txt &> /dev/null
1584 track start
1585
1586 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1587 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1588 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1589 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1590 fi
1591
1592 echo -e "Checking required packages: lxd"
1593 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1594 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1595
1596 # use local devops for containers
1597 export OSM_USE_LOCAL_DEVOPS=true
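# Classic (non-lightweight) path: each module is built (from source) or installed (from binaries
# or LXD images) in its own container via the jenkins/host helpers from the devops repo.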
1598 if [ -n "$INSTALL_FROM_SOURCE" ]; then #install from source
1599 echo -e "\nCreating the containers and building from source ..."
1600 $OSM_DEVOPS/jenkins/host/start_build RO --notest checkout $COMMIT_ID || FATAL "RO container build failed (refspec: '$COMMIT_ID')"
1601 ro_is_up && track RO
1602 $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA container build failed"
1603 vca_is_up && track VCA
1604 $OSM_DEVOPS/jenkins/host/start_build MON || FATAL "MON container build failed"
1605 mon_is_up && track MON
1606 $OSM_DEVOPS/jenkins/host/start_build SO checkout $COMMIT_ID || FATAL "SO container build failed (refspec: '$COMMIT_ID')"
1607 $OSM_DEVOPS/jenkins/host/start_build UI checkout $COMMIT_ID || FATAL "UI container build failed (refspec: '$COMMIT_ID')"
1608 #so_is_up && track SOUI
1609 track SOUI
1610 elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then #install from LXD images stored in OSM repo
1611 echo -e "\nInstalling from lxd images ..."
1612 install_from_lxdimages
1613 else #install from binaries
1614 echo -e "\nCreating the containers and installing from binaries ..."
1615 $OSM_DEVOPS/jenkins/host/install RO ${REPO_ARGS[@]} || FATAL "RO install failed"
1616 ro_is_up && track RO
1617 $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA install failed"
1618 vca_is_up && track VCA
1619 $OSM_DEVOPS/jenkins/host/install MON || FATAL "MON install failed"
1620 mon_is_up && track MON
1621 $OSM_DEVOPS/jenkins/host/install SO ${REPO_ARGS[@]} || FATAL "SO install failed"
1622 $OSM_DEVOPS/jenkins/host/install UI ${REPO_ARGS[@]} || FATAL "UI install failed"
1623 #so_is_up && track SOUI
1624 track SOUI
1625 fi
1626
1627 #Install iptables-persistent and configure NAT rules
1628 [ -z "$NOCONFIGURE" ] && nat
1629
1630 #Configure components
1631 [ -z "$NOCONFIGURE" ] && configure
1632
1633 #Install osmclient
1634 [ -z "$NOCONFIGURE" ] && install_osmclient
1635
1636 #Install vim-emu (optional)
1637 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1638
1639 wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null
1640 track end
1641 echo -e "\nDONE"