Merge "Disable parallel make for easier debugging"
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
28 echo -e " ..."
29 echo -e " -s <stack name>: user defined stack name, default is osm"
30 echo -e " -H <VCA host>: use specific juju host controller IP"
31 echo -e " -S <VCA secret>: use VCA/juju secret key"
32 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
33 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
34 echo -e " --pm_stack: additionally deploy a Prometheus+Grafana stack for performance monitoring (PM)"
35 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (RO, LCM, NBI, LW-UI, MON, KAFKA, MONGO, NONE)"
36 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, pm_stack)"
37 echo -e " -D <devops path>: use local devops installation path"
38 echo -e " -w <work dir>: location to store runtime installation"
39 echo -e " -t <docker tag>: specify osm docker tag (default is latest)"
40 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
41 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
42 echo -e " --nojuju: do not install juju, assumes it is already installed"
43 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
44 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
45 echo -e " --nohostclient: do not install the osmclient"
46 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
47 echo -e " --source: install OSM from source code using the latest stable tag"
48 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
49 echo -e " --soui: install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)"
50 echo -e " --lxdimages: (only for Rel THREE with --soui) download lxd images from OSM repository instead of creating them from scratch"
51 echo -e " -l <lxd_repo>: (only for Rel THREE with --soui) use specified repository url for lxd images"
52 echo -e " -p <path>: (only for Rel THREE with --soui) use specified repository path for lxd images"
53 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
54 echo -e " --nat: (only for Rel THREE with --soui) install only NAT rules"
55 echo -e " --noconfigure: (only for Rel THREE with --soui) DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules"
56 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
57 echo -e " --showopts: print chosen options and exit (only for debugging)"
58 echo -e " -y: do not prompt for confirmation, assumes yes"
59 echo -e " -h / --help: print this help"
60 }
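# Illustrative invocations (a sketch based on the options above; adapt the
# repository/release values to your environment):
#   ./full_install_osm.sh                            # default lightweight install from binaries
#   ./full_install_osm.sh -b master                  # install from source using the master branch
#   ./full_install_osm.sh --elk_stack --pm_stack     # also deploy the ELK and Prometheus+Grafana stacks
#   ./full_install_osm.sh -o vimemu                  # ONLY deploy the VIM emulator add-on
#   ./full_install_osm.sh --uninstall                # remove the deployment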
61
62 #Uninstall OSM: remove containers
63 function uninstall(){
64 echo -e "\nUninstalling OSM"
65 if [ $RC_CLONE ] || [ -n "$TEST_INSTALLER" ]; then
66 $OSM_DEVOPS/jenkins/host/clean_container RO
67 $OSM_DEVOPS/jenkins/host/clean_container VCA
68 $OSM_DEVOPS/jenkins/host/clean_container MON
69 $OSM_DEVOPS/jenkins/host/clean_container SO
70 #$OSM_DEVOPS/jenkins/host/clean_container UI
71 else
72 lxc stop RO && lxc delete RO
73 lxc stop VCA && lxc delete VCA
74 lxc stop MON && lxc delete MON
75 lxc stop SO-ub && lxc delete SO-ub
76 fi
77 echo -e "\nDeleting imported lxd images if they exist"
78 lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
79 lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
80 lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
81 return 0
82 }
83
84 # Takes a juju accounts.yaml file and returns the password for a specific
85 # controller. Written using only bash tools to avoid depending on
86 # additional packages
87 function parse_juju_password {
88 password_file="${HOME}/.local/share/juju/accounts.yaml"
89 local controller_name=$1
90 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
91 sed -ne "s|^\($s\):|\1|" \
92 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
93 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
94 awk -F$fs -v controller=$controller_name '{
95 indent = length($1)/2;
96 vname[indent] = $2;
97 for (i in vname) {if (i > indent) {delete vname[i]}}
98 if (length($3) > 0) {
99 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
100 if (match(vn,controller) && match($2,"password")) {
101 printf("%s",$3);
102 }
103 }
104 }'
105 }
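# Example (minimal sketch): this helper is consumed later in the installer to
# derive the VCA secret when none is provided with -S:
#   OSMLCM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
#   [ -z "$OSMLCM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"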
106
107 function remove_volumes() {
108 stack=$1
109 volumes="mongo_db mon_db osm_packages ro_db"
110 for volume in $volumes; do
111 sg docker -c "docker volume rm ${stack}_${volume}"
112 done
113 }
114
115 function remove_network() {
116 stack=$1
117 sg docker -c "docker network rm net${stack}"
118 }
119
120 function remove_stack() {
121 stack=$1
122 if sg docker -c "docker stack ps ${stack}" ; then
123 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
124 COUNTER=0
125 result=1
126 while [ ${COUNTER} -lt 30 ]; do
127 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
128 #echo "Dockers running: $result"
129 if [ "${result}" == "0" ]; then
130 break
131 fi
132 let COUNTER=COUNTER+1
133 sleep 1
134 done
135 if [ "${result}" == "0" ]; then
136 echo "All dockers of the stack ${stack} were removed"
137 else
138 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
139 fi
140 sleep 5
141 fi
142 }
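# Example (sketch): the three helpers above are combined during uninstall to
# tear down a deployment, e.g. for the default stack name:
#   remove_stack osm && remove_volumes osm && remove_network osm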
143
144 #Uninstall lightweight OSM: remove dockers
145 function uninstall_lightweight() {
146 if [ -n "$INSTALL_ONLY" ]; then
147 if [ -n "$INSTALL_ELK" ]; then
148 echo -e "\nUninstalling OSM ELK stack"
149 remove_stack osm_elk
150 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
151 fi
152 if [ -n "$INSTALL_PERFMON" ]; then
153 echo -e "\nUninstalling OSM Performance Monitoring stack"
154 remove_stack osm_metrics
155 sg docker -c "docker image rm osm/kafka-exporter"
156 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_metrics
157 fi
158 else
159 echo -e "\nUninstalling OSM"
160 remove_stack $OSM_STACK_NAME
161 remove_stack osm_elk
162 remove_stack osm_metrics
163 echo "Now osm docker images and volumes will be deleted"
164 newgrp docker << EONG
165 docker image rm osm/ro
166 docker image rm osm/lcm
167 docker image rm osm/light-ui
168 docker image rm osm/keystone
169 docker image rm osm/nbi
170 docker image rm osm/mon
171 docker image rm osm/pm
172 docker image rm osm/kafka-exporter
173 EONG
174 remove_volumes $OSM_STACK_NAME
175 remove_network $OSM_STACK_NAME
176 echo "Removing $OSM_DOCKER_WORK_DIR"
177 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
178 sg lxd -c "juju destroy-controller --yes $OSM_STACK_NAME"
179 fi
180 echo "Some docker images will be kept in case they are used by other docker stacks"
181 echo "To remove them, just run 'docker image prune' in a terminal"
182 return 0
183 }
184
185 #Configure NAT rules, based on the current IP addresses of containers
186 function nat(){
187 echo -e "\nChecking required packages: iptables-persistent"
188 dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
189 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent
190 echo -e "\nConfiguring NAT rules"
191 echo -e " Required root privileges"
192 sudo $OSM_DEVOPS/installers/nat_osm
193 }
194
195 function FATAL(){
196 echo "FATAL error: Cannot install OSM due to \"$1\""
197 exit 1
198 }
199
200 #Update RO, SO/UI and MON containers:
201 function update(){
202 echo -e "\nUpdating components"
203
204 echo -e " Updating RO"
205 CONTAINER="RO"
206 MDG="RO"
207 INSTALL_FOLDER="/opt/openmano"
208 echo -e " Fetching the repo"
209 lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
210 BRANCH=""
211 BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
212 [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in '$MDG'"
213 CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
214 CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
215 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
216 # COMMIT_ID either was previously set with -b option, or is an empty string
217 CHECKOUT_ID=$COMMIT_ID
218 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
219 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
220 if [[ $CHECKOUT_ID == "tags/"* ]]; then
221 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
222 else
223 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
224 fi
225 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
226 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
227 echo " Nothing to be done."
228 else
229 echo " Update required."
230 lxc exec $CONTAINER -- service osm-ro stop
231 lxc exec $CONTAINER -- git -C /opt/openmano stash
232 lxc exec $CONTAINER -- git -C /opt/openmano pull --rebase
233 lxc exec $CONTAINER -- git -C /opt/openmano checkout $CHECKOUT_ID
234 lxc exec $CONTAINER -- git -C /opt/openmano stash pop
235 lxc exec $CONTAINER -- /opt/openmano/database_utils/migrate_mano_db.sh
236 lxc exec $CONTAINER -- service osm-ro start
237 fi
238 echo
239
240 echo -e " Updating SO and UI"
241 CONTAINER="SO-ub"
242 MDG="SO"
243 INSTALL_FOLDER="" # To be filled in
244 echo -e " Fetching the repo"
245 lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
246 BRANCH=""
247 BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
248 [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in '$MDG'"
249 CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
250 CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
251 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
252 # COMMIT_ID either was previously set with -b option, or is an empty string
253 CHECKOUT_ID=$COMMIT_ID
254 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
255 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
256 if [[ $CHECKOUT_ID == "tags/"* ]]; then
257 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
258 else
259 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
260 fi
261 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
262 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
263 echo " Nothing to be done."
264 else
265 echo " Update required."
266 # Instructions to be added
267 # lxc exec SO-ub -- ...
268 fi
269 echo
270 echo -e "Updating MON Container"
271 CONTAINER="MON"
272 MDG="MON"
273 INSTALL_FOLDER="/root/MON"
274 echo -e " Fetching the repo"
275 lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
276 BRANCH=""
277 BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
278 [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in '$MDG'"
279 CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
280 CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
281 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
282 # COMMIT_ID either was previously set with -b option, or is an empty string
283 CHECKOUT_ID=$COMMIT_ID
284 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
285 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
286 if [[ $CHECKOUT_ID == "tags/"* ]]; then
287 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
288 else
289 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
290 fi
291 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
292 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
293 echo " Nothing to be done."
294 else
295 echo " Update required."
296 fi
297 echo
298 }
299
300 function so_is_up() {
301 if [ -n "$1" ]; then
302 SO_IP=$1
303 else
304 SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'`
305 fi
306 time=0
307 step=5
308 timelength=300
309 while [ $time -le $timelength ]
310 do
311 if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \
312 -H 'accept: application/vnd.yang.data+json' \
313 -H 'authorization: Basic YWRtaW46YWRtaW4=' \
314 -H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]]
315 then
316 echo "RW.Restconf running....SO is up"
317 return 0
318 fi
319
320 sleep $step
321 echo -n "."
322 time=$((time+step))
323 done
324
325 FATAL "OSM failed to start up: SO failed to start up"
326 }
327
328 function vca_is_up() {
329 if [[ `lxc exec VCA -- juju status | grep "osm" | wc -l` -eq 1 ]]; then
330 echo "VCA is up and running"
331 return 0
332 fi
333
334 FATAL "OSM failed to start up: VCA failed to start up"
335 }
336
337 function mon_is_up() {
338 if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
339 echo "MON is up and running"
340 return 0
341 fi
342
343 FATAL "OSM failed to start up: MON failed to start up"
344 }
345
346 function ro_is_up() {
347 if [ -n "$1" ]; then
348 RO_IP=$1
349 else
350 RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'`
351 fi
352 time=0
353 step=2
354 timelength=20
355 while [ $time -le $timelength ]; do
356 if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
357 echo "RO is up and running"
358 return 0
359 fi
360 sleep $step
361 echo -n "."
362 time=$((time+step))
363 done
364
365 FATAL "OSM failed to start up: RO failed to start up"
366 }
367
368
369 function configure_RO(){
370 . $OSM_DEVOPS/installers/export_ips
371 echo -e " Configuring RO"
372 lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg
373 lxc exec RO -- service osm-ro restart
374
375 ro_is_up
376
377 lxc exec RO -- openmano tenant-delete -f osm >/dev/null
378 lxc exec RO -- openmano tenant-create osm > /dev/null
379 lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc
380 lxc exec RO -- sed -i '$ i export OPENMANO_TENANT=osm' .bashrc
381 lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc'
382 }
383
384 function configure_VCA(){
385 echo -e " Configuring VCA"
386 JUJU_PASSWD=`date +%s | sha256sum | base64 | head -c 32`
387 echo -e "$JUJU_PASSWD\n$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password
388 }
389
390 function configure_SOUI(){
391 . $OSM_DEVOPS/installers/export_ips
392 JUJU_CONTROLLER_IP=`lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}'`
393 RO_TENANT_ID=`lxc exec RO -- openmano tenant-list osm |awk '{print $1}'`
394
395 echo -e " Configuring MON"
396 #Information to be added about SO socket for logging
397
398 echo -e " Configuring SO"
399 sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP
400 sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP
401 sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local
402 sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local
403 # make journaling persistent
404 lxc exec SO-ub -- mkdir -p /var/log/journal
405 lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal
406 lxc exec SO-ub -- systemctl restart systemd-journald
407
408 echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad
409
410 lxc exec SO-ub -- systemctl restart launchpad
411
412 so_is_up $SO_CONTAINER_IP
413
414 #delete existing config agent (could be there on reconfigure)
415 curl -k --request DELETE \
416 --url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \
417 --header 'accept: application/vnd.yang.data+json' \
418 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
419 --header 'cache-control: no-cache' \
420 --header 'content-type: application/vnd.yang.data+json' &> /dev/null
421
422 result=$(curl -k --request POST \
423 --url https://$SO_CONTAINER_IP:8008/api/config/config-agent \
424 --header 'accept: application/vnd.yang.data+json' \
425 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
426 --header 'cache-control: no-cache' \
427 --header 'content-type: application/vnd.yang.data+json' \
428 --data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}')
429 [[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result"
430
431 #R1/R2 config line
432 #result=$(curl -k --request PUT \
433 # --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \
434 # --header 'accept: application/vnd.yang.data+json' \
435 # --header 'authorization: Basic YWRtaW46YWRtaW4=' \
436 # --header 'cache-control: no-cache' \
437 # --header 'content-type: application/vnd.yang.data+json' \
438 # --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }')
439
440 result=$(curl -k --request PUT \
441 --url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \
442 --header 'accept: application/vnd.yang.data+json' \
443 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
444 --header 'cache-control: no-cache' \
445 --header 'content-type: application/vnd.yang.data+json' \
446 --data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}')
447 [[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result"
448
449 result=$(curl -k --request PATCH \
450 --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \
451 --header 'accept: application/vnd.yang.data+json' \
452 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
453 --header 'cache-control: no-cache' \
454 --header 'content-type: application/vnd.yang.data+json' \
455 --data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }')
456 [[ $result =~ .*success.* ]] || FATAL "Failed redirect-uri configuration: $result"
457
458 result=$(curl -k --request PATCH \
459 --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \
460 --header 'accept: application/vnd.yang.data+json' \
461 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
462 --header 'cache-control: no-cache' \
463 --header 'content-type: application/vnd.yang.data+json' \
464 --data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }')
465 [[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result"
466
467 lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg <<EOF
468 auto lo:1
469 iface lo:1 inet static
470 address $DEFAULT_IP
471 netmask 255.255.255.255
472 EOF
473 lxc exec SO-ub ifup lo:1
474 }
475
476 #Configure RO, VCA, and SO with the initial configuration:
477 # RO -> tenant:osm, logs to be sent to SO
478 # VCA -> juju-password
479 # SO -> route to Juju Controller, add RO account, add VCA account
480 function configure(){
481 #Configure components
482 echo -e "\nConfiguring components"
483 configure_RO
484 configure_VCA
485 configure_SOUI
486 }
487
488 function install_lxd() {
489 sudo apt-get update
490 sudo apt-get install -y lxd
491 newgrp lxd
492 lxd init --auto
493 lxd waitready
494 lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
495 DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
496 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
497 lxc profile device set default eth0 mtu $DEFAULT_MTU
498 #sudo systemctl stop lxd-bridge
499 #sudo systemctl --system daemon-reload
500 #sudo systemctl enable lxd-bridge
501 #sudo systemctl start lxd-bridge
502 }
503
504 function ask_user(){
505 # Ask the user a question and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
506 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
507 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
508 read -e -p "$1" USER_CONFIRMATION
509 while true ; do
510 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
511 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
512 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
513 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
514 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
515 done
516 }
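# Example (sketch): typical use elsewhere in this installer, with 'y' as the
# default answer when the user just presses Enter:
#   ask_user "Do you want to proceed (Y/n)? " y || { echo "Cancelled!"; exit 1; }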
517
518 function launch_container_from_lxd(){
519 export OSM_MDG=$1
520 OSM_load_config
521 export OSM_BASE_IMAGE=$2
522 if ! container_exists $OSM_BUILD_CONTAINER; then
523 CONTAINER_OPTS=""
524 [[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true"
525 [[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true"
526 create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS
527 wait_container_up $OSM_BUILD_CONTAINER
528 fi
529 }
530
531 function install_osmclient(){
532 CLIENT_RELEASE=${RELEASE#"-R "}
533 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
534 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
535 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
536 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
537 curl $key_location | sudo apt-key add -
538 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient"
539 sudo apt-get update
540 sudo apt-get install -y python-pip
541 sudo -H pip install pip==9.0.3
542 sudo -H pip install python-magic
543 sudo apt-get install -y python-osmclient
544 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
545 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
546 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
547 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
548 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
549 [ -n "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=127.0.0.1
550 echo -e "\nOSM client installed"
551 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
552 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
553 [ -n "$INSTALL_LIGHTWEIGHT" ] && echo " export OSM_SOL005=True"
554 [ -z "$INSTALL_LIGHTWEIGHT" ] && echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
555 return 0
556 }
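# Example (sketch, assuming a lightweight install on the local host): after this
# function runs, the client is typically pointed at the NBI like this:
#   export OSM_HOSTNAME=127.0.0.1
#   export OSM_SOL005=True
#   osm ns-list    # illustrative osmclient command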
557
558 function install_from_lxdimages(){
559 LXD_RELEASE=${RELEASE#"-R "}
560 if [ -n "$LXD_REPOSITORY_PATH" ]; then
561 LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH"
562 else
563 LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")"
564 trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT
565 fi
566 echo -e "\nDeleting previous lxd images if they exist"
567 lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
568 lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
569 lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
570 echo -e "\nImporting osm-ro"
571 [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz
572 lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro
573 rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz
574 echo -e "\nImporting osm-vca"
575 [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz
576 lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca
577 rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz
578 echo -e "\nImporting osm-soui"
579 [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz
580 lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui
581 rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz
582 launch_container_from_lxd RO osm-ro
583 ro_is_up && track RO
584 launch_container_from_lxd VCA osm-vca
585 vca_is_up && track VCA
586 launch_container_from_lxd MON osm-mon
587 mon_is_up && track MON
588 launch_container_from_lxd SO osm-soui
589 #so_is_up && track SOUI
590 track SOUI
591 }
592
593 function install_docker_ce() {
594 # installs and configures Docker CE
595 echo "Installing Docker CE ..."
596 sudo apt-get -qq update
597 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
598 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
599 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
600 sudo apt-get -qq update
601 sudo apt-get install -y docker-ce
602 echo "Adding user to group 'docker'"
603 sudo groupadd -f docker
604 sudo usermod -aG docker $USER
605 sleep 2
606 sudo service docker restart
607 echo "... restarted Docker service"
608 sg docker -c "docker version" || FATAL "Docker installation failed"
609 echo "... Docker CE installation done"
610 return 0
611 }
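# Example (sketch): a quick post-install check, run through 'sg' because the new
# 'docker' group membership is not yet active in the current shell:
#   sg docker -c "docker run --rm hello-world"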
612
613 function install_docker_compose() {
614 # installs and configures docker-compose
615 echo "Installing Docker Compose ..."
616 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
617 sudo chmod +x /usr/local/bin/docker-compose
618 echo "... Docker Compose installation done"
619 }
620
621 function install_juju() {
622 echo "Installing juju"
623 sudo snap install juju --classic
624 [ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
625 echo "Finished installation of juju"
626 return 0
627 }
628
629 function juju_createcontroller() {
630 if ! sg lxd -c "juju show-controller $OSM_STACK_NAME &> /dev/null"; then
631 # Controller not found, so create it
632 sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
633 fi
634 [ $(sg lxd -c "juju controllers" | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
635 }
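# Example (sketch, mirroring install_lightweight below): once the controller is
# bootstrapped, its API endpoint IP can be extracted from 'juju show-controller':
#   OSMLCM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`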
636
637 function generate_docker_images() {
638 echo "Pulling and generating docker images"
639 _build_from=$COMMIT_ID
640 [ -z "$_build_from" ] && _build_from="master"
641
642 echo "OSM Docker images generated from $_build_from"
643
644 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
645 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
646 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
647 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
648
649 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
650 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
651 sg docker -c "docker pull wurstmeister/kafka" || FATAL "cannot get kafka docker image"
652 fi
653 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
654 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
655 fi
656 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
657 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
658 git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
659 sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t osm/mon --no-cache" || FATAL "cannot build MON docker image"
660 sg docker -c "docker build ${LWTEMPDIR}/MON/policy_module -f ${LWTEMPDIR}/MON/policy_module/Dockerfile -t osm/pm --no-cache" || FATAL "cannot build PM docker image"
661 fi
662 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
663 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
664 git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
665 sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t osm/nbi --no-cache" || FATAL "cannot build NBI docker image"
666 fi
667 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
668 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
669 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
670 git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
671 sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/docker/Dockerfile-local -t osm/ro --no-cache" || FATAL "cannot build RO docker image"
672 fi
673 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
674 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
675 git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
676 sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t osm/lcm --no-cache" || FATAL "cannot build LCM docker image"
677 fi
678 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
679 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
680 git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
681 sg docker -c "docker build ${LWTEMPDIR}/LW-UI -t osm/light-ui -f ${LWTEMPDIR}/LW-UI/Dockerfile --no-cache" || FATAL "cannot build LW-UI docker image"
682 fi
683 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
684 sg docker -c "docker build -t osm/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
685 fi
686 echo "Finished generation of docker images"
687 }
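# Example (sketch): after a full build, the freshly tagged images can be checked
# with the docker CLI, e.g.:
#   sg docker -c "docker image ls 'osm/*'"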
688
689 function cmp_overwrite() {
690 file1="$1"
691 file2="$2"
692 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
693 if [ -f "${file2}" ]; then
694 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
695 else
696 cp -b ${file1} ${file2}
697 fi
698 fi
699 }
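# Example (sketch): typical use is to avoid silently overwriting a file the user
# may have customized, e.g.:
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml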
700
701 function generate_config_log_folders() {
702 echo "Generating config and log folders"
703 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
704 echo "Finished generation of config and log folders"
705 }
706
707 function generate_docker_env_files() {
708 echo "Generating docker env files"
709 echo "OSMLCM_VCA_HOST=${OSMLCM_VCA_HOST}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/lcm.env
710 echo "OSMLCM_VCA_SECRET=${OSMLCM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
711
712 MYSQL_ROOT_PASSWORD=`date +%s | sha256sum | base64 | head -c 32`
713 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
714 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
715 fi
716 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
717 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
718 fi
719
720 MYSQL_ROOT_PASSWORD=`date +%s | sha256sum | base64 | head -c 32` && sleep 1
721 KEYSTONE_DB_PASSWORD=`date +%s | sha256sum | base64 | head -c 32` && sleep 1
722 #ADMIN_PASSWORD=`date +%s | sha256sum | base64 | head -c 32` && sleep 1
723 NBI_PASSWORD=`date +%s | sha256sum | base64 | head -c 32`
724 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
725 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
726 fi
727 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
728 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
729 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
730 #echo "ADMIN_PASSWORD=${ADMIN_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
731 echo "NBI_PASSWORD=${NBI_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
732 fi
733
734 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
735 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${NBI_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
736 fi
737
738 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/mon.env
739
740 echo "Finished generation of docker env files"
741 }
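# Example (illustrative only, values are placeholders): after this function the
# generated lcm.env holds the two VCA parameters consumed by the LCM container:
#   OSMLCM_VCA_HOST=10.0.0.10
#   OSMLCM_VCA_SECRET=<32-character secret>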
742
743 function generate_osmclient_script () {
744 echo "docker run -ti --network net${OSM_STACK_NAME} osm/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
745 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
746 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
747 }
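# Example (sketch, assuming the default stack name and work dir): the generated
# wrapper starts an interactive osmclient sidecar attached to the OSM network:
#   /etc/osm/stack/osm/osm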
748
749 function init_docker_swarm() {
750 if [ "${DEFAULT_MTU}" != "1500" ]; then
751 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
752 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
753 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
754 fi
755 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
756 return 0
757 }
758
759 function create_docker_network() {
760 echo "creating network"
761 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
762 echo "creating network DONE"
763 }
764
765 function deploy_lightweight() {
766
767 echo "Deploying lightweight build"
768 OSM_NBI_PORT=9999
769 OSM_RO_PORT=9090
770 OSM_KEYSTONE_PORT=5000
771 OSM_UI_PORT=80
772
773 if [ -n "$NO_HOST_PORTS" ]; then
774 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
775 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
776 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
777 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
778 else
779 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
780 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
781 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
782 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
783 fi
784 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
785 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
786 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
787
788 pushd $OSM_DOCKER_WORK_DIR
789 sg docker -c "source ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
790 popd
791
792 echo "Finished deployment of lightweight build"
793 }
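# Example (sketch): deployment progress can be followed with the standard swarm
# commands, e.g.:
#   sg docker -c "docker stack ps osm"
#   sg docker -c "docker service ls"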
794
795 function deploy_elk() {
796 echo "Pulling docker images for ELK"
797 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:6.2.3" || FATAL "cannot get elasticsearch docker image"
798 sg docker -c "docker pull docker.elastic.co/logstash/logstash-oss:6.2.3" || FATAL "cannot get logstash docker image"
799 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:6.2.3" || FATAL "cannot get kibana docker image"
800 echo "Finished pulling elk docker images"
801 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
802 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
803 remove_stack osm_elk
804 echo "Deploying ELK stack"
805 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
806 echo "Waiting for ELK stack to be up and running"
807 time=0
808 step=5
809 timelength=40
810 elk_is_up=1
811 while [ $time -le $timelength ]; do
812 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
813 elk_is_up=0
814 break
815 fi
816 sleep $step
817 time=$((time+step))
818 done
819 if [ $elk_is_up -eq 0 ]; then
820 echo "ELK is up and running. Trying to create index pattern..."
821 #Create index pattern
822 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
823 "http://127.0.0.1:5601/api/saved_objects/index-pattern/logstash-*" \
824 -d"{\"attributes\":{\"title\":\"logstash-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
825 #Make it the default index
826 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
827 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
828 -d"{\"value\":\"logstash-*\"}" 2>/dev/null
829 else
830 echo "Cannot connect to Kibana to create index pattern."
831 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
832 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
833 "http://127.0.0.1:5601/api/saved_objects/index-pattern/logstash-*" \
834 -d"{\"attributes\":{\"title\":\"logstash-*\",\"timeFieldName\":\"@timestamp\"}}"'
835 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
836 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
837 -d"{\"value\":\"logstash-*\"}"'
838 fi
839 echo "Finished deployment of ELK stack"
840 return 0
841 }
842
843 function deploy_perfmon() {
844 echo "Pulling docker images for PM (Grafana and Prometheus)"
845 sg docker -c "docker pull prom/prometheus" || FATAL "cannot get prometheus docker image"
846 sg docker -c "docker pull grafana/grafana" || FATAL "cannot get grafana docker image"
847 echo "Finished pulling PM docker images"
848 echo "Generating osm/kafka-exporter docker image"
849 sg docker -c "docker build ${OSM_DEVOPS}/installers/docker/osm_metrics/kafka-exporter -f ${OSM_DEVOPS}/installers/docker/osm_metrics/kafka-exporter/Dockerfile -t osm/kafka-exporter --no-cache" || FATAL "cannot build kafka-exporter docker image"
850 echo "Finished generation of osm/kafka-exporter docker image"
851 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_metrics
852 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_metrics/*.yml $OSM_DOCKER_WORK_DIR/osm_metrics
853 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_metrics/*.json $OSM_DOCKER_WORK_DIR/osm_metrics
854 remove_stack osm_metrics
855 echo "Deploying PM stack (Kafka exporter + Prometheus + Grafana)"
856 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_metrics/docker-compose.yml osm_metrics"
857 echo "Finished deployment of PM stack"
858 return 0
859 }
860
861 function install_lightweight() {
862 OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
863 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
864
865 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
866 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
867 track proceed
868 echo "Installing lightweight build of OSM"
869 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
870 trap 'rm -rf "${LWTEMPDIR}"' EXIT
871 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
872 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
873 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
874 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
875 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
876
877 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
878 if [ -z "$OSMLCM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
879 need_packages_lw="lxd"
880 echo -e "Checking required packages: $need_packages_lw"
881 dpkg -l $need_packages_lw &>/dev/null \
882 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
883 || sudo apt-get update \
884 || FATAL "failed to run apt-get update"
885 dpkg -l $need_packages_lw &>/dev/null \
886 || ! echo -e "Installing $need_packages_lw requires root privileges." \
887 || sudo apt-get install -y $need_packages_lw \
888 || FATAL "failed to install $need_packages_lw"
889 fi
890 track prereqok
891 [ -z "$INSTALL_NOJUJU" ] && install_juju
892
893 if [ -z "$OSMLCM_VCA_HOST" ]; then
894 juju_createcontroller
895 OSMLCM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
896 [ -z "$OSMLCM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
897 fi
898 if [ -z "$OSMLCM_VCA_SECRET" ]; then
899 OSMLCM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
900 [ -z "$OSMLCM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
901 fi
902
903 track juju
904 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
905 track docker_ce
906 #install_docker_compose
907 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
908 track docker_build
909 generate_docker_env_files
910 generate_config_log_folders
911
912 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
913 # remove old stack
914 remove_stack $OSM_STACK_NAME
915 create_docker_network
916 deploy_lightweight
917 generate_osmclient_script
918 track docker_deploy
919 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
920 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
921 [ -n "$INSTALL_PERFMON" ] && deploy_perfmon && track perfmon
922 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
923 track osmclient
924 wget -q -O- https://osm-download.etsi.org/ftp/osm-4.0-four/README2.txt &> /dev/null
925 track end
926 return 0
927 }
928
929 function install_vimemu() {
930 echo -e "\nInstalling vim-emu"
931 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
932 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
933 # clone vim-emu repository (attention: branch is currently master only)
934 echo "Cloning vim-emu repository ..."
935 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
936 # build vim-emu docker
937 echo "Building vim-emu Docker container..."
938
939 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
940 # start vim-emu container as daemon
941 echo "Starting vim-emu Docker container 'vim-emu' ..."
942 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
943 # in lightweight mode, the emulator needs to be attached to netOSM
944 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
945 else
946 # classic build mode
947 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
948 fi
949 echo "Waiting for 'vim-emu' container to start ..."
950 sleep 5
951 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
952 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
953 # print vim-emu connection info
954 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
955 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
956 echo -e "To add the emulated VIM to OSM you should do:"
957 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
958 }
959
960 function dump_vars(){
961 echo "DEVELOP=$DEVELOP"
962 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
963 echo "UNINSTALL=$UNINSTALL"
964 echo "NAT=$NAT"
965 echo "UPDATE=$UPDATE"
966 echo "RECONFIGURE=$RECONFIGURE"
967 echo "TEST_INSTALLER=$TEST_INSTALLER"
968 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
969 echo "INSTALL_LXD=$INSTALL_LXD"
970 echo "INSTALL_FROM_LXDIMAGES=$INSTALL_FROM_LXDIMAGES"
971 echo "LXD_REPOSITORY_BASE=$LXD_REPOSITORY_BASE"
972 echo "LXD_REPOSITORY_PATH=$LXD_REPOSITORY_PATH"
973 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
974 echo "INSTALL_ONLY=$INSTALL_ONLY"
975 echo "INSTALL_ELK=$INSTALL_ELK"
976 echo "INSTALL_PERFMON=$INSTALL_PERFMON"
977 echo "TO_REBUILD=$TO_REBUILD"
978 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
979 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
980 echo "RELEASE=$RELEASE"
981 echo "REPOSITORY=$REPOSITORY"
982 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
983 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
984 echo "NOCONFIGURE=$NOCONFIGURE"
985 echo "SHOWOPTS=$SHOWOPTS"
986 echo "Install from specific refspec (-b): $COMMIT_ID"
987 }
988
989 function track(){
990 ctime=`date +%s`
991 duration=$((ctime - SESSION_ID))
992 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
993 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
994 event_name="bin"
995 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
996 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
997 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
998 event_name="${event_name}_$1"
999 url="${url}&event=${event_name}&ce_duration=${duration}"
1000 wget -q -O /dev/null $url
1001 }
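# Example (illustrative only): a lightweight install reaching the docker_deploy
# milestone reports an event named 'lw_docker_deploy' with the elapsed seconds
# since the installer started.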
1002
1003 UNINSTALL=""
1004 DEVELOP=""
1005 NAT=""
1006 UPDATE=""
1007 RECONFIGURE=""
1008 TEST_INSTALLER=""
1009 INSTALL_LXD=""
1010 SHOWOPTS=""
1011 COMMIT_ID=""
1012 ASSUME_YES=""
1013 INSTALL_FROM_SOURCE=""
1014 RELEASE="ReleaseFOUR"
1015 REPOSITORY="stable"
1016 INSTALL_VIMEMU=""
1017 INSTALL_FROM_LXDIMAGES=""
1018 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1019 LXD_REPOSITORY_PATH=""
1020 INSTALL_LIGHTWEIGHT="y"
1021 INSTALL_ONLY=""
1022 INSTALL_ELK=""
1023 INSTALL_PERFMON=""
1024 TO_REBUILD=""
1025 INSTALL_NOLXD=""
1026 INSTALL_NODOCKER=""
1027 INSTALL_NOJUJU=""
1028 NOCONFIGURE=""
1029 RELEASE_DAILY=""
1030 SESSION_ID=`date +%s`
1031 OSM_DEVOPS=
1032 OSMLCM_VCA_HOST=
1033 OSMLCM_VCA_SECRET=
1034 OSM_STACK_NAME=osm
1035 NO_HOST_PORTS=""
1036 DOCKER_NOBUILD=""
1037 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1038 REPOSITORY_BASE="http://osm-download.etsi.org/repository/osm/debian"
1039 WORKDIR_SUDO=sudo
1040 OSM_WORK_DIR="/etc/osm"
1041 OSM_DOCKER_TAG=latest
1042
1043 while getopts ":hy-:b:r:k:u:R:l:p:D:o:m:H:S:s:w:t:" o; do
1044 case "${o}" in
1045 h)
1046 usage && exit 0
1047 ;;
1048 b)
1049 COMMIT_ID=${OPTARG}
1050 ;;
1051 r)
1052 REPOSITORY="${OPTARG}"
1053 REPO_ARGS+=(-r "$REPOSITORY")
1054 ;;
1055 R)
1056 RELEASE="${OPTARG}"
1057 REPO_ARGS+=(-R "$RELEASE")
1058 ;;
1059 k)
1060 REPOSITORY_KEY="${OPTARG}"
1061 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1062 ;;
1063 u)
1064 REPOSITORY_BASE="${OPTARG}"
1065 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1066 ;;
1067 l)
1068 LXD_REPOSITORY_BASE="${OPTARG}"
1069 ;;
1070 p)
1071 LXD_REPOSITORY_PATH="${OPTARG}"
1072 ;;
1073 D)
1074 OSM_DEVOPS="${OPTARG}"
1075 ;;
1076 s)
1077 OSM_STACK_NAME="${OPTARG}"
1078 ;;
1079 H)
1080 OSMLCM_VCA_HOST="${OPTARG}"
1081 ;;
1082 S)
1083 OSMLCM_VCA_SECRET="${OPTARG}"
1084 ;;
1085 w)
1086 # when specifying workdir, do not use sudo for access
1087 WORKDIR_SUDO=
1088 OSM_WORK_DIR="${OPTARG}"
1089 ;;
1090 t)
1091 OSM_DOCKER_TAG="${OPTARG}"
1092 ;;
1093 o)
1094 INSTALL_ONLY="y"
1095 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1096 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1097 [ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
1098 ;;
1099 m)
1100 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1101 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1102 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1103 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1104 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1105 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1106 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1107 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1108 ;;
1109 -)
1110 [ "${OPTARG}" == "help" ] && usage && exit 0
1111 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && continue
1112 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1113 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1114 [ "${OPTARG}" == "nat" ] && NAT="y" && continue
1115 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1116 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1117 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1118 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1119 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1120 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1121 [ "${OPTARG}" == "lxdimages" ] && INSTALL_FROM_LXDIMAGES="y" && continue
1122 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1123 [ "${OPTARG}" == "soui" ] && INSTALL_LIGHTWEIGHT="" && RELEASE="-R ReleaseTHREE" && REPOSITORY="-r stable" && continue
1124 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1125 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1126 [ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
1127 [ "${OPTARG}" == "noconfigure" ] && NOCONFIGURE="y" && continue
1128 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1129 [ "${OPTARG}" == "daily" ] && RELEASE_DAILY="y" && continue
1130 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1131 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1132 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1133 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1134 echo -e "Invalid option: '--$OPTARG'\n" >&2
1135 usage && exit 1
1136 ;;
1137 \?)
1138 echo -e "Invalid option: '-$OPTARG'\n" >&2
1139 usage && exit 1
1140 ;;
1141 y)
1142 ASSUME_YES="y"
1143 ;;
1144 *)
1145 usage && exit 1
1146 ;;
1147 esac
1148 done
1149
1150 [ -n "$INSTALL_FROM_LXDIMAGES" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --lxdimages can only be used with --soui"
1151 [ -n "$NAT" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --nat can only be used with --soui"
1152 [ -n "$NOCONFIGURE" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --noconfigure can only be used with --soui"
1153 [ -n "$RELEASE_DAILY" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --daily can only be used with --soui"
1154 [ -n "$INSTALL_NOLXD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nolxd cannot be used with --soui"
1155 [ -n "$INSTALL_NODOCKER" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nodocker cannot be used with --soui"
1156 [ -n "$TO_REBUILD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: -m cannot be used with --soui"
1157 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1158
1159 if [ -n "$SHOWOPTS" ]; then
1160 dump_vars
1161 exit 0
1162 fi
1163
1164 [ -n "$RELEASE_DAILY" ] && echo -e "\nInstalling from daily build repo" && RELEASE="-R ReleaseTHREE-daily" && REPOSITORY="-r testing" && COMMIT_ID="master"
1165
1166 # if develop, we force master
1167 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1168
1169 need_packages="git jq wget curl tar"
1170 echo -e "Checking required packages: $need_packages"
1171 dpkg -l $need_packages &>/dev/null \
1172 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1173 || sudo apt-get update \
1174 || FATAL "failed to run apt-get update"
1175 dpkg -l $need_packages &>/dev/null \
1176 || ! echo -e "Installing $need_packages requires root privileges." \
1177 || sudo apt-get install -y $need_packages \
1178 || FATAL "failed to install $need_packages"
1179
1180 if [ -z "$OSM_DEVOPS" ]; then
1181 if [ -n "$TEST_INSTALLER" ]; then
1182 echo -e "\nUsing local devops repo for OSM installation"
1183 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1184 else
1185 echo -e "\nCreating temporary dir for OSM installation"
1186 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1187 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1188
1189 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1190
1191 if [ -z "$COMMIT_ID" ]; then
1192 echo -e "\nGuessing the current stable release"
1193 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1194 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1195
1196 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1197 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1198 else
1199 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1200 fi
1201 git -C $OSM_DEVOPS checkout $COMMIT_ID
1202 fi
1203 fi
1204
1205 OSM_JENKINS="$OSM_DEVOPS/jenkins"
1206 . $OSM_JENKINS/common/all_funcs
1207
1208 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1209 [ -n "$UNINSTALL" ] && uninstall && echo -e "\nDONE" && exit 0
1210 [ -n "$NAT" ] && nat && echo -e "\nDONE" && exit 0
1211 [ -n "$UPDATE" ] && update && echo -e "\nDONE" && exit 0
1212 [ -n "$RECONFIGURE" ] && configure && echo -e "\nDONE" && exit 0
1213 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1214 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1215 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1216 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1217
1218 #Installation starts here
1219 wget -q -O- https://osm-download.etsi.org/ftp/osm-4.0-four/README.txt &> /dev/null
1220 track start
1221
1222 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1223 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1224 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1225 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1226 fi
1227
1228 echo -e "Checking required packages: lxd"
1229 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1230 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1231
1232 # use local devops for containers
1233 export OSM_USE_LOCAL_DEVOPS=true
1234 if [ -n "$INSTALL_FROM_SOURCE" ]; then #install from source
1235 echo -e "\nCreating the containers and building from source ..."
1236 $OSM_DEVOPS/jenkins/host/start_build RO --notest checkout $COMMIT_ID || FATAL "RO container build failed (refspec: '$COMMIT_ID')"
1237 ro_is_up && track RO
1238 $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA container build failed"
1239 vca_is_up && track VCA
1240 $OSM_DEVOPS/jenkins/host/start_build MON || FATAL "MON container build failed"
1241 mon_is_up && track MON
1242 $OSM_DEVOPS/jenkins/host/start_build SO checkout $COMMIT_ID || FATAL "SO container build failed (refspec: '$COMMIT_ID')"
1243 $OSM_DEVOPS/jenkins/host/start_build UI checkout $COMMIT_ID || FATAL "UI container build failed (refspec: '$COMMIT_ID')"
1244 #so_is_up && track SOUI
1245 track SOUI
1246 elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then #install from LXD images stored in OSM repo
1247 echo -e "\nInstalling from lxd images ..."
1248 install_from_lxdimages
1249 else #install from binaries
1250 echo -e "\nCreating the containers and installing from binaries ..."
1251 $OSM_DEVOPS/jenkins/host/install RO ${REPO_ARGS[@]} || FATAL "RO install failed"
1252 ro_is_up && track RO
1253 $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA install failed"
1254 vca_is_up && track VCA
1255 $OSM_DEVOPS/jenkins/host/install MON || FATAL "MON install failed"
1256 mon_is_up && track MON
1257 $OSM_DEVOPS/jenkins/host/install SO ${REPO_ARGS[@]} || FATAL "SO install failed"
1258 $OSM_DEVOPS/jenkins/host/install UI ${REPO_ARGS[@]} || FATAL "UI install failed"
1259 #so_is_up && track SOUI
1260 track SOUI
1261 fi
1262
1263 #Install iptables-persistent and configure NAT rules
1264 [ -z "$NOCONFIGURE" ] && nat
1265
1266 #Configure components
1267 [ -z "$NOCONFIGURE" ] && configure
1268
1269 #Install osmclient
1270 [ -z "$NOCONFIGURE" ] && install_osmclient
1271
1272 #Install vim-emu (optional)
1273 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1274
1275 wget -q -O- https://osm-download.etsi.org/ftp/osm-4.0-four/README2.txt &> /dev/null
1276 track end
1277 echo -e "\nDONE"
1278