Partial fix for bug 936
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
function usage(){
    # Print the installer's command-line help on stdout.
    # Text fixes vs. previous version: "confifured" -> "configured",
    # "moitoring" -> "monitoring", "do not juju" -> "do not install juju",
    # missing space after "--nodockerbuild:".
    cat << EOF
usage: $0 [OPTIONS]
Install OSM from binaries or source code (by default, from binaries)
  OPTIONS
    -r <repo>: use specified repository name for osm packages
    -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)
    -u <repo base>: use specified repository url for osm packages
    -k <repo key>: use specified repository public key url
    -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag
        -b master          (main dev branch)
        -b v2.0            (v2.0 branch)
        -b tags/v1.1.0     (a specific tag)
        ...
    -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled
    -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm
    -H <VCA host> use specific juju host controller IP
    -S <VCA secret> use VCA/juju secret key
    -P <VCA pubkey> use VCA/juju public key file
    -C <VCA cacert> use VCA/juju CA certificate file
    -A <VCA apiproxy> use VCA/juju API proxy
    --vimemu: additionally deploy the VIM emulator as a docker container
    --elk_stack: additionally deploy an ELK docker stack for event logging
    --pm_stack: additionally deploy a Prometheus+Grafana stack for performance monitoring (PM)
    -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, KEYSTONE-DB, NONE)
    -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, pm_stack)
    -D <devops path> use local devops installation path
    -w <work dir> Location to store runtime installation
    -t <docker tag> specify osm docker tag (default is latest)
    --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)
    --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)
    --nojuju: do not install juju, assumes already installed
    --nodockerbuild: do not build docker images (use existing locally cached images)
    --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)
    --nohostclient: do not install the osmclient
    --uninstall: uninstall OSM: remove the containers and delete NAT rules
    --source: install OSM from source code using the latest stable tag
    --develop: (deprecated, use '-b master') install OSM from source code using the master branch
    --soui: install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)
    --lxdimages: (only for Rel THREE with --soui) download lxd images from OSM repository instead of creating them from scratch
    --pullimages: pull/run osm images from docker.io/opensourcemano
    --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana
    -l <lxd_repo>: (only for Rel THREE with --soui) use specified repository url for lxd images
    -p <path>: (only for Rel THREE with --soui) use specified repository path for lxd images
EOF
    # echo -e "     --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
    cat << EOF
    --nat: (only for Rel THREE with --soui) install only NAT rules
    --noconfigure: (only for Rel THREE with --soui) DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules
EOF
    # echo -e "     --update:       update to the latest stable release or to the latest commit if using a specific branch"
    cat << EOF
    --showopts: print chosen options and exit (only for debugging)
    -y: do not prompt for confirmation, assumes yes
    -h / --help: print this help
EOF
}
67
#Uninstall OSM: remove containers
function uninstall(){
    echo -e "\nUninstalling OSM"
    # Under CI / the test installer, cleanup is delegated to the devops
    # jenkins helper scripts; otherwise the lxd containers are stopped and
    # deleted directly.
    # NOTE(review): $RC_CLONE is tested unquoted with [ ] — empty means
    # false, any non-empty value means true; confirm that is intended.
    if [ $RC_CLONE ] || [ -n "$TEST_INSTALLER" ]; then
        $OSM_DEVOPS/jenkins/host/clean_container RO
        $OSM_DEVOPS/jenkins/host/clean_container VCA
        $OSM_DEVOPS/jenkins/host/clean_container MON
        $OSM_DEVOPS/jenkins/host/clean_container SO
        #$OSM_DEVOPS/jenkins/host/clean_container UI
    else
        lxc stop RO && lxc delete RO
        lxc stop VCA && lxc delete VCA
        lxc stop MON && lxc delete MON
        lxc stop SO-ub && lxc delete SO-ub
    fi
    echo -e "\nDeleting imported lxd images if they exist"
    # 'lxc image show' acts as an existence check before each delete.
    lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
    lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
    lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
    return 0
}
89
90 # takes a juju/accounts.yaml file and returns the password specific
91 # for a controller. I wrote this using only bash tools to minimize
92 # additions of other packages
93 function parse_juju_password {
94 password_file="${HOME}/.local/share/juju/accounts.yaml"
95 local controller_name=$1
96 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
97 sed -ne "s|^\($s\):|\1|" \
98 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
99 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
100 awk -F$fs -v controller=$controller_name '{
101 indent = length($1)/2;
102 vname[indent] = $2;
103 for (i in vname) {if (i > indent) {delete vname[i]}}
104 if (length($3) > 0) {
105 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
106 if (match(vn,controller) && match($2,"password")) {
107 printf("%s",$3);
108 }
109 }
110 }'
111 }
112
function generate_secret() {
    # Emit a 32-character random alphanumeric secret on stdout.
    local charset='A-Za-z0-9'
    head /dev/urandom | tr -dc "$charset" | head -c 32
}
116
function remove_volumes() {
    # Delete OSM persistent data.
    # Swarm mode ($KUBERNETES unset): $1 is the stack name; remove the four
    # per-stack docker volumes.
    # Kubernetes mode: $1 is the host-path volume directory; remove it.
    if [ -z "$KUBERNETES" ]; then
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    else
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    fi
}
130
function remove_network() {
    # Delete the per-stack docker overlay network "net<stack>".
    # $1: stack name.
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
135
function remove_iptables() {
    # Remove the DNAT PREROUTING rule that proxies the juju API port 17070
    # to the VCA host, then persist the rule set.
    # $1: stack name (== juju controller name).
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Derive the controller IP from 'juju show-controller': take the
        # quoted api-endpoints value, keep only the host part before ':'.
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    # iptables -C only checks for the rule; delete and save only if present.
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
148
function remove_stack() {
    # Remove a docker swarm stack and poll (1s interval, max 30s) until all
    # of its tasks are gone; abort via FATAL if any survive.
    # $1: stack name.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        local tries=0
        local remaining=1
        while [ ${tries} -lt 30 ]; do
            remaining=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $remaining"
            [ "${remaining}" == "0" ] && break
            tries=$((tries+1))
            sleep 1
        done
        if [ "${remaining}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        # Give docker a moment to release names/networks before reuse.
        sleep 5
    fi
}
172
#removes osm deployments and services
function remove_k8s_namespace() {
    # Delete the whole kubernetes namespace given in $1 (this removes every
    # OSM deployment/service inside it).
    # Fix: quote the argument so an empty or whitespace-containing value
    # cannot expand into a different kubectl command line.
    kubectl delete ns "$1"
}
177
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # With -o <addon> (INSTALL_ONLY set) only the requested addon — here
    # the ELK stack — is removed.  Otherwise the full deployment goes:
    # the k8s namespace (plus optional monitoring) or the swarm stacks,
    # then images, volumes, network, NAT rule, workdir and juju controller.
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else

            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
            uninstall_prometheus_nodeexporter
        fi
        echo "Now osm docker images and volumes will be deleted"
        # The heredoc is executed by a shell running with the 'docker'
        # group; ${...} expansions happen here, before newgrp runs it.
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
228
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # Ensure iptables-persistent is installed, then run the devops nat_osm
    # script as root to (re)create the NAT rules.
    echo -e "\nChecking required packages: iptables-persistent"
    # Idiom: if dpkg -l succeeds the chain stops (package present); if it
    # fails, '! echo' prints the notice and forces a failing status so the
    # '||' continues into the apt-get install.
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
        sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent
    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
238
function FATAL(){
    # Report the failure reason ($1) and abort the installer with status 1.
    printf 'FATAL error: Cannot install OSM due to "%s"\n' "$1"
    exit 1
}
243
#Update RO, SO and UI:

# Helper for update(): inspect the git checkout of one module.
# Globals read:  CONTAINER, MDG, INSTALL_FOLDER (set by the caller),
#                COMMIT_ID, LATEST_STABLE_DEVOPS
# Globals set:   BRANCH, CURRENT, CURRENT_COMMIT_ID, CHECKOUT_ID,
#                REMOTE_COMMIT_ID
# Aborts via FATAL when the current branch cannot be determined.
function _update_repo_state(){
    echo -e " Fetching the repo"
    lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
    BRANCH=""
    BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
    [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
    CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
    CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
    echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
    # COMMIT_ID either was previously set with -b option, or is an empty string
    CHECKOUT_ID=$COMMIT_ID
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
    if [[ $CHECKOUT_ID == "tags/"* ]]; then
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
    else
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
    fi
    echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
}

# Update the RO, SO/UI and MON module checkouts inside their lxd
# containers.  The previous version repeated the same 20-line inspection
# block three times; it now lives in _update_repo_state.
function update(){
    echo -e "\nUpdating components"

    echo -e " Updating RO"
    CONTAINER="RO"
    MDG="RO"
    INSTALL_FOLDER="/opt/openmano"
    _update_repo_state
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
        # Stop the service, update the checkout (stashing around it to keep
        # local changes), migrate the DB schema, restart.
        lxc exec $CONTAINER -- service osm-ro stop
        lxc exec $CONTAINER -- git -C /opt/openmano stash
        lxc exec $CONTAINER -- git -C /opt/openmano pull --rebase
        lxc exec $CONTAINER -- git -C /opt/openmano checkout $CHECKOUT_ID
        lxc exec $CONTAINER -- git -C /opt/openmano stash pop
        lxc exec $CONTAINER -- /opt/openmano/database_utils/migrate_mano_db.sh
        lxc exec $CONTAINER -- service osm-ro start
    fi
    echo

    echo -e " Updating SO and UI"
    CONTAINER="SO-ub"
    MDG="SO"
    # NOTE(review): empty folder makes the git -C invocations above
    # malformed; unchanged from the previous version ("To be filled in").
    INSTALL_FOLDER=""   # To be filled in
    _update_repo_state
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
        # Instructions to be added
        # lxc exec SO-ub -- ...
    fi
    echo

    echo -e "Updating MON Container"
    CONTAINER="MON"
    MDG="MON"
    INSTALL_FOLDER="/root/MON"
    _update_repo_state
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
    fi
    echo
}
343
function so_is_up() {
    # Wait up to 5 minutes (polling every 5s) for the SO REST interface to
    # report the RW.Restconf component as RUNNING.
    # $1 (optional): SO IP; autodetected from the SO-ub lxd container when
    # omitted.  Aborts via FATAL on timeout.
    if [ -n "$1" ]; then
        SO_IP=$1
    else
        SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=5
    timelength=300
    while [ $time -le $timelength ]
    do
        # Query the vcs operational data with basic-auth admin:admin; jq
        # selects the RW.Restconf component and grep checks it is RUNNING.
        if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \
                -H 'accept: application/vnd.yang.data+json' \
                -H 'authorization: Basic YWRtaW46YWRtaW4=' \
                -H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]]
        then
            echo "RW.Restconf running....SO is up"
            return 0
        fi

        sleep $step
        echo -n "."
        time=$((time+step))
    done

    FATAL "OSM Failed to startup. SO failed to startup"
}
371
function vca_is_up() {
    # Declare the VCA ready when 'juju status' inside the VCA container
    # mentions "osm" exactly once; otherwise abort via FATAL.
    local matches
    matches=`lxc exec VCA -- juju status | grep "osm" | wc -l`
    if [[ $matches -eq 1 ]]; then
        echo "VCA is up and running"
        return 0
    fi
    FATAL "OSM Failed to startup. VCA failed to startup"
}
380
function mon_is_up() {
    # NOTE(review): this probes the RO endpoint ($RO_IP:9090/openmano) and
    # mirrors ro_is_up — it relies on $RO_IP being set by a previous
    # ro_is_up call and does not actually query MON; confirm whether MON
    # has its own health endpoint that should be used here.
    if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
        echo "MON is up and running"
        return 0
    fi

    FATAL "OSM Failed to startup. MON failed to startup"
}
389
function ro_is_up() {
    # Wait for the RO REST endpoint to answer "works" (poll every 2s, give
    # up after ~20s and abort via FATAL).
    # $1 (optional): RO IP; autodetected from the RO lxd container when
    # omitted.  RO_IP stays global (mon_is_up reads it afterwards).
    if [ -n "$1" ]; then
        RO_IP=$1
    else
        RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'`
    fi
    local waited=0
    local interval=2
    local deadline=20
    while [ $waited -le $deadline ]; do
        if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
            echo "RO is up and running"
            return 0
        fi
        sleep $interval
        echo -n "."
        waited=$((waited+interval))
    done

    FATAL "OSM Failed to startup. RO failed to startup"
}
411
412
function configure_RO(){
    # Point RO's logging socket at the SO container, restart RO, wait for
    # it, then (re)create the 'osm' tenant and export OPENMANO_TENANT in
    # root's .bashrc inside the RO container.
    . $OSM_DEVOPS/installers/export_ips
    echo -e " Configuring RO"
    lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg
    lxc exec RO -- service osm-ro restart

    ro_is_up

    # tenant-delete may fail on a fresh install; output discarded either way.
    lxc exec RO -- openmano tenant-delete -f osm >/dev/null
    lxc exec RO -- openmano tenant-create osm > /dev/null
    lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc
    # NOTE(review): the next two commands both re-add the export line
    # (sed-insert before the last line, then append), so .bashrc likely
    # ends up with it twice — confirm whether one is a leftover.
    lxc exec RO -- sed -i '$ i export OPENMANO_TENANT=osm' .bashrc
    lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc'
}
427
function configure_VCA(){
    # Generate a random password and set it on the juju admin user inside
    # the VCA container (juju prompts twice, hence the doubled line).
    # JUJU_PASSWD is intentionally global: configure_SOUI later registers
    # it in the SO config agent.
    echo -e " Configuring VCA"
    JUJU_PASSWD=$(generate_secret)
    echo -e "$JUJU_PASSWD\n$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password
}
433
function configure_SOUI(){
    # Configure SO/UI: host routes to the juju controller network,
    # persistent journald in SO-ub, then register the juju config agent,
    # the RO account and the UI redirect URIs through the SO REST API.
    # Reads globals JUJU_PASSWD (from configure_VCA) and the container IPs
    # exported by export_ips.
    . $OSM_DEVOPS/installers/export_ips
    JUJU_CONTROLLER_IP=`lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}'`
    RO_TENANT_ID=`lxc exec RO -- openmano tenant-list osm |awk '{print $1}'`

    echo -e " Configuring MON"
    #Information to be added about SO socket for logging

    echo -e " Configuring SO"
    # Route juju controller traffic through the VCA container, and persist
    # the routes in /etc/rc.local (inserted before its final line).
    sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP
    sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP
    sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local
    sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local
    # make journaling persistent
    lxc exec SO-ub -- mkdir -p /var/log/journal
    lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal
    lxc exec SO-ub -- systemctl restart systemd-journald

    echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad

    lxc exec SO-ub -- systemctl restart launchpad

    so_is_up $SO_CONTAINER_IP

    #delete existing config agent (could be there on reconfigure)
    curl -k --request DELETE \
        --url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' &> /dev/null

    result=$(curl -k --request POST \
        --url https://$SO_CONTAINER_IP:8008/api/config/config-agent \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result"

    #R1/R2 config line
    #result=$(curl -k --request PUT \
    #  --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \
    #  --header 'accept: application/vnd.yang.data+json' \
    #  --header 'authorization: Basic YWRtaW46YWRtaW4=' \
    #  --header 'cache-control: no-cache' \
    #  --header 'content-type: application/vnd.yang.data+json' \
    #  --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }')

    result=$(curl -k --request PUT \
        --url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result"

    result=$(curl -k --request PATCH \
        --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed redirect-uri configuration: $result"

    result=$(curl -k --request PATCH \
        --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result"

    lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg <<EOF
auto lo:1
iface lo:1 inet static
    address $DEFAULT_IP
    netmask 255.255.255.255
EOF
    # Fix: 'lxc exec' needs '--' before the command so that the command and
    # its arguments are not parsed as lxc options.
    lxc exec SO-ub -- ifup lo:1
}
519
#Configure RO, VCA, and SO with the initial configuration:
#  RO -> tenant:osm, logs to be sent to SO
#  VCA -> juju-password
#  SO -> route to Juju Controller, add RO account, add VCA account
function configure(){
    #Configure components
    # Order matters: configure_VCA sets the global JUJU_PASSWD that
    # configure_SOUI registers in the SO config agent.
    echo -e "\nConfiguring components"
    configure_RO
    configure_VCA
    configure_SOUI
}
531
function install_lxd() {
    # Install LXD from apt, initialise it non-interactively, create the
    # default bridge, and match the container profile MTU to the host's
    # default-route interface MTU (avoids fragmentation inside containers).
    sudo apt-get update
    sudo apt-get install -y lxd
    # NOTE(review): 'newgrp lxd' spawns a new shell; in a non-interactive
    # script it does not grant the lxd group to the commands that follow —
    # confirm this actually has the intended effect here.
    newgrp lxd
    lxd init --auto
    lxd waitready
    lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    lxc profile device set default eth0 mtu $DEFAULT_MTU
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
547
function ask_user(){
    # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
    # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true(0) if user type 'yes'; false (1) if user type 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # Empty answer takes the default, when the default allows it.
        if [ -z "$USER_CONFIRMATION" ]; then
            [ "$2" == 'y' ] && return 0
            [ "$2" == 'n' ] && return 1
        fi
        # ${var,,} lower-cases the answer for case-insensitive matching.
        case "${USER_CONFIRMATION,,}" in
            y|yes) return 0 ;;
            n|no)  return 1 ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
561
function launch_container_from_lxd(){
    # Create (if needed) and boot the build container for an OSM module.
    # $1: module (MDG) name, exported for the devops helper scripts
    # $2: lxd image alias to boot from
    # Relies on helpers defined elsewhere in devops: OSM_load_config,
    # container_exists, create_container, wait_container_up.
    export OSM_MDG=$1
    OSM_load_config
    export OSM_BASE_IMAGE=$2
    if ! container_exists $OSM_BUILD_CONTAINER; then
        CONTAINER_OPTS=""
        [[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true"
        [[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true"
        create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS
        wait_container_up $OSM_BUILD_CONTAINER
    fi
}
574
function install_osmclient(){
    # Install the OSM client and information model from the OSM apt repo.
    # The ${VAR#"-X "} expansions strip option prefixes possibly left over
    # from command-line parsing before composing the repository URL.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Classic (non-lightweight) install: OSM/RO hosts are the SO-ub and RO
    # lxd container IPs (column 6 of 'lxc list').
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
605
function install_prometheus_nodeexporter(){
    # Install prometheus node_exporter as a systemd service running under a
    # dedicated no-login user.  PROMETHEUS_NODE_EXPORTER_TAG selects the
    # release; OSM_DEVOPS provides the unit file.
    sudo useradd --no-create-home --shell /bin/false node_exporter
    sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
    sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
    sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
    sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
    # Fix: clean the downloaded tarball and extracted tree where they were
    # actually placed (/tmp); the previous version removed them from the
    # current working directory, leaving litter in /tmp.
    sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
    sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo systemctl restart node_exporter
    sudo systemctl enable node_exporter
    return 0
}
619
function uninstall_prometheus_nodeexporter(){
    # Stop and remove the node_exporter service, its unit file, the
    # dedicated user and the installed binary.
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
629
function install_from_lxdimages(){
    # Classic (Rel THREE) install: download (or reuse a local repository
    # of) lxd images for RO, VCA and SO/UI, import them and launch the
    # containers, tracking progress after each.
    LXD_RELEASE=${RELEASE#"-R "}
    if [ -n "$LXD_REPOSITORY_PATH" ]; then
        LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH"
    else
        # Temporary download dir, cleaned up on exit via trap.
        LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")"
        trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT
    fi
    echo -e "\nDeleting previous lxd images if they exist"
    lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
    lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
    lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
    echo -e "\nImporting osm-ro"
    # Download only when no local repository path was given.
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro
    rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz
    echo -e "\nImporting osm-vca"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca
    rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz
    echo -e "\nImporting osm-soui"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui
    rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz
    launch_container_from_lxd RO osm-ro
    ro_is_up && track RO
    launch_container_from_lxd VCA osm-vca
    vca_is_up && track VCA
    # NOTE(review): the 'osm-mon' image is never downloaded or imported
    # above — confirm where MON's image is expected to come from.
    launch_container_from_lxd MON osm-mon
    mon_is_up && track MON
    launch_container_from_lxd SO osm-soui
    #so_is_up && track SOUI
    track SOUI
}
664
function install_docker_ce() {
    # installs and configures Docker CE
    # Adds Docker's apt repository and GPG key, installs docker-ce, puts
    # the current user into the 'docker' group and verifies the daemon.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # 'sg docker' runs the check with the new group without re-login.
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
684
function install_docker_compose() {
    # installs and configures docker-compose
    # Download the pinned docker-compose 1.18.0 binary for this OS/arch
    # into /usr/local/bin and make it executable.
    echo "Installing Docker Compose ..."
    # Fix: -f makes curl fail on HTTP errors instead of saving an error
    # page as the "binary"; abort the install if the download fails.
    sudo curl -fL https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose || FATAL "cannot download docker-compose"
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
692
function install_juju() {
    # Install juju from snap (classic confinement); unless --nolxd was
    # given, re-run lxd configuration; make sure /snap/bin is in PATH for
    # the remainder of this script (does not persist past it).
    echo "Installing juju"
    sudo snap install juju --classic
    [ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
    return 0
}
701
function juju_createcontroller() {
    # Bootstrap a local LXD juju controller named after the OSM stack,
    # unless a controller with that name already exists; then verify it
    # shows up exactly once in 'juju controllers'.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
    fi
    # Fix: the awk program was fully inside double quotes, so the shell
    # expanded $1 (usually empty) before awk ran and the program printed
    # whole lines instead of field 1; escape it so awk sees its own $1.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
709
function juju_createproxy() {
    # Ensure iptables-persistent is installed, then idempotently add a DNAT
    # PREROUTING rule forwarding the juju API port 17070 to the VCA host
    # and persist it across reboots.
    echo -e "\nChecking required packages: iptables-persistent"
    # Idiom: if dpkg -l succeeds the chain stops (package present); if it
    # fails, '! echo' prints the notice and forces a failing status so the
    # '||' continues into the apt-get install.
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
        sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent

    # -C checks for the rule; append and save only if it is missing.
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
720
function generate_docker_images() {
    # Obtain all docker images OSM needs: third-party dependency images are
    # always pulled; each OSM module image is either pulled prebuilt
    # (PULL_IMAGES set) or built locally from a fresh git clone at COMMIT_ID.
    # TO_REBUILD (empty = everything) restricts which modules are processed.
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    # Build arguments forwarded to "docker build" so images reference the
    # right package repository
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # --- third-party dependency images (always pulled, never built) ---
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # keystone's mariadb is needed whenever NBI or KEYSTONE-DB is rebuilt
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # --- OSM module images: pull prebuilt, or clone + build locally ---
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # NBI ships the keystone image in its own repo, so both are handled here
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    # osmclient is built from the devops tree itself, not from a fresh clone
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
820
function cmp_overwrite() {
    # Copy $1 over $2 only when the contents differ.  If $2 already exists
    # the user is asked before overwriting; cp -b keeps a backup either way.
    # $1 - source file
    # $2 - destination file
    file1="$1"
    file2="$2"
    # Run cmp directly.  The previous form, "if ! $(cmp ... )", executed
    # cmp's (empty, redirected) output as a command, so the comparison
    # result was discarded and the copy branch could never run.
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
832
function generate_docker_env_files() {
    # Create or update the per-module .env files under OSM_DOCKER_WORK_DIR.
    # Existing files are backed up (suffix "~"), freshly generated secrets are
    # only written when a file does not exist yet, and VCA-related settings
    # are inserted or updated in place so reinstallation keeps files current.
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml

        # Prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml

        # Grafana & Prometheus Exporter files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
    fi

    # LCM: common DB key on first creation; VCA settings inserted or updated
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO: one mysql root password shared between the db and the service
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone: db root password plus keystone/service secrets
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI: must share SERVICE_PASSWORD with keystone (generated above)
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
966
function generate_osmclient_script () {
    # Write a small wrapper script ("osm") that starts the osmclient sidecar
    # container attached to the stack's docker network, and make it executable.
    local wrapper="$OSM_DOCKER_WORK_DIR/osm"
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee "$wrapper"
    $WORKDIR_SUDO chmod +x "$wrapper"
    echo "osmclient sidecar container can be found at: $wrapper"
}
972
#installs kubernetes packages
function install_kube() {
    # Add Google's Kubernetes apt repository and install kubelet, kubeadm and
    # kubectl pinned to 1.15.0 so they stay compatible with the cluster
    # configuration shipped with this installer.
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
982
#initializes kubernetes control plane
function init_kubeadm() {
    # $1 - path to the kubeadm cluster-config yaml.
    # kubeadm refuses to initialise while swap is enabled, so turn it off first
    sudo swapoff -a
    sudo kubeadm init --config $1
    sleep 5
}
989
function kube_config_dir() {
    # Copy the admin kubeconfig into the invoking user's ~/.kube so kubectl
    # works without sudo for the rest of the installation.
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    # hand ownership back to the (non-root) installing user
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
996
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Download the flannel CNI manifest into a throw-away directory and apply
    # it to the cluster.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    # NOTE(review): this EXIT trap replaces any previously installed EXIT trap
    # (e.g. the LWTEMPDIR cleanup set in install_lightweight) — confirm intended
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # Fail hard if the manifest cannot be applied (direct || instead of the
    # old "[ $? -ne 0 ] &&" pattern)
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
1005
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # Create the stack namespace and load each module's .env file as a
    # kubernetes secret so the pods can consume the settings.
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}
1018
#deploys osm pods and services
function deploy_osm_services() {
    # Allow scheduling on the master node (single-node cluster), then apply
    # every OSM manifest into the stack namespace.
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
1026
function parse_yaml() {
    # Point every OSM service manifest at the requested docker image tag.
    # $1 - tag to apply (e.g. "6.0.1")
    osm_services="nbi lcm ro pol mon light-ui keystone"
    TAG=$1
    for module in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$module:.*/opensourcemano\/$module:$TAG/g" $OSM_K8S_WORK_DIR/$module.yaml
    done
}
1034
function namespace_vol() {
    # Retarget each service's hostPath volume from the default /var/lib/osm
    # to the per-namespace volume directory ($OSM_NAMESPACE_VOL).
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    for module in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$module.yaml
    done
}
1041
function init_docker_swarm() {
    # Initialise a single-node docker swarm.  If the host MTU is not the
    # standard 1500, pre-create docker_gwbridge with a matching MTU so overlay
    # traffic is not fragmented.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # Names of all existing docker networks, space separated
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        # Pick the next free 172.x subnet above the highest one in use.
        # NOTE(review): assumes docker networks live in 172.0.0.0/8 — confirm
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 |  awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
1051
function create_docker_network() {
    # Create the attachable overlay network shared by all OSM stack services,
    # carrying the host MTU so overlay traffic is not fragmented.
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
1057
function deploy_lightweight() {

    # Deploy the OSM docker-compose stack on the swarm.  Builds the set of
    # port mappings (host ports unless NO_HOST_PORTS is set), writes them plus
    # the image tags into osm_ports.sh, and runs "docker stack deploy".
    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    # prometheus is published on 9091 on the host because RO already uses 9090
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # container-only ports: nothing published on the host
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container port pairs
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # Persist ports and image tags: docker-compose reads them via the env
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    # source the env file in the same shell that runs the deploy
    sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    popd

    echo "Finished deployment of lightweight build"
}
1112
function deploy_elk() {
    # Deploy the optional ELK (Elasticsearch/Logstash-style) monitoring stack
    # as a separate swarm stack "osm_elk", then wait for Kibana and create the
    # default filebeat index pattern.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # Poll Kibana's status endpoint for up to $timelength seconds
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never came up: print the manual commands for the user
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
1162
function install_lightweight() {
    # Top-level driver for the lightweight (containerised) OSM installation:
    # prerequisites -> juju/VCA setup -> docker -> (k8s | swarm) deployment
    # -> osmclient.  Progress is reported via track().

    # Per-stack work dir (the default "osm" stack uses the plain work dir)
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    # Confirmation prompt (skipped with -y / ASSUME_YES)
    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine default interface, its IP and MTU
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
        need_packages_lw="lxd snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
    fi
    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Bootstrap (or reuse) the juju controller and collect VCA credentials
    if [ -z "$OSM_VCA_HOST" ]; then
        juju_createcontroller
        OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy

    if [ -z "$OSM_VCA_CACERT" ]; then
        # Use the actual controller name: this was hardcoded to "osm", which
        # broke CA-cert retrieval when a custom stack name (-s) was used
        OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg ctrl "$OSM_STACK_NAME" '.controllers[$ctrl]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # Fixed: the test previously checked the literal string
        # "OSM_DATABASE_COMMONKEY" (missing $), so failures went undetected
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi
    track juju

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ ! $OSM_DOCKER_TAG == "latest" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        track deploy_osm_services_k8s
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    if [ -n "$KUBERNETES" ] && [ -n "$K8S_MONITOR" ]; then
        # install OSM MONITORING
        install_k8s_monitoring
        track install_k8s_monitoring
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    # Fetching this README doubles as an anonymous "installation finished" ping
    wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null
    track end
    return 0
}
1305
function install_vimemu() {
    # Build and start the vim-emu (emulated VIM) container, then print how to
    # register it as a VIM in OSM.
    # -e so "\n" is interpreted as a newline (plain echo printed it literally)
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    # NOTE(review): this EXIT trap replaces any previously installed EXIT trap
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "     export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "     osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1336
function install_k8s_monitoring() {
    # Install the OSM monitoring add-on on the k8s cluster.
    # Run chmod under $WORKDIR_SUDO: the old form passed $WORKDIR_SUDO as a
    # chmod *operand* (chmod +x sudo <files>) instead of a command prefix.
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
1342
function uninstall_k8s_monitoring() {
    # Remove the OSM monitoring add-on from the k8s cluster.
    # Run chmod under $WORKDIR_SUDO: the old form passed $WORKDIR_SUDO as a
    # chmod *operand* (chmod +x sudo <files>) instead of a command prefix.
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
1348
function dump_vars(){
    # Print the effective value of every installer option (used by -D /
    # SHOWOPTS debugging).
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "NAT=$NAT"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_FROM_LXDIMAGES=$INSTALL_FROM_LXDIMAGES"
    echo "LXD_REPOSITORY_BASE=$LXD_REPOSITORY_BASE"
    echo "LXD_REPOSITORY_PATH=$LXD_REPOSITORY_PATH"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "NOCONFIGURE=$NOCONFIGURE"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # Fixed: previously echoed $OSM_STACK_NAME under the OSM_WORK_DIR label
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1391
function track(){
    # Report an anonymous usage event to the OSM Woopra project.
    # $1 is the phase being reported (e.g. "start", "RO", "end"); the event
    # name is prefixed with the detected install flavour, and the request
    # carries the seconds elapsed since the installer started (SESSION_ID).
    # Variables intentionally stay global, as in the rest of this script.
    ctime=$(date +%s)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    # Flavour precedence mirrors the original guard chain: lightweight wins,
    # then lxd images, then source build, otherwise plain binaries.
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        event_name="lw"
    elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then
        event_name="lxd"
    elif [ -n "$INSTALL_FROM_SOURCE" ]; then
        event_name="binsrc"
    else
        event_name="bin"
    fi
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null "$url"
}
1405
# ---------------------------------------------------------------------------
# Default values for every installer option. All of these may be overridden
# by the command-line parsing below. Empty string means "flag not set".
# ---------------------------------------------------------------------------

# Installation mode flags
UNINSTALL=""
DEVELOP=""
NAT=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""

# Package repository defaults
RELEASE="ReleaseSIX"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_FROM_LXDIMAGES=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""

# Lightweight (docker/k8s) install is the default flavour
INSTALL_LIGHTWEIGHT="y"
INSTALL_ONLY=""
INSTALL_ELK=""
#INSTALL_PERFMON=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES=""
K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
NOCONFIGURE=""
RELEASE_DAILY=""

# Timestamp used both as telemetry session cookie and as the reference
# point for duration reporting (see track()).
# BUGFIX (idiom): use $(...) instead of deprecated backticks.
SESSION_ID=$(date +%s)

# VCA (juju) connection details; empty means "auto-detect/generate later"
OSM_DEVOPS=
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"

# Work/volume directories
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
# NOTE(review): computed before -s is parsed, so a user-supplied stack name
# is NOT reflected here — confirm whether this is recomputed later.
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"

# Docker image tags
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"

# Valid k8s namespace: lowercase alphanumerics and '-', must start/end
# with an alphanumeric (RFC 1123 label).
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1467
# Parse command-line options. Long options ("--foo") are tunneled through
# the '-' case of getopts (OPTARG holds the long option name).
# BUGFIX: the invalid-argument message for -c wrongly referred to "-i".
# Also adds a ':' case so a missing option argument reports which option
# caused it (the leading ':' in the optstring enables this).
while getopts ":hy-:b:r:c:k:u:R:l:p:D:o:m:H:S:s:w:t:U:P:A:" o; do
    case "${o}" in
        h)
            usage && exit 0
            ;;
        b)
            # Install from source at this refspec; implies building images locally
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            # Container orchestrator: swarm (default behaviour) or k8s
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        l)
            LXD_REPOSITORY_BASE="${OPTARG}"
            ;;
        p)
            LXD_REPOSITORY_PATH="${OPTARG}"
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        s)
            # Stack name (swarm) or namespace (k8s); namespaces must match RE_CHECK
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        P)
            # -P takes a file path; store the key's contents
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        o)
            # Install only one optional component, then exit
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            #[ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
            ;;
        m)
            # Accumulate modules to rebuild from source
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "nat" ] && NAT="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lxdimages" ] && INSTALL_FROM_LXDIMAGES="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "soui" ] && INSTALL_LIGHTWEIGHT="" && RELEASE="-R ReleaseTHREE" && REPOSITORY="-r stable" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            #[ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
            [ "${OPTARG}" == "noconfigure" ] && NOCONFIGURE="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "daily" ] && RELEASE_DAILY="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && K8S_MONITOR="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo -e "Option -$OPTARG requires an argument\n" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1596
# Reject option combinations that only make sense for one of the two
# install flavours (lightweight docker/k8s install vs. the legacy
# SO+UI "--soui" install). FATAL aborts the script with a message.
if [ -n "$INSTALL_FROM_LXDIMAGES" ] && [ -n "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible options: --lxd can only be used with --soui"
fi
if [ -n "$NAT" ] && [ -n "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible options: --nat can only be used with --soui"
fi
if [ -n "$NOCONFIGURE" ] && [ -n "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible options: --noconfigure can only be used with --soui"
fi
if [ -n "$RELEASE_DAILY" ] && [ -n "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible options: --daily can only be used with --soui"
fi
if [ -n "$INSTALL_NOLXD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible option: --nolxd cannot be used with --soui"
fi
if [ -n "$INSTALL_NODOCKER" ] && [ -z "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible option: --nodocker cannot be used with --soui"
fi
if [ -n "$TO_REBUILD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible option: -m cannot be used with --soui"
fi
# "-m NONE" is exclusive: it must not appear together with other modules.
if [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ]; then
    if echo $TO_REBUILD | grep -q NONE; then
        FATAL "Incompatible option: -m NONE cannot be used with other -m options"
    fi
fi

# With --showopts, print the effective configuration and stop.
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi
1610
# --daily switches the repo settings to the daily testing builds and
# forces the master refspec.
if [ -n "$RELEASE_DAILY" ]; then
    echo -e "\nInstalling from daily build repo"
    RELEASE="-R ReleaseTHREE-daily"
    REPOSITORY="-r testing"
    COMMIT_ID="master"
fi

# if develop, we force master
if [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ]; then
    COMMIT_ID="master"
fi

# Make sure the tools this installer itself depends on are present.
# apt is only touched when dpkg reports something missing.
need_packages="git jq wget curl tar"
echo -e "Checking required packages: $need_packages"
if ! dpkg -l $need_packages &>/dev/null; then
    echo -e "One or several required packages are not installed. Updating apt cache requires root privileges."
    sudo apt-get update || FATAL "failed to run apt-get update"
fi
if ! dpkg -l $need_packages &>/dev/null; then
    echo -e "Installing $need_packages requires root privileges."
    sudo apt-get install -y $need_packages || FATAL "failed to install $need_packages"
fi
1626
# Locate the devops repo that provides the per-component build/install
# helpers, unless the user already pointed at one with -D.
# With --test, reuse the local checkout this script lives in; otherwise
# clone a throw-away copy into a temp dir that is removed on exit.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        # Script lives in <devops>/installers/, so devops root is two dirs up
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        # Clean up the temporary clone on any exit path
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        # No explicit refspec (-b): pick the highest v<digit>* tag in the
        # devops repo as the current stable release.
        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi
1651
1652 . $OSM_DEVOPS/common/all_funcs
1653
1654 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1655 [ -n "$UNINSTALL" ] && uninstall && echo -e "\nDONE" && exit 0
1656 [ -n "$NAT" ] && nat && echo -e "\nDONE" && exit 0
1657 [ -n "$UPDATE" ] && update && echo -e "\nDONE" && exit 0
1658 [ -n "$RECONFIGURE" ] && configure && echo -e "\nDONE" && exit 0
1659 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1660 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1661 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1662 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1663
#Installation starts here
# NOTE(review): this wget looks like a download counter / telemetry beacon
# (output discarded); confirm before removing.
wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README.txt &> /dev/null
track start

# Default flavour: lightweight (docker/k8s) install ends here.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
# Building from source is slow; ask for confirmation unless -y was given.
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

# The legacy (non-lightweight) install runs components in LXD containers.
echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true
if [ -n "$INSTALL_FROM_SOURCE" ]; then #install from source
    # Each component is built in its own container; track() reports
    # progress after each one comes up.
    echo -e "\nCreating the containers and building from source ..."
    $OSM_DEVOPS/jenkins/host/start_build RO --notest checkout $COMMIT_ID || FATAL "RO container build failed (refspec: '$COMMIT_ID')"
    ro_is_up && track RO
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA container build failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/start_build MON || FATAL "MON install failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/start_build SO checkout $COMMIT_ID || FATAL "SO container build failed (refspec: '$COMMIT_ID')"
    $OSM_DEVOPS/jenkins/host/start_build UI checkout $COMMIT_ID || FATAL "UI container build failed (refspec: '$COMMIT_ID')"
    #so_is_up && track SOUI
    track SOUI
elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then #install from LXD images stored in OSM repo
    echo -e "\nInstalling from lxd images ..."
    install_from_lxdimages
else #install from binaries
    # Same component sequence, but installed from packaged binaries;
    # REPO_ARGS carries the -r/-R/-k/-u repo overrides collected earlier.
    echo -e "\nCreating the containers and installing from binaries ..."
    $OSM_DEVOPS/jenkins/host/install RO ${REPO_ARGS[@]} || FATAL "RO install failed"
    ro_is_up && track RO
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA install failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/install MON || FATAL "MON build failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/install SO ${REPO_ARGS[@]} || FATAL "SO install failed"
    $OSM_DEVOPS/jenkins/host/install UI ${REPO_ARGS[@]} || FATAL "UI install failed"
    #so_is_up && track SOUI
    track SOUI
fi

#Install iptables-persistent and configure NAT rules
[ -z "$NOCONFIGURE" ] && nat

#Configure components
[ -z "$NOCONFIGURE" ] && configure

#Install osmclient
[ -z "$NOCONFIGURE" ] && install_osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# NOTE(review): second beacon marking the end of the install — same
# telemetry assumption as above.
wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null
track end
echo -e "\nDONE"