Fix: The vim-emu installation requires OVS to be installed
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
function usage(){
    # Print the installer's command-line help to stdout.
    # Fixes vs previous revision: typos "confifured" -> "configured",
    # "moitoring" -> "monitoring", "do not juju" -> "do not install juju",
    # and missing space after "--nodockerbuild:".
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " OPTIONS"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " ..."
    echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack)"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not install juju, assumes already installed"
    echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --soui: install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)"
    echo -e " --lxdimages: (only for Rel THREE with --soui) download lxd images from OSM repository instead of creating them from scratch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " -l <lxd_repo>: (only for Rel THREE with --soui) use specified repository url for lxd images"
    echo -e " -p <path>: (only for Rel THREE with --soui) use specified repository path for lxd images"
# echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    echo -e " --nat: (only for Rel THREE with --soui) install only NAT rules"
    echo -e " --noconfigure: (only for Rel THREE with --soui) DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules"
# echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -h / --help: print this help"
}
66
67 #Uninstall OSM: remove containers
function uninstall(){
    # Tear down the classic (LXD-based) OSM installation: remove the
    # RO/VCA/MON/SO containers and any imported lxd images.
    echo -e "\nUninstalling OSM"
    if [ $RC_CLONE ] || [ -n "$TEST_INSTALLER" ]; then
        # CI / test-installer path: use the devops clean_container helper
        for module in RO VCA MON SO; do
            $OSM_DEVOPS/jenkins/host/clean_container $module
        done
        #$OSM_DEVOPS/jenkins/host/clean_container UI
    else
        # Regular path: stop and delete each container directly
        for container in RO VCA MON SO-ub; do
            lxc stop $container && lxc delete $container
        done
    fi
    echo -e "\nDeleting imported lxd images if they exist"
    for image in osm-ro osm-vca osm-soui; do
        lxc image show $image &>/dev/null && lxc image delete $image
    done
    return 0
}
88
89 # takes a juju/accounts.yaml file and returns the password specific
90 # for a controller. I wrote this using only bash tools to minimize
91 # additions of other packages
92 function parse_juju_password {
93 password_file="${HOME}/.local/share/juju/accounts.yaml"
94 local controller_name=$1
95 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
96 sed -ne "s|^\($s\):|\1|" \
97 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
98 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
99 awk -F$fs -v controller=$controller_name '{
100 indent = length($1)/2;
101 vname[indent] = $2;
102 for (i in vname) {if (i > indent) {delete vname[i]}}
103 if (length($3) > 0) {
104 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
105 if (match(vn,controller) && match($2,"password")) {
106 printf("%s",$3);
107 }
108 }
109 }'
110 }
111
function generate_secret() {
    # Print a random 32-character alphanumeric secret on stdout.
    # Reading /dev/urandom directly (instead of "head /dev/urandom", which
    # stops after 10 newlines) guarantees that 32 alphanumeric characters
    # are always produced.
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
115
function remove_volumes() {
    # Removes the persistent storage of a deployment.
    # K8s mode ($KUBERNETES set): $1 is a host directory to delete.
    # Swarm mode: $1 is the stack name; each named docker volume of the
    # stack ("<stack>_<volume>") is removed.
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
129
function remove_network() {
    # Removes the docker network "net<stack>" belonging to stack $1.
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
134
function remove_iptables() {
    # Deletes the DNAT rule that forwarded host port 17070 to the juju
    # controller of stack $1, and persists the change. The controller IP
    # ($OSM_VCA_HOST) and the host's default-route IP ($DEFAULT_IP) are
    # resolved here if not already set in the environment.
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Controller IP comes from "juju show-controller" api-endpoints
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        # Interface carrying the default route, then its IPv4 address
        DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # Only delete the rule if it exists (-C checks), then persist the tables
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
154
function remove_stack() {
    # Removes docker swarm stack $1 and waits (up to ~30s) until all of its
    # service tasks are gone; fails fatally if tasks remain after timeout.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            # Once fully removed, "docker stack ps" fails and prints nothing
            # to stdout, so the line count drops to 0.
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        # Grace period for docker to release networks/volumes of the stack
        sleep 5
    fi
}
178
179 #removes osm deployments and services
function remove_k8s_namespace() {
    # Deletes kubernetes namespace $1; kubectl cascades the deletion to all
    # deployments/services/pods inside it. $1 is quoted so an empty or
    # unusual value cannot be word-split into extra kubectl arguments.
    kubectl delete ns "$1"
}
183
184 #Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # Uninstalls the lightweight (docker swarm / k8s) OSM deployment.
    # With $INSTALL_ONLY set, only the requested addon (ELK) is removed;
    # otherwise the whole stack or namespace, module images, volumes,
    # NAT rules, the work dir and the juju controller are torn down.
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else

            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # Run the removals in a shell with docker group membership active
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            # K8s volumes are host directories under the namespace tree
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
233
234 #Configure NAT rules, based on the current IP addresses of containers
function nat(){
    echo -e "\nChecking required packages: iptables-persistent"
    # Chain trick: if dpkg finds the package, the rest is short-circuited;
    # otherwise "! echo" prints the notice and forces a failure so that the
    # final "|| sudo apt-get" branch runs the installation.
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
        sudo apt-get -yq install iptables-persistent
    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    # Delegates the actual rule setup to the devops nat_osm helper script
    sudo $OSM_DEVOPS/installers/nat_osm
}
243
function FATAL(){
    # Report an unrecoverable installer error ($1 = reason) and abort
    # the whole script with a non-zero exit status.
    printf '%s\n' "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}
248
249 #Update RO, SO and UI:
function update(){
    # Updates the classic LXD-based deployment in place: fetches the git
    # repo inside each container (RO, SO/UI, MON), works out the target
    # refspec and, where implemented (RO only), checks it out and migrates.
    # Target refspec: $COMMIT_ID (-b option) if set; otherwise the current
    # branch, or tags/$LATEST_STABLE_DEVOPS when HEAD is detached.
    echo -e "\nUpdating components"

    echo -e " Updating RO"
    CONTAINER="RO"
    MDG="RO"
    INSTALL_FOLDER="/opt/openmano"
    echo -e " Fetching the repo"
    lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
    BRANCH=""
    # Current branch name; "HEAD" when the repo is in detached-HEAD state
    BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
    [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
    CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
    CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
    echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
    # COMMIT_ID either was previously set with -b option, or is an empty string
    CHECKOUT_ID=$COMMIT_ID
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
    # Resolve the target commit id: tags resolve via rev-list, branches
    # via the remote-tracking ref
    if [[ $CHECKOUT_ID == "tags/"* ]]; then
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
    else
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
    fi
    echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
        # Stop the service, update the checkout (preserving local changes
        # via stash), migrate the database and restart
        lxc exec $CONTAINER -- service osm-ro stop
        lxc exec $CONTAINER -- git -C /opt/openmano stash
        lxc exec $CONTAINER -- git -C /opt/openmano pull --rebase
        lxc exec $CONTAINER -- git -C /opt/openmano checkout $CHECKOUT_ID
        lxc exec $CONTAINER -- git -C /opt/openmano stash pop
        lxc exec $CONTAINER -- /opt/openmano/database_utils/migrate_mano_db.sh
        lxc exec $CONTAINER -- service osm-ro start
    fi
    echo

    echo -e " Updating SO and UI"
    CONTAINER="SO-ub"
    MDG="SO"
    INSTALL_FOLDER="" # To be filled in
    echo -e " Fetching the repo"
    lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
    BRANCH=""
    BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
    [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
    CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
    CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
    echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
    # COMMIT_ID either was previously set with -b option, or is an empty string
    CHECKOUT_ID=$COMMIT_ID
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
    if [[ $CHECKOUT_ID == "tags/"* ]]; then
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
    else
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
    fi
    echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
        # Instructions to be added
        # lxc exec SO-ub -- ...
    fi
    echo
    echo -e "Updating MON Container"
    CONTAINER="MON"
    MDG="MON"
    INSTALL_FOLDER="/root/MON"
    echo -e " Fetching the repo"
    lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
    BRANCH=""
    BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
    [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
    CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
    CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
    echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
    # COMMIT_ID either was previously set with -b option, or is an empty string
    CHECKOUT_ID=$COMMIT_ID
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
    if [[ $CHECKOUT_ID == "tags/"* ]]; then
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
    else
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
    fi
    echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
    # NOTE(review): for MON (as for SO) only detection is implemented; the
    # actual update steps are still missing.
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
    fi
    echo
}
348
function so_is_up() {
    # Polls the SO REST API (port 8008) until the RW.Restconf component
    # reports RUNNING; fails fatally after 300 seconds.
    # Arguments: $1 - optional SO IP; otherwise taken from "lxc list SO-ub"
    if [ -n "$1" ]; then
        SO_IP=$1
    else
        SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=5
    timelength=300
    while [ $time -le $timelength ]
    do
        # "authorization" header is Basic admin:admin (base64); jq extracts
        # the RW.Restconf component status and we count RUNNING lines
        if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \
                -H 'accept: application/vnd.yang.data+json' \
                -H 'authorization: Basic YWRtaW46YWRtaW4=' \
                -H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]]
        then
            echo "RW.Restconf running....SO is up"
            return 0
        fi

        sleep $step
        echo -n "."
        time=$((time+step))
    done

    FATAL "OSM Failed to startup. SO failed to startup"
}
376
function vca_is_up() {
    # The VCA is considered ready when "juju status" inside the container
    # yields exactly one line mentioning "osm".
    if [[ $(lxc exec VCA -- juju status | grep -c "osm") -eq 1 ]]; then
        echo "VCA is up and running"
        return 0
    fi

    FATAL "OSM Failed to startup. VCA failed to startup"
}
385
function mon_is_up() {
    # NOTE(review): this probes RO's endpoint (http://$RO_IP:9090/openmano)
    # and relies on $RO_IP having been set by a previous ro_is_up call; it
    # does not query MON itself. Looks like a copy-paste from ro_is_up --
    # confirm whether checking RO here is intentional.
    if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
        echo "MON is up and running"
        return 0
    fi

    FATAL "OSM Failed to startup. MON failed to startup"
}
394
function ro_is_up() {
    # Polls RO's openmano endpoint (port 9090) until it answers with the
    # "works" banner; fails fatally after ~20 seconds.
    # Arguments: $1 - optional RO IP; otherwise taken from "lxc list RO".
    # Side effect: sets the global RO_IP (later reused by mon_is_up).
    if [ -n "$1" ]; then
        RO_IP=$1
    else
        RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=2
    timelength=20
    while [ $time -le $timelength ]; do
        if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
            echo "RO is up and running"
            return 0
        fi
        sleep $step
        echo -n "."
        time=$((time+step))
    done

    FATAL "OSM Failed to startup. RO failed to startup"
}
416
417
function configure_RO(){
    # Points RO's logging at the SO container, restarts it, and recreates
    # the default "osm" tenant. Container IPs come from export_ips.
    . $OSM_DEVOPS/installers/export_ips
    echo -e " Configuring RO"
    lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg
    lxc exec RO -- service osm-ro restart

    ro_is_up

    # Recreate the "osm" tenant from scratch (delete is best-effort)
    lxc exec RO -- openmano tenant-delete -f osm >/dev/null
    lxc exec RO -- openmano tenant-create osm > /dev/null
    # Persist OPENMANO_TENANT=osm in the container's .bashrc.
    # NOTE(review): after deleting the old line, BOTH the sed insert and the
    # echo append add it back, leaving two copies -- harmless, but probably
    # only one of the two was intended.
    lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc
    lxc exec RO -- sed -i '$ i export OPENMANO_TENANT=osm' .bashrc
    lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc'
}
432
function configure_VCA(){
    # Sets a freshly generated random password on the juju admin user
    # inside the VCA container (juju asks for it twice, hence two lines).
    echo -e " Configuring VCA"
    JUJU_PASSWD=$(generate_secret)
    printf '%s\n%s\n' "$JUJU_PASSWD" "$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password
}
438
function configure_SOUI(){
    # Configures the SO/UI container: host routes to the nested juju
    # controller, persistent journald, launchpad external address, and --
    # via the SO REST API -- the juju config-agent account, the RO account
    # and the UI redirect URIs.
    . $OSM_DEVOPS/installers/export_ips
    # The juju controller runs in a container nested inside VCA
    JUJU_CONTROLLER_IP=`lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}'`
    RO_TENANT_ID=`lxc exec RO -- openmano tenant-list osm |awk '{print $1}'`

    echo -e " Configuring MON"
    #Information to be added about SO socket for logging

    echo -e " Configuring SO"
    # Route controller traffic through the VCA container, now and on reboot
    sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP
    sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP
    sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local
    sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local
    # make journaling persistent
    lxc exec SO-ub -- mkdir -p /var/log/journal
    lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal
    lxc exec SO-ub -- systemctl restart systemd-journald

    echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad

    lxc exec SO-ub -- systemctl restart launchpad

    so_is_up $SO_CONTAINER_IP

    #delete existing config agent (could be there on reconfigure)
    curl -k --request DELETE \
        --url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' &> /dev/null

    # Register the juju controller as SO's config agent
    result=$(curl -k --request POST \
        --url https://$SO_CONTAINER_IP:8008/api/config/config-agent \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result"

    #R1/R2 config line
    #result=$(curl -k --request PUT \
    # --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \
    # --header 'accept: application/vnd.yang.data+json' \
    # --header 'authorization: Basic YWRtaW46YWRtaW4=' \
    # --header 'cache-control: no-cache' \
    # --header 'content-type: application/vnd.yang.data+json' \
    # --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }')

    # Register RO (openmano) as SO's resource orchestrator account
    result=$(curl -k --request PUT \
        --url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result"

    # Point the UI OpenID-Connect redirect URIs at the host's default IP
    result=$(curl -k --request PATCH \
        --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed redirect-uri configuration: $result"

    result=$(curl -k --request PATCH \
        --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result"

    # Bind the host's default IP to a loopback alias inside SO-ub so the
    # UI can be reached through the external address
    lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg <<EOF
auto lo:1
iface lo:1 inet static
 address $DEFAULT_IP
 netmask 255.255.255.255
EOF
    # NOTE(review): unlike the other calls, this lxc exec omits "--" before
    # the command -- confirm it is parsed as intended.
    lxc exec SO-ub ifup lo:1
}
524
525 #Configure RO, VCA, and SO with the initial configuration:
526 # RO -> tenant:osm, logs to be sent to SO
527 # VCA -> juju-password
528 # SO -> route to Juju Controller, add RO account, add VCA account
function configure(){
    #Configure components
    # Runs the per-component configuration in dependency order: RO first
    # (tenant + logging), then VCA (juju password, needed by SO), then
    # SO/UI (routes, accounts, redirect URIs).
    echo -e "\nConfiguring components"
    configure_RO
    configure_VCA
    configure_SOUI
}
536
function install_lxd() {
    # Installs LXD from apt, initializes it and creates the lxdbr0 bridge.
    sudo apt-get update
    sudo apt-get install -y lxd
    # NOTE(review): "newgrp lxd" in a non-interactive script spawns a new
    # shell rather than adding the group to the current one -- confirm it
    # has the intended effect here.
    newgrp lxd
    lxd init --auto
    lxd waitready
    lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
    # Match container MTU to the host's default-route interface MTU
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    lxc profile device set default eth0 mtu $DEFAULT_MTU
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
552
function ask_user(){
    # Prompt the user with $1 and parse a yes/no answer (case-insensitive).
    # $2 is the default applied to an empty answer: 'y' -> yes, 'n' -> no,
    # anything else (or empty) means an empty answer is not accepted.
    # Return: true(0) if user types 'yes'; false(1) if user types 'no'.
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        case "${USER_CONFIRMATION,,}" in
            "")
                [ "$2" == 'y' ] && return 0
                [ "$2" == 'n' ] && return 1
                ;;
            y|yes)
                return 0
                ;;
            n|no)
                return 1
                ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
566
function launch_container_from_lxd(){
    # Creates (if missing) and waits for the build container of module $1
    # from lxd image $2, honouring the privileged/nesting build options.
    # Relies on helpers defined elsewhere in devops: OSM_load_config,
    # container_exists, create_container, wait_container_up.
    export OSM_MDG=$1
    OSM_load_config
    export OSM_BASE_IMAGE=$2
    if ! container_exists $OSM_BUILD_CONTAINER; then
        CONTAINER_OPTS=""
        [[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true"
        [[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true"
        create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS
        wait_container_up $OSM_BUILD_CONTAINER
    fi
}
579
function install_osmclient(){
    # Installs python3-osmclient and the OSM IM from the configured OSM apt
    # repository, plus pip prerequisites, then prints env-var guidance.
    # The ${VAR#"-X "} expansions strip the option prefixes in case the
    # full "-R <rel>" style strings were stored in these variables.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Classic (non-lightweight) installs talk to the SO/RO containers
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
610
function install_prometheus_nodeexporter(){
    # Installs and enables the Prometheus node_exporter systemd service
    # (version $PROMETHEUS_NODE_EXPORTER_TAG) if it is not already active.
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        # Dedicated system user without home directory or login shell
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        # Fix: the tarball and extracted dir live under /tmp (see wget -P
        # and tar -C above); the previous unprefixed "rm -rf" targeted the
        # current working directory and left the /tmp files behind.
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
636
function uninstall_prometheus_nodeexporter(){
    # Stops and disables the node_exporter service, then removes the unit
    # file, the dedicated user and the installed binary.
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
646
function install_from_lxdimages(){
    # Downloads (or reuses local copies of) the RO/VCA/SOUI lxd images for
    # release $RELEASE, imports them, and launches + health-checks the
    # corresponding containers, recording progress via track.
    LXD_RELEASE=${RELEASE#"-R "}
    if [ -n "$LXD_REPOSITORY_PATH" ]; then
        LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH"
    else
        # Temporary download dir, removed automatically on exit
        LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")"
        trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT
    fi
    echo -e "\nDeleting previous lxd images if they exist"
    lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
    lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
    lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
    echo -e "\nImporting osm-ro"
    # Download only when no local repository path was given
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro
    rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz
    echo -e "\nImporting osm-vca"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca
    rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz
    echo -e "\nImporting osm-soui"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui
    rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz
    launch_container_from_lxd RO osm-ro
    ro_is_up && track RO
    launch_container_from_lxd VCA osm-vca
    vca_is_up && track VCA
    # NOTE(review): MON is launched from image "osm-mon", which is never
    # imported above -- confirm where that image is expected to come from.
    launch_container_from_lxd MON osm-mon
    mon_is_up && track MON
    launch_container_from_lxd SO osm-soui
    #so_is_up && track SOUI
    track SOUI
}
681
function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    # Add Docker's official GPG key and apt repo for this Ubuntu release
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # "sg docker" applies the fresh group membership without re-login
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
701
function install_docker_compose() {
    # installs and configures docker-compose
    # NOTE(review): version 1.18.0 is hard-pinned here -- confirm it is
    # still the intended release.
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
709
function install_juju() {
    # Installs juju from snap and makes sure /snap/bin is on PATH.
    echo "Installing juju"
    sudo snap install juju --classic
    # Reconfigure lxd unless the user asked to skip LXD handling (--nolxd)
    [ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
    return 0
}
718
function juju_createcontroller() {
    # Bootstraps a juju controller named $OSM_STACK_NAME on local LXD if it
    # does not exist yet, then verifies it appears in "juju controllers".
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
    fi
    # Fix: \$1 must reach awk literally (the surrounding double quotes are
    # needed to interpolate ${OSM_STACK_NAME} into the pattern); previously
    # the shell expanded $1 to this function's own first argument, making
    # awk print the whole line instead of the controller name.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
727
function juju_createproxy() {
    # Ensures iptables-persistent is installed, then adds (idempotently) a
    # DNAT rule forwarding $DEFAULT_IP:17070 (juju API port) to the VCA host.
    echo -e "\nChecking required packages: iptables-persistent"
    # '|| ! echo ... ||' chain: only apt-get install when dpkg -l fails,
    # printing the privilege notice first (same pattern used elsewhere here)
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
        sudo apt-get -yq install iptables-persistent

    # 'iptables -C' checks whether the rule already exists; append and
    # persist it only when missing so repeated installs do not stack rules
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
738
function generate_docker_images() {
    # Pulls third-party images and pulls or builds the OSM module images.
    # $TO_REBUILD (when set) restricts work to the listed modules;
    # $PULL_IMAGES switches OSM modules from local git builds (at $COMMIT_ID)
    # to pulling ${DOCKER_USER}/<module>:${OSM_DOCKER_TAG} from the registry.
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    # Forwarded to 'docker build' so images install packages from the
    # selected OSM repository/release.
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # --- third-party infrastructure images (always pulled, never built) ---
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # keystone's database image is needed for both NBI and KEYSTONE-DB builds
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # --- OSM module images: pull from registry OR clone + build locally ---
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # NBI also provides the keystone image (built from its keystone/ subdir)
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    # osmclient is the only image built from the devops tree itself
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # NOTE(review): cadvisor was already pulled above under
    # PROMETHEUS-CADVISOR; this second pull under PROMETHEUS is redundant
    # but harmless.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
842
function cmp_overwrite() {
    # Copies $1 over $2 unless their contents already match.  If $2 exists
    # with different content, the user is asked for confirmation first; a
    # backup of the previous file is kept in either case (cp -b).
    # Arguments: $1 - source file, $2 - destination file
    file1="$1"
    file2="$2"
    # cmp -s compares silently; its exit status is all we need.  The old
    # form 'if ! $(cmp ...)' ran cmp inside a command substitution and
    # executed its (empty) output, working only by accident.
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
854
function generate_docker_env_files() {
    # Backs up and (re)generates the per-module .env files under
    # $OSM_DOCKER_WORK_DIR, plus the compose/k8s deployment descriptors.
    # Existing values are preserved: missing keys are appended, VCA-related
    # keys are updated in place via sed, secrets are generated only when the
    # env file does not exist yet.
    echo "Doing a backup of existing env files"
    # {,~} brace expansion copies each file to a '~' suffixed backup
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml

        # Prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml

        # Grafana & Prometheus Exporter files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # For each VCA key: append when absent, otherwise rewrite in place so a
    # reinstall picks up a new controller without duplicating lines
    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # These two are written commented-out as opt-in templates for the user
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    # NOTE: a fresh MYSQL_ROOT_PASSWORD is generated on every run but only
    # persisted when the env files do not exist yet
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
996
function generate_osmclient_script () {
    # Writes a one-line wrapper script that runs the osmclient sidecar
    # container attached to the OSM docker network, and makes it executable.
    local client_cmd="docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}"
    echo "$client_cmd" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
1002
#installs kubernetes packages
function install_kube() {
    # Adds Google's kubernetes apt repository and installs a pinned
    # kubelet/kubeadm/kubectl toolchain (1.15.0, the version validated
    # with this OSM release).
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
1012
#initializes kubernetes control plane
function init_kubeadm() {
    # Arguments: $1 - path to the kubeadm cluster-config yaml.
    # Swap must be off for the kubelet to start.
    sudo swapoff -a
    sudo kubeadm init --config $1
    sleep 5
}
1019
function kube_config_dir() {
    # Makes the cluster admin kubeconfig usable by the current (non-root)
    # user, so subsequent kubectl calls work without sudo.
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
1026
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Fetches the flannel manifest into a throw-away temp dir and applies it
    # so pods get networking.
    # NOTE(review): this EXIT trap replaces any EXIT trap set by the caller
    # (e.g. the LWTEMPDIR cleanup in install_lightweight) — confirm intended.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # '|| FATAL' replaces the old '[ $? -ne 0 ] && FATAL' check, which made
    # the function return status 1 even when kubectl apply succeeded.
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
1035
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # Creates the stack's namespace and turns each generated .env file into
    # a k8s secret consumed by the corresponding OSM pod.
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}
1048
#deploys osm pods and services
function deploy_osm_services() {
    # Removes the NoSchedule taint from the master (single-node cluster, so
    # workloads must be schedulable there) and applies all OSM manifests
    # into the stack's namespace.
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
1056
function parse_yaml() {
    # Pins the opensourcemano/<service> image reference in every OSM k8s
    # manifest to the given docker tag.
    # Arguments: $1 - docker image tag to substitute in
    osm_services="nbi lcm ro pol mon light-ui keystone"
    TAG=$1
    local svc
    for svc in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$svc:.*/opensourcemano\/$svc:$TAG/g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
1064
function namespace_vol() {
    # Rewrites the hostPath in each stateful OSM manifest so that every
    # stack/namespace gets its own volume directory ($OSM_NAMESPACE_VOL).
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    local manifest
    for manifest in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$manifest.yaml
    done
}
1071
function init_docker_swarm() {
    # Initializes a single-node docker swarm advertised on the default-route
    # IP.  If the host MTU is not the standard 1500, docker_gwbridge is
    # pre-created with a matching MTU so overlay traffic is not dropped.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
      # derive an unused 172.x subnet by bumping the second octet of the
      # highest 172.* subnet already used by existing docker networks
      DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
      DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
      sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
1081
function create_docker_network() {
    # Creates the attachable overlay network net${OSM_STACK_NAME} that all
    # OSM stack services (and the optional vim-emu container) join.
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
1087
function deploy_lightweight() {

    # Computes the host-port mapping for every OSM service, exports it (plus
    # image tags) via osm_ports.sh, and runs 'docker stack deploy' with the
    # generated compose file.
    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    # With NO_HOST_PORTS only the container port is listed (no host
    # binding); otherwise each service is published as host:container
    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        # Prometheus is remapped to host port 9091 because 9090 is taken by RO
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # osm_ports.sh is sourced by the stack deploy command below so the
    # compose file can interpolate the port/tag variables
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    popd

    echo "Finished deployment of lightweight build"
}
1142
function deploy_elk() {
    # Pulls the ELK images, (re)deploys the osm_elk swarm stack attached to
    # the OSM network, waits up to ~40s for Kibana, and creates the default
    # filebeat index pattern when it comes up.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    # remove a previous deployment (if any) before redeploying
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # poll Kibana's status endpoint every $step seconds, up to $timelength
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never answered: print the manual commands for the user
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
1192
function install_lightweight() {
    # Top-level orchestrator for the lightweight (container-based) install:
    # prerequisites -> juju/VCA -> docker -> k8s or swarm -> images ->
    # env files -> service deployment -> osmclient.  Progress is reported
    # to the OSM telemetry endpoint via track().
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    # confirm the set of system-level changes with the user (unless -y)
    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # determine default interface, its IP and MTU (used for swarm/proxy/MTU)
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
        need_packages_lw="lxd snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
    fi
    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # derive VCA connection parameters from the local controller when the
    # user did not pass them in (-H/-S/... options)
    if [ -z "$OSM_VCA_HOST" ]; then
        juju_createcontroller
        OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        OSM_VCA_CACERT=$(juju controllers --format json | jq -r '.controllers["osm"]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # NOTE(review): the test below checks the literal string
        # "OSM_DATABASE_COMMONKEY" (missing '$'), so it can never fire
        [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        # manifests default to tag "7"; rewrite only when a different tag is used
        [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        track deploy_osm_services_k8s
        if [ -n "$K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    # anonymous install-complete ping for OSM usage statistics
    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
    track end
    return 0
}
1336
function install_vimemu() {
    # Builds and starts the vim-emu (emulated VIM) container so that network
    # services can be tested without a real VIM.
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # -y keeps the unattended installer from blocking on apt's prompt
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "     export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "     osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1369
function install_k8s_monitoring() {
    # Deploy the OSM monitoring stack on Kubernetes by delegating to the
    # helper scripts shipped in the devops repository.
    local k8s_scripts_dir="$OSM_DEVOPS/installers/k8s"
    $WORKDIR_SUDO chmod +x "$k8s_scripts_dir"/*.sh
    $WORKDIR_SUDO "$k8s_scripts_dir/install_osm_k8s_monitoring.sh"
}
1375
function uninstall_k8s_monitoring() {
    # Tear down the OSM monitoring stack previously deployed by
    # install_k8s_monitoring, via the devops helper script.
    local uninstaller="$OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh"
    $WORKDIR_SUDO "$uninstaller"
}
1380
function dump_vars(){
    # Print the effective value of every installer option/variable.
    # Invoked by --showopts so the user can inspect the configuration
    # that would be used before any installation step runs.
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "NAT=$NAT"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_FROM_LXDIMAGES=$INSTALL_FROM_LXDIMAGES"
    echo "LXD_REPOSITORY_BASE=$LXD_REPOSITORY_BASE"
    echo "LXD_REPOSITORY_PATH=$LXD_REPOSITORY_PATH"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "NOCONFIGURE=$NOCONFIGURE"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # Fix: this line previously printed $OSM_STACK_NAME under the
    # OSM_WORK_DIR label (copy/paste error).
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1423
function track(){
    # Report an anonymous install-progress event to OSM's analytics endpoint.
    #   $1 - stage name (e.g. "start", "RO", "end"); it is prefixed with the
    #        install mode ("bin", "binsrc", "lxd" or "lw") to form the event.
    # SESSION_ID (epoch at install start) doubles as the tracking cookie and
    # is used to compute the elapsed time of this stage.
    ctime=$(date +%s)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    # Quote the URL: it contains '?' and '&', which are subject to globbing
    # and word splitting when expanded unquoted.
    wget -q -O /dev/null "$url"
}
1437
# ---------------------------------------------------------------------------
# Default values for every installer option; getopts below overrides them.
# ---------------------------------------------------------------------------
UNINSTALL=""
DEVELOP=""
NAT=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseSEVEN"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_FROM_LXDIMAGES=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
# Lightweight (container-based) install is the default mode.
INSTALL_LIGHTWEIGHT="y"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES=""
K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
NOCONFIGURE=""
RELEASE_DAILY=""
# Epoch at install start; doubles as the analytics cookie in track().
# ($() instead of legacy backticks)
SESSION_ID=$(date +%s)
OSM_DEVOPS=
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
# Work dir under /etc/osm is managed via sudo; -w <dir> clears WORKDIR_SUDO.
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
# Pinned image tags/versions for third-party containers.
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Valid k8s namespace names (RFC 1123 label), used to validate -s with -c k8s.
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1498
# Command-line parsing.  Most options just record a value; "-c", "-o" and
# "-m" validate their argument in-line, and "-" dispatches GNU-style long
# options ("--foo" arrives as option "-" with OPTARG "foo").
# NOTE(review): the space before "hy" in the optstring looks unintentional
# but is preserved to avoid changing parsing behaviour.
while getopts ":b:r:c:k:u:R:l:p:D:o:m:H:S:s:w:t:U:P:A:-: hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            # Installing from a refspec implies building images locally.
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # Fix: the message previously referenced "-i" (wrong option
            # letter) and had a stray space before the argument.
            echo -e "Invalid argument for -c : '$OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        l)
            LXD_REPOSITORY_BASE="${OPTARG}"
            ;;
        p)
            LXD_REPOSITORY_PATH="${OPTARG}"
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            # NOTE(review): an unrecognized -o argument falls through
            # silently with INSTALL_ONLY set — confirm this is intended.
            ;;
        m)
            # Accumulates module names (leading space per entry); "NONE"
            # means rebuild nothing and is validated after parsing.
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            # With -c k8s the stack name is a namespace and must match the
            # RFC 1123 label regex.  NOTE(review): invalid names exit 0 —
            # arguably should be non-zero; preserved as-is.
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            # Argument is a path to the VCA public-key file; read it now.
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "nat" ] && NAT="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lxdimages" ] && INSTALL_FROM_LXDIMAGES="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "soui" ] && INSTALL_LIGHTWEIGHT="" && RELEASE="-R ReleaseTHREE" && REPOSITORY="-r stable" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "noconfigure" ] && NOCONFIGURE="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "daily" ] && RELEASE_DAILY="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && K8S_MONITOR="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1630
# Sanity checks: the classic SO/UI build (--soui) and the lightweight
# container build accept different subsets of flags; reject mixed usage.
[ -n "$INSTALL_FROM_LXDIMAGES" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --lxd can only be used with --soui"
[ -n "$NAT" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --nat can only be used with --soui"
[ -n "$NOCONFIGURE" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --noconfigure can only be used with --soui"
[ -n "$RELEASE_DAILY" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --daily can only be used with --soui"
[ -n "$INSTALL_NOLXD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nolxd cannot be used with --soui"
[ -n "$INSTALL_NODOCKER" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nodocker cannot be used with --soui"
[ -n "$TO_REBUILD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: -m cannot be used with --soui"
# -m NONE ("rebuild nothing") cannot be combined with other -m modules;
# TO_REBUILD accumulates entries with a leading space, hence " NONE".
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1639
# --showopts: dump the effective configuration and quit without installing.
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

# --daily switches to the daily testing repository and forces master.
[ -n "$RELEASE_DAILY" ] && echo -e "\nInstalling from daily build repo" && RELEASE="-R ReleaseTHREE-daily" && REPOSITORY="-r testing" && COMMIT_ID="master"

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1649
need_packages="git jq wget curl tar"
echo -e "Checking required packages: $need_packages"
# Idiom: if all packages are installed, the first dpkg check short-circuits
# the chain.  Otherwise "! echo" prints the notice (echo succeeds, so the
# negation yields false) and execution falls through to apt-get; a genuine
# apt failure ends in FATAL.
dpkg -l $need_packages &>/dev/null \
|| ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
|| sudo apt-get update \
|| FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
|| ! echo -e "Installing $need_packages requires root privileges." \
|| sudo apt-get install -y $need_packages \
|| FATAL "failed to install $need_packages"
1660
# Locate (or fetch) the devops repository that provides the installer assets.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        # --test: reuse the local checkout this script is running from.
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        # Remove the temporary checkout on any exit path.
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            # No refspec given: pick the newest v* tag as the stable release.
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi
1685
# Pull in shared helper functions from the devops repo (FATAL, ask_user, ...).
. $OSM_DEVOPS/common/all_funcs

# Standalone actions: each performs its task and exits without installing.
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$UNINSTALL" ] && uninstall && echo -e "\nDONE" && exit 0
[ -n "$NAT" ] && nat && echo -e "\nDONE" && exit 0
[ -n "$UPDATE" ] && update && echo -e "\nDONE" && exit 0
[ -n "$RECONFIGURE" ] && configure && echo -e "\nDONE" && exit 0
# "-o <component>" installs just that optional component and exits.
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
# Telemetry marker: fetching this README records the start of the install.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
track start
1701
# Lightweight (container-based) install is the default path; it performs the
# whole installation and exits here.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0

# ----- Classic (--soui) LXD-based installation path below -----
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true
if [ -n "$INSTALL_FROM_SOURCE" ]; then #install from source
    echo -e "\nCreating the containers and building from source ..."
    $OSM_DEVOPS/jenkins/host/start_build RO --notest checkout $COMMIT_ID || FATAL "RO container build failed (refspec: '$COMMIT_ID')"
    ro_is_up && track RO
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA container build failed"
    vca_is_up && track VCA
    # NOTE(review): says "install failed" here but "build failed" in the
    # binaries branch below — messages look swapped; confirm before fixing.
    $OSM_DEVOPS/jenkins/host/start_build MON || FATAL "MON install failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/start_build SO checkout $COMMIT_ID || FATAL "SO container build failed (refspec: '$COMMIT_ID')"
    $OSM_DEVOPS/jenkins/host/start_build UI checkout $COMMIT_ID || FATAL "UI container build failed (refspec: '$COMMIT_ID')"
    #so_is_up && track SOUI
    track SOUI
elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then #install from LXD images stored in OSM repo
    echo -e "\nInstalling from lxd images ..."
    install_from_lxdimages
else #install from binaries
    echo -e "\nCreating the containers and installing from binaries ..."
    $OSM_DEVOPS/jenkins/host/install RO ${REPO_ARGS[@]} || FATAL "RO install failed"
    ro_is_up && track RO
    # NOTE(review): VCA uses start_build even in the binaries branch —
    # presumably no binary package exists for it; confirm.
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA install failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/install MON || FATAL "MON build failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/install SO ${REPO_ARGS[@]} || FATAL "SO install failed"
    $OSM_DEVOPS/jenkins/host/install UI ${REPO_ARGS[@]} || FATAL "UI install failed"
    #so_is_up && track SOUI
    track SOUI
fi

#Install iptables-persistent and configure NAT rules
[ -z "$NOCONFIGURE" ] && nat

#Configure components
[ -z "$NOCONFIGURE" ] && configure

#Install osmclient
[ -z "$NOCONFIGURE" ] && install_osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Telemetry marker: fetching this README records the end of the install.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
track end
echo -e "\nDONE"