Feature 7898: Helm chart and script for monitoring K8s based OSM system
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
28 echo -e " ..."
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, the osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -s <stack name> or <namespace> user-defined stack name when installing with swarm, or namespace when installing with k8s; default is osm"
31 echo -e " -H <VCA host> use specific juju host controller IP"
32 echo -e " -S <VCA secret> use VCA/juju secret key"
33 echo -e " -P <VCA pubkey> use VCA/juju public key file"
34 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
35 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
36 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
37 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
38 echo -e " --pm_stack: additionally deploy a Prometheus+Grafana stack for performance monitoring (PM)"
39 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, KEYSTONE-DB, NONE)"
40 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, pm_stack)"
41 echo -e " -D <devops path> use local devops installation path"
42 echo -e " -w <work dir> Location to store runtime installation"
43 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
44 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
45 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
46 echo -e " --nojuju: do not install juju, assumes it is already installed"
47 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
48 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
49 echo -e " --nohostclient: do not install the osmclient"
50 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
51 echo -e " --source: install OSM from source code using the latest stable tag"
52 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
53 echo -e " --soui: install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)"
54 echo -e " --lxdimages: (only for Rel THREE with --soui) download lxd images from OSM repository instead of creating them from scratch"
55 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
56 echo -e " --k8s_monitor: install the OSM Kubernetes monitoring stack with Prometheus and Grafana"
57 echo -e " -l <lxd_repo>: (only for Rel THREE with --soui) use specified repository url for lxd images"
58 echo -e " -p <path>: (only for Rel THREE with --soui) use specified repository path for lxd images"
59 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
60 echo -e " --nat: (only for Rel THREE with --soui) install only NAT rules"
61 echo -e " --noconfigure: (only for Rel THREE with --soui) DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules"
62 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
63 echo -e " --showopts: print chosen options and exit (only for debugging)"
64 echo -e " -y: do not prompt for confirmation, assumes yes"
65 echo -e " -h / --help: print this help"
66 }
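# Illustrative invocations (flag values such as the docker tag are placeholders, not defaults):
#   ./full_install_osm.sh                                    # install from binaries with the default orchestrator
#   ./full_install_osm.sh -c k8s -s osm --k8s_monitor        # deploy on Kubernetes and add the Prometheus/Grafana monitoring
#   ./full_install_osm.sh -c swarm -t <docker tag> --elk_stack   # swarm deployment with a specific docker tag plus ELK
#   ./full_install_osm.sh --uninstall -c k8s -s osm          # remove a k8s-based deployment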
67
68 #Uninstall OSM: remove containers
69 function uninstall(){
70 echo -e "\nUninstalling OSM"
71 if [ $RC_CLONE ] || [ -n "$TEST_INSTALLER" ]; then
72 $OSM_DEVOPS/jenkins/host/clean_container RO
73 $OSM_DEVOPS/jenkins/host/clean_container VCA
74 $OSM_DEVOPS/jenkins/host/clean_container MON
75 $OSM_DEVOPS/jenkins/host/clean_container SO
76 #$OSM_DEVOPS/jenkins/host/clean_container UI
77 else
78 lxc stop RO && lxc delete RO
79 lxc stop VCA && lxc delete VCA
80 lxc stop MON && lxc delete MON
81 lxc stop SO-ub && lxc delete SO-ub
82 fi
83 echo -e "\nDeleting imported lxd images if they exist"
84 lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
85 lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
86 lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
87 return 0
88 }
89
90 # takes a juju accounts.yaml file and returns the password for a specific
91 # controller. This is written using only bash tools to minimize the
92 # addition of other packages
93 function parse_juju_password {
94 password_file="${HOME}/.local/share/juju/accounts.yaml"
95 local controller_name=$1
96 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
97 sed -ne "s|^\($s\):|\1|" \
98 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
99 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
100 awk -F$fs -v controller=$controller_name '{
101 indent = length($1)/2;
102 vname[indent] = $2;
103 for (i in vname) {if (i > indent) {delete vname[i]}}
104 if (length($3) > 0) {
105 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
106 if (match(vn,controller) && match($2,"password")) {
107 printf("%s",$3);
108 }
109 }
110 }'
111 }
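# Example (this is how the installer uses it further down): get the password of the "osm" controller
#   OSM_VCA_SECRET=$(parse_juju_password osm)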
112
113 function generate_secret() {
114 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
115 }
116
117 function remove_volumes() {
118 if [ -n "$KUBERNETES" ]; then
119 k8_volume=$1
120 echo "Removing ${k8_volume}"
121 $WORKDIR_SUDO rm -rf ${k8_volume}
122 else
123 stack=$1
124 volumes="mongo_db mon_db osm_packages ro_db"
125 for volume in $volumes; do
126 sg docker -c "docker volume rm ${stack}_${volume}"
127 done
128 fi
129 }
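# Example: on a swarm deployment "remove_volumes osm" removes the volumes
# osm_mongo_db, osm_mon_db, osm_osm_packages and osm_ro_db; on Kubernetes the
# argument is instead the host path of the namespace volume to be wiped.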
130
131 function remove_network() {
132 stack=$1
133 sg docker -c "docker network rm net${stack}"
134 }
135
136 function remove_iptables() {
137 stack=$1
138 if [ -z "$OSM_VCA_HOST" ]; then
139 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
140 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
141 fi
142
143 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
144 sudo iptables -t nat -D PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
145 sudo netfilter-persistent save
146 fi
147 }
148
149 function remove_stack() {
150 stack=$1
151 if sg docker -c "docker stack ps ${stack}" ; then
152 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
153 COUNTER=0
154 result=1
155 while [ ${COUNTER} -lt 30 ]; do
156 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
157 #echo "Dockers running: $result"
158 if [ "${result}" == "0" ]; then
159 break
160 fi
161 let COUNTER=COUNTER+1
162 sleep 1
163 done
164 if [ "${result}" == "0" ]; then
165 echo "All dockers of the stack ${stack} were removed"
166 else
167 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
168 fi
169 sleep 5
170 fi
171 }
172
173 #removes osm deployments and services
174 function remove_k8s_namespace() {
175 kubectl delete ns $1
176 }
177
178 #Uninstall lightweight OSM: remove dockers
179 function uninstall_lightweight() {
180 if [ -n "$INSTALL_ONLY" ]; then
181 if [ -n "$INSTALL_ELK" ]; then
182 echo -e "\nUninstalling OSM ELK stack"
183 remove_stack osm_elk
184 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
185 fi
186 else
187 echo -e "\nUninstalling OSM"
188 if [ -n "$KUBERNETES" ]; then
189 if [ -n "$K8S_MONITOR" ]; then
190 # uninstall OSM MONITORING
191 uninstall_k8s_monitoring
192 fi
193 remove_k8s_namespace $OSM_STACK_NAME
194 else
195 remove_stack $OSM_STACK_NAME
196 remove_stack osm_elk
197 fi
198 echo "Now osm docker images and volumes will be deleted"
199 newgrp docker << EONG
200 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
201 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
202 docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
203 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
204 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
205 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
206 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
207 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
208 EONG
209
210 if [ -n "$KUBERNETES" ]; then
211 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
212 remove_volumes $OSM_NAMESPACE_VOL
213 else
214 remove_volumes $OSM_STACK_NAME
215 remove_network $OSM_STACK_NAME
216 fi
217 remove_iptables $OSM_STACK_NAME
218 echo "Removing $OSM_DOCKER_WORK_DIR"
219 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
220 sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
221 fi
222 echo "Some docker images will be kept in case they are used by other docker stacks"
223 echo "To remove them, just run 'docker image prune' in a terminal"
224 return 0
225 }
226
227 #Configure NAT rules, based on the current IP addresses of containers
228 function nat(){
229 echo -e "\nChecking required packages: iptables-persistent"
230 dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
231 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent
232 echo -e "\nConfiguring NAT rules"
233 echo -e " Required root privileges"
234 sudo $OSM_DEVOPS/installers/nat_osm
235 }
236
237 function FATAL(){
238 echo "FATAL error: Cannot install OSM due to \"$1\""
239 exit 1
240 }
241
242 #Update RO, SO and UI:
243 function update(){
244 echo -e "\nUpdating components"
245
246 echo -e " Updating RO"
247 CONTAINER="RO"
248 MDG="RO"
249 INSTALL_FOLDER="/opt/openmano"
250 echo -e " Fetching the repo"
251 lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
252 BRANCH=""
253 BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
254 [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
255 CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
256 CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
257 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
258 # COMMIT_ID either was previously set with -b option, or is an empty string
259 CHECKOUT_ID=$COMMIT_ID
260 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
261 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
262 if [[ $CHECKOUT_ID == "tags/"* ]]; then
263 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
264 else
265 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
266 fi
267 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
268 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
269 echo " Nothing to be done."
270 else
271 echo " Update required."
272 lxc exec $CONTAINER -- service osm-ro stop
273 lxc exec $CONTAINER -- git -C /opt/openmano stash
274 lxc exec $CONTAINER -- git -C /opt/openmano pull --rebase
275 lxc exec $CONTAINER -- git -C /opt/openmano checkout $CHECKOUT_ID
276 lxc exec $CONTAINER -- git -C /opt/openmano stash pop
277 lxc exec $CONTAINER -- /opt/openmano/database_utils/migrate_mano_db.sh
278 lxc exec $CONTAINER -- service osm-ro start
279 fi
280 echo
281
282 echo -e " Updating SO and UI"
283 CONTAINER="SO-ub"
284 MDG="SO"
285 INSTALL_FOLDER="" # To be filled in
286 echo -e " Fetching the repo"
287 lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
288 BRANCH=""
289 BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
290 [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
291 CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
292 CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
293 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
294 # COMMIT_ID either was previously set with -b option, or is an empty string
295 CHECKOUT_ID=$COMMIT_ID
296 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
297 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
298 if [[ $CHECKOUT_ID == "tags/"* ]]; then
299 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
300 else
301 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
302 fi
303 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
304 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
305 echo " Nothing to be done."
306 else
307 echo " Update required."
308 # Instructions to be added
309 # lxc exec SO-ub -- ...
310 fi
311 echo
312 echo -e "Updating MON Container"
313 CONTAINER="MON"
314 MDG="MON"
315 INSTALL_FOLDER="/root/MON"
316 echo -e " Fetching the repo"
317 lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
318 BRANCH=""
319 BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
320 [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
321 CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
322 CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
323 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
324 # COMMIT_ID either was previously set with -b option, or is an empty string
325 CHECKOUT_ID=$COMMIT_ID
326 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
327 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
328 if [[ $CHECKOUT_ID == "tags/"* ]]; then
329 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
330 else
331 REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
332 fi
333 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
334 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
335 echo " Nothing to be done."
336 else
337 echo " Update required."
338 fi
339 echo
340 }
341
342 function so_is_up() {
343 if [ -n "$1" ]; then
344 SO_IP=$1
345 else
346 SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'`
347 fi
348 time=0
349 step=5
350 timelength=300
351 while [ $time -le $timelength ]
352 do
353 if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \
354 -H 'accept: application/vnd.yang.data+json' \
355 -H 'authorization: Basic YWRtaW46YWRtaW4=' \
356 -H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]]
357 then
358 echo "RW.Restconf running....SO is up"
359 return 0
360 fi
361
362 sleep $step
363 echo -n "."
364 time=$((time+step))
365 done
366
367 FATAL "OSM failed to start up. SO failed to start up"
368 }
369
370 function vca_is_up() {
371 if [[ `lxc exec VCA -- juju status | grep "osm" | wc -l` -eq 1 ]]; then
372 echo "VCA is up and running"
373 return 0
374 fi
375
376 FATAL "OSM failed to start up. VCA failed to start up"
377 }
378
379 function mon_is_up() {
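# NOTE: this check probes the RO endpoint (http://$RO_IP:9090/openmano/) rather than a MON-specific URL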
380 if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
381 echo "MON is up and running"
382 return 0
383 fi
384
385 FATAL "OSM failed to start up. MON failed to start up"
386 }
387
388 function ro_is_up() {
389 if [ -n "$1" ]; then
390 RO_IP=$1
391 else
392 RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'`
393 fi
394 time=0
395 step=2
396 timelength=20
397 while [ $time -le $timelength ]; do
398 if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
399 echo "RO is up and running"
400 return 0
401 fi
402 sleep $step
403 echo -n "."
404 time=$((time+step))
405 done
406
407 FATAL "OSM failed to start up. RO failed to start up"
408 }
409
410
411 function configure_RO(){
412 . $OSM_DEVOPS/installers/export_ips
413 echo -e " Configuring RO"
414 lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg
415 lxc exec RO -- service osm-ro restart
416
417 ro_is_up
418
419 lxc exec RO -- openmano tenant-delete -f osm >/dev/null
420 lxc exec RO -- openmano tenant-create osm > /dev/null
421 lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc
422 lxc exec RO -- sed -i '$ i export OPENMANO_TENANT=osm' .bashrc
423 lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc'
424 }
425
426 function configure_VCA(){
427 echo -e " Configuring VCA"
428 JUJU_PASSWD=$(generate_secret)
429 echo -e "$JUJU_PASSWD\n$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password
430 }
431
432 function configure_SOUI(){
433 . $OSM_DEVOPS/installers/export_ips
434 JUJU_CONTROLLER_IP=`lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}'`
435 RO_TENANT_ID=`lxc exec RO -- openmano tenant-list osm |awk '{print $1}'`
436
437 echo -e " Configuring MON"
438 #Information to be added about SO socket for logging
439
440 echo -e " Configuring SO"
441 sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP
442 sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP
443 sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local
444 sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local
445 # make journaling persistent
446 lxc exec SO-ub -- mkdir -p /var/log/journal
447 lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal
448 lxc exec SO-ub -- systemctl restart systemd-journald
449
450 echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad
451
452 lxc exec SO-ub -- systemctl restart launchpad
453
454 so_is_up $SO_CONTAINER_IP
455
456 #delete existing config agent (could be there on reconfigure)
457 curl -k --request DELETE \
458 --url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \
459 --header 'accept: application/vnd.yang.data+json' \
460 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
461 --header 'cache-control: no-cache' \
462 --header 'content-type: application/vnd.yang.data+json' &> /dev/null
463
464 result=$(curl -k --request POST \
465 --url https://$SO_CONTAINER_IP:8008/api/config/config-agent \
466 --header 'accept: application/vnd.yang.data+json' \
467 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
468 --header 'cache-control: no-cache' \
469 --header 'content-type: application/vnd.yang.data+json' \
470 --data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}')
471 [[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result"
472
473 #R1/R2 config line
474 #result=$(curl -k --request PUT \
475 # --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \
476 # --header 'accept: application/vnd.yang.data+json' \
477 # --header 'authorization: Basic YWRtaW46YWRtaW4=' \
478 # --header 'cache-control: no-cache' \
479 # --header 'content-type: application/vnd.yang.data+json' \
480 # --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }')
481
482 result=$(curl -k --request PUT \
483 --url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \
484 --header 'accept: application/vnd.yang.data+json' \
485 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
486 --header 'cache-control: no-cache' \
487 --header 'content-type: application/vnd.yang.data+json' \
488 --data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}')
489 [[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result"
490
491 result=$(curl -k --request PATCH \
492 --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \
493 --header 'accept: application/vnd.yang.data+json' \
494 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
495 --header 'cache-control: no-cache' \
496 --header 'content-type: application/vnd.yang.data+json' \
497 --data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }')
498 [[ $result =~ .*success.* ]] || FATAL "Failed redirect-uri configuration: $result"
499
500 result=$(curl -k --request PATCH \
501 --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \
502 --header 'accept: application/vnd.yang.data+json' \
503 --header 'authorization: Basic YWRtaW46YWRtaW4=' \
504 --header 'cache-control: no-cache' \
505 --header 'content-type: application/vnd.yang.data+json' \
506 --data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }')
507 [[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result"
508
509 lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg <<EOF
510 auto lo:1
511 iface lo:1 inet static
512 address $DEFAULT_IP
513 netmask 255.255.255.255
514 EOF
515 lxc exec SO-ub ifup lo:1
516 }
517
518 #Configure RO, VCA, and SO with the initial configuration:
519 # RO -> tenant:osm, logs to be sent to SO
520 # VCA -> juju-password
521 # SO -> route to Juju Controller, add RO account, add VCA account
522 function configure(){
523 #Configure components
524 echo -e "\nConfiguring components"
525 configure_RO
526 configure_VCA
527 configure_SOUI
528 }
529
530 function install_lxd() {
531 sudo apt-get update
532 sudo apt-get install -y lxd
533 newgrp lxd
534 lxd init --auto
535 lxd waitready
536 lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
537 DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
538 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
539 lxc profile device set default eth0 mtu $DEFAULT_MTU
540 #sudo systemctl stop lxd-bridge
541 #sudo systemctl --system daemon-reload
542 #sudo systemctl enable lxd-bridge
543 #sudo systemctl start lxd-bridge
544 }
545
546 function ask_user(){
547 # ask the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive.
548 # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no, other or empty for no default (an explicit answer is required)
549 # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
550 read -e -p "$1" USER_CONFIRMATION
551 while true ; do
552 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
553 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
554 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
555 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
556 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
557 done
558 }
559
560 function launch_container_from_lxd(){
561 export OSM_MDG=$1
562 OSM_load_config
563 export OSM_BASE_IMAGE=$2
564 if ! container_exists $OSM_BUILD_CONTAINER; then
565 CONTAINER_OPTS=""
566 [[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true"
567 [[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true"
568 create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS
569 wait_container_up $OSM_BUILD_CONTAINER
570 fi
571 }
572
573 function install_osmclient(){
574 CLIENT_RELEASE=${RELEASE#"-R "}
575 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
576 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
577 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
578 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
579 curl $key_location | sudo apt-key add -
580 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
581 sudo apt-get update
582 sudo apt-get install -y python3-pip
583 sudo -H LC_ALL=C python3 -m pip install -U pip
584 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind
585 sudo apt-get install -y python3-osm-im python3-osmclient
586 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
587 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
588 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
589 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
590 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
591 echo -e "\nOSM client installed"
592 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
593 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
594 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
595 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
596 else
597 echo -e "OSM client assumes that the OSM host is running on localhost (127.0.0.1)."
598 echo -e "If you want to interact with a different OSM host, configure this env variable in your .bashrc file:"
599 echo " export OSM_HOSTNAME=<OSM_host>"
600 fi
601 return 0
602 }
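# Quick sanity check of the freshly installed client (assumes the "osm" command is now on the PATH):
#   osm --help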
603
604 function install_prometheus_nodeexporter(){
605 sudo useradd --no-create-home --shell /bin/false node_exporter
606 sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
607 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
608 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
609 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
610 sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
611 sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
612 sudo systemctl daemon-reload
613 sudo systemctl restart node_exporter
614 sudo systemctl enable node_exporter
615 return 0
616 }
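# Assuming the bundled node_exporter.service keeps the exporter's default port (9100), it can be checked with:
#   curl -s http://localhost:9100/metrics | head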
617
618 function install_from_lxdimages(){
619 LXD_RELEASE=${RELEASE#"-R "}
620 if [ -n "$LXD_REPOSITORY_PATH" ]; then
621 LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH"
622 else
623 LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")"
624 trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT
625 fi
626 echo -e "\nDeleting previous lxd images if they exist"
627 lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
628 lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
629 lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
630 echo -e "\nImporting osm-ro"
631 [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz
632 lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro
633 rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz
634 echo -e "\nImporting osm-vca"
635 [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz
636 lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca
637 rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz
638 echo -e "\nImporting osm-soui"
639 [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz
640 lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui
641 rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz
642 launch_container_from_lxd RO osm-ro
643 ro_is_up && track RO
644 launch_container_from_lxd VCA osm-vca
645 vca_is_up && track VCA
646 launch_container_from_lxd MON osm-mon
647 mon_is_up && track MON
648 launch_container_from_lxd SO osm-soui
649 #so_is_up && track SOUI
650 track SOUI
651 }
652
653 function install_docker_ce() {
654 # installs and configures Docker CE
655 echo "Installing Docker CE ..."
656 sudo apt-get -qq update
657 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
658 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
659 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
660 sudo apt-get -qq update
661 sudo apt-get install -y docker-ce
662 echo "Adding user to group 'docker'"
663 sudo groupadd -f docker
664 sudo usermod -aG docker $USER
665 sleep 2
666 sudo service docker restart
667 echo "... restarted Docker service"
668 sg docker -c "docker version" || FATAL "Docker installation failed"
669 echo "... Docker CE installation done"
670 return 0
671 }
672
673 function install_docker_compose() {
674 # installs and configures docker-compose
675 echo "Installing Docker Compose ..."
676 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
677 sudo chmod +x /usr/local/bin/docker-compose
678 echo "... Docker Compose installation done"
679 }
680
681 function install_juju() {
682 echo "Installing juju"
683 sudo snap install juju --classic
684 [ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
685 echo "Finished installation of juju"
686 return 0
687 }
688
689 function juju_createcontroller() {
690 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
691 # Controller not found, create it
692 sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
693 fi
694 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
695 }
696
697 function juju_createproxy() {
698 echo -e "\nChecking required packages: iptables-persistent"
699 dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
700 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent
701
702 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
703 sudo iptables -t nat -A PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
704 sudo netfilter-persistent save
705 fi
706 }
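# The resulting DNAT rule towards the juju controller can be verified with:
#   sudo iptables -t nat -L PREROUTING -n | grep 17070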
707
708 function generate_docker_images() {
709 echo "Pulling and generating docker images"
710 _build_from=$COMMIT_ID
711 [ -z "$_build_from" ] && _build_from="master"
712
713 echo "OSM Docker images generated from $_build_from"
714
715 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
716 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
717 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
718 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
719
720 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
721 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
722 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
723 fi
724
725 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
726 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
727 fi
728
729 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
730 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
731 fi
732
733 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
734 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
735 fi
736
737 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
738 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
739 fi
740
741 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
742 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
743 fi
744
745 if [ -n "$PULL_IMAGES" ]; then
746 sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
747 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
748 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
749 git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
750 sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
751 fi
752
753 if [ -n "$PULL_IMAGES" ]; then
754 sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
755 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
756 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
757 git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
758 sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
759 fi
760
761 if [ -n "$PULL_IMAGES" ]; then
762 sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
763 sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
764 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
765 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
766 git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
767 sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
768 sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
769 fi
770
771 if [ -n "$PULL_IMAGES" ]; then
772 sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
773 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
774 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
775 git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
776 sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
777 fi
778
779 if [ -n "$PULL_IMAGES" ]; then
780 sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM docker image"
781 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
782 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
783 git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
784 sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
785 fi
786
787 if [ -n "$PULL_IMAGES" ]; then
788 sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
789 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
790 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
791 git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
792 sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
793 fi
794
795 if [ -n "$PULL_IMAGES" ]; then
796 sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
797 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
798 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
799 fi
800
801 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
802 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
803 fi
804
805 echo "Finished generation of docker images"
806 }
807
808 function cmp_overwrite() {
809 file1="$1"
810 file2="$2"
811 if ! $(cmp "${file1}" "${file2}" >/dev/null 2>&1); then
812 if [ -f "${file2}" ]; then
813 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
814 else
815 cp -b ${file1} ${file2}
816 fi
817 fi
818 }
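# Usage: cmp_overwrite <src_file> <dst_file>
# Copies <src_file> over <dst_file>; if <dst_file> already exists and differs, the user is asked first
# ("cp -b" keeps a backup of the previous file).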
819
820 function generate_docker_env_files() {
821 echo "Doing a backup of existing env files"
822 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
823 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
824 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
825 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
826 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
827 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
828 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
829 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
830 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
831
832 echo "Generating docker env files"
833 if [ -n "$KUBERNETES" ]; then
834 #Kubernetes resources
835 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
836 else
837 # Docker-compose
838 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
839
840 # Prometheus
841 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml
842
843 # Grafana & Prometheus Exporter files
844 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
845 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
846 fi
847
848 # LCM
849 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
850 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
851 fi
852
853 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
854 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
855 else
856 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
857 fi
858
859 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
860 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
861 else
862 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
863 fi
864
865 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
866 echo "OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
867 else
868 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"|g" $OSM_DOCKER_WORK_DIR/lcm.env
869 fi
870
871 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
872 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
873 else
874 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
875 fi
876
877 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
878 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
879 else
880 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
881 fi
882
883 # RO
884 MYSQL_ROOT_PASSWORD=$(generate_secret)
885 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
886 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
887 fi
888 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
889 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
890 fi
891
892 # Keystone
893 KEYSTONE_DB_PASSWORD=$(generate_secret)
894 SERVICE_PASSWORD=$(generate_secret)
895 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
896 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
897 fi
898 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
899 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
900 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
901 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
902 fi
903
904 # NBI
905 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
906 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
907 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
908 fi
909
910 # MON
911 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
912 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
913 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
914 fi
915
916 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
917 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
918 else
919 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
920 fi
921
922 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
923 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
924 else
925 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
926 fi
927
928 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
929 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
930 else
931 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
932 fi
933
934 # POL
935 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
936 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
937 fi
938
939 # LW-UI
940 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
941 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
942 fi
943
944 echo "Finished generation of docker env files"
945 }
946
947 function generate_osmclient_script () {
948 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
949 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
950 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
951 }
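# The generated wrapper runs an osmclient sidecar container attached to net${OSM_STACK_NAME}, e.g.:
#   $OSM_DOCKER_WORK_DIR/osm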
952
953 #installs kubernetes packages
954 function install_kube() {
955 sudo apt-get update && sudo apt-get install -y apt-transport-https
956 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
957 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
958 sudo apt-get update
959 echo "Installing Kubernetes Packages ..."
960 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
961 }
962
963 #initializes kubernetes control plane
964 function init_kubeadm() {
965 sudo swapoff -a
966 sudo kubeadm init --config $1
967 sleep 5
968 }
969
970 function kube_config_dir() {
971 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
972 mkdir -p $HOME/.kube
973 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
974 sudo chown $(id -u):$(id -g) $HOME/.kube/config
975 }
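# At this point kubectl should be able to reach the new control plane:
#   kubectl get nodes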
976
977 #deploys flannel as daemonsets
978 function deploy_cni_provider() {
979 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
980 trap 'rm -rf "${CNI_DIR}"' EXIT
981 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
982 kubectl apply -f $CNI_DIR
983 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
984 }
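# The flannel pods can be checked afterwards with:
#   kubectl get pods --all-namespaces | grep flannel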
985
986 #creates secrets from env files which will be used by containers
987 function kube_secrets(){
988 kubectl create ns $OSM_STACK_NAME
989 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
990 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
991 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
992 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
993 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
994 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
995 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
996 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
997 }
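# The generated secrets can be listed with:
#   kubectl -n $OSM_STACK_NAME get secrets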
998
999 #deploys osm pods and services
1000 function deploy_osm_services() {
1001 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
1002 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
1003 sleep 5
1004 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
1005 }
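# Deployment progress can be followed with:
#   kubectl -n $OSM_STACK_NAME get pods -w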
1006
1007 function parse_yaml() {
1008 osm_services="nbi lcm ro pol mon light-ui keystone"
1009 TAG=$1
1010 for osm in $osm_services; do
1011 $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
1012 done
1013 }
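# Example: "parse_yaml <tag>" rewrites the image references in the copied manifests, e.g.
#   opensourcemano/lcm:latest -> opensourcemano/lcm:<tag>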
1014
1015 function namespace_vol() {
1016 osm_services="nbi lcm ro pol mon kafka mongo mysql"
1017 for osm in $osm_services; do
1018 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
1019 done
1020 }
1021
1022 function init_docker_swarm() {
1023 if [ "${DEFAULT_MTU}" != "1500" ]; then
1024 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
1025 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
1026 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
1027 fi
1028 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
1029 return 0
1030 }
1031
1032 function create_docker_network() {
1033 echo "creating network"
1034 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
1035 echo "creating network DONE"
1036 }
1037
1038 function deploy_lightweight() {
1039
1040 echo "Deploying lightweight build"
1041 OSM_NBI_PORT=9999
1042 OSM_RO_PORT=9090
1043 OSM_KEYSTONE_PORT=5000
1044 OSM_UI_PORT=80
1045 OSM_MON_PORT=8662
1046 OSM_PROM_PORT=9090
1047 OSM_PROM_HOSTPORT=9091
1048 OSM_GRAFANA_PORT=3000
1049 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
1050 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
1051
1052 if [ -n "$NO_HOST_PORTS" ]; then
1053 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
1054 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
1055 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
1056 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
1057 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
1058 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
1059 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
1060 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
1061 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
1062 else
1063 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
1064 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
1065 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
1066 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
1067 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
1068 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
1069 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
1070 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
1071 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
1072 fi
1073 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
1074 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1075 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1076 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1077 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1078 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1079 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1080 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1081 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1082
1083 pushd $OSM_DOCKER_WORK_DIR
1084 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
1085 popd
1086
1087 echo "Finished deployment of lightweight build"
1088 }
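# The deployed stack can be inspected afterwards with:
#   sg docker -c "docker stack ps ${OSM_STACK_NAME}"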
1089
1090 function deploy_elk() {
1091 echo "Pulling docker images for ELK"
1092 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
1093 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
1094 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
1095 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
1096 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
1097 echo "Finished pulling elk docker images"
1098 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
1099 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
1100 remove_stack osm_elk
1101 echo "Deploying ELK stack"
1102 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
1103 echo "Waiting for ELK stack to be up and running"
1104 time=0
1105 step=5
1106 timelength=40
1107 elk_is_up=1
1108 while [ $time -le $timelength ]; do
1109 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
1110 elk_is_up=0
1111 break
1112 fi
1113 sleep $step
1114 time=$((time+step))
1115 done
1116 if [ $elk_is_up -eq 0 ]; then
1117 echo "ELK is up and running. Trying to create index pattern..."
1118 #Create index pattern
1119 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1120 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1121 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
1122 #Make it the default index
1123 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1124 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1125 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
1126 else
1127 echo "Cannot connect to Kibana to create index pattern."
1128 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
1129 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1130 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1131 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
1132 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1133 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1134 -d"{\"value\":\"filebeat-*\"}"'
1135 fi
1136 echo "Finished deployment of ELK stack"
1137 return 0
1138 }
1139
1140 function install_lightweight() {
1141 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1142 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1143 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1144 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1145
1146 track checkingroot
1147 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1148 track noroot
1149
1150 if [ -n "$KUBERNETES" ]; then
1151 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1152 1. Install and configure LXD
1153 2. Install juju
1154 3. Install docker CE
1155 4. Disable swap space
1156 5. Install and initialize Kubernetes
1157 as pre-requirements.
1158 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1159
1160 else
1161 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1162 fi
1163 track proceed
1164
1165 echo "Installing lightweight build of OSM"
1166 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1167 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1168 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
1169 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1170 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
1171 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1172 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1173
1174 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
1175 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
1176 need_packages_lw="lxd snapd"
1177 echo -e "Checking required packages: $need_packages_lw"
1178 dpkg -l $need_packages_lw &>/dev/null \
1179 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1180 || sudo apt-get update \
1181 || FATAL "failed to run apt-get update"
1182 dpkg -l $need_packages_lw &>/dev/null \
1183 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1184 || sudo apt-get install -y $need_packages_lw \
1185 || FATAL "failed to install $need_packages_lw"
1186 fi
1187 track prereqok
1188
1189 [ -z "$INSTALL_NOJUJU" ] && install_juju
1190 track juju_install
1191
1192 if [ -z "$OSM_VCA_HOST" ]; then
1193 juju_createcontroller
1194 OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1195 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
1196 fi
1197 track juju_controller
1198
1199 if [ -z "$OSM_VCA_SECRET" ]; then
1200 OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
1201 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
1202 fi
1203 if [ -z "$OSM_VCA_PUBKEY" ]; then
1204 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
1205 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
1206 fi
1207 if [ -z "$OSM_VCA_APIPROXY" ]; then
1208 OSM_VCA_APIPROXY=$DEFAULT_IP
1209 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
1210 fi
1211 juju_createproxy
1212
1213 if [ -z "$OSM_VCA_CACERT" ]; then
1214 OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller "$OSM_STACK_NAME" '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1215 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
1216 fi
1217 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1218 OSM_DATABASE_COMMONKEY=$(generate_secret)
1219 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
1220 fi
1221 track juju
1222
1223 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
1224 track docker_ce
1225
1226 # Install and initialize Kubernetes (or init a docker swarm); the OSM services themselves are deployed below
1227 if [ -n "$KUBERNETES" ]; then
1228 install_kube
1229 track install_k8s
1230 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1231 kube_config_dir
1232 track init_k8s
1233 else
1234 #install_docker_compose
1235 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1236 track docker_swarm
1237 fi
1238
1239 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1240 track docker_build
1241
1242 generate_docker_env_files
1243
1244 if [ -n "$KUBERNETES" ]; then
1245 if [ -n "$K8S_MONITOR" ]; then
1246 # uninstall OSM MONITORING
1247 uninstall_k8s_monitoring
1248 fi
1249 #remove old namespace
1250 remove_k8s_namespace $OSM_STACK_NAME
1251 deploy_cni_provider
1252 kube_secrets
1253 [ "$OSM_DOCKER_TAG" != "latest" ] && parse_yaml $OSM_DOCKER_TAG
1254 namespace_vol
1255 deploy_osm_services
1256 track deploy_osm_services_k8s
1257 else
1258 # remove old stack
1259 remove_stack $OSM_STACK_NAME
1260 create_docker_network
1261 deploy_lightweight
1262 generate_osmclient_script
1263 track docker_deploy
1264 install_prometheus_nodeexporter
1265 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1266 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1267 fi
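# Quick sanity checks after deployment (illustrative, not executed; names assume the
# default stack/namespace "osm"):
#   kubectl -n osm get pods    # Kubernetes-based deployment
#   docker stack ps osm        # docker swarm based deployment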
1268
1269 if [ -n "$KUBERNETES" ] && [ -n "$K8S_MONITOR" ]; then
1270 # install OSM MONITORING
1271 install_k8s_monitoring
1272 track install_k8s_monitoring
1273 fi
1274
1275 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1276 track osmclient
1277
1278 wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null
1279 track end
1280 return 0
1281 }
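# Example invocations (illustrative; the options are parsed further below):
#   ./full_install_osm.sh -c k8s -s osm --k8s_monitor   # lightweight install on Kubernetes plus the K8s monitoring stack
#   ./full_install_osm.sh -c swarm -t latest            # lightweight install on a docker swarm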
1282
1283 function install_vimemu() {
1284 echo "\nInstalling vim-emu"
1285 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1286 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1287 # clone vim-emu repository (attention: branch is currently master only)
1288 echo "Cloning vim-emu repository ..."
1289 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1290 # build vim-emu docker
1291 echo "Building vim-emu Docker container..."
1292
1293 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1294 # start vim-emu container as daemon
1295 echo "Starting vim-emu Docker container 'vim-emu' ..."
1296 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1297 # in lightweight mode, the emulator needs to be attached to the OSM docker network (net${OSM_STACK_NAME})
1298 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1299 else
1300 # classic build mode
1301 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1302 fi
1303 echo "Waiting for 'vim-emu' container to start ..."
1304 sleep 5
1305 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1306 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1307 # print vim-emu connection info
1308 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1309 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1310 echo -e "To add the emulated VIM to OSM you should do:"
1311 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1312 }
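# Illustrative follow-up, not executed here: after registering the emulated VIM with the
# "osm vim-create" command printed above, it should show up in:
#   osm vim-list
# and the emulator container itself can be inspected with:
#   docker logs vim-emu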
1313
1314 function install_k8s_monitoring() {
1315 # install OSM monitoring
1316 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1317 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1318 }
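# Illustrative check, not executed: install_osm_k8s_monitoring.sh deploys the monitoring
# charts; assuming they land in a dedicated "monitoring" namespace (an assumption, the
# helper script defines the actual namespace), the result can be inspected with:
#   kubectl get pods -n monitoring
#   helm list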
1319
1320 function uninstall_k8s_monitoring() {
1321 # uninstall OSM monitoring
1322 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1323 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1324 }
1325
1326 function dump_vars(){
1327 echo "DEVELOP=$DEVELOP"
1328 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1329 echo "UNINSTALL=$UNINSTALL"
1330 echo "NAT=$NAT"
1331 echo "UPDATE=$UPDATE"
1332 echo "RECONFIGURE=$RECONFIGURE"
1333 echo "TEST_INSTALLER=$TEST_INSTALLER"
1334 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1335 echo "INSTALL_LXD=$INSTALL_LXD"
1336 echo "INSTALL_FROM_LXDIMAGES=$INSTALL_FROM_LXDIMAGES"
1337 echo "LXD_REPOSITORY_BASE=$LXD_REPOSITORY_BASE"
1338 echo "LXD_REPOSITORY_PATH=$LXD_REPOSITORY_PATH"
1339 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1340 echo "INSTALL_ONLY=$INSTALL_ONLY"
1341 echo "INSTALL_ELK=$INSTALL_ELK"
1342 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1343 echo "TO_REBUILD=$TO_REBUILD"
1344 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1345 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1346 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1347 echo "RELEASE=$RELEASE"
1348 echo "REPOSITORY=$REPOSITORY"
1349 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1350 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1351 echo "NOCONFIGURE=$NOCONFIGURE"
1352 echo "OSM_DEVOPS=$OSM_DEVOPS"
1353 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1354 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1355 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1356 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1357 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1358 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1359 echo "OSM_WORK_DIR=$OSM_STACK_NAME"
1360 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1361 echo "DOCKER_USER=$DOCKER_USER"
1362 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1363 echo "PULL_IMAGES=$PULL_IMAGES"
1364 echo "KUBERNETES=$KUBERNETES"
1365 echo "SHOWOPTS=$SHOWOPTS"
1366 echo "Install from specific refspec (-b): $COMMIT_ID"
1367 }
1368
1369 function track(){
1370 ctime=`date +%s`
1371 duration=$((ctime - SESSION_ID))
1372 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1373 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1374 event_name="bin"
1375 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1376 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1377 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1378 event_name="${event_name}_$1"
1379 url="${url}&event=${event_name}&ce_duration=${duration}"
1380 wget -q -O /dev/null $url
1381 }
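# For illustration, each call to track() results in a single beacon URL such as the
# following (cookie and duration values are hypothetical):
#   http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=1571234567&event=lw_start&ce_duration=42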
1382
1383 UNINSTALL=""
1384 DEVELOP=""
1385 NAT=""
1386 UPDATE=""
1387 RECONFIGURE=""
1388 TEST_INSTALLER=""
1389 INSTALL_LXD=""
1390 SHOWOPTS=""
1391 COMMIT_ID=""
1392 ASSUME_YES=""
1393 INSTALL_FROM_SOURCE=""
1394 RELEASE="ReleaseSIX"
1395 REPOSITORY="stable"
1396 INSTALL_VIMEMU=""
1397 INSTALL_FROM_LXDIMAGES=""
1398 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1399 LXD_REPOSITORY_PATH=""
1400 INSTALL_LIGHTWEIGHT="y"
1401 INSTALL_ONLY=""
1402 INSTALL_ELK=""
1403 #INSTALL_PERFMON=""
1404 TO_REBUILD=""
1405 INSTALL_NOLXD=""
1406 INSTALL_NODOCKER=""
1407 INSTALL_NOJUJU=""
1408 KUBERNETES=""
1409 K8S_MONITOR=""
1410 INSTALL_NOHOSTCLIENT=""
1411 NOCONFIGURE=""
1412 RELEASE_DAILY=""
1413 SESSION_ID=`date +%s`
1414 OSM_DEVOPS=
1415 OSM_VCA_HOST=
1416 OSM_VCA_SECRET=
1417 OSM_VCA_PUBKEY=
1418 OSM_STACK_NAME=osm
1419 NO_HOST_PORTS=""
1420 DOCKER_NOBUILD=""
1421 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1422 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1423 WORKDIR_SUDO=sudo
1424 OSM_WORK_DIR="/etc/osm"
1425 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1426 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1427 OSM_HOST_VOL="/var/lib/osm"
1428 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1429 OSM_DOCKER_TAG=latest
1430 DOCKER_USER=opensourcemano
1431 PULL_IMAGES="y"
1432 KAFKA_TAG=2.11-1.0.2
1433 PROMETHEUS_TAG=v2.4.3
1434 GRAFANA_TAG=latest
1435 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1436 PROMETHEUS_CADVISOR_TAG=latest
1437 KEYSTONEDB_TAG=10
1438 OSM_DATABASE_COMMONKEY=
1439 ELASTIC_VERSION=6.4.2
1440 ELASTIC_CURATOR_VERSION=5.5.4
1441 POD_NETWORK_CIDR=10.244.0.0/16
1442 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1443 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
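# Examples of stack/namespace names against RE_CHECK (illustrative):
#   "osm", "osm-2", "my0sm"    -> accepted
#   "Osm", "-osm", "osm_dev"   -> rejected (uppercase, leading '-' and '_' are not allowed)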
1444
1445 while getopts ":hy-:b:r:c:k:u:R:l:p:D:o:m:H:S:s:w:t:U:P:A:" o; do
1446 case "${o}" in
1447 h)
1448 usage && exit 0
1449 ;;
1450 b)
1451 COMMIT_ID=${OPTARG}
1452 PULL_IMAGES=""
1453 ;;
1454 r)
1455 REPOSITORY="${OPTARG}"
1456 REPO_ARGS+=(-r "$REPOSITORY")
1457 ;;
1458 c)
1459 [ "${OPTARG}" == "swarm" ] && continue
1460 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1461 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1462 usage && exit 1
1463 ;;
1464 R)
1465 RELEASE="${OPTARG}"
1466 REPO_ARGS+=(-R "$RELEASE")
1467 ;;
1468 k)
1469 REPOSITORY_KEY="${OPTARG}"
1470 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1471 ;;
1472 u)
1473 REPOSITORY_BASE="${OPTARG}"
1474 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1475 ;;
1476 U)
1477 DOCKER_USER="${OPTARG}"
1478 ;;
1479 l)
1480 LXD_REPOSITORY_BASE="${OPTARG}"
1481 ;;
1482 p)
1483 LXD_REPOSITORY_PATH="${OPTARG}"
1484 ;;
1485 D)
1486 OSM_DEVOPS="${OPTARG}"
1487 ;;
1488 s)
1489 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1490 ;;
1491 H)
1492 OSM_VCA_HOST="${OPTARG}"
1493 ;;
1494 S)
1495 OSM_VCA_SECRET="${OPTARG}"
1496 ;;
1497 P)
1498 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1499 ;;
1500 A)
1501 OSM_VCA_APIPROXY="${OPTARG}"
1502 ;;
1503 w)
1504 # when specifying workdir, do not use sudo for access
1505 WORKDIR_SUDO=
1506 OSM_WORK_DIR="${OPTARG}"
1507 ;;
1508 t)
1509 OSM_DOCKER_TAG="${OPTARG}"
1510 ;;
1511 o)
1512 INSTALL_ONLY="y"
1513 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1514 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1515 #[ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
1516 ;;
1517 m)
1518 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1519 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1520 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1521 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1522 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1523 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1524 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1525 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1526 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1527 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1528 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1529 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1530 ;;
1531 -)
1532 [ "${OPTARG}" == "help" ] && usage && exit 0
1533 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1534 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1535 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1536 [ "${OPTARG}" == "nat" ] && NAT="y" && continue
1537 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1538 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1539 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1540 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1541 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1542 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1543 [ "${OPTARG}" == "lxdimages" ] && INSTALL_FROM_LXDIMAGES="y" && continue
1544 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1545 [ "${OPTARG}" == "soui" ] && INSTALL_LIGHTWEIGHT="" && RELEASE="-R ReleaseTHREE" && REPOSITORY="-r stable" && continue
1546 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1547 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1548 #[ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
1549 [ "${OPTARG}" == "noconfigure" ] && NOCONFIGURE="y" && continue
1550 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1551 [ "${OPTARG}" == "daily" ] && RELEASE_DAILY="y" && continue
1552 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1553 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1554 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1555 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1556 [ "${OPTARG}" == "pullimages" ] && continue
1557 [ "${OPTARG}" == "k8s_monitor" ] && K8S_MONITOR="y" && continue
1558 echo -e "Invalid option: '--$OPTARG'\n" >&2
1559 usage && exit 1
1560 ;;
1561 \?)
1562 echo -e "Invalid option: '-$OPTARG'\n" >&2
1563 usage && exit 1
1564 ;;
1565 y)
1566 ASSUME_YES="y"
1567 ;;
1568 *)
1569 usage && exit 1
1570 ;;
1571 esac
1572 done
1573
1574 [ -n "$INSTALL_FROM_LXDIMAGES" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --lxd can only be used with --soui"
1575 [ -n "$NAT" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --nat can only be used with --soui"
1576 [ -n "$NOCONFIGURE" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --noconfigure can only be used with --soui"
1577 [ -n "$RELEASE_DAILY" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --daily can only be used with --soui"
1578 [ -n "$INSTALL_NOLXD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nolxd cannot be used with --soui"
1579 [ -n "$INSTALL_NODOCKER" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nodocker cannot be used with --soui"
1580 [ -n "$TO_REBUILD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: -m cannot be used with --soui"
1581 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1582
1583 if [ -n "$SHOWOPTS" ]; then
1584 dump_vars
1585 exit 0
1586 fi
1587
1588 [ -n "$RELEASE_DAILY" ] && echo -e "\nInstalling from daily build repo" && RELEASE="-R ReleaseTHREE-daily" && REPOSITORY="-r testing" && COMMIT_ID="master"
1589
1590 # if develop, we force master
1591 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1592
1593 need_packages="git jq wget curl tar"
1594 echo -e "Checking required packages: $need_packages"
1595 dpkg -l $need_packages &>/dev/null \
1596 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1597 || sudo apt-get update \
1598 || FATAL "failed to run apt-get update"
1599 dpkg -l $need_packages &>/dev/null \
1600 || ! echo -e "Installing $need_packages requires root privileges." \
1601 || sudo apt-get install -y $need_packages \
1602 || FATAL "failed to install $need_packages"
1603
1604 if [ -z "$OSM_DEVOPS" ]; then
1605 if [ -n "$TEST_INSTALLER" ]; then
1606 echo -e "\nUsing local devops repo for OSM installation"
1607 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1608 else
1609 echo -e "\nCreating temporary dir for OSM installation"
1610 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1611 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1612
1613 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1614
1615 if [ -z "$COMMIT_ID" ]; then
1616 echo -e "\nGuessing the current stable release"
1617 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1618 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1619
1620 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1621 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1622 else
1623 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1624 fi
1625 git -C $OSM_DEVOPS checkout $COMMIT_ID
1626 fi
1627 fi
1628
1629 . $OSM_DEVOPS/common/all_funcs
1630
1631 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1632 [ -n "$UNINSTALL" ] && uninstall && echo -e "\nDONE" && exit 0
1633 [ -n "$NAT" ] && nat && echo -e "\nDONE" && exit 0
1634 [ -n "$UPDATE" ] && update && echo -e "\nDONE" && exit 0
1635 [ -n "$RECONFIGURE" ] && configure && echo -e "\nDONE" && exit 0
1636 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1637 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1638 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1639 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1640
1641 #Installation starts here
1642 wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README.txt &> /dev/null
1643 track start
1644
1645 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1646 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1647 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1648 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1649 fi
1650
1651 echo -e "Checking required packages: lxd"
1652 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1653 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1654
1655 # use local devops for containers
1656 export OSM_USE_LOCAL_DEVOPS=true
1657 if [ -n "$INSTALL_FROM_SOURCE" ]; then #install from source
1658 echo -e "\nCreating the containers and building from source ..."
1659 $OSM_DEVOPS/jenkins/host/start_build RO --notest checkout $COMMIT_ID || FATAL "RO container build failed (refspec: '$COMMIT_ID')"
1660 ro_is_up && track RO
1661 $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA container build failed"
1662 vca_is_up && track VCA
1663 $OSM_DEVOPS/jenkins/host/start_build MON || FATAL "MON container build failed"
1664 mon_is_up && track MON
1665 $OSM_DEVOPS/jenkins/host/start_build SO checkout $COMMIT_ID || FATAL "SO container build failed (refspec: '$COMMIT_ID')"
1666 $OSM_DEVOPS/jenkins/host/start_build UI checkout $COMMIT_ID || FATAL "UI container build failed (refspec: '$COMMIT_ID')"
1667 #so_is_up && track SOUI
1668 track SOUI
1669 elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then #install from LXD images stored in OSM repo
1670 echo -e "\nInstalling from lxd images ..."
1671 install_from_lxdimages
1672 else #install from binaries
1673 echo -e "\nCreating the containers and installing from binaries ..."
1674 $OSM_DEVOPS/jenkins/host/install RO ${REPO_ARGS[@]} || FATAL "RO install failed"
1675 ro_is_up && track RO
1676 $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA install failed"
1677 vca_is_up && track VCA
1678 $OSM_DEVOPS/jenkins/host/install MON || FATAL "MON install failed"
1679 mon_is_up && track MON
1680 $OSM_DEVOPS/jenkins/host/install SO ${REPO_ARGS[@]} || FATAL "SO install failed"
1681 $OSM_DEVOPS/jenkins/host/install UI ${REPO_ARGS[@]} || FATAL "UI install failed"
1682 #so_is_up && track SOUI
1683 track SOUI
1684 fi
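# Illustrative check, not executed: the classic (--soui) flows above create one LXD
# container per component; the containers created by the build/install helpers can be
# listed with:
#   lxc list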
1685
1686 #Install iptables-persistent and configure NAT rules
1687 [ -z "$NOCONFIGURE" ] && nat
1688
1689 #Configure components
1690 [ -z "$NOCONFIGURE" ] && configure
1691
1692 #Install osmclient
1693 [ -z "$NOCONFIGURE" ] && install_osmclient
1694
1695 #Install vim-emu (optional)
1696 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1697
1698 wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null
1699 track end
1700 echo -e "\nDONE"