full_install_osm.sh: added cadvisor ports to osm_ports.sh
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
function usage(){
    # Print the installer's command-line help to stdout.
    # Fixes typos in the user-visible text: "confifured" -> "configured",
    # "moitoring" -> "monitoring", "do not juju" -> "do not install juju",
    # and the missing space after "--nodockerbuild:".
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " OPTIONS"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " ..."
    echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e " --pm_stack: additionally deploy a Prometheus+Grafana stack for performance monitoring (PM)"
    echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, pm_stack)"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not install juju, assumes already installed"
    echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --soui: install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)"
    echo -e " --lxdimages: (only for Rel THREE with --soui) download lxd images from OSM repository instead of creating them from scratch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " -l <lxd_repo>: (only for Rel THREE with --soui) use specified repository url for lxd images"
    echo -e " -p <path>: (only for Rel THREE with --soui) use specified repository path for lxd images"
#    echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    echo -e " --nat: (only for Rel THREE with --soui) install only NAT rules"
    echo -e " --noconfigure: (only for Rel THREE with --soui) DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules"
#    echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -h / --help: print this help"
}
67
#Uninstall OSM: remove containers
function uninstall(){
    # Tear down the classic (Rel THREE) install: stop and delete the lxd
    # containers, then drop the imported lxd images if present.
    echo -e "\nUninstalling OSM"
    if [ $RC_CLONE ] || [ -n "$TEST_INSTALLER" ]; then
        # Jenkins/test environment: delegate cleanup to the devops helper.
        for container in RO VCA MON SO; do
            $OSM_DEVOPS/jenkins/host/clean_container $container
        done
        #$OSM_DEVOPS/jenkins/host/clean_container UI
    else
        for container in RO VCA MON SO-ub; do
            lxc stop $container && lxc delete $container
        done
    fi
    echo -e "\nDeleting imported lxd images if they exist"
    for image in osm-ro osm-vca osm-soui; do
        lxc image show $image &>/dev/null && lxc image delete $image
    done
    return 0
}
89
90 # takes a juju/accounts.yaml file and returns the password specific
91 # for a controller. I wrote this using only bash tools to minimize
92 # additions of other packages
93 function parse_juju_password {
94 password_file="${HOME}/.local/share/juju/accounts.yaml"
95 local controller_name=$1
96 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
97 sed -ne "s|^\($s\):|\1|" \
98 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
99 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
100 awk -F$fs -v controller=$controller_name '{
101 indent = length($1)/2;
102 vname[indent] = $2;
103 for (i in vname) {if (i > indent) {delete vname[i]}}
104 if (length($3) > 0) {
105 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
106 if (match(vn,controller) && match($2,"password")) {
107 printf("%s",$3);
108 }
109 }
110 }'
111 }
112
function generate_secret() {
    # Emit a 32-character alphanumeric random secret to stdout.
    # Read a fixed byte count instead of `head` (which is line-oriented):
    # on binary /dev/urandom data, "10 lines" consumes an unpredictable
    # amount of entropy and in principle could yield fewer than 32
    # alphanumeric characters after filtering. 4 KiB always suffices.
    head -c 4096 /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
116
function remove_volumes() {
    # Delete the persistent storage of a deployment.
    # Kubernetes mode: $1 is a host directory to wipe.
    # Swarm mode: $1 is the stack name; remove its named docker volumes.
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
        return
    fi
    stack=$1
    volumes="mongo_db mon_db osm_packages ro_db"
    for volume in $volumes; do
        sg docker -c "docker volume rm ${stack}_${volume}"
    done
}
130
function remove_network() {
    # Remove the per-stack docker network ("net<stack name>").
    stack=$1
    local netname="net${stack}"
    sg docker -c "docker network rm ${netname}"
}
135
function remove_iptables() {
    # Delete the DNAT rule that exposed the juju API (port 17070) on this host.
    # $1: stack/controller name, used to discover the VCA IP when OSM_VCA_HOST
    # is not already set.
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Host part of the first api-endpoint reported by juju itself.
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    # iptables -C only checks for the rule; delete and persist only if present.
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
148
function remove_stack() {
    # Remove a docker swarm stack and wait (up to ~30s) until all of its
    # tasks are gone; aborts via FATAL if containers are still running.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        result=1
        local attempt
        for ((attempt=0; attempt<30; attempt++)); do
            # Number of remaining tasks; zero lines means the stack is gone.
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            if [ "${result}" == "0" ]; then
                break
            fi
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        # Give the swarm a moment to release names/networks before reuse.
        sleep 5
    fi
}
172
# Remove an OSM deployment from kubernetes: deleting the namespace cascades
# to every deployment and service inside it.
function remove_k8s_namespace() {
    local ns_to_delete=$1
    kubectl delete ns $ns_to_delete
}
177
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # Remove a docker-based (swarm or k8s) OSM deployment: stacks/namespace,
    # per-module images, volumes, networks, NAT rules, work dir and the juju
    # controller. Honors INSTALL_ONLY/INSTALL_ELK for addon-only removal.
    if [ -n "$INSTALL_ONLY" ]; then
        # "-o" mode: only the requested addon is uninstalled.
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
            uninstall_prometheus_nodeexporter
        fi
        echo "Now osm docker images and volumes will be deleted"
        # Run the image removals in a shell with the docker group active
        # (the installer may have just added the user to the group).
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            # k8s keeps its data under a host path per namespace.
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
227
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    echo -e "\nChecking required packages: iptables-persistent"
    # The `a || ! echo msg || install` chain installs the package only when
    # dpkg -l fails: the negated echo forces evaluation of the final branch.
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
        sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent
    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    # The actual rule set lives in the devops helper script.
    sudo $OSM_DEVOPS/installers/nat_osm
}
237
function FATAL(){
    # Report an unrecoverable installer error ($1: reason) and abort.
    local reason=$1
    echo "FATAL error: Cannot install OSM due to \"$reason\""
    exit 1
}
242
#Update RO, SO and UI:
function update(){
    # Classic (lxd container) update path: for each of RO, SO/UI and MON,
    # fetch the module's git repo inside its container, work out the target
    # ref (explicit -b COMMIT_ID > latest stable tag when detached > current
    # branch) and update only when local and remote commit ids differ.
    # NOTE(review): the RO/SO/MON sections below are near-identical copies;
    # candidates for extraction into a shared helper.
    echo -e "\nUpdating components"

    echo -e " Updating RO"
    CONTAINER="RO"
    MDG="RO"
    INSTALL_FOLDER="/opt/openmano"
    echo -e " Fetching the repo"
    lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
    BRANCH=""
    # Current branch name from `git status -sb` ("## branch...origin/branch").
    BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
    [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
    CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
    CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
    echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
    # COMMIT_ID either was previously set with -b option, or is an empty string
    CHECKOUT_ID=$COMMIT_ID
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
    # Tags need rev-list; branches are resolved against origin/.
    if [[ $CHECKOUT_ID == "tags/"* ]]; then
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
    else
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
    fi
    echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
        # Stop the service, update sources preserving local changes via
        # stash, migrate the DB schema, then restart.
        lxc exec $CONTAINER -- service osm-ro stop
        lxc exec $CONTAINER -- git -C /opt/openmano stash
        lxc exec $CONTAINER -- git -C /opt/openmano pull --rebase
        lxc exec $CONTAINER -- git -C /opt/openmano checkout $CHECKOUT_ID
        lxc exec $CONTAINER -- git -C /opt/openmano stash pop
        lxc exec $CONTAINER -- /opt/openmano/database_utils/migrate_mano_db.sh
        lxc exec $CONTAINER -- service osm-ro start
    fi
    echo

    echo -e " Updating SO and UI"
    CONTAINER="SO-ub"
    MDG="SO"
    INSTALL_FOLDER=""   # To be filled in
    echo -e " Fetching the repo"
    lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
    BRANCH=""
    BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
    [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
    CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
    CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
    echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
    # COMMIT_ID either was previously set with -b option, or is an empty string
    CHECKOUT_ID=$COMMIT_ID
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
    if [[ $CHECKOUT_ID == "tags/"* ]]; then
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
    else
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
    fi
    echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
        # Instructions to be added
        # lxc exec SO-ub -- ...
    fi
    echo
    echo -e "Updating MON Container"
    CONTAINER="MON"
    MDG="MON"
    INSTALL_FOLDER="/root/MON"
    echo -e " Fetching the repo"
    lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
    BRANCH=""
    BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
    [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
    CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
    CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
    echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
    # COMMIT_ID either was previously set with -b option, or is an empty string
    CHECKOUT_ID=$COMMIT_ID
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
    if [[ $CHECKOUT_ID == "tags/"* ]]; then
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
    else
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
    fi
    echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
        # No MON-specific update steps are implemented here yet.
    fi
    echo
}
342
function so_is_up() {
    # Wait up to 5 minutes for the SO REST API to report the RW.Restconf
    # component as RUNNING; aborts via FATAL on timeout.
    # $1 (optional): SO IP address; discovered from the SO-ub lxd container
    # when omitted.
    if [ -n "$1" ]; then
        SO_IP=$1
    else
        SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=5
    timelength=300
    while [ $time -le $timelength ]
    do
        # Query the operational vcs/info data (hard-coded admin:admin basic
        # auth) and look for exactly one RUNNING RW.Restconf component.
        if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \
                -H 'accept: application/vnd.yang.data+json' \
                -H 'authorization: Basic YWRtaW46YWRtaW4=' \
                -H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]]
        then
            echo "RW.Restconf running....SO is up"
            return 0
        fi

        sleep $step
        echo -n "."
        time=$((time+step))
    done

    FATAL "OSM Failed to startup. SO failed to startup"
}
370
function vca_is_up() {
    # A single "osm" line in `juju status` run inside the VCA container
    # means the controller answered, i.e. the VCA is ready.
    local osm_lines
    osm_lines=`lxc exec VCA -- juju status | grep "osm" | wc -l`
    if [[ $osm_lines -eq 1 ]]; then
        echo "VCA is up and running"
        return 0
    fi

    FATAL "OSM Failed to startup. VCA failed to startup"
}
379
function mon_is_up() {
    # Single-shot health check for MON; aborts via FATAL when not reachable.
    # NOTE(review): this probes $RO_IP:9090/openmano/ — the RO endpoint and
    # the RO_IP global set by ro_is_up — not a MON-specific address or port.
    # Looks copy-pasted from ro_is_up; confirm MON's real health endpoint.
    if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
        echo "MON is up and running"
        return 0
    fi

    FATAL "OSM Failed to startup. MON failed to startup"
}
388
function ro_is_up() {
    # Poll RO's northbound API until it answers "works" or ~20s elapse;
    # aborts via FATAL on timeout.
    # $1 (optional): RO IP; discovered from the RO lxd container if omitted.
    # RO_IP is left as a global (mon_is_up reads it afterwards).
    if [ -n "$1" ]; then
        RO_IP=$1
    else
        RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'`
    fi
    local elapsed
    for ((elapsed=0; elapsed<=20; elapsed+=2)); do
        if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
            echo "RO is up and running"
            return 0
        fi
        sleep 2
        echo -n "."
    done

    FATAL "OSM Failed to startup. RO failed to startup"
}
410
411
function configure_RO(){
    # Point RO's log socket at the SO container and (re)create the 'osm'
    # tenant inside the RO container. Requires export_ips to publish
    # SO_CONTAINER_IP.
    . $OSM_DEVOPS/installers/export_ips
    echo -e " Configuring RO"
    lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg
    lxc exec RO -- service osm-ro restart

    ro_is_up

    # Recreate the tenant from scratch (delete may fail harmlessly on a
    # first install).
    lxc exec RO -- openmano tenant-delete -f osm >/dev/null
    lxc exec RO -- openmano tenant-create osm > /dev/null
    # NOTE(review): the next three lines delete, sed-insert AND echo-append
    # the same export in .bashrc — insert plus append looks redundant;
    # confirm whether both are intended.
    lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc
    lxc exec RO -- sed -i '$ i export OPENMANO_TENANT=osm' .bashrc
    lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc'
}
426
function configure_VCA(){
    # Set a fresh random admin password on the juju controller inside the
    # VCA container. JUJU_PASSWD stays global: configure_SOUI reads it later.
    # juju asks for the new password twice, hence the duplicated line.
    echo -e " Configuring VCA"
    JUJU_PASSWD=$(generate_secret)
    printf '%s\n%s\n' "$JUJU_PASSWD" "$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password
}
432
function configure_SOUI(){
    # Wire SO/UI to the rest of the system: routes to the juju controller,
    # persistent journaling, launchpad external address, then register the
    # config agent (juju) and RO account through the SO REST API.
    # Requires export_ips (VCA_CONTAINER_IP, SO_CONTAINER_IP, RO_CONTAINER_IP,
    # DEFAULT_IP) and JUJU_PASSWD set by configure_VCA.
    . $OSM_DEVOPS/installers/export_ips
    JUJU_CONTROLLER_IP=`lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}'`
    RO_TENANT_ID=`lxc exec RO -- openmano tenant-list osm |awk '{print $1}'`

    echo -e " Configuring MON"
    #Information to be added about SO socket for logging

    echo -e " Configuring SO"
    # Route traffic for the nested juju controller through the VCA container,
    # and persist both routes across reboots via rc.local.
    sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP
    sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP
    sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local
    sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local
    # make journaling persistent
    lxc exec SO-ub -- mkdir -p /var/log/journal
    lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal
    lxc exec SO-ub -- systemctl restart systemd-journald

    echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad

    lxc exec SO-ub -- systemctl restart launchpad

    so_is_up $SO_CONTAINER_IP

    #delete existing config agent (could be there on reconfigure)
    curl -k --request DELETE \
        --url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' &> /dev/null

    # Register the juju controller as SO's config agent.
    result=$(curl -k --request POST \
        --url https://$SO_CONTAINER_IP:8008/api/config/config-agent \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result"

    #R1/R2 config line
    #result=$(curl -k --request PUT \
    #  --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \
    #  --header 'accept: application/vnd.yang.data+json' \
    #  --header 'authorization: Basic YWRtaW46YWRtaW4=' \
    #  --header 'cache-control: no-cache' \
    #  --header 'content-type: application/vnd.yang.data+json' \
    #  --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }')

    # Register RO (openmano) as the resource orchestrator account.
    result=$(curl -k --request PUT \
        --url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result"

    # Point the UI's OpenID Connect redirect URIs at the host's default IP.
    result=$(curl -k --request PATCH \
        --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed redirect-uri configuration: $result"

    result=$(curl -k --request PATCH \
        --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result"

    # Alias the host's default IP on lo:1 inside the SO container.
    lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg <<EOF
auto lo:1
iface lo:1 inet static
address $DEFAULT_IP
netmask 255.255.255.255
EOF
    # NOTE(review): missing "--" before ifup (unlike every other lxc exec
    # here); lxc may parse "ifup" as its own option — confirm.
    lxc exec SO-ub ifup lo:1
}
518
#Configure RO, VCA, and SO with the initial configuration:
#  RO -> tenant:osm, logs to be sent to SO
#  VCA -> juju-password
#  SO -> route to Juju Controller, add RO account, add VCA account
function configure(){
    #Configure components
    echo -e "\nConfiguring components"
    local component
    for component in RO VCA SOUI; do
        configure_$component
    done
}
530
function install_lxd() {
    # Install and initialize LXD with a NAT'd bridge whose MTU matches the
    # host's default-route interface (avoids fragmentation inside containers).
    sudo apt-get update
    sudo apt-get install -y lxd
    # NOTE(review): newgrp spawns a new shell when run from a script — it
    # does not change the group of THIS shell; confirm the lxd group is
    # actually effective for the commands below.
    newgrp lxd
    lxd init --auto
    lxd waitready
    lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
    # Interface that carries the default route, and its MTU.
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    lxc profile device set default eth0 mtu $DEFAULT_MTU
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
546
function ask_user(){
    # Prompt the user and parse a yes/no answer (case-insensitive).
    # $1: prompt text; $2: default on empty input ('y' -> yes, 'n' -> no,
    # anything else means an explicit answer is required).
    # Returns 0 for yes, 1 for no; re-prompts until a valid answer arrives.
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        case "${USER_CONFIRMATION,,}" in
            "")
                [ "$2" == 'y' ] && return 0
                [ "$2" == 'n' ] && return 1
                ;;
            y|yes)
                return 0
                ;;
            n|no)
                return 1
                ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
560
function launch_container_from_lxd(){
    # Create (if missing) and wait for the build container of module $1,
    # based on the lxd image named in $2. Relies on helpers sourced from
    # devops elsewhere: OSM_load_config (sets OSM_BUILD_CONTAINER and the
    # OSM_BUILD_CONTAINER_* flags), container_exists, create_container and
    # wait_container_up.
    export OSM_MDG=$1
    OSM_load_config
    export OSM_BASE_IMAGE=$2
    if ! container_exists $OSM_BUILD_CONTAINER; then
        CONTAINER_OPTS=""
        [[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true"
        [[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true"
        create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS
        wait_container_up $OSM_BUILD_CONTAINER
    fi
}
573
function install_osmclient(){
    # Install python3-osmclient and python3-osm-im from the OSM apt repo.
    # Repo coordinates come from the global RELEASE/REPOSITORY/REPOSITORY_BASE
    # values; the "-R "/"-r "/"-u " option prefixes are stripped first.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Classic (non-lightweight) installs talk to the SO-ub/RO lxd containers;
    # grab their IPs from `lxc list`.
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
604
function install_prometheus_nodeexporter(){
    # Install prometheus node_exporter as a systemd service running under a
    # dedicated no-login user. Version comes from the global
    # PROMETHEUS_NODE_EXPORTER_TAG; the unit file ships with devops.
    sudo useradd --no-create-home --shell /bin/false node_exporter
    sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
    sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
    sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
    sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
    # Fix: tarball and extracted tree live in /tmp (wget -P /tmp, tar -C /tmp).
    # The previous `rm -rf node_exporter-...*` ran in the current directory,
    # so it never cleaned them up (and could match unrelated CWD files).
    sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
    sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo systemctl restart node_exporter
    sudo systemctl enable node_exporter
    return 0
}
618
function uninstall_prometheus_nodeexporter(){
    # Revert the node_exporter installation: stop and disable the unit,
    # then remove the unit file, the dedicated user and the binary.
    local -r service=node_exporter
    sudo systemctl stop "$service"
    sudo systemctl disable "$service"
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel "$service"
    sudo rm /usr/local/bin/node_exporter
    return 0
}
628
function install_from_lxdimages(){
    # Rel THREE classic install: fetch (or reuse local) lxd images for
    # RO/VCA/SOUI, import them and launch the containers, tracking progress.
    LXD_RELEASE=${RELEASE#"-R "}
    if [ -n "$LXD_REPOSITORY_PATH" ]; then
        # Images already available locally (-p option); nothing to download.
        LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH"
    else
        LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")"
        trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT
    fi
    echo -e "\nDeleting previous lxd images if they exist"
    lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
    lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
    lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
    echo -e "\nImporting osm-ro"
    # Download only when not using a local repository path.
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro
    rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz
    echo -e "\nImporting osm-vca"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca
    rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz
    echo -e "\nImporting osm-soui"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui
    rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz
    launch_container_from_lxd RO osm-ro
    ro_is_up && track RO
    launch_container_from_lxd VCA osm-vca
    vca_is_up && track VCA
    # NOTE(review): no "osm-mon" image is imported above, yet MON is launched
    # from it — verify the alias exists or is created elsewhere.
    launch_container_from_lxd MON osm-mon
    mon_is_up && track MON
    launch_container_from_lxd SO osm-soui
    #so_is_up && track SOUI
    track SOUI
}
663
function install_docker_ce() {
    # installs and configures Docker CE
    # Adds Docker's apt repository and GPG key, installs docker-ce, puts the
    # invoking user in the docker group and verifies the daemon answers.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # `sg docker` runs with the new group without requiring a re-login.
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
683
function install_docker_compose() {
    # Install docker-compose to /usr/local/bin for the host's OS/arch.
    # The release defaults to the historically pinned 1.18.0 but can now be
    # overridden via the DOCKER_COMPOSE_VERSION environment variable
    # (backward-compatible generalization of the hard-coded version).
    local compose_version=${DOCKER_COMPOSE_VERSION:-1.18.0}
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/${compose_version}/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
691
function install_juju() {
    # Install juju from its classic snap; when LXD management is enabled
    # (no --nolxd), reconfigure lxd so juju can use it.
    echo "Installing juju"
    sudo snap install juju --classic
    if [ -z "$INSTALL_NOLXD" ]; then
        sudo dpkg-reconfigure -p medium lxd
    fi
    echo "Finished installation of juju"
    return 0
}
699
function juju_createcontroller() {
    # Bootstrap a juju controller named after the OSM stack, unless one
    # with that name already exists; then verify it is listed.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
    fi
    # NOTE(review): $1 inside the double-quoted awk program is expanded by
    # the SHELL (usually empty), so awk prints the whole matching line; the
    # wc -l count is unaffected, but the quoting is fragile.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
707
function juju_createproxy() {
    # Expose the juju API on this host: DNAT host port 17070 to the VCA
    # controller ($OSM_VCA_HOST) and persist the rule. Counterpart of
    # remove_iptables.
    echo -e "\nChecking required packages: iptables-persistent"
    # Install iptables-persistent only when missing (see nat() for the
    # `|| ! echo || install` idiom).
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
        sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent

    # Add the rule only if it is not already present (-C checks).
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
718
function generate_docker_images() {
    # Pull third-party infrastructure images and pull-or-build the OSM
    # component images (MON, POL, NBI/Keystone, RO, LCM, LW-UI, osmclient).
    # Globals read: COMMIT_ID, TO_REBUILD, PULL_IMAGES, DOCKER_USER,
    #   OSM_DOCKER_TAG, LWTEMPDIR, BUILD_ARGS, REPOSITORY*, RELEASE and the
    #   *_TAG version pins.
    # With PULL_IMAGES set, ready-made images are pulled from the registry;
    # otherwise each component is cloned from gerrit at COMMIT_ID and built.
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    # Build arguments forwarded to 'docker build' for images that accept them
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # Third-party images: pulled when TO_REBUILD is empty (full install) or
    # when it explicitly names the component
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # mariadb backs the keystone database, hence the NBI/KEYSTONE-DB triggers
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # OSM components: pull the tagged image, or clone from gerrit and build
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # NBI and Keystone live in the same repository, so one clone feeds both
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    # NOTE(review): builds osmclient from the devops tree (not a gerrit
    # clone) and passes the repository BUILD_ARGS; the '-f' argument points
    # at $OSM_DEVOPS/docker/osmclient — confirm it names a Dockerfile
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # cAdvisor image used by Prometheus for container-level metrics
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
818
function cmp_overwrite() {
    # Copy $1 over $2 unless both files already have identical content.
    # If $2 exists and differs, ask the user before overwriting.
    # cp -b keeps a backup of the previous destination file.
    file1="$1"
    file2="$2"
    # FIX: plain 'cmp -s' in the condition. The original wrapped cmp in
    # $( ... ), which only worked via an obscure bash rule (exit status of
    # the last command substitution when the expanded command line is empty).
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
830
function generate_docker_env_files() {
    # Create/refresh the per-component env files under OSM_DOCKER_WORK_DIR.
    # Existing files are first backed up with a '~' suffix; afterwards only
    # missing files/keys are generated, so secrets survive re-installs.
    # Globals read: KUBERNETES, OSM_DEVOPS, OSM_DOCKER_WORK_DIR, WORKDIR_SUDO,
    #   OSM_DATABASE_COMMONKEY, OSM_VCA_* and DEFAULT_IP.
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml

        # Prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml

        # Grafana & Prometheus Exporter files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
    fi

    # LCM
    # Pattern used for all OSMLCM_VCA_* keys below: append the key when it is
    # missing from the env file, otherwise rewrite its value in place via sed
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    # NOTE(review): keystone-db reuses the MYSQL_ROOT_PASSWORD generated for
    # RO above — confirm this sharing is intended
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
957
function generate_osmclient_script () {
    # Write a small wrapper script that runs the osmclient sidecar container
    # attached to the OSM docker network, and make it executable.
    local script_path="$OSM_DOCKER_WORK_DIR/osm"
    local run_cmd="docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}"
    echo "$run_cmd" | $WORKDIR_SUDO tee "$script_path"
    $WORKDIR_SUDO chmod +x "$script_path"
    echo "osmclient sidecar container can be found at: $script_path"
}
963
#installs kubernetes packages
function install_kube() {
    # Add the upstream Kubernetes apt repository (with its signing key) and
    # install a pinned, mutually compatible kubelet/kubeadm/kubectl trio.
    local k8s_version="1.15.0-00"
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=${k8s_version} kubeadm=${k8s_version} kubectl=${k8s_version}
}
973
#initializes kubernetes control plane
function init_kubeadm() {
    # $1: path to the kubeadm cluster configuration file.
    local cluster_config=$1
    # the kubelet refuses to run with swap enabled
    sudo swapoff -a
    sudo kubeadm init --config $cluster_config
    sleep 5
}
980
function kube_config_dir() {
    # Copy the cluster admin kubeconfig into ~/.kube so kubectl works for
    # the (non-root) installing user.
    if [ ! -d $K8S_MANIFEST_DIR ]; then
        FATAL "Cannot Install Kubernetes"
    fi
    local kube_dir=$HOME/.kube
    mkdir -p $kube_dir
    sudo cp /etc/kubernetes/admin.conf $kube_dir/config
    sudo chown $(id -u):$(id -g) $kube_dir/config
}
987
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Download the flannel CNI manifest into a temp dir and apply it.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    # NOTE(review): this replaces any EXIT trap installed by the caller
    # (install_lightweight sets one for LWTEMPDIR) — confirm intended
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # FIX: '|| FATAL' instead of '[ $? -ne 0 ] && FATAL' — the old form, being
    # the last command, made the function return non-zero on SUCCESS (the
    # test itself fails when $? is 0)
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
996
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # Create the stack namespace and one generic secret per component, each
    # built from the component's env file. Order matches the original
    # explicit command sequence.
    local component
    kubectl create ns $OSM_STACK_NAME
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
1009
#deploys osm pods and services
function deploy_osm_services() {
    # Untaint the master node so pods can be scheduled on it (single-node
    # cluster), then apply every manifest in the OSM k8s work dir into the
    # stack namespace.
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
1017
function parse_yaml() {
    # Rewrite the image tag of every OSM service manifest to the given tag.
    # $1: docker tag to pin (stored in the global TAG, as before).
    osm_services="nbi lcm ro pol mon light-ui keystone"
    TAG=$1
    local service_yaml
    for osm in $osm_services; do
        service_yaml=$OSM_K8S_WORK_DIR/$osm.yaml
        # e.g. "opensourcemano/nbi:latest" -> "opensourcemano/nbi:$TAG"
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $service_yaml
    done
}
1025
function namespace_vol() {
    # Point every hostPath volume in the service manifests at the
    # stack-specific directory instead of the default /var/lib/osm.
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    local manifest
    for osm in $osm_services; do
        manifest=$OSM_K8S_WORK_DIR/$osm.yaml
        # '#' as sed delimiter because both paths contain '/'
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $manifest
    done
}
1032
function init_docker_swarm() {
    # Initialize a single-node docker swarm advertised on the default IP.
    # If the default interface's MTU is not the standard 1500, pre-create the
    # docker_gwbridge network with the detected MTU before 'swarm init'
    # would create it with default settings.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # names of all existing docker networks (skip the header row)
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        # take the highest 172.x subnet already in use and bump its second
        # octet by one to obtain a free subnet for the gateway bridge
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
1042
function create_docker_network() {
    # Create the attachable overlay network that all OSM containers join,
    # honouring the MTU detected on the default interface.
    local net_name="net${OSM_STACK_NAME}"
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} ${net_name}"
    echo "creating network DONE"
}
1048
function deploy_lightweight() {
    # Deploy OSM as a docker swarm stack: compute the per-service port map
    # (including the cadvisor port used by Prometheus), persist ports and
    # image tags into osm_ports.sh, and run 'docker stack deploy' with that
    # environment sourced.

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    # Prometheus is published on host port 9091 to avoid clashing with RO's 9090
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    # With NO_HOST_PORTS only the container-side port is declared; otherwise
    # each service port is also published on the host as host:container
    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # osm_ports.sh is sourced right before 'docker stack deploy' so the
    # compose file can reference the ports and image tags as env variables
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    popd

    echo "Finished deployment of lightweight build"
}
1103
function deploy_elk() {
    # Deploy the optional ELK (Elasticsearch/Kibana/beats) monitoring stack
    # as a separate docker stack 'osm_elk', then wait for Kibana to come up
    # and pre-create the default filebeat index pattern through its API.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # Poll Kibana's status endpoint every $step seconds, up to $timelength s
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never answered: print the manual instructions instead
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
1153
function install_lightweight() {
    # Main entry point for the lightweight (container-based) OSM install.
    # Prepares work dirs, installs prerequisites (LXD, juju, docker and/or
    # Kubernetes), builds/pulls the docker images and deploys all services.
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine the default-route interface, its IP and MTU (used for the
    # VCA api proxy and the docker networks)
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
        need_packages_lw="lxd snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
    fi
    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    if [ -z "$OSM_VCA_HOST" ]; then
        juju_createcontroller
        OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy

    if [ -z "$OSM_VCA_CACERT" ]; then
        # FIX: look up the controller by its actual name instead of the
        # hard-coded "osm", so custom stack names (-s <name>) also work
        OSM_VCA_CACERT=$(juju controllers --format json | jq -r ".controllers[\"$OSM_STACK_NAME\"][\"ca-cert\"]" | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # FIX: the variable must be dereferenced with '$' — the original
        # tested the literal string "OSM_DATABASE_COMMONKEY", never empty
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi
    track juju

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ ! $OSM_DOCKER_TAG == "latest" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        track deploy_osm_services_k8s
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    if [ -n "$KUBERNETES" ] && [ -n "$K8S_MONITOR" ]; then
        # install OSM MONITORING
        install_k8s_monitoring
        track install_k8s_monitoring
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null
    track end
    return 0
}
1296
function install_vimemu() {
    # Build the vim-emu (emulated VIM) docker image from upstream sources and
    # start it as an always-restarting daemon container, then print how to
    # register it as a VIM in OSM.
    # FIX: 'echo -e' so "\n" renders as a newline instead of literally
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1327
function install_k8s_monitoring() {
    # install OSM monitoring
    # FIX: $WORKDIR_SUDO is a command prefix (sudo), not a chmod operand.
    # The original 'chmod +x $WORKDIR_SUDO <files>' tried to chmod a file
    # literally named "sudo" and ran chmod without elevated privileges.
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
1333
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring (comment fixed: this is the uninstall path)
    # FIX: $WORKDIR_SUDO is a command prefix (sudo), not a chmod operand —
    # same argument-order bug as install_k8s_monitoring.
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
1339
function dump_vars(){
    # Print every installer flag/setting (debug aid, used by --showopts).
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "NAT=$NAT"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_FROM_LXDIMAGES=$INSTALL_FROM_LXDIMAGES"
    echo "LXD_REPOSITORY_BASE=$LXD_REPOSITORY_BASE"
    echo "LXD_REPOSITORY_PATH=$LXD_REPOSITORY_PATH"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "NOCONFIGURE=$NOCONFIGURE"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # FIX: this line printed $OSM_STACK_NAME under the OSM_WORK_DIR label
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1382
function track(){
    # Report an anonymous installation-progress event to the OSM telemetry
    # endpoint (woopra), together with the elapsed time since the installer
    # started.  SESSION_ID is the start timestamp and doubles as the cookie.
    # Arguments: $1 - event suffix ("start", "end", component name, ...)
    # Globals read: SESSION_ID, INSTALL_LIGHTWEIGHT, INSTALL_FROM_SOURCE,
    #               INSTALL_FROM_LXDIMAGES.
    local ctime duration url event_name
    ctime=$(date +%s)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    # Event prefix encodes the installation flavor.
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    # Fire-and-forget: the installer must not fail if telemetry is down.
    wget -q -O /dev/null "$url" || true
}
1396
# ---------------------------------------------------------------------------
# Default values for every installer option; the getopts loop below
# overrides them according to the command line.
# ---------------------------------------------------------------------------
# Action / mode flags (empty means "off").
UNINSTALL=""
DEVELOP=""
NAT=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
# Binary repository selection.
RELEASE="ReleaseSIX"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_FROM_LXDIMAGES=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
# Lightweight (container-based) install is the default flavor.
INSTALL_LIGHTWEIGHT="y"
INSTALL_ONLY=""
INSTALL_ELK=""
#INSTALL_PERFMON=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES=""
K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
NOCONFIGURE=""
RELEASE_DAILY=""
# Timestamp used both for duration accounting and as the telemetry cookie.
SESSION_ID=$(date +%s)
OSM_DEVOPS=
# VCA (juju) controller coordinates.
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
# Working directories; WORKDIR_SUDO prefixes commands that touch them.
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
# Container image tags.
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
# Kubernetes cluster parameters.
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Valid k8s namespace names (RFC 1123 label).
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1458
# Parse the command line.  Short options via getopts; long options are
# emulated through the "-" pseudo-option (--foo arrives as OPTARG="foo").
while getopts ":hy-:b:r:c:k:u:R:l:p:D:o:m:H:S:s:w:t:U:P:A:" o; do
    case "${o}" in
        h)
            usage && exit 0
            ;;
        b)
            # Install from source at a specific refspec; images are built,
            # not pulled.
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            # Container orchestrator: swarm (default) or k8s.
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # BUGFIX: the message previously referred to "-i" instead of -c.
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        l)
            LXD_REPOSITORY_BASE="${OPTARG}"
            ;;
        p)
            LXD_REPOSITORY_PATH="${OPTARG}"
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        s)
            # Stack name (swarm) or namespace (k8s); namespaces must match
            # the RFC 1123 label regex.
            # BUGFIX: an invalid namespace now exits with status 1, not 0.
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 1
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        o)
            # Install only the requested add-on stack, skip the main flow.
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            #[ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
            ;;
        m)
            # Modules to rebuild from source (accumulated in TO_REBUILD).
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        -)
            # Long options emulated through the "-" pseudo-option.
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "nat" ] && NAT="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lxdimages" ] && INSTALL_FROM_LXDIMAGES="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "soui" ] && INSTALL_LIGHTWEIGHT="" && RELEASE="-R ReleaseTHREE" && REPOSITORY="-r stable" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            #[ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
            [ "${OPTARG}" == "noconfigure" ] && NOCONFIGURE="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "daily" ] && RELEASE_DAILY="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && K8S_MONITOR="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1587
# Reject option combinations that do not make sense together: several
# legacy flags require (or forbid) the --soui / lightweight flavor.
if [ -n "$INSTALL_FROM_LXDIMAGES" ] && [ -n "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible options: --lxd can only be used with --soui"
fi
if [ -n "$NAT" ] && [ -n "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible options: --nat can only be used with --soui"
fi
if [ -n "$NOCONFIGURE" ] && [ -n "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible options: --noconfigure can only be used with --soui"
fi
if [ -n "$RELEASE_DAILY" ] && [ -n "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible options: --daily can only be used with --soui"
fi
if [ -n "$INSTALL_NOLXD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible option: --nolxd cannot be used with --soui"
fi
if [ -n "$INSTALL_NODOCKER" ] && [ -z "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible option: --nodocker cannot be used with --soui"
fi
if [ -n "$TO_REBUILD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ]; then
    FATAL "Incompatible option: -m cannot be used with --soui"
fi
# "-m NONE" is exclusive: it may not be combined with other -m modules.
if [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ]; then
    echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
fi

# --showopts: dump the resolved configuration and stop.
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

# --daily: switch to the daily testing repository and the master refspec.
if [ -n "$RELEASE_DAILY" ]; then
    echo -e "\nInstalling from daily build repo"
    RELEASE="-R ReleaseTHREE-daily"
    REPOSITORY="-r testing"
    COMMIT_ID="master"
fi
1603
1604 # if develop, we force master
1605 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1606
1607 need_packages="git jq wget curl tar"
1608 echo -e "Checking required packages: $need_packages"
1609 dpkg -l $need_packages &>/dev/null \
1610 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1611 || sudo apt-get update \
1612 || FATAL "failed to run apt-get update"
1613 dpkg -l $need_packages &>/dev/null \
1614 || ! echo -e "Installing $need_packages requires root privileges." \
1615 || sudo apt-get install -y $need_packages \
1616 || FATAL "failed to install $need_packages"
1617
# Resolve OSM_DEVOPS (the devops repo checkout this installer runs from)
# unless it was given explicitly with -D:
#  * --test: reuse the local repo this script lives in;
#  * otherwise: clone devops into a temp dir (removed on exit) and check
#    out either the requested refspec or the latest stable tag.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            # Highest version-sorted v* tag in the devops repo.
            LATEST_STABLE_DEVOPS=$(git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1)
            # BUGFIX: this failure path previously exited with status 0.
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 1

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi
1642
1643 . $OSM_DEVOPS/common/all_funcs
1644
1645 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1646 [ -n "$UNINSTALL" ] && uninstall && echo -e "\nDONE" && exit 0
1647 [ -n "$NAT" ] && nat && echo -e "\nDONE" && exit 0
1648 [ -n "$UPDATE" ] && update && echo -e "\nDONE" && exit 0
1649 [ -n "$RECONFIGURE" ] && configure && echo -e "\nDONE" && exit 0
1650 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1651 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1652 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1653 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1654
#Installation starts here
# Telemetry ping marking the start of a real installation (the README
# fetch is discarded; it only registers the download on the server side).
wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README.txt &> /dev/null
track start

# Default flavor: lightweight (container-based) install; done entirely by
# install_lightweight, which exits the script on completion.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
# Source builds are slow; ask for confirmation unless -y was given.
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

# Classic (non-lightweight) installs run OSM components in LXD containers.
echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true
# Three install paths: build from source, import prebuilt LXD images, or
# install from binary packages.  Each component is tracked for telemetry
# once its liveness probe succeeds.
if [ -n "$INSTALL_FROM_SOURCE" ]; then #install from source
    echo -e "\nCreating the containers and building from source ..."
    $OSM_DEVOPS/jenkins/host/start_build RO --notest checkout $COMMIT_ID || FATAL "RO container build failed (refspec: '$COMMIT_ID')"
    ro_is_up && track RO
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA container build failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/start_build MON || FATAL "MON install failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/start_build SO checkout $COMMIT_ID || FATAL "SO container build failed (refspec: '$COMMIT_ID')"
    $OSM_DEVOPS/jenkins/host/start_build UI checkout $COMMIT_ID || FATAL "UI container build failed (refspec: '$COMMIT_ID')"
    #so_is_up && track SOUI
    track SOUI
elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then #install from LXD images stored in OSM repo
    echo -e "\nInstalling from lxd images ..."
    install_from_lxdimages
else #install from binaries
    echo -e "\nCreating the containers and installing from binaries ..."
    $OSM_DEVOPS/jenkins/host/install RO ${REPO_ARGS[@]} || FATAL "RO install failed"
    ro_is_up && track RO
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA install failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/install MON || FATAL "MON build failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/install SO ${REPO_ARGS[@]} || FATAL "SO install failed"
    $OSM_DEVOPS/jenkins/host/install UI ${REPO_ARGS[@]} || FATAL "UI install failed"
    #so_is_up && track SOUI
    track SOUI
fi

#Install iptables-persistent and configure NAT rules
[ -z "$NOCONFIGURE" ] && nat

#Configure components
[ -z "$NOCONFIGURE" ] && configure

#Install osmclient
[ -z "$NOCONFIGURE" ] && install_osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Telemetry ping marking the end of the installation.
wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null
track end
echo -e "\nDONE"