0f8b1772e0425fd6a48c690605026f1833400f95
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
function usage(){
    # Print the installer's command-line help to stdout.
    # Fixes vs. previous revision: typos "confifured" -> "configured",
    # "moitoring" -> "monitoring", "do not juju" -> "do not install juju".
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " OPTIONS"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " ..."
    echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e " --pm_stack: additionally deploy a Prometheus+Grafana stack for performance monitoring (PM)"
    echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, pm_stack)"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not install juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --soui: install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)"
    echo -e " --lxdimages: (only for Rel THREE with --soui) download lxd images from OSM repository instead of creating them from scratch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " -l <lxd_repo>: (only for Rel THREE with --soui) use specified repository url for lxd images"
    echo -e " -p <path>: (only for Rel THREE with --soui) use specified repository path for lxd images"
    # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    echo -e " --nat: (only for Rel THREE with --soui) install only NAT rules"
    echo -e " --noconfigure: (only for Rel THREE with --soui) DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules"
    # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -h / --help: print this help"
}
67
#Uninstall OSM: remove containers
function uninstall(){
    # Tear down the classic (LXD-based) OSM installation: RO, VCA, MON and SO
    # containers, plus any imported lxd images. Always returns 0.
    echo -e "\nUninstalling OSM"
    if [ $RC_CLONE ] || [ -n "$TEST_INSTALLER" ]; then
        # CI/test install: reuse the devops cleanup helpers so containers are
        # removed the same way Jenkins created them.
        $OSM_DEVOPS/jenkins/host/clean_container RO
        $OSM_DEVOPS/jenkins/host/clean_container VCA
        $OSM_DEVOPS/jenkins/host/clean_container MON
        $OSM_DEVOPS/jenkins/host/clean_container SO
        #$OSM_DEVOPS/jenkins/host/clean_container UI
    else
        # Regular install: stop and delete the lxc containers directly.
        lxc stop RO && lxc delete RO
        lxc stop VCA && lxc delete VCA
        lxc stop MON && lxc delete MON
        lxc stop SO-ub && lxc delete SO-ub
    fi
    echo -e "\nDeleting imported lxd images if they exist"
    # 'lxc image show' acts as an existence test; delete only when present.
    lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
    lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
    lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
    return 0
}
89
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
function parse_juju_password {
    # Extract the password of a juju controller from an accounts.yaml file.
    #   $1 - controller name to look up
    #   $2 - (optional, new) path to accounts.yaml; defaults to juju's
    #        standard location so existing callers are unaffected.
    # Prints the password (no trailing newline) to stdout.
    local controller_name=$1
    local password_file="${2:-${HOME}/.local/share/juju/accounts.yaml}"
    # fs is an unlikely field separator (ASCII 0x1C) used between the
    # indentation, key and value emitted by sed.
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # sed flattens each "key: value" line into indent<fs>key<fs>value;
    # awk then rebuilds the nesting from the indentation depth and prints
    # the value whose path matches <controller> and key contains "password".
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$password_file" |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
112
function generate_secret() {
    # Emit a 32-character random alphanumeric secret on stdout (no newline).
    # Reads the kernel RNG and keeps only [A-Za-z0-9] characters.
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
116
function remove_volumes() {
    # Delete persistent storage of an OSM deployment.
    #   $1 - when $KUBERNETES is set: the k8s host-volume directory to wipe;
    #        otherwise: the docker swarm stack name whose named volumes
    #        (<stack>_mongo_db, <stack>_mon_db, ...) are removed.
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        # WORKDIR_SUDO: optional "sudo" prefix configured elsewhere in the
        # installer, used for paths the current user may not own.
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        # Named volumes created by the docker stack, prefixed with its name.
        volumes="mongo_db mon_db osm_packages ro_db"
        for volume in $volumes; do
            # sg runs the command with the docker group without re-login.
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
130
function remove_network() {
    # Remove the docker network "net<stack>" that was created for stack $1.
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
135
function remove_iptables() {
    # Delete the DNAT rule that forwards juju API traffic (TCP 17070) to the
    # VCA/juju controller of stack $1, then persist the iptables state.
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Derive the controller IP from juju's controller metadata
        # (first api-endpoint, address part only).
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    # -C checks whether the rule exists; only then delete and save.
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
148
function remove_stack() {
    # Remove docker swarm stack $1 and wait (up to ~30s) until all of its
    # containers are gone; dies via FATAL if some remain.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            # Output line count reaches 0 only once the stack (and its task
            # listing, header included) no longer exists.
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        # Grace period for docker to release networks/volumes of the stack.
        sleep 5
    fi
}
172
#removes osm deployments and services
function remove_k8s_namespace() {
    # Delete the whole kubernetes namespace $1 (and with it every OSM
    # deployment, service and pod created inside it).
    kubectl delete ns $1
}
177
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # Tear down a lightweight (docker swarm or k8s based) OSM deployment.
    # With $INSTALL_ONLY set, only the selected addon (currently ELK) is
    # removed; otherwise the whole stack/namespace, the OSM docker images,
    # volumes, network, NAT rule, working directory and the juju controller
    # are destroyed. Always returns 0.
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else

            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # The heredoc body runs under the docker group via newgrp; its lines
        # and the EONG terminator must stay at column 0.
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            # k8s keeps its volumes under a per-namespace host directory.
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        # Destroy the juju controller (and all models) named after the stack.
        sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
227
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # Ensure iptables-persistent is installed, then delegate NAT rule setup
    # to the devops nat_osm helper (requires root).
    echo -e "\nChecking required packages: iptables-persistent"
    # '||' chain: apt-get only runs when dpkg reports the package missing;
    # the intermediate '! echo' prints the notice while keeping the chain
    # falsy so the install step is reached.
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
        sudo apt-get -yq install iptables-persistent
    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
237
function FATAL(){
    # Report an unrecoverable installer error ($1 = reason) and abort.
    printf 'FATAL error: Cannot install OSM due to "%s"\n' "$1"
    exit 1
}
242
#Update RO, SO and UI:
# Per-container helper for update(): fetch the git repo inside container
# $CONTAINER and work out where it is and where it should go.
# Inputs  (globals): CONTAINER, MDG, INSTALL_FOLDER, COMMIT_ID, LATEST_STABLE_DEVOPS
# Outputs (globals): BRANCH, CURRENT, CURRENT_COMMIT_ID, CHECKOUT_ID, REMOTE_COMMIT_ID
# Dies via FATAL when the current branch cannot be determined.
function _update_fetch_status(){
    echo -e " Fetching the repo"
    lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
    BRANCH=""
    # "## <branch>...<upstream>" -> first word, upstream suffix stripped.
    BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
    [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
    CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
    CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
    echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
    # COMMIT_ID either was previously set with -b option, or is an empty string
    CHECKOUT_ID=$COMMIT_ID
    # Detached HEAD -> latest stable tag; otherwise follow the current branch.
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
    if [[ $CHECKOUT_ID == "tags/"* ]]; then
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
    else
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
    fi
    echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
}

function update(){
    # Update the classic (LXD container based) OSM components in place.
    # RO gets a full stash/pull/checkout plus DB migration; for SO/UI and MON
    # only the update check is performed (apply steps not implemented yet).
    # The repeated fetch/compare logic lives in _update_fetch_status.
    echo -e "\nUpdating components"

    echo -e " Updating RO"
    CONTAINER="RO"
    MDG="RO"
    INSTALL_FOLDER="/opt/openmano"
    _update_fetch_status
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
        # Stop the service, move the checkout, migrate the DB, restart.
        lxc exec $CONTAINER -- service osm-ro stop
        lxc exec $CONTAINER -- git -C /opt/openmano stash
        lxc exec $CONTAINER -- git -C /opt/openmano pull --rebase
        lxc exec $CONTAINER -- git -C /opt/openmano checkout $CHECKOUT_ID
        lxc exec $CONTAINER -- git -C /opt/openmano stash pop
        lxc exec $CONTAINER -- /opt/openmano/database_utils/migrate_mano_db.sh
        lxc exec $CONTAINER -- service osm-ro start
    fi
    echo

    echo -e " Updating SO and UI"
    CONTAINER="SO-ub"
    MDG="SO"
    INSTALL_FOLDER="" # To be filled in
    # NOTE(review): with an empty INSTALL_FOLDER the unquoted expansion turns
    # 'git -C $INSTALL_FOLDER fetch' into 'git -C fetch --all' — the SO update
    # path cannot work until the folder is filled in (pre-existing behavior).
    _update_fetch_status
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
        # Instructions to be added
        # lxc exec SO-ub -- ...
    fi
    echo
    echo -e "Updating MON Container"
    CONTAINER="MON"
    MDG="MON"
    INSTALL_FOLDER="/root/MON"
    _update_fetch_status
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
    fi
    echo
}
342
function so_is_up() {
    # Wait (up to 300s, polling every 5s) until the SO REST interface reports
    # its RW.Restconf component as RUNNING.
    #   $1 - optional SO IP address; autodetected from the SO-ub lxc
    #        container when omitted.
    # Returns 0 when up; dies via FATAL on timeout.
    if [ -n "$1" ]; then
        SO_IP=$1
    else
        SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=5
    timelength=300
    while [ $time -le $timelength ]
    do
        # Query the SO operational API (self-signed cert, hence -k; the
        # basic-auth token is base64 of the default admin:admin credentials)
        # and count lines where RW.Restconf reports RUNNING.
        if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \
                -H 'accept: application/vnd.yang.data+json' \
                -H 'authorization: Basic YWRtaW46YWRtaW4=' \
                -H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]]
        then
            echo "RW.Restconf running....SO is up"
            return 0
        fi

        sleep $step
        echo -n "."
        time=$((time+step))
    done

    FATAL "OSM Failed to startup. SO failed to startup"
}
370
function vca_is_up() {
    # Single-shot check that the juju controller inside the VCA container
    # knows about "osm" (exactly one matching line in 'juju status').
    # Returns 0 when up; dies via FATAL otherwise (no retry).
    if [[ `lxc exec VCA -- juju status | grep "osm" | wc -l` -eq 1 ]]; then
        echo "VCA is up and running"
        return 0
    fi

    FATAL "OSM Failed to startup. VCA failed to startup"
}
379
function mon_is_up() {
    # Single-shot health check for MON; dies via FATAL on failure (no retry).
    # NOTE(review): this probes the RO openmano endpoint ($RO_IP:9090) — the
    # same URL ro_is_up uses — not a MON-specific port. Looks like a
    # copy-paste from ro_is_up; confirm the intended MON health endpoint.
    if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
        echo "MON is up and running"
        return 0
    fi

    FATAL "OSM Failed to startup. MON failed to startup"
}
388
function ro_is_up() {
    # Wait (up to 20s, polling every 2s) until RO's openmano REST root URL
    # answers with a page containing "works".
    #   $1 - optional RO IP address; autodetected from the RO lxc container
    #        when omitted.
    # Returns 0 when up; dies via FATAL on timeout.
    if [ -n "$1" ]; then
        RO_IP=$1
    else
        RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=2
    timelength=20
    while [ $time -le $timelength ]; do
        if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
            echo "RO is up and running"
            return 0
        fi
        sleep $step
        echo -n "."
        time=$((time+step))
    done

    FATAL "OSM Failed to startup. RO failed to startup"
}
410
411
function configure_RO(){
    # Configure the RO container: point its logging at the SO container,
    # restart the service, and (re)create the default "osm" tenant.
    # Sources export_ips for the *_CONTAINER_IP variables.
    . $OSM_DEVOPS/installers/export_ips
    echo -e " Configuring RO"
    lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg
    lxc exec RO -- service osm-ro restart

    # Wait until the RO REST API answers before using openmano.
    ro_is_up

    # Recreate the default tenant from scratch (delete is forced and may
    # harmlessly fail on a fresh install).
    lxc exec RO -- openmano tenant-delete -f osm >/dev/null
    lxc exec RO -- openmano tenant-create osm > /dev/null
    # Ensure OPENMANO_TENANT is exported from root's .bashrc.
    # NOTE(review): both of the next two lines add the export (insert before
    # the last line AND append), so after the dedup-delete the file ends up
    # with it twice — confirm whether one of them should be dropped.
    lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc
    lxc exec RO -- sed -i '$ i export OPENMANO_TENANT=osm' .bashrc
    lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc'
}
426
function configure_VCA(){
    # Set a freshly generated random password for the juju admin user inside
    # the VCA container. The password stays in the global JUJU_PASSWD so the
    # SO config-agent registration can reuse it.
    echo -e " Configuring VCA"
    JUJU_PASSWD=$(generate_secret)
    # juju prompts twice (new password + confirmation), so feed two copies.
    printf '%s\n%s\n' "$JUJU_PASSWD" "$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password
}
432
function configure_SOUI(){
    # Configure the SO/UI container: routing to the nested juju controller,
    # persistent journaling, launchpad external address, then register the
    # VCA (config agent) and RO accounts through the SO REST API and fix the
    # UI's OpenID Connect redirect URIs.
    # Requires JUJU_PASSWD (set by configure_VCA) and the IPs exported by
    # installers/export_ips.
    . $OSM_DEVOPS/installers/export_ips
    JUJU_CONTROLLER_IP=`lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}'`
    RO_TENANT_ID=`lxc exec RO -- openmano tenant-list osm |awk '{print $1}'`

    echo -e " Configuring MON"
    #Information to be added about SO socket for logging

    echo -e " Configuring SO"
    # Route traffic for the juju controller (nested inside VCA) through the
    # VCA container, and persist both routes in rc.local.
    sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP
    sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP
    sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local
    sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local
    # make journaling persistent
    lxc exec SO-ub -- mkdir -p /var/log/journal
    lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal
    lxc exec SO-ub -- systemctl restart systemd-journald

    # Advertise the host's default IP to the launchpad service.
    echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad

    lxc exec SO-ub -- systemctl restart launchpad

    # Block until the SO REST API answers before configuring it.
    so_is_up $SO_CONTAINER_IP

    #delete existing config agent (could be there on reconfigure)
    curl -k --request DELETE \
        --url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' &> /dev/null

    # Register the juju controller as the SO's config agent.
    result=$(curl -k --request POST \
        --url https://$SO_CONTAINER_IP:8008/api/config/config-agent \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result"

    #R1/R2 config line
    #result=$(curl -k --request PUT \
    # --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \
    # --header 'accept: application/vnd.yang.data+json' \
    # --header 'authorization: Basic YWRtaW46YWRtaW4=' \
    # --header 'cache-control: no-cache' \
    # --header 'content-type: application/vnd.yang.data+json' \
    # --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }')

    # Register the RO (openmano) account under the default project.
    result=$(curl -k --request PUT \
        --url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result"

    # Point the UI's OpenID Connect redirect URIs at the host's default IP.
    result=$(curl -k --request PATCH \
        --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed redirect-uri configuration: $result"

    result=$(curl -k --request PATCH \
        --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result"

    # Bind the default IP to a loopback alias inside the SO container.
    lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg <<EOF
auto lo:1
iface lo:1 inet static
address $DEFAULT_IP
netmask 255.255.255.255
EOF
    lxc exec SO-ub ifup lo:1
}
518
#Configure RO, VCA, and SO with the initial configuration:
# RO -> tenant:osm, logs to be sent to SO
# VCA -> juju-password
# SO -> route to Juju Controller, add RO account, add VCA account
function configure(){
    #Configure components
    # Order matters: RO must expose its tenant and VCA must set JUJU_PASSWD
    # before configure_SOUI registers both accounts in the SO.
    echo -e "\nConfiguring components"
    configure_RO
    configure_VCA
    configure_SOUI
}
530
function install_lxd() {
    # Install and initialize LXD with an lxdbr0 bridge, matching the bridge
    # MTU to the host's default-route interface.
    sudo apt-get update
    sudo apt-get install -y lxd
    # NOTE(review): 'newgrp lxd' starts a new shell instead of changing the
    # current one's group inside a non-interactive script — confirm this
    # behaves as intended here.
    newgrp lxd
    lxd init --auto
    lxd waitready
    lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
    # Interface carrying the default route, and its MTU.
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    lxc profile device set default eth0 mtu $DEFAULT_MTU
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
546
function ask_user(){
    # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
    # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true(0) if user type 'yes'; false (1) if user type 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # Normalize to lowercase and dispatch; an empty reply honours the
        # default only when the default itself is 'y' or 'n'.
        case "${USER_CONFIRMATION,,}" in
            "")
                [ "$2" == 'y' ] && return 0
                [ "$2" == 'n' ] && return 1
                ;;
            yes|y) return 0 ;;
            no|n) return 1 ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
560
function launch_container_from_lxd(){
    # Create (if needed) and wait for the build container of OSM module $1
    # using lxd base image $2. Relies on helpers defined elsewhere in devops:
    # OSM_load_config, container_exists, create_container, wait_container_up,
    # plus the OSM_BUILD_CONTAINER* variables OSM_load_config provides.
    export OSM_MDG=$1
    OSM_load_config
    export OSM_BASE_IMAGE=$2
    if ! container_exists $OSM_BUILD_CONTAINER; then
        # Optional lxc flags depending on the module's build requirements.
        CONTAINER_OPTS=""
        [[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true"
        [[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true"
        create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS
        wait_container_up $OSM_BUILD_CONTAINER
    fi
}
573
function install_osmclient(){
    # Install the OSM client (python3-osmclient + information model) from the
    # configured apt repository and print guidance on the OSM_HOSTNAME env
    # vars. Always returns 0.
    # The CLIENT_* vars strip the option prefixes ("-R ", "-r ", "-u ") that
    # the option parser left inside RELEASE/REPOSITORY/REPOSITORY_BASE.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Classic (non-lightweight) installs talk to the SO-ub/RO lxc containers,
    # so derive the host names from the lxc listing.
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
604
function install_prometheus_nodeexporter(){
    # Install and activate the Prometheus node_exporter as a systemd service,
    # unless it is already active. Uses $PROMETHEUS_NODE_EXPORTER_TAG (set
    # elsewhere in this script) to pick the release. Always returns 0.
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        # Dedicated no-login system user for the service.
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Download, extract and install the binary to /usr/local/bin.
        sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        # Fix: clean up the tarball and extracted tree in /tmp — the previous
        # glob was relative to the current directory, so the /tmp leftovers
        # were never removed.
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
630
function uninstall_prometheus_nodeexporter(){
    # Stop, disable and fully remove the node_exporter service, its binary
    # and its dedicated user. Always returns 0.
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
640
function install_from_lxdimages(){
    # Classic Rel THREE install path: download (or reuse a local repository
    # of) lxd images for RO, VCA and SO/UI, import them, then launch and
    # health-check the containers.
    LXD_RELEASE=${RELEASE#"-R "}
    if [ -n "$LXD_REPOSITORY_PATH" ]; then
        # Local repository: images already on disk, nothing to download.
        LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH"
    else
        # Download into a temp dir that is cleaned up on exit.
        LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")"
        trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT
    fi
    echo -e "\nDeleting previous lxd images if they exist"
    lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
    lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
    lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
    echo -e "\nImporting osm-ro"
    # wget only when no local repository path was given.
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro
    rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz
    echo -e "\nImporting osm-vca"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca
    rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz
    echo -e "\nImporting osm-soui"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui
    rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz
    launch_container_from_lxd RO osm-ro
    ro_is_up && track RO
    launch_container_from_lxd VCA osm-vca
    vca_is_up && track VCA
    # NOTE(review): an 'osm-mon' image is launched here but never imported
    # above — confirm where that image is expected to come from.
    launch_container_from_lxd MON osm-mon
    mon_is_up && track MON
    launch_container_from_lxd SO osm-soui
    #so_is_up && track SOUI
    track SOUI
}
675
function install_docker_ce() {
    # installs and configures Docker CE
    # Adds Docker's official apt repository, installs docker-ce, adds the
    # invoking user to the 'docker' group and verifies the daemon responds.
    # Dies via FATAL if the final check fails; returns 0 on success.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # 'sg docker' exercises the new group membership without re-login.
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
695
function install_docker_compose() {
    # installs and configures docker-compose
    # Downloads the pinned 1.18.0 release binary for this host's OS/arch
    # into /usr/local/bin and marks it executable.
    echo "Installing Docker Compose ..."
    local compose_url="https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m)"
    sudo curl -L "$compose_url" -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
703
function install_juju() {
    # Install juju from snap (classic confinement) and make sure /snap/bin is
    # on PATH for the rest of this script. lxd is reconfigured unless the
    # --nolxd option set INSTALL_NOLXD. Always returns 0.
    echo "Installing juju"
    sudo snap install juju --classic
    [ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
    # Prepend /snap/bin only if it is not already present in PATH.
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
    return 0
}
712
function juju_createcontroller() {
    # Bootstrap a local LXD juju controller named $OSM_STACK_NAME if one does
    # not exist yet, then verify it shows up in 'juju controllers'.
    # Dies via FATAL when the controller cannot be found afterwards.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
    fi
    # Fix: escape \$1 so awk (not the shell) expands the first field — the
    # unescaped $1 was substituted by the shell with the (empty) function
    # argument, turning the program into 'print $0'.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
720
function juju_createproxy() {
    # Idempotently add a DNAT rule forwarding host TCP port 17070 (juju API)
    # to the controller at $OSM_VCA_HOST, persisting it via
    # netfilter-persistent; installs iptables-persistent first if missing.
    echo -e "\nChecking required packages: iptables-persistent"
    # '||' chain: apt-get runs only when dpkg reports the package missing;
    # the '! echo' prints the notice while letting the chain fall through.
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
        sudo apt-get -yq install iptables-persistent

    # -C checks for an existing matching rule; only append when absent.
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
731
function generate_docker_images() {
    # Pull third-party infrastructure images and, for each OSM module, either
    # pull the published image (when PULL_IMAGES is set) or build it from a
    # fresh git checkout of $COMMIT_ID. A non-empty TO_REBUILD restricts the
    # work to the listed module names.
    echo "Pulling and generating docker images"
    # Build from the requested refspec, defaulting to master.
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    # Common --build-arg values forwarded to image builds (used by osmclient).
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # --- Third-party images: pull only ---
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # keystone-db backs both NBI and KEYSTONE-DB rebuild requests.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # --- OSM module images: pull published tag, or clone + build locally ---
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # NBI builds two images: nbi itself and the keystone sidecar.
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    # osmclient is built from the local devops checkout, not a fresh clone.
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
831
function cmp_overwrite() {
    # Copy $1 over $2 when their contents differ; if $2 already exists the
    # user is asked before overwriting. cp -b keeps a backup of the old file.
    # Fix: the test was `if ! $(cmp ...)`, which only worked via the obscure
    # rule that an empty command's status is that of its last command
    # substitution; run cmp directly instead, and quote the cp arguments.
    local file1="$1"
    local file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
843
function generate_docker_env_files() {
    # Create or refresh the per-component env files under $OSM_DOCKER_WORK_DIR.
    # Generated secrets are only written when a file does not exist yet, so
    # re-running the installer preserves existing credentials. VCA-related
    # entries are upserted: appended when the key is missing, sed-replaced
    # when it is already present.
    echo "Doing a backup of existing env files"
    # NOTE(review): on a first install these files do not exist yet and the
    # cp calls fail harmlessly (errors are not checked).
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml

        # Prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml

        # Grafana & Prometheus Exporter files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
    fi

    # LCM: seed the common DB key once, then upsert the VCA connection data.
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # The public key value is kept double-quoted inside the env file.
    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO: one generated MySQL root password, shared with keystone-db below.
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone: DB and service passwords are generated fresh per install.
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI: authenticates against keystone with the service password above.
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON: DB key, SQL URI, and upserted notifier/VCA settings.
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
977
function generate_osmclient_script () {
    # Write a one-line wrapper that runs the osmclient sidecar container on
    # the stack network, then mark it executable and tell the user where it is.
    local wrapper="$OSM_DOCKER_WORK_DIR/osm"
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee "$wrapper"
    $WORKDIR_SUDO chmod +x "$wrapper"
    echo "osmclient sidecar container can be found at: $wrapper"
}
983
#installs kubernetes packages
function install_kube() {
    # Add the upstream Kubernetes apt repository and install a pinned
    # kubelet/kubeadm/kubectl tool chain.
    local kube_version="1.15.0-00"
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y "kubelet=${kube_version}" "kubeadm=${kube_version}" "kubectl=${kube_version}"
}
993
#initializes kubernetes control plane
function init_kubeadm() {
    # $1: path to the kubeadm cluster-config yaml.
    # kubeadm refuses to run with swap enabled, so disable it first.
    # Fix: quote the config path so paths with spaces survive.
    local config_file="$1"
    sudo swapoff -a
    sudo kubeadm init --config "$config_file"
    sleep 5   # give the control plane a moment to come up
}
1000
function kube_config_dir() {
    # Copy the kubeadm-generated admin kubeconfig into ~/.kube/config and give
    # the invoking (non-root) user ownership of it.
    # Fix: quoted all expansions ($K8S_MANIFEST_DIR, $HOME, id output).
    [ ! -d "$K8S_MANIFEST_DIR" ] && FATAL "Cannot Install Kubernetes"
    mkdir -p "$HOME/.kube"
    sudo cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
    sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
}
1007
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Download the flannel manifest into a temp dir and apply it.
    # NOTE(review): this trap replaces any EXIT trap set by the caller (e.g.
    # the LWTEMPDIR cleanup in install_lightweight) — pre-existing behavior.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P "$CNI_DIR"
    # Fix: replaced the `[ $? -ne 0 ] && FATAL` pattern with a direct ||.
    kubectl apply -f "$CNI_DIR" || FATAL "Cannot Install Flannel"
}
1016
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # Create the OSM namespace and one generic secret per component, each
    # populated from that component's env file in $OSM_DOCKER_WORK_DIR.
    # Same commands as before, expressed as a loop (order preserved) instead
    # of eight copy-pasted lines.
    local component
    kubectl create ns $OSM_STACK_NAME
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
1029
#deploys osm pods and services
function deploy_osm_services() {
    # Untaint the master node so OSM pods can be scheduled on it, then apply
    # every manifest in $OSM_K8S_WORK_DIR into the stack namespace.
    # (Single awk program instead of the former awk | awk pipeline.)
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
1037
function parse_yaml() {
    # Rewrite the opensourcemano/<service> image tag in every k8s manifest to
    # the requested tag ($1).
    osm_services="nbi lcm ro pol mon light-ui keystone"
    TAG=$1
    local svc
    for svc in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$svc:.*/opensourcemano\/$svc:$TAG/g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
1045
function namespace_vol() {
    # Point each component's hostPath volume at the per-namespace directory
    # $OSM_NAMESPACE_VOL instead of the default /var/lib/osm.
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    local svc
    for svc in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
1052
function init_docker_swarm() {
    # Initialise a single-node docker swarm advertised on $DEFAULT_IP.
    # When the default interface MTU is non-standard, pre-create the
    # docker_gwbridge network with a matching MTU before `swarm init` so
    # overlay traffic is not fragmented.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # Space-separated list of all existing docker network names.
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        # Take the highest 172.x subnet in use and bump its second octet to get
        # a free one; prints "-1" when the octet would overflow past 255.
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
1062
function create_docker_network() {
    # Create the attachable overlay network shared by all OSM containers,
    # honouring the MTU detected on the default interface.
    echo "creating network"
    local overlay_opts="--driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU}"
    sg docker -c "docker network create ${overlay_opts} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
1068
function deploy_lightweight() {
    # Deploy the OSM docker-compose stack on the local swarm: assemble the
    # per-service port mappings and image tags into osm_ports.sh, then run
    # `docker stack deploy` with that environment sourced.

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    # With NO_HOST_PORTS the entries carry a single (container) port value;
    # otherwise they carry host:container pairs. Prometheus publishes on 9091
    # on the host to avoid clashing with RO's 9090.
    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # Persist everything docker-compose needs into osm_ports.sh.
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    # Source the env file and deploy inside the docker group context.
    pushd $OSM_DOCKER_WORK_DIR
    sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    popd

    echo "Finished deployment of lightweight build"
}
1123
function deploy_elk() {
    # Deploy the optional ELK (Elasticsearch/Logstash-beats/Kibana) stack as a
    # separate swarm stack "osm_elk", then poll Kibana for up to $timelength
    # seconds and create a default filebeat index pattern once it responds.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    # Tear down any previous osm_elk stack before redeploying.
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # Poll Kibana's /status endpoint every $step seconds, up to $timelength.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never answered: print the manual commands instead of failing.
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
1173
function install_lightweight() {
    # Main entry point of the lightweight (docker swarm / kubernetes) install:
    # prepares the working directory, installs prerequisites (LXD, juju,
    # docker CE, optionally kubernetes), gathers the VCA credentials,
    # builds/pulls the images, generates env files and deploys OSM.
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
    1. Install and configure LXD
    2. Install juju
    3. Install docker CE
    4. Disable swap space
    5. Install and initialize Kubernetes
    as pre-requirements.
    Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Discover the default-route interface and its IP address / MTU.
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
        need_packages_lw="lxd snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
    fi
    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Bootstrap (or reuse) the juju controller and collect the VCA data the
    # OSM services need: host, secret, public key, API proxy and CA cert.
    if [ -z "$OSM_VCA_HOST" ]; then
        juju_createcontroller
        OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy

    if [ -z "$OSM_VCA_CACERT" ]; then
        # Fix: the controller name was hard-coded as "osm" in the jq filter,
        # breaking installs with a custom stack name (-s); use $OSM_STACK_NAME.
        OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg stack "$OSM_STACK_NAME" '.controllers[$stack]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # Fix: the check tested the literal string "OSM_DATABASE_COMMONKEY"
        # (missing $), so a failed generate_secret was never detected.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi
    track juju

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ ! $OSM_DOCKER_TAG == "latest" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        track deploy_osm_services_k8s
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    if [ -n "$KUBERNETES" ] && [ -n "$K8S_MONITOR" ]; then
        # install OSM MONITORING
        install_k8s_monitoring
        track install_k8s_monitoring
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null
    track end
    return 0
}
1316
function install_vimemu() {
    # Build the vim-emu docker image from upstream sources and start it as a
    # daemonized container to be used as an emulated VIM.
    # Fix: plain `echo "\n..."` printed a literal backslash-n; use echo -e
    # (matching the other messages in this function).
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    # NOTE(review): this trap replaces any EXIT trap set earlier by the
    # caller — pre-existing behavior, kept as-is.
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "     export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "     osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1347
function install_k8s_monitoring() {
    # install OSM monitoring
    # Fix: $WORKDIR_SUDO was passed as an *operand* to chmod
    # (`chmod +x $WORKDIR_SUDO <files>`) instead of prefixing the command;
    # with WORKDIR_SUDO=sudo that made chmod fail on a file named "sudo".
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
1353
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    # Fix: same chmod-operand bug as install_k8s_monitoring — $WORKDIR_SUDO
    # must prefix the command, not be handed to chmod as a file argument.
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
1359
function dump_vars(){
    # Print every installer option/variable for --showopts style debugging.
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "NAT=$NAT"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_FROM_LXDIMAGES=$INSTALL_FROM_LXDIMAGES"
    echo "LXD_REPOSITORY_BASE=$LXD_REPOSITORY_BASE"
    echo "LXD_REPOSITORY_PATH=$LXD_REPOSITORY_PATH"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "NOCONFIGURE=$NOCONFIGURE"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # Fix: this line printed $OSM_STACK_NAME under the OSM_WORK_DIR label.
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1402
function track(){
    # Send an anonymous usage-tracking event for installation stage $1
    # (e.g. "start", "RO", "end") to the OSM telemetry endpoint.
    # Globals read: SESSION_ID (epoch seconds captured at script start, also
    # used as the tracking cookie), INSTALL_LIGHTWEIGHT, INSTALL_FROM_SOURCE,
    # INSTALL_FROM_LXDIMAGES.
    local ctime duration url event_name
    ctime=$(date +%s)
    # seconds elapsed since the installer started
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    # classify the install flavour for the event name
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    # quoted to guard against word-splitting/globbing of the expanded URL
    wget -q -O /dev/null "$url"
}
1416
1417 UNINSTALL=""
1418 DEVELOP=""
1419 NAT=""
1420 UPDATE=""
1421 RECONFIGURE=""
1422 TEST_INSTALLER=""
1423 INSTALL_LXD=""
1424 SHOWOPTS=""
1425 COMMIT_ID=""
1426 ASSUME_YES=""
1427 INSTALL_FROM_SOURCE=""
1428 RELEASE="ReleaseSIX"
1429 REPOSITORY="stable"
1430 INSTALL_VIMEMU=""
1431 INSTALL_FROM_LXDIMAGES=""
1432 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1433 LXD_REPOSITORY_PATH=""
1434 INSTALL_LIGHTWEIGHT="y"
1435 INSTALL_ONLY=""
1436 INSTALL_ELK=""
1437 #INSTALL_PERFMON=""
1438 TO_REBUILD=""
1439 INSTALL_NOLXD=""
1440 INSTALL_NODOCKER=""
1441 INSTALL_NOJUJU=""
1442 KUBERNETES=""
1443 K8S_MONITOR=""
1444 INSTALL_NOHOSTCLIENT=""
1445 NOCONFIGURE=""
1446 RELEASE_DAILY=""
1447 SESSION_ID=`date +%s`
1448 OSM_DEVOPS=
1449 OSM_VCA_HOST=
1450 OSM_VCA_SECRET=
1451 OSM_VCA_PUBKEY=
1452 OSM_STACK_NAME=osm
1453 NO_HOST_PORTS=""
1454 DOCKER_NOBUILD=""
1455 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1456 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1457 WORKDIR_SUDO=sudo
1458 OSM_WORK_DIR="/etc/osm"
1459 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1460 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1461 OSM_HOST_VOL="/var/lib/osm"
1462 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1463 OSM_DOCKER_TAG=latest
1464 DOCKER_USER=opensourcemano
1465 PULL_IMAGES="y"
1466 KAFKA_TAG=2.11-1.0.2
1467 PROMETHEUS_TAG=v2.4.3
1468 GRAFANA_TAG=latest
1469 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1470 PROMETHEUS_CADVISOR_TAG=latest
1471 KEYSTONEDB_TAG=10
1472 OSM_DATABASE_COMMONKEY=
1473 ELASTIC_VERSION=6.4.2
1474 ELASTIC_CURATOR_VERSION=5.5.4
1475 POD_NETWORK_CIDR=10.244.0.0/16
1476 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1477 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1478
# ---------------------------------------------------------------------------
# Command-line parsing.  Single-letter flags are handled directly; GNU-style
# long options arrive through the special "-" case with the option word in
# OPTARG (this relies on "-" being listed in the getopts spec).  "continue"
# inside an arm resumes the while loop with the next option.
# ---------------------------------------------------------------------------
while getopts ":hy-:b:r:c:k:u:R:l:p:D:o:m:H:S:s:w:t:U:P:A:" o; do
    case "${o}" in
        h)
            usage && exit 0
            ;;
        b)
            # install from a specific refspec; implies building locally
            # instead of pulling prebuilt images
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            # container orchestrator: "swarm" (default behaviour) or "k8s"
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # Fix: the message previously referred to "-i" although the
            # option being parsed is -c
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        l)
            LXD_REPOSITORY_BASE="${OPTARG}"
            ;;
        p)
            LXD_REPOSITORY_PATH="${OPTARG}"
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        s)
            # stack name (swarm) or namespace (k8s); a k8s namespace must
            # match RE_CHECK.  Fix: an invalid namespace now exits with a
            # non-zero status (previously exit 0, i.e. reported success).
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 1
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        P)
            # -P takes a *file path*; the public key content is stored
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        o)
            # install only the selected optional component, then exit
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            #[ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
            ;;
        m)
            # accumulate modules to rebuild; unknown values fall through
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        -)
            # long options: the word after "--" is in OPTARG
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "nat" ] && NAT="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lxdimages" ] && INSTALL_FROM_LXDIMAGES="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "soui" ] && INSTALL_LIGHTWEIGHT="" && RELEASE="-R ReleaseTHREE" && REPOSITORY="-r stable" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            #[ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
            [ "${OPTARG}" == "noconfigure" ] && NOCONFIGURE="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "daily" ] && RELEASE_DAILY="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && K8S_MONITOR="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1607
# Reject option combinations that only make sense with the legacy SO/UI
# ("--soui") install, or only with the lightweight install.
# NOTE(review): FATAL is defined in $OSM_DEVOPS/common/all_funcs, which is
# only sourced further below — if one of these checks fires, bash would
# report "FATAL: command not found" instead; confirm whether intended.
[ -n "$INSTALL_FROM_LXDIMAGES" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --lxd can only be used with --soui"
[ -n "$NAT" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --nat can only be used with --soui"
[ -n "$NOCONFIGURE" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --noconfigure can only be used with --soui"
[ -n "$RELEASE_DAILY" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --daily can only be used with --soui"
[ -n "$INSTALL_NOLXD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nolxd cannot be used with --soui"
[ -n "$INSTALL_NODOCKER" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nodocker cannot be used with --soui"
[ -n "$TO_REBUILD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: -m cannot be used with --soui"
# "-m NONE" must be the only -m value; TO_REBUILD accumulates with a leading
# space, hence the " NONE" comparison.
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"

# --showopts: dump the effective configuration and stop.
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

# --daily (legacy SO/UI only): switch to the daily testing repo at master.
[ -n "$RELEASE_DAILY" ] && echo -e "\nInstalling from daily build repo" && RELEASE="-R ReleaseTHREE-daily" && REPOSITORY="-r testing" && COMMIT_ID="master"
1623
1624 # if develop, we force master
1625 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1626
1627 need_packages="git jq wget curl tar"
1628 echo -e "Checking required packages: $need_packages"
1629 dpkg -l $need_packages &>/dev/null \
1630 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1631 || sudo apt-get update \
1632 || FATAL "failed to run apt-get update"
1633 dpkg -l $need_packages &>/dev/null \
1634 || ! echo -e "Installing $need_packages requires root privileges." \
1635 || sudo apt-get install -y $need_packages \
1636 || FATAL "failed to install $need_packages"
1637
# Locate (or fetch) the devops repo that carries the rest of the installer.
# -D overrides it entirely; --test uses the local checkout this script runs
# from; otherwise clone into a temp dir that is removed on exit via trap.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        # parent directory of the directory containing this script
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            # No -b given: pick the newest vX.Y… tag as the stable release.
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            # NOTE(review): exits 0 (success) when no tag is found — confirm
            # whether a failure status was intended here.
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi
1662
1663 . $OSM_DEVOPS/common/all_funcs
1664
1665 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1666 [ -n "$UNINSTALL" ] && uninstall && echo -e "\nDONE" && exit 0
1667 [ -n "$NAT" ] && nat && echo -e "\nDONE" && exit 0
1668 [ -n "$UPDATE" ] && update && echo -e "\nDONE" && exit 0
1669 [ -n "$RECONFIGURE" ] && configure && echo -e "\nDONE" && exit 0
1670 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1671 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1672 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1673 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1674
#Installation starts here
# The README fetch doubles as an install-started ping; output is discarded.
wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README.txt &> /dev/null
track start

# Default path: lightweight (docker/k8s) install, which exits on completion.
# Everything below this line is the legacy SO/UI (--soui) flow.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

# The legacy flow deploys into LXD containers, so lxd must be present.
echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true
# Build/install each component, reporting progress via track() as soon as
# the component's health check (ro_is_up, vca_is_up, ...) passes.
if [ -n "$INSTALL_FROM_SOURCE" ]; then #install from source
    echo -e "\nCreating the containers and building from source ..."
    $OSM_DEVOPS/jenkins/host/start_build RO --notest checkout $COMMIT_ID || FATAL "RO container build failed (refspec: '$COMMIT_ID')"
    ro_is_up && track RO
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA container build failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/start_build MON || FATAL "MON install failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/start_build SO checkout $COMMIT_ID || FATAL "SO container build failed (refspec: '$COMMIT_ID')"
    $OSM_DEVOPS/jenkins/host/start_build UI checkout $COMMIT_ID || FATAL "UI container build failed (refspec: '$COMMIT_ID')"
    #so_is_up && track SOUI
    track SOUI
elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then #install from LXD images stored in OSM repo
    echo -e "\nInstalling from lxd images ..."
    install_from_lxdimages
else #install from binaries
    echo -e "\nCreating the containers and installing from binaries ..."
    # REPO_ARGS carries the -r/-R/-k/-u flags accumulated during parsing
    $OSM_DEVOPS/jenkins/host/install RO ${REPO_ARGS[@]} || FATAL "RO install failed"
    ro_is_up && track RO
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA install failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/install MON || FATAL "MON build failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/install SO ${REPO_ARGS[@]} || FATAL "SO install failed"
    $OSM_DEVOPS/jenkins/host/install UI ${REPO_ARGS[@]} || FATAL "UI install failed"
    #so_is_up && track SOUI
    track SOUI
fi

#Install iptables-persistent and configure NAT rules
[ -z "$NOCONFIGURE" ] && nat

#Configure components
[ -z "$NOCONFIGURE" ] && configure

#Install osmclient
[ -z "$NOCONFIGURE" ] && install_osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# install-finished ping, mirroring the one at the start
wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null
track end
echo -e "\nDONE"