#!/bin/bash
# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function usage(){
echo -e "usage: $0 [OPTIONS]"
echo -e "Install OSM from binaries or source code (by default, from binaries)"
echo -e " OPTIONS"
echo -e " -r <repo>: use specified repository name for osm packages"
echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
echo -e " -u <repo base>: use specified repository url for osm packages"
echo -e " -k <repo key>: use specified repository public key url"
echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
echo -e " -b master (main dev branch)"
echo -e " -b v2.0 (v2.0 branch)"
echo -e " -b tags/v1.1.0 (a specific tag)"
echo -e " ..."
echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
echo -e " -H <VCA host> use specific juju host controller IP"
echo -e " -S <VCA secret> use VCA/juju secret key"
echo -e " -P <VCA pubkey> use VCA/juju public key file"
echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
echo -e " --pm_stack: additionally deploy a Prometheus+Grafana stack for performance monitoring (PM)"
echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, KEYSTONE-DB, NONE)"
echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, pm_stack)"
echo -e " -D <devops path> use local devops installation path"
echo -e " -w <work dir> Location to store runtime installation"
echo -e " -t <docker tag> specify osm docker tag (default is latest)"
echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
echo -e " --nojuju: do not juju, assumes already installed"
echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
echo -e " --nohostclient: do not install the osmclient"
echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
echo -e " --source: install OSM from source code using the latest stable tag"
echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
echo -e " --soui: install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)"
echo -e " --lxdimages: (only for Rel THREE with --soui) download lxd images from OSM repository instead of creating them from scratch"
echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
echo -e " --k8s_monitor: install the OSM kubernetes moitoring with prometheus and grafana"
echo -e " -l <lxd_repo>: (only for Rel THREE with --soui) use specified repository url for lxd images"
echo -e " -p <path>: (only for Rel THREE with --soui) use specified repository path for lxd images"
# echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
echo -e " --nat: (only for Rel THREE with --soui) install only NAT rules"
echo -e " --noconfigure: (only for Rel THREE with --soui) DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules"
# echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
echo -e " --showopts: print chosen options and exit (only for debugging)"
echo -e " -y: do not prompt for confirmation, assumes yes"
echo -e " -h / --help: print this help"
}
#Uninstall OSM: remove containers
function uninstall(){
echo -e "\nUninstalling OSM"
if [ $RC_CLONE ] || [ -n "$TEST_INSTALLER" ]; then
$OSM_DEVOPS/jenkins/host/clean_container RO
$OSM_DEVOPS/jenkins/host/clean_container VCA
$OSM_DEVOPS/jenkins/host/clean_container MON
$OSM_DEVOPS/jenkins/host/clean_container SO
#$OSM_DEVOPS/jenkins/host/clean_container UI
else
lxc stop RO && lxc delete RO
lxc stop VCA && lxc delete VCA
lxc stop MON && lxc delete MON
lxc stop SO-ub && lxc delete SO-ub
fi
echo -e "\nDeleting imported lxd images if they exist"
lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
return 0
}
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
function parse_juju_password {
password_file="${HOME}/.local/share/juju/accounts.yaml"
local controller_name=$1
local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
sed -ne "s|^\($s\):|\1|" \
-e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
-e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
awk -F$fs -v controller=$controller_name '{
indent = length($1)/2;
vname[indent] = $2;
for (i in vname) {if (i > indent) {delete vname[i]}}
if (length($3) > 0) {
vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
if (match(vn,controller) && match($2,"password")) {
printf("%s",$3);
}
}
}'
}
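# Usage sketch (accounts.yaml excerpt and values are illustrative):
#   controllers:
#     osm:
#       user: admin
#       password: 86f0a241589f48728a755ca9f5abb9fc
# parse_juju_password osm   -> prints 86f0a241589f48728a755ca9f5abb9fc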
function generate_secret() {
head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
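# Usage sketch: JUJU_PASSWD=$(generate_secret)
# yields 32 alphanumeric characters, e.g. "kQ3vX9..." (illustrative value)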
if [ -n "$KUBERNETES" ]; then
k8_volume=$1
echo "Removing ${k8_volume}"
$WORKDIR_SUDO rm -rf ${k8_volume}
else
stack=$1
volumes="mongo_db mon_db osm_packages ro_db"
for volume in $volumes; do
sg docker -c "docker volume rm ${stack}_${volume}"
done
fi
}
function remove_network() {
stack=$1
sg docker -c "docker network rm net${stack}"
}
function remove_iptables() {
stack=$1
if [ -z "$OSM_VCA_HOST" ]; then
OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
[ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
fi
if sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
sudo iptables -t nat -D PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
sudo netfilter-persistent save
fi
}
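# Usage sketch (IP illustrative): "remove_iptables osm" resolves the VCA host
# of the "osm" controller and drops the matching DNAT rule, equivalent to:
#   sudo iptables -t nat -D PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination 10.0.0.10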
function remove_stack() {
stack=$1
if sg docker -c "docker stack ps ${stack}" ; then
echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
COUNTER=0
result=1
while [ ${COUNTER} -lt 30 ]; do
result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
#echo "Dockers running: $result"
if [ "${result}" == "0" ]; then
break
fi
let COUNTER=COUNTER+1
sleep 1
done
if [ "${result}" == "0" ]; then
echo "All dockers of the stack ${stack} were removed"
else
FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
#removes osm deployments and services
function remove_k8s_namespace() {
kubectl delete ns $1
}
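# Usage sketch: "remove_k8s_namespace osm" runs "kubectl delete ns osm",
# removing every OSM deployment and service in that namespace at once.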
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
if [ -n "$INSTALL_ONLY" ]; then
if [ -n "$INSTALL_ELK" ]; then
echo -e "\nUninstalling OSM ELK stack"
remove_stack osm_elk
$WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
fi
else
echo -e "\nUninstalling OSM"
if [ -n "$K8S_MONITOR" ]; then
# uninstall OSM MONITORING
uninstall_k8s_monitoring
fi
remove_k8s_namespace $OSM_STACK_NAME
else
remove_stack $OSM_STACK_NAME
remove_stack osm_elk
uninstall_prometheus_nodeexporter
fi
echo "Now osm docker images and volumes will be deleted"
newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG
if [ -n "$KUBERNETES" ]; then
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
remove_volumes $OSM_NAMESPACE_VOL
else
remove_volumes $OSM_STACK_NAME
remove_network $OSM_STACK_NAME
fi
echo "Removing $OSM_DOCKER_WORK_DIR"
$WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
fi
echo "Some docker images will be kept in case they are used by other docker stacks"
echo "To remove them, just run 'docker image prune' in a terminal"
return 0
}
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
echo -e "\nChecking required packages: iptables-persistent"
dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent
echo -e "\nConfiguring NAT rules"
echo -e " Required root privileges"
sudo $OSM_DEVOPS/installers/nat_osm
}
function FATAL(){
echo "FATAL error: Cannot install OSM due to \"$1\""
exit 1
}
#Update RO, SO and UI:
function update(){
echo -e "\nUpdating components"
echo -e " Updating RO"
CONTAINER="RO"
MDG="RO"
INSTALL_FOLDER="/opt/openmano"
echo -e " Fetching the repo"
lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
BRANCH=""
BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
[ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
# COMMIT_ID either was previously set with -b option, or is an empty string
CHECKOUT_ID=$COMMIT_ID
[ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
[ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
if [[ $CHECKOUT_ID == "tags/"* ]]; then
REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
else
REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
fi
echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
echo " Nothing to be done."
else
echo " Update required."
lxc exec $CONTAINER -- service osm-ro stop
lxc exec $CONTAINER -- git -C /opt/openmano stash
lxc exec $CONTAINER -- git -C /opt/openmano pull --rebase
lxc exec $CONTAINER -- git -C /opt/openmano checkout $CHECKOUT_ID
lxc exec $CONTAINER -- git -C /opt/openmano stash pop
lxc exec $CONTAINER -- /opt/openmano/database_utils/migrate_mano_db.sh
lxc exec $CONTAINER -- service osm-ro start
fi
echo
echo -e " Updating SO and UI"
CONTAINER="SO-ub"
MDG="SO"
INSTALL_FOLDER="" # To be filled in
echo -e " Fetching the repo"
lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
BRANCH=""
BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
[ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
# COMMIT_ID either was previously set with -b option, or is an empty string
CHECKOUT_ID=$COMMIT_ID
[ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
[ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
if [[ $CHECKOUT_ID == "tags/"* ]]; then
REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
else
REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
fi
echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
echo " Nothing to be done."
else
echo " Update required."
# Instructions to be added
# lxc exec SO-ub -- ...
fi
echo
echo -e "Updating MON Container"
CONTAINER="MON"
MDG="MON"
INSTALL_FOLDER="/root/MON"
echo -e " Fetching the repo"
lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
BRANCH=""
BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
[ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
# COMMIT_ID either was previously set with -b option, or is an empty string
CHECKOUT_ID=$COMMIT_ID
[ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
[ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
if [[ $CHECKOUT_ID == "tags/"* ]]; then
REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
else
REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
fi
echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
echo " Nothing to be done."
else
echo " Update required."
fi
echo
}
function so_is_up() {
if [ -n "$1" ]; then
SO_IP=$1
else
SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'`
fi
time=0
step=5
timelength=300
while [ $time -le $timelength ]
do
if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \
-H 'accept: application/vnd.yang.data+json' \
-H 'authorization: Basic YWRtaW46YWRtaW4=' \
-H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]]
then
echo "RW.Restconf running....SO is up"
return 0
fi
sleep $step
echo -n "."
time=$((time+step))
done
FATAL "OSM Failed to startup. SO failed to startup"
}
function vca_is_up() {
if [[ `lxc exec VCA -- juju status | grep "osm" | wc -l` -eq 1 ]]; then
echo "VCA is up and running"
return 0
fi
FATAL "OSM Failed to startup. VCA failed to startup"
}
function mon_is_up() {
if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
echo "MON is up and running"
return 0
fi
FATAL "OSM Failed to startup. MON failed to startup"
}
function ro_is_up() {
if [ -n "$1" ]; then
RO_IP=$1
else
RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'`
fi
time=0
step=2
timelength=20
while [ $time -le $timelength ]; do
if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
echo "RO is up and running"
return 0
fi
sleep $step
echo -n "."
time=$((time+step))
done
FATAL "OSM Failed to startup. RO failed to startup"
}
function configure_RO(){
. $OSM_DEVOPS/installers/export_ips
echo -e " Configuring RO"
lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg
lxc exec RO -- service osm-ro restart
ro_is_up
lxc exec RO -- openmano tenant-delete -f osm >/dev/null
lxc exec RO -- openmano tenant-create osm > /dev/null
lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc
lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc'
}
function configure_VCA(){
echo -e " Configuring VCA"
JUJU_PASSWD=$(generate_secret)
echo -e "$JUJU_PASSWD\n$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password
}
function configure_SOUI(){
. $OSM_DEVOPS/installers/export_ips
JUJU_CONTROLLER_IP=`lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}'`
RO_TENANT_ID=`lxc exec RO -- openmano tenant-list osm |awk '{print $1}'`
echo -e " Configuring MON"
#Information to be added about SO socket for logging
echo -e " Configuring SO"
sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP
sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP
sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local
sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local
# make journaling persistent
lxc exec SO-ub -- mkdir -p /var/log/journal
lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal
lxc exec SO-ub -- systemctl restart systemd-journald
echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad
lxc exec SO-ub -- systemctl restart launchpad
so_is_up $SO_CONTAINER_IP
#delete existing config agent (could be there on reconfigure)
curl -k --request DELETE \
--url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \
--header 'accept: application/vnd.yang.data+json' \
--header 'authorization: Basic YWRtaW46YWRtaW4=' \
--header 'cache-control: no-cache' \
--header 'content-type: application/vnd.yang.data+json' &> /dev/null
result=$(curl -k --request POST \
--url https://$SO_CONTAINER_IP:8008/api/config/config-agent \
--header 'accept: application/vnd.yang.data+json' \
--header 'authorization: Basic YWRtaW46YWRtaW4=' \
--header 'cache-control: no-cache' \
--header 'content-type: application/vnd.yang.data+json' \
--data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}')
[[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result"
#R1/R2 config line
#result=$(curl -k --request PUT \
# --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \
# --header 'accept: application/vnd.yang.data+json' \
# --header 'authorization: Basic YWRtaW46YWRtaW4=' \
# --header 'cache-control: no-cache' \
# --header 'content-type: application/vnd.yang.data+json' \
# --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }')
result=$(curl -k --request PUT \
--url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \
--header 'accept: application/vnd.yang.data+json' \
--header 'authorization: Basic YWRtaW46YWRtaW4=' \
--header 'cache-control: no-cache' \
--header 'content-type: application/vnd.yang.data+json' \
--data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}')
[[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result"
result=$(curl -k --request PATCH \
--url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \
--header 'accept: application/vnd.yang.data+json' \
--header 'authorization: Basic YWRtaW46YWRtaW4=' \
--header 'cache-control: no-cache' \
--header 'content-type: application/vnd.yang.data+json' \
--data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }')
[[ $result =~ .*success.* ]] || FATAL "Failed redirect-uri configuration: $result"
result=$(curl -k --request PATCH \
--url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \
--header 'accept: application/vnd.yang.data+json' \
--header 'authorization: Basic YWRtaW46YWRtaW4=' \
--header 'cache-control: no-cache' \
--header 'content-type: application/vnd.yang.data+json' \
--data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }')
[[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result"
lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg <<EOF
auto lo:1
iface lo:1 inet static
address $DEFAULT_IP
netmask 255.255.255.255
EOF
lxc exec SO-ub -- ifup lo:1
}
#Configure RO, VCA, and SO with the initial configuration:
# RO -> tenant:osm, logs to be sent to SO
# VCA -> juju-password
# SO -> route to Juju Controller, add RO account, add VCA account
function configure(){
#Configure components
echo -e "\nConfiguring components"
configure_RO
configure_VCA
configure_SOUI
}
function install_lxd() {
sudo apt-get update
sudo apt-get install -y lxd
# run the LXD setup in a subshell with the new "lxd" group active,
# mirroring the "newgrp docker << EONG" pattern used later in this script
newgrp lxd << EONG
lxd init --auto
lxd waitready
lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
DEFAULT_INTERFACE=\$(route -n | awk '\$1~/^0.0.0.0/ {print \$8}')
DEFAULT_MTU=\$(ip addr show \$DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print \$1;}')
lxc profile device set default eth0 mtu \$DEFAULT_MTU
EONG
#sudo systemctl stop lxd-bridge
#sudo systemctl --system daemon-reload
#sudo systemctl enable lxd-bridge
#sudo systemctl start lxd-bridge
}
function ask_user(){
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
read -e -p "$1" USER_CONFIRMATION
while true ; do
[ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
[ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
[ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
[ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
done
}
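# Usage sketch (file names illustrative):
#   ask_user "The file exists. Overwrite (y/N)? " n && cp -b new.cfg old.cfg
# Plain Enter applies the default given as the second argument ('n' here).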
function launch_container_from_lxd(){
export OSM_MDG=$1
OSM_load_config
export OSM_BASE_IMAGE=$2
if ! container_exists $OSM_BUILD_CONTAINER; then
CONTAINER_OPTS=""
[[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true"
[[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true"
create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS
wait_container_up $OSM_BUILD_CONTAINER
fi
}
function install_osmclient(){
CLIENT_RELEASE=${RELEASE#"-R "}
CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
CLIENT_REPOSITORY=${REPOSITORY#"-r "}
CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
curl $key_location | sudo apt-key add -
sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
sudo apt-get update
sudo apt-get install -y python3-pip
sudo -H LC_ALL=C python3 -m pip install -U pip
sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind
sudo apt-get install -y python3-osm-im python3-osmclient
#sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
#echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
#echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
[ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
[ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
echo -e "\nOSM client installed"
if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
else
echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
echo " export OSM_HOSTNAME=<OSM_host>"
fi
return 0
}
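# Optional sanity check once OSM is up (assumes OSM_HOSTNAME points to a
# reachable NBI):
#   osm version
#   osm ns-list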
function install_prometheus_nodeexporter(){
sudo useradd --no-create-home --shell /bin/false node_exporter
sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
sudo systemctl daemon-reload
sudo systemctl restart node_exporter
sudo systemctl enable node_exporter
return 0
}
function uninstall_prometheus_nodeexporter(){
sudo systemctl stop node_exporter
sudo systemctl disable node_exporter
sudo rm /etc/systemd/system/node_exporter.service
sudo systemctl daemon-reload
sudo userdel node_exporter
sudo rm /usr/local/bin/node_exporter
return 0
}
function install_from_lxdimages(){
LXD_RELEASE=${RELEASE#"-R "}
if [ -n "$LXD_REPOSITORY_PATH" ]; then
LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH"
else
LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")"
trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT
fi
echo -e "\nDeleting previous lxd images if they exist"
lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
echo -e "\nImporting osm-ro"
[ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz
lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro
rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz
echo -e "\nImporting osm-vca"
[ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz
lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca
rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz
echo -e "\nImporting osm-soui"
[ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz
lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui
rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz
launch_container_from_lxd RO osm-ro
ro_is_up && track RO
launch_container_from_lxd VCA osm-vca
vca_is_up && track VCA
launch_container_from_lxd MON osm-mon
mon_is_up && track MON
launch_container_from_lxd SO osm-soui
#so_is_up && track SOUI
track SOUI
}
function install_docker_ce() {
# installs and configures Docker CE
echo "Installing Docker CE ..."
sudo apt-get -qq update
sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get -qq update
sudo apt-get install -y docker-ce
echo "Adding user to group 'docker'"
sudo groupadd -f docker
sudo usermod -aG docker $USER
sleep 2
sudo service docker restart
echo "... restarted Docker service"
sg docker -c "docker version" || FATAL "Docker installation failed"
echo "... Docker CE installation done"
return 0
}
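# Optional smoke test (pulls a tiny public image from Docker Hub):
#   sg docker -c "docker run --rm hello-world"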
function install_docker_compose() {
# installs and configures docker-compose
echo "Installing Docker Compose ..."
sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
echo "... Docker Compose installation done"
}
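# Verify the pinned binary (1.18.0) landed on PATH:
#   docker-compose --version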
function install_juju() {
echo "Installing juju"
[ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
[[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
echo "Finished installation of juju"
return 0
}
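# Verify juju is reachable on PATH (/snap/bin was prepended above):
#   juju version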
function juju_createcontroller() {
if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
# Controller not found: create it
sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
fi
[ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
echo -e "\nChecking required packages: iptables-persistent"
dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent
if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
sudo iptables -t nat -A PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
sudo netfilter-persistent save
fi
}
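# The DNAT rule added above exposes the juju API port; it can be inspected with:
#   sudo iptables -t nat -L PREROUTING -n | grep 17070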
function generate_docker_images() {
echo "Pulling and generating docker images"
_build_from=$COMMIT_ID
[ -z "$_build_from" ] && _build_from="master"
echo "OSM Docker images generated from $_build_from"
BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
fi
if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
fi
if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
fi
if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
fi
if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
fi
if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
fi
if [ -n "$PULL_IMAGES" ]; then
sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
fi
if [ -n "$PULL_IMAGES" ]; then
sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
fi
if [ -n "$PULL_IMAGES" ]; then
sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
fi
if [ -n "$PULL_IMAGES" ]; then
sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
fi
if [ -n "$PULL_IMAGES" ]; then
sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
fi
if [ -n "$PULL_IMAGES" ]; then
sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
fi
if [ -n "$PULL_IMAGES" ]; then
sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
fi
echo "Finished generation of docker images"
}
function cmp_overwrite() {
file1="$1"
file2="$2"
if ! $(cmp "${file1}" "${file2}" >/dev/null 2>&1); then
if [ -f "${file2}" ]; then
ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
function generate_docker_env_files() {
echo "Doing a backup of existing env files"
$WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
$WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
$WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
$WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
$WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
$WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
$WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
$WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
$WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
echo "Generating docker env files"
if [ -n "$KUBERNETES" ]; then
#Kubernetes resources
$WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
else
# Docker-compose
$WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
$WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml
# Grafana & Prometheus Exporter files
$WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
$WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
# LCM
if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
fi
if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
else
$WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
fi
if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
else
$WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
fi
if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
echo "OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
$WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"|g" $OSM_DOCKER_WORK_DIR/lcm.env
if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
else
$WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
fi
if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
else
$WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
fi
# RO
MYSQL_ROOT_PASSWORD=$(generate_secret)
if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
# Keystone
KEYSTONE_DB_PASSWORD=$(generate_secret)
SERVICE_PASSWORD=$(generate_secret)
if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
fi
if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
# MON
if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
fi
if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
else
$WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
fi
if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
else
$WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
fi
if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
else
$WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
fi
if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
else
$WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
fi
# POL
if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
fi
# LW-UI
if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
fi
echo "Finished generation of docker env files"
}
function generate_osmclient_script () {
echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
$WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
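# Usage sketch: running $OSM_DOCKER_WORK_DIR/osm opens an interactive osmclient
# container on net${OSM_STACK_NAME}; note the generated one-liner does not
# forward extra arguments (there is no "$@" in it).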
#installs kubernetes packages
function install_kube() {
sudo apt-get update && sudo apt-get install -y apt-transport-https
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
sudo apt-get update
echo "Installing Kubernetes Packages ..."
sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
function init_kubeadm() {
sudo swapoff -a
sudo kubeadm init --config $1
sleep 5
}
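# Usage sketch (config path illustrative; the installer writes the kubeadm
# config elsewhere):
#   init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml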
function kube_config_dir() {
[ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
mkdir -p $HOME/.kube
sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
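# After this, kubectl can reach the new cluster without sudo, e.g.:
#   kubectl get nodes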
#deploys flannel as daemonsets
function deploy_cni_provider() {
CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
trap 'rm -rf "${CNI_DIR}"' EXIT