2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
function usage(){
    # Print the installer's command-line help.
    # Fixes two user-facing typos from the original text:
    #   "confifured" -> "configured", "moitoring" -> "monitoring".
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " ..."
    echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack)"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --soui: install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)"
    echo -e " --lxdimages: (only for Rel THREE with --soui) download lxd images from OSM repository instead of creating them from scratch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " -l <lxd_repo>: (only for Rel THREE with --soui) use specified repository url for lxd images"
    echo -e " -p <path>: (only for Rel THREE with --soui) use specified repository path for lxd images"
#    echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    echo -e " --nat: (only for Rel THREE with --soui) install only NAT rules"
    echo -e " --noconfigure: (only for Rel THREE with --soui) DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules"
#    echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -h / --help: print this help"
}
#Uninstall OSM: remove containers
function uninstall(){
    # NOTE(review): reconstructed from a mangled paste; the function opener,
    # else/fi lines and trailing return were unreadable — verify upstream.
    # Tear down the classic LXD-based deployment: containers first, then any
    # imported lxd images.
    echo -e "\nUninstalling OSM"
    if [ $RC_CLONE ] || [ -n "$TEST_INSTALLER" ]; then
        # CI/test environments use the devops cleanup helpers
        $OSM_DEVOPS/jenkins/host/clean_container RO
        $OSM_DEVOPS/jenkins/host/clean_container VCA
        $OSM_DEVOPS/jenkins/host/clean_container MON
        $OSM_DEVOPS/jenkins/host/clean_container SO
        #$OSM_DEVOPS/jenkins/host/clean_container UI
    else
        lxc stop RO && lxc delete RO
        lxc stop VCA && lxc delete VCA
        lxc stop MON && lxc delete MON
        lxc stop SO-ub && lxc delete SO-ub
    fi
    echo -e "\nDeleting imported lxd images if they exist"
    lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
    lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
    lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
    return 0
}
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
function parse_juju_password {
    # Fixes vs. original: password_file is now local (it leaked into the
    # global scope) and $password_file/$fs/$controller_name are quoted so a
    # $HOME containing spaces cannot break the pipeline.
    local password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: a YAML key, fs: an unlikely field separator
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # Flatten the two-space-indented YAML into "indent<fs>key<fs>value"
    # records, then walk the hierarchy in awk and print the value of any
    # "password" key found under the requested controller.
    sed -ne "s|^\($s\):|\1|" \
         -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
         -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$password_file" |
    awk -F"$fs" -v controller="$controller_name" '{
        indent = length($1)/2;
        vname[indent] = $2;
        # forget nesting state deeper than the current indentation level
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
function generate_secret() {
    # Emit a 32-character alphanumeric secret on stdout.
    # Fix vs. original (`head /dev/urandom | tr ...`): reading the urandom
    # stream directly guarantees enough filtered characters are available,
    # so the result is always exactly 32 chars long.
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
function remove_volumes() {
    # K8s mode: $1 is a host directory holding the namespace's volumes.
    # Swarm mode: $1 is the stack name whose named docker volumes are removed.
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
function remove_network() {
    # Delete the docker network "net<stack>" created at install time.
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
function remove_iptables() {
    # Remove the DNAT rule that forwarded juju port 17070 to the VCA host.
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # derive the juju controller IP when it was not passed in
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    # -C only checks for the rule; delete (-D) and persist when it exists
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
function remove_stack() {
    # Remove the docker swarm stack $1 and poll (up to ~30s) until all of its
    # services are gone; FATAL if containers survive.
    # NOTE(review): counter initialisation and the sleeps were unreadable in
    # the mangled source and were reconstructed — verify upstream.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}
#removes osm deployments and services
function remove_k8s_namespace() {
    # Deleting the namespace cascades to every deployment/service inside it.
    # NOTE(review): body reconstructed from a mangled paste — verify upstream.
    kubectl delete ns $1
}
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # NOTE(review): else/fi branch closers and the heredoc terminator were
    # unreadable in the mangled source and were reconstructed — verify
    # against the upstream devops repository.
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
        fi
        echo "Now osm docker images and volumes will be deleted"
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG
        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # NOTE(review): function opener reconstructed from a mangled paste.
    echo -e "\nChecking required packages: iptables-persistent"
    # If the package is missing, print a notice and fall through to apt-get
    # (the negated echo keeps the || chain advancing to the install step).
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
    sudo apt-get -yq install iptables-persistent
    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
function FATAL(){
    # Report the fatal condition and abort the installer with status 1.
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}
242 #Update RO, SO and UI:
244 echo -e "\nUpdating components"
246 echo -e " Updating RO"
249 INSTALL_FOLDER
="/opt/openmano"
250 echo -e " Fetching the repo"
251 lxc
exec $CONTAINER -- git
-C $INSTALL_FOLDER fetch
--all
253 BRANCH
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
254 [ -z "$BRANCH" ] && FATAL
"Could not find the current branch in use in the '$MDG'"
255 CURRENT
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
256 CURRENT_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
257 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
258 # COMMIT_ID either was previously set with -b option, or is an empty string
259 CHECKOUT_ID
=$COMMIT_ID
260 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID
="tags/$LATEST_STABLE_DEVOPS"
261 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID
="$BRANCH"
262 if [[ $CHECKOUT_ID == "tags/"* ]]; then
263 REMOTE_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
265 REMOTE_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
267 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
268 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
269 echo " Nothing to be done."
271 echo " Update required."
272 lxc
exec $CONTAINER -- service osm-ro stop
273 lxc
exec $CONTAINER -- git
-C /opt
/openmano stash
274 lxc
exec $CONTAINER -- git
-C /opt
/openmano pull
--rebase
275 lxc
exec $CONTAINER -- git
-C /opt
/openmano checkout
$CHECKOUT_ID
276 lxc
exec $CONTAINER -- git
-C /opt
/openmano stash pop
277 lxc
exec $CONTAINER -- /opt
/openmano
/database_utils
/migrate_mano_db.sh
278 lxc
exec $CONTAINER -- service osm-ro start
282 echo -e " Updating SO and UI"
285 INSTALL_FOLDER
="" # To be filled in
286 echo -e " Fetching the repo"
287 lxc
exec $CONTAINER -- git
-C $INSTALL_FOLDER fetch
--all
289 BRANCH
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
290 [ -z "$BRANCH" ] && FATAL
"Could not find the current branch in use in the '$MDG'"
291 CURRENT
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
292 CURRENT_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
293 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
294 # COMMIT_ID either was previously set with -b option, or is an empty string
295 CHECKOUT_ID
=$COMMIT_ID
296 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID
="tags/$LATEST_STABLE_DEVOPS"
297 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID
="$BRANCH"
298 if [[ $CHECKOUT_ID == "tags/"* ]]; then
299 REMOTE_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
301 REMOTE_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
303 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
304 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
305 echo " Nothing to be done."
307 echo " Update required."
308 # Instructions to be added
309 # lxc exec SO-ub -- ...
312 echo -e "Updating MON Container"
315 INSTALL_FOLDER
="/root/MON"
316 echo -e " Fetching the repo"
317 lxc
exec $CONTAINER -- git
-C $INSTALL_FOLDER fetch
--all
319 BRANCH
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
320 [ -z "$BRANCH" ] && FATAL
"Could not find the current branch in use in the '$MDG'"
321 CURRENT
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
322 CURRENT_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
323 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
324 # COMMIT_ID either was previously set with -b option, or is an empty string
325 CHECKOUT_ID
=$COMMIT_ID
326 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID
="tags/$LATEST_STABLE_DEVOPS"
327 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID
="$BRANCH"
328 if [[ $CHECKOUT_ID == "tags/"* ]]; then
329 REMOTE_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
331 REMOTE_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
333 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
334 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
335 echo " Nothing to be done."
337 echo " Update required."
function so_is_up() {
    # Poll the SO REST endpoint until RW.Restconf reports RUNNING, or FATAL
    # after the timeout. $1 (optional) overrides the SO IP.
    # NOTE(review): timer scaffolding reconstructed from a mangled paste —
    # verify the step/timeout values upstream.
    if [ -n "$1" ]; then
        SO_IP=$1
    else
        SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=5
    timelength=300
    while [ $time -le $timelength ]
    do
        if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \
                -H 'accept: application/vnd.yang.data+json' \
                -H 'authorization: Basic YWRtaW46YWRtaW4=' \
                -H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]]
        then
            echo "RW.Restconf running....SO is up"
            return 0
        fi
        sleep $step
        echo -n "."
        time=$((time+step))
    done
    FATAL "OSM Failed to startup. SO failed to startup"
}
function vca_is_up() {
    # One-shot check: the VCA is considered up when `juju status` inside the
    # VCA container mentions "osm" exactly once; otherwise abort.
    if [[ `lxc exec VCA -- juju status | grep "osm" | wc -l` -eq 1 ]]; then
        echo "VCA is up and running"
        return 0
    fi
    FATAL "OSM Failed to startup. VCA failed to startup"
}
function mon_is_up() {
    # One-shot liveness check for MON.
    # NOTE(review): this probes $RO_IP:9090 (the RO endpoint) — looks like a
    # copy-paste from ro_is_up; confirm the intended MON health check.
    if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
        echo "MON is up and running"
        return 0
    fi
    FATAL "OSM Failed to startup. MON failed to startup"
}
function ro_is_up() {
    # Poll the RO openmano endpoint until it answers "works", or FATAL after
    # the timeout. $1 (optional) overrides the RO IP.
    # NOTE(review): timer scaffolding reconstructed from a mangled paste —
    # verify the step/timeout values upstream.
    if [ -n "$1" ]; then
        RO_IP=$1
    else
        RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=2
    timelength=20
    while [ $time -le $timelength ]; do
        if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
            echo "RO is up and running"
            return 0
        fi
        sleep $step
        echo -n ","
        time=$((time+step))
    done
    FATAL "OSM Failed to startup. RO failed to startup"
}
function configure_RO(){
    # Point RO's logging socket at the SO container and (re)create the "osm"
    # tenant inside the RO container.
    . $OSM_DEVOPS/installers/export_ips
    echo -e " Configuring RO"
    lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg
    lxc exec RO -- service osm-ro restart

    ro_is_up

    lxc exec RO -- openmano tenant-delete -f osm >/dev/null
    lxc exec RO -- openmano tenant-create osm > /dev/null
    # ensure OPENMANO_TENANT is exported exactly once in root's .bashrc
    lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc
    lxc exec RO -- sed -i '$ i export OPENMANO_TENANT=osm' .bashrc
    lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc'
}
function configure_VCA(){
    # Set a freshly generated random password for the juju admin user;
    # juju prompts twice, hence the password is piped in two times.
    echo -e " Configuring VCA"
    JUJU_PASSWD=$(generate_secret)
    echo -e "$JUJU_PASSWD\n$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password
}
function configure_SOUI(){
    # Wire SO/UI to the rest of the deployment: host routes to the juju
    # controller, persistent journaling in SO-ub, launchpad restart, and the
    # SO REST configuration (config-agent, RO account, UI redirect URIs).
    # NOTE(review): blank lines and the interfaces heredoc header were
    # unreadable in the mangled source — verify upstream.
    . $OSM_DEVOPS/installers/export_ips
    JUJU_CONTROLLER_IP=`lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}'`
    RO_TENANT_ID=`lxc exec RO -- openmano tenant-list osm |awk '{print $1}'`

    echo -e " Configuring MON"
    #Information to be added about SO socket for logging

    echo -e " Configuring SO"
    # route the juju controller subnet through the VCA container, and persist
    # the routes across reboots via rc.local
    sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP
    sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP
    sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local
    sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local
    # make journaling persistent
    lxc exec SO-ub -- mkdir -p /var/log/journal
    lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal
    lxc exec SO-ub -- systemctl restart systemd-journald

    echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad

    lxc exec SO-ub -- systemctl restart launchpad

    so_is_up $SO_CONTAINER_IP

    #delete existing config agent (could be there on reconfigure)
    curl -k --request DELETE \
      --url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \
      --header 'accept: application/vnd.yang.data+json' \
      --header 'authorization: Basic YWRtaW46YWRtaW4=' \
      --header 'cache-control: no-cache' \
      --header 'content-type: application/vnd.yang.data+json' &> /dev/null

    result=$(curl -k --request POST \
      --url https://$SO_CONTAINER_IP:8008/api/config/config-agent \
      --header 'accept: application/vnd.yang.data+json' \
      --header 'authorization: Basic YWRtaW46YWRtaW4=' \
      --header 'cache-control: no-cache' \
      --header 'content-type: application/vnd.yang.data+json' \
      --data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result"

    #result=$(curl -k --request PUT \
    #  --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \
    #  --header 'accept: application/vnd.yang.data+json' \
    #  --header 'authorization: Basic YWRtaW46YWRtaW4=' \
    #  --header 'cache-control: no-cache' \
    #  --header 'content-type: application/vnd.yang.data+json' \
    #  --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }')

    result=$(curl -k --request PUT \
      --url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \
      --header 'accept: application/vnd.yang.data+json' \
      --header 'authorization: Basic YWRtaW46YWRtaW4=' \
      --header 'cache-control: no-cache' \
      --header 'content-type: application/vnd.yang.data+json' \
      --data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result"

    result=$(curl -k --request PATCH \
      --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \
      --header 'accept: application/vnd.yang.data+json' \
      --header 'authorization: Basic YWRtaW46YWRtaW4=' \
      --header 'cache-control: no-cache' \
      --header 'content-type: application/vnd.yang.data+json' \
      --data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed redirect-uri configuration: $result"

    result=$(curl -k --request PATCH \
      --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \
      --header 'accept: application/vnd.yang.data+json' \
      --header 'authorization: Basic YWRtaW46YWRtaW4=' \
      --header 'cache-control: no-cache' \
      --header 'content-type: application/vnd.yang.data+json' \
      --data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result"

    # bind the UI address on a loopback alias, persisted in interfaces.d
    lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg <<EOF
auto lo:1
iface lo:1 inet static
    address $DEFAULT_IP
    netmask 255.255.255.255
EOF
    lxc exec SO-ub ifup lo:1
}
#Configure RO, VCA, and SO with the initial configuration:
#  RO -> tenant:osm, logs to be sent to SO
#  VCA -> juju-password
#  SO -> route to Juju Controller, add RO account, add VCA account
function configure(){
    #Configure components
    # NOTE(review): the three sub-steps were unreadable in the mangled
    # source and were reconstructed — verify upstream.
    echo -e "\nConfiguring components"
    configure_RO
    configure_VCA
    configure_SOUI
}
function install_lxd() {
    # Install LXD, initialise it, create the default bridge and align the
    # default profile's MTU with the host's default interface.
    # NOTE(review): the newgrp/init lines were unreadable in the mangled
    # source and were reconstructed — verify upstream.
    sudo apt-get update
    sudo apt-get install -y lxd
    newgrp lxd << EONG
lxd init --auto
lxd waitready
lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
EONG
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    lxc profile device set default eth0 mtu $DEFAULT_MTU
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
function ask_user(){
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # empty answer: fall back to the default action when one is allowed
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        # ${var,,} lower-cases the reply so matching is case-insensitive
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        # anything else: keep asking
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
function launch_container_from_lxd(){
    # Launch (if absent) the build container for module $1 from lxd image $2,
    # honouring the privileged/nesting options from the loaded config.
    # NOTE(review): the OSM_MDG/OSM_load_config lines were unreadable in the
    # mangled source and were reconstructed — verify upstream.
    export OSM_MDG=$1
    OSM_load_config
    export OSM_BASE_IMAGE=$2
    if ! container_exists $OSM_BUILD_CONTAINER; then
        CONTAINER_OPTS=""
        [[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true"
        [[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true"
        create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS
        wait_container_up $OSM_BUILD_CONTAINER
    fi
}
function install_osmclient(){
    # Install the osmclient packages from the OSM apt repository and print
    # the environment variables the user may want in .bashrc.
    # Strip the option prefixes the CLI parser left on these values.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # classic (non-lightweight) installs resolve the container IPs via lxc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
function install_prometheus_nodeexporter(){
    # Install and activate the Prometheus node_exporter as a systemd service,
    # creating its dedicated no-login user if needed. No-op when already active.
    if (systemctl -q is-active node_exporter)
        then
            echo "Node Exporter is already running."
        else
            echo "Node Exporter is not active, installing..."
            if getent passwd node_exporter > /dev/null 2>&1; then
                echo "node_exporter user exists"
            else
                echo "Creating user node_exporter"
                sudo useradd --no-create-home --shell /bin/false node_exporter
            fi
            sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
            sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
            sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
            sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
            sudo rm -rf node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
            sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
            sudo systemctl daemon-reload
            sudo systemctl restart node_exporter
            sudo systemctl enable node_exporter
            echo "Node Exporter has been activated in this host."
    fi
    return 0
}
function uninstall_prometheus_nodeexporter(){
    # Stop and remove the node_exporter service, its binary and its user.
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
function install_from_lxdimages(){
    # Rel THREE (--soui --lxdimages) path: import prebuilt lxd images (from a
    # local path or the OSM repository) and launch the classic containers.
    # NOTE(review): else/fi closers and the ro_is_up/track lines were
    # unreadable in the mangled source and were reconstructed.
    LXD_RELEASE=${RELEASE#"-R "}
    if [ -n "$LXD_REPOSITORY_PATH" ]; then
        LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH"
    else
        LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")"
        trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT
    fi
    echo -e "\nDeleting previous lxd images if they exist"
    lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
    lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
    lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
    echo -e "\nImporting osm-ro"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro
    rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz
    echo -e "\nImporting osm-vca"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca
    rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz
    echo -e "\nImporting osm-soui"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui
    rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz
    launch_container_from_lxd RO osm-ro
    ro_is_up && track RO
    launch_container_from_lxd VCA osm-vca
    vca_is_up && track VCA
    launch_container_from_lxd MON osm-mon
    mon_is_up && track MON
    launch_container_from_lxd SO osm-soui
    #so_is_up && track SOUI
    track SOUI
}
function install_docker_ce() {
    # installs and configures Docker CE from the upstream apt repository and
    # adds the invoking user to the "docker" group.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # sg runs the check with the fresh group membership of this session
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
}
function install_docker_compose() {
    # installs and configures docker-compose (pinned 1.18.0 upstream binary)
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
function install_juju() {
    # Install juju from snap and make sure /snap/bin is on PATH for the rest
    # of this installer run.
    echo "Installing juju"
    sudo snap install juju --classic
    [ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
    return 0
}
function juju_createcontroller() {
    # Bootstrap a local juju controller named after the stack, if one does
    # not already exist, then verify it is listed exactly once.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
function juju_createproxy() {
    # Add (and persist) a DNAT rule forwarding host port 17070 to the juju
    # controller at $OSM_VCA_HOST, unless the rule already exists.
    echo -e "\nChecking required packages: iptables-persistent"
    # If the package is missing, print a notice and fall through to apt-get
    # (the negated echo keeps the || chain advancing to the install step).
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
    sudo apt-get -yq install iptables-persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
#######################################
# Pulls third-party docker images and pulls-or-builds the OSM module images.
# Globals read: COMMIT_ID, TO_REBUILD, PULL_IMAGES, DOCKER_USER, OSM_DOCKER_TAG,
#   LWTEMPDIR, KAFKA_TAG, PROMETHEUS_TAG, PROMETHEUS_CADVISOR_TAG, GRAFANA_TAG,
#   KEYSTONEDB_TAG, REPOSITORY*, RELEASE, OSM_DEVOPS.
# Globals written: _build_from, BUILD_ARGS.
# Aborts via FATAL (defined elsewhere in this file) on any pull/build error.
#######################################
function generate_docker_images() {
    echo "Pulling and generating docker images"
    # Build from the refspec given with -b; default to master when unset.
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"
    echo "OSM Docker images generated from $_build_from"

    # Forwarded to every "docker build" so the images install OSM packages
    # from the configured repository.
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # Third-party images: fetched when rebuilding everything (TO_REBUILD empty)
    # or when the module name appears in TO_REBUILD.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # mariadb backs the keystone DB, so NBI rebuilds need it too.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # OSM module images: either pull the prebuilt image (PULL_IMAGES set) or
    # clone the module at $COMMIT_ID into $LWTEMPDIR and build it locally.
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # NBI ships the keystone image in its repo, so both are handled together.
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient ; then
        # NOTE(review): unlike the other builds, this one has no "|| FATAL" guard
        # and passes BUILD_ARGS — kept exactly as found.
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        # NOTE(review): duplicates the PROMETHEUS-CADVISOR pull above (but keyed
        # on PROMETHEUS) — kept as-is; confirm against upstream before removing.
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
#######################################
# Copies $1 over $2 unless both files already have identical content.
# If a *different* $2 exists, asks the user (via ask_user, defined elsewhere
# in this file) before overwriting; a backup of the target is kept (cp -b).
# Arguments: $1 - source file, $2 - destination file
#######################################
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    # Bug fix: the original wrote `if ! $(cmp ...)`, which substitutes cmp's
    # (redirected, hence empty) output as a command and throws away cmp's exit
    # status, so the comparison never influenced the branch. Test cmp's exit
    # status directly; -s keeps it silent like the old redirections did.
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
#######################################
# Generates (or refreshes) the per-service env files under
# $OSM_DOCKER_WORK_DIR and copies the deployment descriptors.
# Pattern used throughout: create the env file with generated secrets the
# first time; on re-runs, append a missing key or sed-replace an existing one.
# Globals read: KUBERNETES, OSM_DEVOPS, OSM_DOCKER_WORK_DIR, WORKDIR_SUDO,
#   OSM_DATABASE_COMMONKEY, OSM_VCA_* and DEFAULT_IP.
#######################################
function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    # cp file{,~} keeps a "~" backup of each env file before regeneration.
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose descriptor
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        # Prometheus configuration
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml
        # Grafana & Prometheus Exporter files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO: a fresh random root password is generated per install; note it is
    # only written when the env files do not already exist.
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
#######################################
# Writes an executable wrapper at $OSM_DOCKER_WORK_DIR/osm that launches the
# osmclient sidecar container attached to the stack network.
# Globals read: OSM_STACK_NAME, DOCKER_USER, OSM_DOCKER_TAG,
#   OSM_DOCKER_WORK_DIR, WORKDIR_SUDO.
#######################################
function generate_osmclient_script () {
    local wrapper="$OSM_DOCKER_WORK_DIR/osm"
    local run_cmd="docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}"
    # tee (optionally under sudo) both creates the wrapper and echoes it.
    printf '%s\n' "$run_cmd" | $WORKDIR_SUDO tee "$wrapper"
    $WORKDIR_SUDO chmod +x "$wrapper"
    echo "osmclient sidecar container can be found at: $wrapper"
}
#installs kubernetes packages
# Adds the upstream Kubernetes apt repository and installs kubelet/kubeadm/
# kubectl pinned to 1.15.0-00 (pinned so kubeadm init below matches the
# cluster-config shipped with this installer).
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    # Trust Google's package-signing key for the apt.kubernetes.io repo.
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    # NOTE(review): an "apt-get update" after adding the repo appears to have
    # been lost from this copy of the file — confirm against upstream.
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
# Bootstraps the control plane from the kubeadm config file passed as $1
# (the installer passes $OSM_DOCKER_WORK_DIR/cluster-config.yaml).
# NOTE(review): surrounding lines (presumably swap disabling before init)
# were lost from this copy of the file — confirm against upstream.
function init_kubeadm() {
    sudo kubeadm init --config $1
}
# Copies the kubeadm-generated admin kubeconfig into $HOME/.kube/config and
# chowns it so kubectl works for the invoking (non-root) user.
function kube_config_dir() {
    # The manifests dir only exists after a successful kubeadm init.
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
#deploys flannel as daemonsets
# Downloads the flannel manifest into a throwaway dir and applies it.
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    # NOTE(review): this EXIT trap replaces any EXIT trap installed earlier
    # (e.g. the LWTEMPDIR cleanup set in install_lightweight) — traps don't
    # stack in bash; confirm this is acceptable.
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    # $? here is kubectl's exit status from the line above.
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
# Creates the stack namespace, then turns each generated *.env file into a
# k8s generic secret consumed by the corresponding OSM pod.
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}
#deploys osm pods and services
function deploy_osm_services() {
    # Find the master node name (3rd column of "kubectl get nodes" contains
    # the role) and remove its NoSchedule taint so single-node clusters can
    # schedule the OSM pods on it.
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
#######################################
# Rewrites the image tag of every OSM service manifest in $OSM_K8S_WORK_DIR so
# that "opensourcemano/<svc>:<anything>" becomes "opensourcemano/<svc>:<tag>".
# Arguments: $1 - docker tag to pin (caller invokes "parse_yaml $OSM_DOCKER_TAG")
# Globals read: OSM_K8S_WORK_DIR, WORKDIR_SUDO.
#######################################
function parse_yaml() {
    # Fix: capture the tag from $1 — the sed expression previously relied on a
    # $TAG variable that is never set in this function's visible scope, while
    # the caller clearly passes the tag as the first argument.
    TAG=$1
    osm_services="nbi lcm ro pol mon light-ui keystone"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
#######################################
# Points every hostPath volume in the service manifests at the stack-specific
# directory ${OSM_NAMESPACE_VOL} instead of the shared default /var/lib/osm.
# Globals read: OSM_K8S_WORK_DIR, OSM_NAMESPACE_VOL, WORKDIR_SUDO.
#######################################
function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    local manifest
    for osm in $osm_services; do
        manifest="$OSM_K8S_WORK_DIR/$osm.yaml"
        # '#' as sed delimiter because both paths contain slashes.
        $WORKDIR_SUDO sed -i -e "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" "$manifest"
    done
}
# Initializes a single-node docker swarm on the default interface; when the
# host MTU differs from 1500, first recreates docker_gwbridge with a matching
# MTU and a free 172.x subnet so overlay traffic is not fragmented.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # All existing docker network names, space-separated (header row dropped).
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        # Pick the highest in-use 172.x subnet and bump its second octet to get
        # an unused range for the gateway bridge; prints -1 when exhausted.
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 |  awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
}
# Creates the attachable overlay network "net${OSM_STACK_NAME}" that the OSM
# stack services (and the optional vim-emu container) join, honouring the
# host's MTU.
function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
#######################################
# Deploys the lightweight OSM build as a docker swarm stack: computes the
# host-port mappings, exports them (plus image tags) through osm_ports.sh,
# and runs "docker stack deploy".
# NOTE(review): the defaults for OSM_NBI_PORT / OSM_RO_PORT / OSM_UI_PORT /
# OSM_MON_PORT / OSM_PROM_PORT were set on lines lost from this copy of the
# file — confirm against upstream.
#######################################
function deploy_lightweight() {
    echo "Deploying lightweight build"
    OSM_KEYSTONE_PORT=5000
    OSM_PROM_CADVISOR_PORT=8080
    # Prometheus is published on 9091 on the host to avoid clashing with its
    # in-container port.
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # Container-only ports: services are reachable inside the overlay
        # network but not published on the host.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container mappings (the default).
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    # osm_ports.sh is sourced by the deploy command below so docker-compose
    # variable interpolation sees the chosen ports and image tags.
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    # NOTE(review): the matching "popd" appears to have been lost from this
    # copy of the file — confirm against upstream.
    echo "Finished deployment of lightweight build"
}
#######################################
# Pulls the Elastic stack images, deploys them as the "osm_elk" swarm stack,
# then polls Kibana and creates the default filebeat index pattern.
#######################################
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"

    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # NOTE(review): the initialization of these poll-loop variables was lost
    # from this copy of the file; values reconstructed — confirm upstream.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        # Kibana answers /status with HTTP 200 once it is ready.
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        else
            sleep $step
            time=$((time+step))
        fi
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
  "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
  -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
  "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
  -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
}
#######################################
# Top-level driver for the lightweight install: prepares the work dir, sets up
# LXD/juju (the VCA), docker, optionally Kubernetes, builds/pulls images and
# deploys the OSM services either as k8s pods or a swarm stack.
# NOTE(review): several structural/track lines were lost from this copy of the
# file (e.g. calls between remove_k8s_namespace and parse_yaml, a final
# "track end") — reconstructed conservatively from the visible lines; confirm
# against upstream.
#######################################
function install_lightweight() {
    # Non-default stack names get their own work dir / k8s namespace volume.
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."

    # Confirm the pre-requirement changes with the user unless -y was given.
    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine default interface / IP / MTU; everything downstream (swarm
    # advertise address, VCA API proxy, overlay MTU) depends on these.
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
        need_packages_lw="lxd snapd"
        echo -e "Checking required packages: $need_packages_lw"
        # "dpkg -l || ! echo ... || sudo ..." idiom: only runs apt when a
        # package is missing, printing the sudo warning first.
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju

    # Discover (or create) the juju controller and derive the VCA credentials
    # unless they were passed on the command line.
    if [ -z "$OSM_VCA_HOST" ]; then
        juju_createcontroller
        OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        OSM_VCA_CACERT=$(juju controllers --format json | jq -r '.controllers["osm"]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # Bug fix: the guard previously tested the literal string
        # "OSM_DATABASE_COMMONKEY" (missing '$'), which is never empty, so the
        # FATAL path could never trigger.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        # Pin manifests to the requested tag unless we deploy "latest".
        [ ! $OSM_DOCKER_TAG == "latest" ] && parse_yaml $OSM_DOCKER_TAG
        track deploy_osm_services_k8s
        if [ -n "$K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # swarm path: replace any previous stack and redeploy.
        remove_stack $OSM_STACK_NAME
        create_docker_network
        generate_osmclient_script
    fi
    install_prometheus_nodeexporter

    [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
    [ -n "$INSTALL_ELK" ] && deploy_elk && track elk

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient

    # Fetch the release README as an install beacon; output discarded.
    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
}
#######################################
# Clones, builds and starts the vim-emu (emulated VIM) docker container, then
# prints how to register it as a VIM in OSM.
#######################################
function install_vimemu() {
    # NOTE(review): missing -e, so "\n" prints literally — kept as found.
    echo "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    # NOTE(review): this EXIT trap replaces the LWTEMPDIR trap installed by
    # install_lightweight — traps don't stack in bash.
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    # Grab the container IP from whichever network it joined.
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
# Installs the optional OSM monitoring stack on Kubernetes by delegating to
# the helper scripts shipped under $OSM_DEVOPS/installers/k8s.
function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
# Removes the optional OSM monitoring stack from Kubernetes via the shipped
# helper script.
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring (original comment said "install" — copy-paste)
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
# Prints the effective installer configuration (used by the -s/--showopts
# debugging path). Purely informational; reads globals, writes stdout.
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_FROM_LXDIMAGES=$INSTALL_FROM_LXDIMAGES"
    echo "LXD_REPOSITORY_BASE=$LXD_REPOSITORY_BASE"
    echo "LXD_REPOSITORY_PATH=$LXD_REPOSITORY_PATH"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "NOCONFIGURE=$NOCONFIGURE"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # Bug fix: this line previously printed $OSM_STACK_NAME under the
    # OSM_WORK_DIR label (copy-paste error), hiding the real work dir.
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1408 duration
=$
((ctime
- SESSION_ID
))
1409 url
="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1410 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1412 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name
="binsrc"
1413 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name
="lxd"
1414 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name
="lw"
1415 event_name
="${event_name}_$1"
1416 url
="${url}&event=${event_name}&ce_duration=${duration}"
1417 wget
-q -O /dev
/null
$url
1430 INSTALL_FROM_SOURCE
=""
1431 RELEASE
="ReleaseSEVEN"
1434 INSTALL_FROM_LXDIMAGES
=""
1435 LXD_REPOSITORY_BASE
="https://osm-download.etsi.org/repository/osm/lxd"
1436 LXD_REPOSITORY_PATH
=""
1437 INSTALL_LIGHTWEIGHT
="y"
1446 INSTALL_NOHOSTCLIENT
=""
1449 SESSION_ID
=`date +%s`
1457 REPOSITORY_KEY
="OSM%20ETSI%20Release%20Key.gpg"
1458 REPOSITORY_BASE
="https://osm-download.etsi.org/repository/osm/debian"
1460 OSM_WORK_DIR
="/etc/osm"
1461 OSM_DOCKER_WORK_DIR
="/etc/osm/docker"
1462 OSM_K8S_WORK_DIR
="${OSM_DOCKER_WORK_DIR}/osm_pods"
1463 OSM_HOST_VOL
="/var/lib/osm"
1464 OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1465 OSM_DOCKER_TAG
=latest
1466 DOCKER_USER
=opensourcemano
1468 KAFKA_TAG
=2.11-1.0
.2
1469 PROMETHEUS_TAG
=v2.4
.3
1471 PROMETHEUS_NODE_EXPORTER_TAG
=0.18.1
1472 PROMETHEUS_CADVISOR_TAG
=latest
1474 OSM_DATABASE_COMMONKEY
=
1475 ELASTIC_VERSION
=6.4.2
1476 ELASTIC_CURATOR_VERSION
=5.5.4
1477 POD_NETWORK_CIDR
=10.244.0.0/16
1478 K8S_MANIFEST_DIR
="/etc/kubernetes/manifests"
1479 RE_CHECK
='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# ---------------------------------------------------------------------------
# Command-line option parsing: `while getopts` + `case "${o}"`.
# NOTE(review): this region is a mangled extraction — the original line
# numbers (1481, 1491, ...) are fused into the text, statements are split
# across lines, and the `case` line, the arm labels (r), c), R), ...), the
# `;;` separators, `esac` and `done` were all elided (the original numbering
# jumps). The fragments are kept byte-identical below; the comments map each
# fragment to the option arm implied by the getopts spec string. Restore the
# full structure from the upstream OSM devops installer before running.
# ---------------------------------------------------------------------------
1481 while getopts ":hy-:b:r:c:k:u:R:l:p:D:o:m:H:S:s:w:t:U:P:A:" o
; do
# -r: repository name, also forwarded to component installers via REPO_ARGS.
1491 REPOSITORY
="${OPTARG}"
1492 REPO_ARGS
+=(-r "$REPOSITORY")
# -c: container orchestrator; only "swarm" (default) and "k8s" are valid.
1495 [ "${OPTARG}" == "swarm" ] && continue
1496 [ "${OPTARG}" == "k8s" ] && KUBERNETES
="y" && continue
# NOTE(review): message says "-i" but this is the -c arm — likely a typo.
1497 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
# -R: release; the RELEASE="${OPTARG}" assignment line is elided here.
1502 REPO_ARGS
+=(-R "$RELEASE")
# -k: repository public key URL.
1505 REPOSITORY_KEY
="${OPTARG}"
1506 REPO_ARGS
+=(-k "$REPOSITORY_KEY")
# -u: repository base URL.
1509 REPOSITORY_BASE
="${OPTARG}"
1510 REPO_ARGS
+=(-u "$REPOSITORY_BASE")
# Docker hub user for images (presumably the -U arm, the only remaining
# candidate in the getopts spec — TODO confirm).
1513 DOCKER_USER
="${OPTARG}"
# -l: LXD repository base URL.
1516 LXD_REPOSITORY_BASE
="${OPTARG}"
# -p: LXD repository path.
1519 LXD_REPOSITORY_PATH
="${OPTARG}"
# -D: use an existing devops checkout instead of cloning.
1522 OSM_DEVOPS
="${OPTARG}"
# -s: stack name / k8s namespace; namespaces must match RE_CHECK.
1525 OSM_STACK_NAME
="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~
$RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
# -H: VCA/juju controller host IP.
1528 OSM_VCA_HOST
="${OPTARG}"
# -S: VCA/juju secret.
1531 OSM_VCA_SECRET
="${OPTARG}"
# -P: VCA/juju public key, read from the given file.
1534 OSM_VCA_PUBKEY
=$
(cat ${OPTARG})
# -A: VCA/juju API proxy.
1537 OSM_VCA_APIPROXY
="${OPTARG}"
# -w: custom work dir (the elided line presumably clears WORKDIR_SUDO).
1540 # when specifying workdir, do not use sudo for access
1542 OSM_WORK_DIR
="${OPTARG}"
# -t: docker tag for OSM images.
1545 OSM_DOCKER_TAG
="${OPTARG}"
# -o: install only the given add-on (vimemu / elk_stack).
1549 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU
="y" && continue
1550 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK
="y" && continue
# -m: accumulate modules to rebuild into TO_REBUILD (space-prefixed list;
# hence the leading space checked as " NONE" further below).
1553 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD
="$TO_REBUILD LW-UI" && continue
1554 [ "${OPTARG}" == "NBI" ] && TO_REBUILD
="$TO_REBUILD NBI" && continue
1555 [ "${OPTARG}" == "LCM" ] && TO_REBUILD
="$TO_REBUILD LCM" && continue
1556 [ "${OPTARG}" == "RO" ] && TO_REBUILD
="$TO_REBUILD RO" && continue
1557 [ "${OPTARG}" == "MON" ] && TO_REBUILD
="$TO_REBUILD MON" && continue
1558 [ "${OPTARG}" == "POL" ] && TO_REBUILD
="$TO_REBUILD POL" && continue
1559 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD
="$TO_REBUILD KAFKA" && continue
1560 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD
="$TO_REBUILD MONGO" && continue
1561 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS" && continue
1562 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1563 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD
="$TO_REBUILD KEYSTONE-DB" && continue
1564 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD
="$TO_REBUILD GRAFANA" && continue
1565 [ "${OPTARG}" == "NONE" ] && TO_REBUILD
="$TO_REBUILD NONE" && continue
# "-" arm: GNU-style long options (--source, --uninstall, --k8s_monitor, ...).
1568 [ "${OPTARG}" == "help" ] && usage
&& exit 0
1569 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE
="y" && PULL_IMAGES
="" && continue
1570 [ "${OPTARG}" == "develop" ] && DEVELOP
="y" && continue
1571 [ "${OPTARG}" == "uninstall" ] && UNINSTALL
="y" && continue
1572 [ "${OPTARG}" == "nat" ] && NAT
="y" && continue
1573 [ "${OPTARG}" == "update" ] && UPDATE
="y" && continue
1574 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE
="y" && continue
1575 [ "${OPTARG}" == "test" ] && TEST_INSTALLER
="y" && continue
1576 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD
="y" && continue
1577 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD
="y" && continue
1578 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER
="y" && continue
1579 [ "${OPTARG}" == "lxdimages" ] && INSTALL_FROM_LXDIMAGES
="y" && continue
1580 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT
="y" && continue
# --soui selects the legacy SO/UI install (ReleaseTHREE stable repo).
1581 [ "${OPTARG}" == "soui" ] && INSTALL_LIGHTWEIGHT
="" && RELEASE
="-R ReleaseTHREE" && REPOSITORY
="-r stable" && continue
1582 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU
="y" && continue
1583 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK
="y" && continue
1584 [ "${OPTARG}" == "noconfigure" ] && NOCONFIGURE
="y" && continue
1585 [ "${OPTARG}" == "showopts" ] && SHOWOPTS
="y" && continue
1586 [ "${OPTARG}" == "daily" ] && RELEASE_DAILY
="y" && continue
1587 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS
="y" && continue
1588 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU
="y" && continue
1589 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD
="y" && continue
1590 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT
="y" && continue
# --pullimages is the default behaviour, hence a no-op here.
1591 [ "${OPTARG}" == "pullimages" ] && continue
1592 [ "${OPTARG}" == "k8s_monitor" ] && K8S_MONITOR
="y" && continue
# Fallthrough of the "-" arm: unknown long option.
1593 echo -e "Invalid option: '--$OPTARG'\n" >&2
# \? arm: unknown short option.
1597 echo -e "Invalid option: '-$OPTARG'\n" >&2
# ---------------------------------------------------------------------------
# Sanity checks for mutually-incompatible command-line options.
# FATAL (defined in common/all_funcs, sourced below) aborts with a message.
# NOTE(review): reconstructed from a mangled extraction of this region.
# ---------------------------------------------------------------------------
[ -n "$INSTALL_FROM_LXDIMAGES" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --lxd can only be used with --soui"
[ -n "$NAT" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --nat can only be used with --soui"
[ -n "$NOCONFIGURE" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --noconfigure can only be used with --soui"
[ -n "$RELEASE_DAILY" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --daily can only be used with --soui"
[ -n "$INSTALL_NOLXD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nolxd cannot be used with --soui"
[ -n "$INSTALL_NODOCKER" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nodocker cannot be used with --soui"
[ -n "$TO_REBUILD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: -m cannot be used with --soui"
# " NONE" carries a leading space because -m accumulates " <module>" entries;
# reject NONE combined with any other module.
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo "$TO_REBUILD" | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
# NOTE(review): opener of the --showopts early-exit branch; its body and the
# closing `fi` (original lines 1619-1622) are elided from this extraction —
# restore them from the upstream installer.
1618 if [ -n "$SHOWOPTS" ]; then
# --daily: switch to the daily testing repository and force the master branch.
[ -n "$RELEASE_DAILY" ] && echo -e "\nInstalling from daily build repo" && RELEASE="-R ReleaseTHREE-daily" && REPOSITORY="-r testing" && COMMIT_ID="master"

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
# ---------------------------------------------------------------------------
# Ensure the packages the installer itself needs are present.
# dpkg -l probes first; the `|| ! echo ... || cmd || FATAL` chain prints the
# privilege notice and runs the privileged apt command only when the probe
# failed. need_packages is intentionally unquoted: word-splitting yields one
# argument per package name.
# NOTE(review): reconstructed from a mangled extraction of this region.
# ---------------------------------------------------------------------------
need_packages="git jq wget curl tar"
echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
# ---------------------------------------------------------------------------
# Locate the devops repo that drives the installation:
# - if -D was given, $OSM_DEVOPS is used as-is;
# - with --test, the local checkout containing this script is used;
# - otherwise the devops repo is cloned into a temp dir and the requested
#   refspec (or, by default, the newest stable v* tag) is checked out.
# NOTE(review): reconstructed from a mangled extraction; the elided lines
# could only be `else`/`fi`/blanks and were restored — confirm upstream.
# ---------------------------------------------------------------------------
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        # Parent directory of the directory holding this script.
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        # Remove the temporary checkout on any exit path.
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            # Newest version-sorted v* tag is taken as the current stable release.
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        fi

        echo -e "\nDEVOPS Using commit $COMMIT_ID"
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi
# Pull in the shared installer helper functions (FATAL, track, ask_user, ...).
. $OSM_DEVOPS/common/all_funcs
# ---------------------------------------------------------------------------
# Early-exit action dispatch: uninstall / NAT / update / reconfigure, plus
# the -o (install-only) paths. Each action prints DONE and terminates.
# NOTE(review): reconstructed from a mangled extraction of this region.
# ---------------------------------------------------------------------------
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$UNINSTALL" ] && uninstall && echo -e "\nDONE" && exit 0
[ -n "$NAT" ] && nat && echo -e "\nDONE" && exit 0
[ -n "$UPDATE" ] && update && echo -e "\nDONE" && exit 0
[ -n "$RECONFIGURE" ] && configure && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
#Installation starts here
# Fire-and-forget download beacon used to count installations; output is
# discarded and failure is ignored.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
# Lightweight (docker/k8s) installation path; terminates the script on success.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0

echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
# Source builds take long; ask for confirmation unless -y was given.
# NOTE(review): the closing `fi` was elided in the extraction and restored.
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

# The classic (SO/UI container) install requires lxd on the host.
echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true
# ---------------------------------------------------------------------------
# Build/install the OSM containers: from source, from pre-built LXD images,
# or (default) from binary packages via the jenkins host scripts.
# *_is_up waits for the component; track reports install progress.
# NOTE(review): reconstructed from a mangled extraction; the closing `fi`
# was elided and has been restored.
# ---------------------------------------------------------------------------
if [ -n "$INSTALL_FROM_SOURCE" ]; then #install from source
    echo -e "\nCreating the containers and building from source ..."
    $OSM_DEVOPS/jenkins/host/start_build RO --notest checkout $COMMIT_ID || FATAL "RO container build failed (refspec: '$COMMIT_ID')"
    ro_is_up && track RO
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA container build failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/start_build MON || FATAL "MON install failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/start_build SO checkout $COMMIT_ID || FATAL "SO container build failed (refspec: '$COMMIT_ID')"
    $OSM_DEVOPS/jenkins/host/start_build UI checkout $COMMIT_ID || FATAL "UI container build failed (refspec: '$COMMIT_ID')"
    #so_is_up && track SOUI
elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then #install from LXD images stored in OSM repo
    echo -e "\nInstalling from lxd images ..."
    install_from_lxdimages
else #install from binaries
    echo -e "\nCreating the containers and installing from binaries ..."
    # REPO_ARGS forwards the -r/-R/-k/-u options collected during parsing.
    $OSM_DEVOPS/jenkins/host/install RO "${REPO_ARGS[@]}" || FATAL "RO install failed"
    ro_is_up && track RO
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA install failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/install MON || FATAL "MON build failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/install SO "${REPO_ARGS[@]}" || FATAL "SO install failed"
    $OSM_DEVOPS/jenkins/host/install UI "${REPO_ARGS[@]}" || FATAL "UI install failed"
    #so_is_up && track SOUI
fi
#Install iptables-persistent and configure NAT rules
[ -z "$NOCONFIGURE" ] && nat

#Configure components
[ -z "$NOCONFIGURE" ] && configure

[ -z "$NOCONFIGURE" ] && install_osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Completion beacon; output discarded, failure ignored.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null