2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
# Help text for the installer (printed by usage()).
echo -e "usage: $0 [OPTIONS]"
echo -e "Install OSM from binaries or source code (by default, from binaries)"
echo -e "  -r <repo>:      use specified repository name for osm packages"
echo -e "  -R <release>:   use specified release for osm binaries (deb packages, lxd images, ...)"
echo -e "  -u <repo base>: use specified repository url for osm packages"
echo -e "  -k <repo key>:  use specified repository public key url"
echo -e "  -b <refspec>:   install OSM from source code using a specific branch (master, v2.0, ...) or tag"
echo -e "                  -b master          (main dev branch)"
echo -e "                  -b v2.0            (v2.0 branch)"
echo -e "                  -b tags/v1.1.0     (a specific tag)"
echo -e "  -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
echo -e "  -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
echo -e "  -H <VCA host>   use specific juju host controller IP"
echo -e "  -S <VCA secret> use VCA/juju secret key"
echo -e "  -P <VCA pubkey> use VCA/juju public key file"
echo -e "  -C <VCA cacert> use VCA/juju CA certificate file"
echo -e "  -A <VCA apiproxy> use VCA/juju API proxy"
echo -e "  --vimemu:       additionally deploy the VIM emulator as a docker container"
echo -e "  --elk_stack:    additionally deploy an ELK docker stack for event logging"
echo -e "  --pm_stack:     additionally deploy a Prometheus+Grafana stack for performance monitoring (PM)"
echo -e "  -m <MODULE>:    install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, KEYSTONE-DB, NONE)"
echo -e "  -o <ADDON>:     ONLY (un)installs one of the addons (vimemu, elk_stack, pm_stack)"
echo -e "  -D <devops path> use local devops installation path"
echo -e "  -w <work dir>   Location to store runtime installation"
echo -e "  -t <docker tag> specify osm docker tag (default is latest)"
echo -e "  --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
echo -e "  --nodocker:     do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
echo -e "  --nojuju:       do not install juju, assumes already installed"
echo -e "  --nodockerbuild: do not build docker images (use existing locally cached images)"
echo -e "  --nohostports:  do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
echo -e "  --nohostclient: do not install the osmclient"
echo -e "  --uninstall:    uninstall OSM: remove the containers and delete NAT rules"
echo -e "  --source:       install OSM from source code using the latest stable tag"
echo -e "  --develop:      (deprecated, use '-b master') install OSM from source code using the master branch"
echo -e "  --soui:         install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)"
echo -e "  --lxdimages:    (only for Rel THREE with --soui) download lxd images from OSM repository instead of creating them from scratch"
echo -e "  --pullimages:   pull/run osm images from docker.io/opensourcemano"
echo -e "  -l <lxd_repo>:  (only for Rel THREE with --soui) use specified repository url for lxd images"
echo -e "  -p <path>:      (only for Rel THREE with --soui) use specified repository path for lxd images"
#echo -e "  --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
echo -e "  --nat:          (only for Rel THREE with --soui) install only NAT rules"
echo -e "  --noconfigure:  (only for Rel THREE with --soui) DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules"
#echo -e "  --update:       update to the latest stable release or to the latest commit if using a specific branch"
echo -e "  --showopts:     print chosen options and exit (only for debugging)"
echo -e "  -y:             do not prompt for confirmation, assumes yes"
echo -e "  -h / --help:    print this help"
#Uninstall OSM: remove containers
function uninstall(){
    echo -e "\nUninstalling OSM"
    # In CI/test environments the devops jenkins helpers do the cleanup;
    # otherwise stop and delete the LXD containers directly.
    if [ $RC_CLONE ] || [ -n "$TEST_INSTALLER" ]; then
        $OSM_DEVOPS/jenkins/host/clean_container RO
        $OSM_DEVOPS/jenkins/host/clean_container VCA
        $OSM_DEVOPS/jenkins/host/clean_container MON
        $OSM_DEVOPS/jenkins/host/clean_container SO
        #$OSM_DEVOPS/jenkins/host/clean_container UI
    else
        lxc stop RO && lxc delete RO
        lxc stop VCA && lxc delete VCA
        lxc stop MON && lxc delete MON
        lxc stop SO-ub && lxc delete SO-ub
    fi
    echo -e "\nDeleting imported lxd images if they exist"
    lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
    lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
    lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
    return 0
}
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
# $1: controller name; the password is printed on stdout.
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: yaml key characters,
    # fs: a field separator unlikely to appear in the data (0x1c)
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # Flatten the YAML into "indent<FS>key<FS>value" records, then walk the
    # indentation to rebuild the key path and print the matching password.
    sed -ne "s|^\($s\):|\1|" \
         -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
         -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
# Print a random 32-character alphanumeric string (e.g. used as a juju password).
function generate_secret() {
    tr -dc A-Za-z0-9 < /dev/urandom | head -c 32
}
# Remove persistent storage of a deployment.
# Kubernetes mode: $1 is a host directory to delete.
# Swarm mode: $1 is the stack name whose docker volumes are removed.
function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
# Remove the docker network created for stack $1 (named "net<stack>").
function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
# Remove the DNAT rule that forwards port 17070 to the juju (VCA) controller.
# $1: stack name, used to discover the controller IP when OSM_VCA_HOST is unset.
function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    # -C only checks for the rule; delete (-D) and persist only if it exists.
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Remove docker stack $1 and wait (up to ~30s) until all its containers are gone.
function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
    fi
}
#removes osm deployments and services
# $1: kubernetes namespace to delete (removes everything inside it)
function remove_k8s_namespace() {
    kubectl delete ns $1
}
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        # Addon-only uninstall (-o): just tear down the requested addon stack.
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
        fi
        echo "Now osm docker images and volumes will be deleted"
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG
        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    echo -e "\nChecking required packages: iptables-persistent"
    # The '!' chain makes the install run only when the dpkg query fails.
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
    sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent
    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
# Print a fatal error message and abort the installer.
# $1: reason for the failure.
function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}
#Update RO, SO and UI:
function update(){
    echo -e "\nUpdating components"

    echo -e " Updating RO"
    CONTAINER="RO"
    MDG="RO"
    INSTALL_FOLDER="/opt/openmano"
    echo -e " Fetching the repo"
    lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
    # Current branch: strip the "## branch...upstream" decoration of 'status -sb'.
    BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
    [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
    CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
    CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
    echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
    # COMMIT_ID either was previously set with -b option, or is an empty string
    CHECKOUT_ID=$COMMIT_ID
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
    if [[ $CHECKOUT_ID == "tags/"* ]]; then
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
    else
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
    fi
    echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
        # Stop the service, update the checkout (preserving local changes via
        # stash), migrate the DB and restart.
        lxc exec $CONTAINER -- service osm-ro stop
        lxc exec $CONTAINER -- git -C /opt/openmano stash
        lxc exec $CONTAINER -- git -C /opt/openmano pull --rebase
        lxc exec $CONTAINER -- git -C /opt/openmano checkout $CHECKOUT_ID
        lxc exec $CONTAINER -- git -C /opt/openmano stash pop
        lxc exec $CONTAINER -- /opt/openmano/database_utils/migrate_mano_db.sh
        lxc exec $CONTAINER -- service osm-ro start
    fi

    echo -e " Updating SO and UI"
    CONTAINER="SO-ub"
    MDG="SO"
    INSTALL_FOLDER=""   # To be filled in
    echo -e " Fetching the repo"
    lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
    BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
    [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
    CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
    CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
    echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
    # COMMIT_ID either was previously set with -b option, or is an empty string
    CHECKOUT_ID=$COMMIT_ID
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
    if [[ $CHECKOUT_ID == "tags/"* ]]; then
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
    else
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
    fi
    echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
        # Instructions to be added
        # lxc exec SO-ub -- ...
    fi

    echo -e "Updating MON Container"
    CONTAINER="MON"
    MDG="MON"
    INSTALL_FOLDER="/root/MON"
    echo -e " Fetching the repo"
    lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
    BRANCH=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
    [ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
    CURRENT=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
    CURRENT_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
    echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
    # COMMIT_ID either was previously set with -b option, or is an empty string
    CHECKOUT_ID=$COMMIT_ID
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
    [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
    if [[ $CHECKOUT_ID == "tags/"* ]]; then
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
    else
        REMOTE_COMMIT_ID=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
    fi
    echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
    if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
        echo " Nothing to be done."
    else
        echo " Update required."
    fi
}
# Wait until the SO REST endpoint reports the RW.Restconf component RUNNING.
# $1 (optional): SO IP address; autodetected from the SO-ub container when omitted.
# Aborts via FATAL after the timeout.
function so_is_up() {
    if [ -n "$1" ]; then
        SO_IP=$1
    else
        SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=5
    timelength=300
    while [ $time -le $timelength ]
    do
        if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \
            -H 'accept: application/vnd.yang.data+json' \
            -H 'authorization: Basic YWRtaW46YWRtaW4=' \
            -H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]]
        then
            echo "RW.Restconf running....SO is up"
            return 0
        fi
        sleep $step
        echo -n "."
        time=$((time+step))
    done
    FATAL "OSM Failed to startup. SO failed to startup"
}
# Check that the juju controller inside the VCA container knows the osm model.
# Aborts via FATAL when it does not.
function vca_is_up() {
    if [[ `lxc exec VCA -- juju status | grep "osm" | wc -l` -eq 1 ]]; then
        echo "VCA is up and running"
        return 0
    fi
    FATAL "OSM Failed to startup. VCA failed to startup"
}
# Check that MON started.
# NOTE(review): this probes the RO endpoint ($RO_IP:9090) as a proxy for MON
# health, exactly as the original code did — confirm that is intentional.
function mon_is_up() {
    if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
        echo "MON is up and running"
        return 0
    fi
    FATAL "OSM Failed to startup. MON failed to startup"
}
# Wait until the RO (openmano) HTTP endpoint answers.
# $1 (optional): RO IP address; autodetected from the RO container when omitted.
# Aborts via FATAL after the timeout.
function ro_is_up() {
    if [ -n "$1" ]; then
        RO_IP=$1
    else
        RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=2
    timelength=20
    while [ $time -le $timelength ]; do
        if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
            echo "RO is up and running"
            return 0
        fi
        sleep $step
        echo -n "."
        time=$((time+step))
    done
    FATAL "OSM Failed to startup. RO failed to startup"
}
# Configure the RO container: point its logging at the SO container,
# restart the service and (re)create the 'osm' tenant.
function configure_RO(){
    . $OSM_DEVOPS/installers/export_ips
    echo -e " Configuring RO"
    lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg
    lxc exec RO -- service osm-ro restart

    ro_is_up

    # Recreate the 'osm' tenant from scratch (delete is best-effort).
    lxc exec RO -- openmano tenant-delete -f osm >/dev/null
    lxc exec RO -- openmano tenant-create osm > /dev/null
    # Ensure a single OPENMANO_TENANT export in .bashrc.
    lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc
    lxc exec RO -- sed -i '$ i export OPENMANO_TENANT=osm' .bashrc
    lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc'
}
# Configure the VCA container: set a freshly generated juju admin password.
function configure_VCA(){
    echo -e " Configuring VCA"
    JUJU_PASSWD=$(generate_secret)
    # juju asks for the new password twice on stdin.
    echo -e "$JUJU_PASSWD\n$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password
}
# Configure the SO/UI container: routing towards the juju controller,
# persistent journaling, launchpad external address, and the SO REST
# configuration (config-agent, RO account, UI redirect URIs).
function configure_SOUI(){
    . $OSM_DEVOPS/installers/export_ips
    JUJU_CONTROLLER_IP=`lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}'`
    RO_TENANT_ID=`lxc exec RO -- openmano tenant-list osm |awk '{print $1}'`

    echo -e " Configuring MON"
    #Information to be added about SO socket for logging

    echo -e " Configuring SO"
    # Route traffic for the nested juju controller through the VCA container,
    # and persist the routes in /etc/rc.local.
    sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP
    sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP
    sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local
    sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local
    # make journaling persistent
    lxc exec SO-ub -- mkdir -p /var/log/journal
    lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal
    lxc exec SO-ub -- systemctl restart systemd-journald

    echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad

    lxc exec SO-ub -- systemctl restart launchpad

    so_is_up $SO_CONTAINER_IP

    #delete existing config agent (could be there on reconfigure)
    curl -k --request DELETE \
      --url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \
      --header 'accept: application/vnd.yang.data+json' \
      --header 'authorization: Basic YWRtaW46YWRtaW4=' \
      --header 'cache-control: no-cache' \
      --header 'content-type: application/vnd.yang.data+json' &> /dev/null

    result=$(curl -k --request POST \
      --url https://$SO_CONTAINER_IP:8008/api/config/config-agent \
      --header 'accept: application/vnd.yang.data+json' \
      --header 'authorization: Basic YWRtaW46YWRtaW4=' \
      --header 'cache-control: no-cache' \
      --header 'content-type: application/vnd.yang.data+json' \
      --data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result"

    #result=$(curl -k --request PUT \
    #  --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \
    #  --header 'accept: application/vnd.yang.data+json' \
    #  --header 'authorization: Basic YWRtaW46YWRtaW4=' \
    #  --header 'cache-control: no-cache' \
    #  --header 'content-type: application/vnd.yang.data+json' \
    #  --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }')

    result=$(curl -k --request PUT \
      --url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \
      --header 'accept: application/vnd.yang.data+json' \
      --header 'authorization: Basic YWRtaW46YWRtaW4=' \
      --header 'cache-control: no-cache' \
      --header 'content-type: application/vnd.yang.data+json' \
      --data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result"

    result=$(curl -k --request PATCH \
      --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \
      --header 'accept: application/vnd.yang.data+json' \
      --header 'authorization: Basic YWRtaW46YWRtaW4=' \
      --header 'cache-control: no-cache' \
      --header 'content-type: application/vnd.yang.data+json' \
      --data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed redirect-uri configuration: $result"

    result=$(curl -k --request PATCH \
      --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \
      --header 'accept: application/vnd.yang.data+json' \
      --header 'authorization: Basic YWRtaW46YWRtaW4=' \
      --header 'cache-control: no-cache' \
      --header 'content-type: application/vnd.yang.data+json' \
      --data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result"

    # Bind the host default IP on a loopback alias inside the SO container.
    lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg <<EOF
auto lo:1
iface lo:1 inet static
        address $DEFAULT_IP
        netmask 255.255.255.255
EOF
    lxc exec SO-ub ifup lo:1
}
#Configure RO, VCA, and SO with the initial configuration:
# RO -> tenant:osm, logs to be sent to SO
# VCA -> juju-password
# SO -> route to Juju Controller, add RO account, add VCA account
function configure(){
    #Configure components
    echo -e "\nConfiguring components"
    configure_RO
    configure_VCA
    configure_SOUI
}
# Install and initialize LXD, create the lxdbr0 bridge and align the default
# profile MTU with the host's default interface.
function install_lxd() {
    sudo apt-get update
    sudo apt-get install -y lxd
    # Run the lxd initialization with the freshly granted group membership.
    newgrp lxd << EONG
lxd init --auto
lxd waitready
lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
EONG
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    lxc profile device set default eth0 mtu $DEFAULT_MTU
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
function ask_user(){
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
# Create (if needed) and start a build container from an lxd image.
# $1: MDG name (RO, VCA, MON, SO); $2: lxd image alias to launch from.
function launch_container_from_lxd(){
    export OSM_MDG=$1
    OSM_load_config
    export OSM_BASE_IMAGE=$2
    if ! container_exists $OSM_BUILD_CONTAINER; then
        CONTAINER_OPTS=""
        [[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true"
        [[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true"
        create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS
        wait_container_up $OSM_BUILD_CONTAINER
    fi
}
# Install the OSM client (and IM) from the configured apt repository and
# print guidance about the OSM_HOSTNAME environment variables.
function install_osmclient(){
    # Strip the option prefixes ("-R ", "-r ", "-u ") kept in these variables.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
# Install the Prometheus node_exporter as a systemd service running under a
# dedicated unprivileged 'node_exporter' system user.
# Reads: PROMETHEUS_NODE_EXPORTER_TAG (release version), OSM_DEVOPS (devops path).
function install_prometheus_nodeexporter(){
    sudo useradd --no-create-home --shell /bin/false node_exporter
    sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
    sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
    sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
    sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
    # Fix: the download/extract artifacts live in /tmp; the previous relative
    # path removed nothing (it pointed at the current working directory).
    sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
    sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo systemctl restart node_exporter
    sudo systemctl enable node_exporter
}
# Import the Rel THREE lxd images (RO, VCA, MON, SO/UI) either from a local
# repository path or by downloading them, then launch the containers.
function install_from_lxdimages(){
    LXD_RELEASE=${RELEASE#"-R "}
    if [ -n "$LXD_REPOSITORY_PATH" ]; then
        LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH"
    else
        LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")"
        # Fix: only auto-delete the directory when we created it ourselves.
        # An unconditional trap would wipe a user-supplied repository path.
        trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT
    fi
    echo -e "\nDeleting previous lxd images if they exist"
    lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
    lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
    lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
    echo -e "\nImporting osm-ro"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro
    rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz
    echo -e "\nImporting osm-vca"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca
    rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz
    echo -e "\nImporting osm-soui"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui
    rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz
    launch_container_from_lxd RO osm-ro
    ro_is_up && track RO
    launch_container_from_lxd VCA osm-vca
    vca_is_up && track VCA
    launch_container_from_lxd MON osm-mon
    mon_is_up && track MON
    launch_container_from_lxd SO osm-soui
    #so_is_up && track SOUI
    track SOUI
}
function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sudo service docker restart
    echo "... restarted Docker service"
    # 'sg docker' runs the check with the new group membership in effect.
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
}
function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
# Install juju from snap; reconfigure lxd unless --nolxd was requested.
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic
    [ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
    echo "Finished installation of juju"
    return 0
}
# Bootstrap a juju controller named $OSM_STACK_NAME on localhost (lxd),
# unless one with that name already exists; then verify it is registered.
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
# Add (once) a DNAT rule forwarding host port 17070 to the juju controller
# ($OSM_VCA_HOST) and persist it with netfilter-persistent.
function juju_createproxy() {
    echo -e "\nChecking required packages: iptables-persistent"
    # The '!' chain makes the install run only when the dpkg query fails.
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
    sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent

    # -C checks for an existing rule so we never add it twice.
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
703 function generate_docker_images
() {
704 echo "Pulling and generating docker images"
705 _build_from
=$COMMIT_ID
706 [ -z "$_build_from" ] && _build_from
="master"
708 echo "OSM Docker images generated from $_build_from"
710 BUILD_ARGS
+=(--build-arg REPOSITORY
="$REPOSITORY")
711 BUILD_ARGS
+=(--build-arg RELEASE
="$RELEASE")
712 BUILD_ARGS
+=(--build-arg REPOSITORY_KEY
="$REPOSITORY_KEY")
713 BUILD_ARGS
+=(--build-arg REPOSITORY_BASE
="$REPOSITORY_BASE")
715 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q KAFKA
; then
716 sg docker
-c "docker pull wurstmeister/zookeeper" || FATAL
"cannot get zookeeper docker image"
717 sg docker
-c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL
"cannot get kafka docker image"
720 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q MONGO
; then
721 sg docker
-c "docker pull mongo" || FATAL
"cannot get mongo docker image"
724 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q PROMETHEUS
; then
725 sg docker
-c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL
"cannot get prometheus docker image"
728 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q GRAFANA
; then
729 sg docker
-c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL
"cannot get grafana docker image"
732 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q NBI ||
echo $TO_REBUILD |
grep -q KEYSTONE-DB
; then
733 sg docker
-c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL
"cannot get keystone-db docker image"
736 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q RO
; then
737 sg docker
-c "docker pull mysql:5" || FATAL
"cannot get mysql docker image"
740 if [ -n "$PULL_IMAGES" ]; then
741 sg docker
-c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL
"cannot pull MON docker image"
742 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q MON
; then
743 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/MON
744 git
-C ${LWTEMPDIR}/MON checkout
${COMMIT_ID}
745 sg docker
-c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL
"cannot build MON docker image"
748 if [ -n "$PULL_IMAGES" ]; then
749 sg docker
-c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL
"cannot pull POL docker image"
750 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q POL
; then
751 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/POL
752 git
-C ${LWTEMPDIR}/POL checkout
${COMMIT_ID}
753 sg docker
-c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL
"cannot build POL docker image"
756 if [ -n "$PULL_IMAGES" ]; then
757 sg docker
-c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL
"cannot pull NBI docker image"
758 sg docker
-c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL
"cannot pull KEYSTONE docker image"
759 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q NBI
; then
760 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/NBI
761 git
-C ${LWTEMPDIR}/NBI checkout
${COMMIT_ID}
762 sg docker
-c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL
"cannot build NBI docker image"
763 sg docker
-c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL
"cannot build KEYSTONE docker image"
766 if [ -n "$PULL_IMAGES" ]; then
767 sg docker
-c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL
"cannot pull RO docker image"
768 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q RO
; then
769 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/RO
770 git
-C ${LWTEMPDIR}/RO checkout
${COMMIT_ID}
771 sg docker
-c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL
"cannot build RO docker image"
774 if [ -n "$PULL_IMAGES" ]; then
775 sg docker
-c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL
"cannot pull LCM RO docker image"
776 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q LCM
; then
777 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/LCM
778 git
-C ${LWTEMPDIR}/LCM checkout
${COMMIT_ID}
779 sg docker
-c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL
"cannot build LCM docker image"
782 if [ -n "$PULL_IMAGES" ]; then
783 sg docker
-c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL
"cannot pull light-ui docker image"
784 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q LW-UI
; then
785 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/LW-UI
786 git
-C ${LWTEMPDIR}/LW-UI checkout
${COMMIT_ID}
787 sg docker
-c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL
"cannot build LW-UI docker image"
790 if [ -n "$PULL_IMAGES" ]; then
791 sg docker
-c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL
"cannot pull osmclient docker image"
792 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q LW-osmclient
; then
793 sg docker
-c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
796 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q PROMETHEUS
; then
797 sg docker
-c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL
"cannot get prometheus cadvisor docker image"
800 echo "Finished generation of docker images"
#######################################
# Copy $1 over $2, keeping a backup (cp -b) of anything overwritten.
# If $2 exists and differs, ask the user before overwriting.
# Arguments: $1 - source file, $2 - destination file
#######################################
function cmp_overwrite() {
    # NOTE(review): the $1/$2 capture lines were lost in this copy of the
    # file; restored — confirm against upstream.
    file1="$1"
    file2="$2"
    # FIX: was 'if ! $(cmp ...)', which depends on an empty command
    # substitution propagating cmp's exit status; invoke cmp directly.
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            # Destination exists and differs: confirm before clobbering.
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
#######################################
# Generate/refresh the per-component env files under $OSM_DOCKER_WORK_DIR.
# Existing env files are backed up (~ suffix) first. For each key: append
# it when absent, otherwise update it in place with sed.
# Globals read: KUBERNETES, OSM_DEVOPS, OSM_DOCKER_WORK_DIR, WORKDIR_SUDO,
#   OSM_DATABASE_COMMONKEY, OSM_VCA_*, DEFAULT_IP
# NOTE(review): else/fi structure was lost in this copy of the file and has
# been reconstructed — confirm against upstream.
#######################################
function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose deployment file
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml

        # Prometheus configuration
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml

        # Grafana & Prometheus Exporter files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    # Each VCA setting: append when missing, otherwise rewrite in place.
    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
#######################################
# Write a one-line wrapper script ($OSM_DOCKER_WORK_DIR/osm) that runs the
# osmclient sidecar container attached to the OSM network, and mark it
# executable.
#######################################
function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    # FIX: refresh the package index after adding the kubernetes repo,
    # otherwise the pinned packages below cannot be resolved.
    # NOTE(review): this line was lost in this copy of the file — confirm.
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    # Pinned to one version so kubelet/kubeadm/kubectl stay in lockstep.
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
function init_kubeadm() {
    # $1 - kubeadm cluster configuration file
    # FIX: kubeadm refuses to initialise with swap enabled (see step 4 of
    # the install prompt: "Disable swap space").
    # NOTE(review): this line was lost in this copy of the file — confirm.
    sudo swapoff -a
    sudo kubeadm init --config $1
}
#######################################
# Make kubectl usable for the current user by copying the admin kubeconfig
# into ~/.kube/config. Aborts when kubeadm did not create the manifests dir.
#######################################
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    # FIX: ensure ~/.kube exists before copying into it.
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    # Give ownership to the invoking user so kubectl works without sudo.
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # FIX: was '[ $? -ne 0 ] && FATAL'; test kubectl's status directly.
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    # One '<component>-secret' per component, populated from its env file
    # (deduplicated from eight identical kubectl invocations).
    local component
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
#deploys osm pods and services
function deploy_osm_services() {
    # Single-node cluster: remove the NoSchedule taint from the master so
    # OSM pods can be scheduled there, then apply all manifests.
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
#######################################
# Pin the image tag used in the k8s manifests: rewrite every
# 'opensourcemano/<svc>:<tag>' reference to the requested tag.
# Arguments: $1 - docker image tag
#######################################
function parse_yaml() {
    # FIX: the tag argument was never captured; $TAG was used unset.
    TAG=$1
    osm_services="nbi lcm ro pol mon light-ui keystone"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
#######################################
# Point every hostPath volume in the k8s manifests at the per-namespace
# directory ($OSM_NAMESPACE_VOL) instead of the shared /var/lib/osm.
#######################################
function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
#######################################
# Initialise a docker swarm on this host. If the default interface MTU is
# not the standard 1500, pre-create docker_gwbridge with a matching MTU
# (choosing a 172.x subnet above the highest one already in use) before
# 'docker swarm init' would create it implicitly with the wrong MTU.
#######################################
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=$(sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s)
        DOCKER_GW_NET=$(sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}')
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
}
#######################################
# Create the attachable overlay network 'net<stack>' used by all OSM
# services, propagating the detected interface MTU.
#######################################
function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
#######################################
# Deploy the OSM docker (swarm) stack: decide host-port publication,
# persist the deployment environment in osm_ports.sh and run
# 'docker stack deploy'.
#######################################
function deploy_lightweight() {
    echo "Deploying lightweight build"
    # NOTE(review): the NBI/RO/UI/MON/PROM port defaults were lost in this
    # copy of the file; values restored from the OSM installer — confirm.
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # Container ports only: nothing published on the host.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # Publish each service as host:container.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    # Persist the deployment environment so the compose file (and later
    # re-deploys) can source it.
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    # FIX: balance the pushd above so the caller's cwd is restored.
    popd
    echo "Finished deployment of lightweight build"
}
#######################################
# Pull the ELK images, deploy the osm_elk docker stack and — best effort —
# create/activate the default Kibana index pattern once Kibana answers.
#######################################
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"

    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"

    echo "Waiting for ELK stack to be up and running"
    # NOTE(review): the polling initialisation was lost in this copy of the
    # file; restored (5s step, ~40s budget) — confirm against upstream.
    time=0
    step=5
    timelength=40
    elk_is_up=1   # 0 == Kibana answered within the budget
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time + step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
  "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
  -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
  "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
  -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
}
#######################################
# Main entry point of the lightweight (docker-based) installation:
# prepare workdirs, install LXD/juju/docker prerequisites, derive the VCA
# (juju) connection parameters, build or pull the images, then deploy OSM
# on kubernetes or on a docker swarm.
# NOTE(review): else/fi structure and several call lines were lost in this
# copy of the file and have been reconstructed — confirm against upstream.
#######################################
function install_lightweight() {
    # Per-stack working directory when a non-default stack name is used.
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    # Everything below expects a normal sudo-capable user, not root.
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1. Install and configure LXD
2. Install juju
3. Install docker CE
4. Disable swap space
5. Install and initialize Kubernetes
as pre-requirements.
Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine the default-route interface, its IP and its MTU.
    DEFAULT_IF=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=$(ip -o -4 a | grep ${DEFAULT_IF} | awk '{split($4,a,"/"); print a[1]}')
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
        need_packages_lw="lxd snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
            || sudo apt-get update \
            || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "Installing $need_packages_lw requires root privileges." \
            || sudo apt-get install -y $need_packages_lw \
            || FATAL "failed to install $need_packages_lw"
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju

    # Derive every VCA/juju parameter that was not supplied on the CLI.
    if [ -z "$OSM_VCA_HOST" ]; then
        juju_createcontroller
        OSM_VCA_HOST=$(sg lxd -c "juju show-controller $OSM_STACK_NAME" | grep api-endpoints | awk -F\' '{print $2}' | awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        OSM_VCA_CACERT=$(juju controllers --format json | jq -r '.controllers["osm"]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # FIX: was '[ -z "OSM_DATABASE_COMMONKEY" ]' (missing $): it tested a
        # non-empty literal string, so the FATAL could never fire.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        # NOTE(review): install_kube / kube_config_dir call lines were lost
        # in this copy; restored from the local definitions — confirm.
        install_kube
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        # NOTE(review): the k8s deployment call lines were lost in this
        # copy; restored from the local definitions — confirm.
        deploy_cni_provider
        kube_secrets
        [ ! $OSM_DOCKER_TAG == "latest" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        track deploy_osm_services_k8s
    else
        #remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        # NOTE(review): restored — without it the docker stack is never deployed.
        deploy_lightweight
        generate_osmclient_script
    fi

    install_prometheus_nodeexporter
    [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
    [ -n "$INSTALL_ELK" ] && deploy_elk && track elk

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient

    # Completion ping (output discarded; failures are irrelevant).
    wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null
}
#######################################
# Build and start the vim-emu (emulated VIM) docker container, then print
# how to register it as a VIM in OSM.
#######################################
function install_vimemu() {
    # FIX: plain 'echo' prints the literal characters '\n'; use -e.
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    # NOTE(review): the wait line was lost in this copy of the file;
    # restored as a short fixed sleep — confirm duration.
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
#######################################
# Print every installer option/variable, one KEY=VALUE per line
# (used by --showopts before exiting).
#######################################
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_FROM_LXDIMAGES=$INSTALL_FROM_LXDIMAGES"
    echo "LXD_REPOSITORY_BASE=$LXD_REPOSITORY_BASE"
    echo "LXD_REPOSITORY_PATH=$LXD_REPOSITORY_PATH"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "NOCONFIGURE=$NOCONFIGURE"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # FIX: was printing $OSM_STACK_NAME under the OSM_WORK_DIR label.
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
#######################################
# Send an anonymous installer-telemetry event to the OSM woopra tracker.
# Arguments: $1 - event step name (appended to the install-flavour prefix)
# Globals read: SESSION_ID (installer start epoch), INSTALL_LIGHTWEIGHT,
#   INSTALL_FROM_SOURCE, INSTALL_FROM_LXDIMAGES
# NOTE(review): the function header and the ctime/event_name init lines
# were lost in this copy of the file; reconstructed — confirm.
#######################################
function track() {
    ctime=$(date +%s)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
# ---------------------------------------------------------------------------
# Installer defaults (each may be overridden by the command-line options
# parsed below).
# ---------------------------------------------------------------------------
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseSIX"
INSTALL_FROM_LXDIMAGES=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_NOHOSTCLIENT=""
# Epoch at installer start; track() uses it to compute step durations.
SESSION_ID=$(date +%s)
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
# NOTE(review): the OSM_STACK_NAME default was lost in this copy of the
# file (install_lightweight compares it against "osm") — confirm value.
OSM_STACK_NAME=osm
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KAFKA_TAG=2.11-1.0.2
# NOTE(review): the KEYSTONEDB_TAG and GRAFANA_TAG defaults were lost in
# this copy of the file (both are referenced by the deploy code) — confirm.
KEYSTONEDB_TAG=10
GRAFANA_TAG=latest
PROMETHEUS_TAG=v2.4.3
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Valid k8s namespace / stack name (RFC 1123 label).
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# ---------------------------------------------------------------------------
# Command-line option parsing.
# NOTE(review): the case labels were lost in this copy of the file; they are
# reconstructed from the getopts spec and the visible arm bodies (cf. the
# usage text in this script) — confirm against upstream.
# ---------------------------------------------------------------------------
while getopts ":hy-:b:r:c:k:u:R:l:p:D:o:m:H:S:s:w:t:U:P:A:" o; do
    case "${o}" in
        b)
            # Install from source at the given refspec; implies building
            # images locally instead of pulling them.
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            # Container orchestrator: only 'swarm' (default) or 'k8s'.
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # FIX: the message referred to '-i'; this is the -c option.
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        l)
            LXD_REPOSITORY_BASE="${OPTARG}"
            ;;
        p)
            LXD_REPOSITORY_PATH="${OPTARG}"
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        P)
            # -P takes a path; read the public key from the file.
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            #[ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
            ;;
        m)
            # Accumulate modules to rebuild; NONE excludes everything.
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        -)
            # Long options (--name), delivered by getopts as '-' + OPTARG.
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "nat" ] && NAT="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lxdimages" ] && INSTALL_FROM_LXDIMAGES="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "soui" ] && INSTALL_LIGHTWEIGHT="" && RELEASE="-R ReleaseTHREE" && REPOSITORY="-r stable" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            #[ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
            [ "${OPTARG}" == "noconfigure" ] && NOCONFIGURE="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "daily" ] && RELEASE_DAILY="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
# Sanity checks on option combinations.  FATAL prints the message and aborts
# (defined earlier in the script — not visible in this excerpt; confirm).
# Reconstructed from mangled wrapping; behavior preserved.
[ -n "$INSTALL_FROM_LXDIMAGES" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --lxd can only be used with --soui"
[ -n "$NAT" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --nat can only be used with --soui"
[ -n "$NOCONFIGURE" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --noconfigure can only be used with --soui"
[ -n "$RELEASE_DAILY" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --daily can only be used with --soui"
[ -n "$INSTALL_NOLXD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nolxd cannot be used with --soui"
[ -n "$INSTALL_NODOCKER" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nodocker cannot be used with --soui"
[ -n "$TO_REBUILD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: -m cannot be used with --soui"
# Reject "-m NONE" mixed with other -m modules: TO_REBUILD is built by
# appending " <MODULE>" per option, so a pure NONE selection is exactly " NONE".
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo "$TO_REBUILD" | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1554 if [ -n "$SHOWOPTS" ]; then
1559 [ -n "$RELEASE_DAILY" ] && echo -e "\nInstalling from daily build repo" && RELEASE
="-R ReleaseTHREE-daily" && REPOSITORY
="-r testing" && COMMIT_ID
="master"
1561 # if develop, we force master
1562 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID
="master"
# Ensure the host packages the installer itself needs are present.
# The `|| ! echo … || sudo …` chain means: if dpkg -l fails (some package
# missing), print a notice (negated so the chain keeps going), then run the
# privileged step; FATAL aborts if that step fails.
need_packages="git jq wget curl tar"
echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
1575 if [ -z "$OSM_DEVOPS" ]; then
1576 if [ -n "$TEST_INSTALLER" ]; then
1577 echo -e "\nUsing local devops repo for OSM installation"
1578 OSM_DEVOPS
="$(dirname $(realpath $(dirname $0)))"
1580 echo -e "\nCreating temporary dir for OSM installation"
1581 OSM_DEVOPS
="$(mktemp -d -q --tmpdir "installosm.XXXXXX
")"
1582 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1584 git clone https
://osm.etsi.org
/gerrit
/osm
/devops.git
$OSM_DEVOPS
1586 if [ -z "$COMMIT_ID" ]; then
1587 echo -e "\nGuessing the current stable release"
1588 LATEST_STABLE_DEVOPS
=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1589 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1591 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1592 COMMIT_ID
="tags/$LATEST_STABLE_DEVOPS"
1594 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1596 git
-C $OSM_DEVOPS checkout
$COMMIT_ID
# Pull in the shared helper functions (track, uninstall, nat, configure, ...)
# from the devops repo, then dispatch single-action modes: each action runs,
# prints DONE and exits so the full install below is skipped.
. $OSM_DEVOPS/common/all_funcs
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$UNINSTALL" ] && uninstall && echo -e "\nDONE" && exit 0
[ -n "$NAT" ] && nat && echo -e "\nDONE" && exit 0
[ -n "$UPDATE" ] && update && echo -e "\nDONE" && exit 0
[ -n "$RECONFIGURE" ] && configure && echo -e "\nDONE" && exit 0
# --install-only variants: deploy just the requested add-on(s), then exit.
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
#Installation starts here
# Best-effort install-tracking ping (README.txt fetch is discarded; failures
# are deliberately ignored so offline installs still proceed).
wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README.txt &> /dev/null
# Lightweight (container-based) install is a self-contained path: run and exit.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1618 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1619 ! ask_user
"The installation will take about 75-90 minutes. Continue (Y/n)? " y
&& echo "Cancelled!" && exit 1
1622 echo -e "Checking required packages: lxd"
1623 lxd
--version &>/dev
/null || FATAL
"lxd not present, exiting."
1624 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1626 # use local devops for containers
1627 export OSM_USE_LOCAL_DEVOPS
=true
1628 if [ -n "$INSTALL_FROM_SOURCE" ]; then #install from source
1629 echo -e "\nCreating the containers and building from source ..."
1630 $OSM_DEVOPS/jenkins
/host
/start_build RO
--notest checkout
$COMMIT_ID || FATAL
"RO container build failed (refspec: '$COMMIT_ID')"
1631 ro_is_up
&& track RO
1632 $OSM_DEVOPS/jenkins
/host
/start_build VCA || FATAL
"VCA container build failed"
1633 vca_is_up
&& track VCA
1634 $OSM_DEVOPS/jenkins
/host
/start_build MON || FATAL
"MON install failed"
1635 mon_is_up
&& track MON
1636 $OSM_DEVOPS/jenkins
/host
/start_build SO checkout
$COMMIT_ID || FATAL
"SO container build failed (refspec: '$COMMIT_ID')"
1637 $OSM_DEVOPS/jenkins
/host
/start_build UI checkout
$COMMIT_ID || FATAL
"UI container build failed (refspec: '$COMMIT_ID')"
1638 #so_is_up && track SOUI
1640 elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then #install from LXD images stored in OSM repo
1641 echo -e "\nInstalling from lxd images ..."
1642 install_from_lxdimages
1643 else #install from binaries
1644 echo -e "\nCreating the containers and installing from binaries ..."
1645 $OSM_DEVOPS/jenkins
/host
/install RO
${REPO_ARGS[@]} || FATAL
"RO install failed"
1646 ro_is_up
&& track RO
1647 $OSM_DEVOPS/jenkins
/host
/start_build VCA || FATAL
"VCA install failed"
1648 vca_is_up
&& track VCA
1649 $OSM_DEVOPS/jenkins
/host
/install MON || FATAL
"MON build failed"
1650 mon_is_up
&& track MON
1651 $OSM_DEVOPS/jenkins
/host
/install SO
${REPO_ARGS[@]} || FATAL
"SO install failed"
1652 $OSM_DEVOPS/jenkins
/host
/install UI
${REPO_ARGS[@]} || FATAL
"UI install failed"
1653 #so_is_up && track SOUI
#Install iptables-persistent and configure NAT rules
[ -z "$NOCONFIGURE" ] && nat
#Configure components
[ -z "$NOCONFIGURE" ] && configure
[ -z "$NOCONFIGURE" ] && install_osmclient
#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
# Best-effort install-complete tracking ping; output and failures discarded.
wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null