# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Help text of the installer (body of the usage() function; the enclosing
# function header is not visible in this view of the file).
# Fixes: "confifured" -> "configured", "--nojuju: do not juju" -> "do not install juju".
echo -e "usage: $0 [OPTIONS]"
echo -e "Install OSM from binaries or source code (by default, from binaries)"
echo -e " -r <repo>: use specified repository name for osm packages"
echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
echo -e " -u <repo base>: use specified repository url for osm packages"
echo -e " -k <repo key>: use specified repository public key url"
echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
echo -e " -b master (main dev branch)"
echo -e " -b v2.0 (v2.0 branch)"
echo -e " -b tags/v1.1.0 (a specific tag)"
echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
echo -e " -H <VCA host> use specific juju host controller IP"
echo -e " -S <VCA secret> use VCA/juju secret key"
echo -e " -P <VCA pubkey> use VCA/juju public key file"
echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
echo -e " --pm_stack: additionally deploy a Prometheus+Grafana stack for performance monitoring (PM)"
echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, KEYSTONE-DB, NONE)"
echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, pm_stack)"
echo -e " -D <devops path> use local devops installation path"
echo -e " -w <work dir> Location to store runtime installation"
echo -e " -t <docker tag> specify osm docker tag (default is latest)"
echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
echo -e " --nojuju: do not install juju, assumes already installed"
echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
echo -e " --nohostclient: do not install the osmclient"
echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
echo -e " --source: install OSM from source code using the latest stable tag"
echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
echo -e " --soui: install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)"
echo -e " --lxdimages: (only for Rel THREE with --soui) download lxd images from OSM repository instead of creating them from scratch"
echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
echo -e " -l <lxd_repo>: (only for Rel THREE with --soui) use specified repository url for lxd images"
echo -e " -p <path>: (only for Rel THREE with --soui) use specified repository path for lxd images"
# echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
echo -e " --nat: (only for Rel THREE with --soui) install only NAT rules"
echo -e " --noconfigure: (only for Rel THREE with --soui) DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules"
# echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
echo -e " --showopts: print chosen options and exit (only for debugging)"
echo -e " -y: do not prompt for confirmation, assumes yes"
echo -e " -h / --help: print this help"
#Uninstall OSM: remove containers
# Body of the classic (Rel THREE) uninstall: cleans the RO/VCA/MON/SO lxd
# containers and deletes any imported lxd images.  (The enclosing function
# header is not visible in this view of the file.)
# Fix: `[ $RC_CLONE ]` was unquoted — made the -n test explicit and quoted.
echo -e "\nUninstalling OSM"
if [ -n "$RC_CLONE" ] || [ -n "$TEST_INSTALLER" ]; then
    $OSM_DEVOPS/jenkins/host/clean_container RO
    $OSM_DEVOPS/jenkins/host/clean_container VCA
    $OSM_DEVOPS/jenkins/host/clean_container MON
    $OSM_DEVOPS/jenkins/host/clean_container SO
    #$OSM_DEVOPS/jenkins/host/clean_container UI
fi
# Stop and delete the containers; `&&` skips delete when the stop failed
# (e.g. the container does not exist).
lxc stop RO && lxc delete RO
lxc stop VCA && lxc delete VCA
lxc stop MON && lxc delete MON
lxc stop SO-ub && lxc delete SO-ub
echo -e "\nDeleting imported lxd images if they exist"
lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
#
# parse_juju_password <controller_name>
#   Prints (no trailing newline) the password stored in
#   ~/.local/share/juju/accounts.yaml for the given controller.
#   The sed stage flattens the YAML into <indent>\034<key>\034<value>
#   records; the awk stage tracks the key path per indent level and prints
#   the value of any "password" key whose path contains the controller name.
# Fixes: quoted $password_file and the variables passed to awk.
# NOTE(review): the awk lines tracking vname[] / printing were partly elided
# in this view and are reconstructed from the well-known YAML-parsing snippet
# this function is based on — confirm against the full installer.
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: YAML key charset, fs: \034 field separator
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$password_file" |
    awk -F"$fs" -v controller="$controller_name" '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn, controller) && match($2, "password")) {
                printf("%s", $3)
            }
        }
    }'
}
# Emit a random 32-character alphanumeric string on stdout (used e.g. as the
# juju admin password).
function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
# Remove OSM persistent storage.
#   k8s mode:   $1 is the host volume directory to delete.
#   swarm mode: $1 is the stack name; delete each named docker volume.
# NOTE(review): the $1 assignments and else-branch closers are elided in this
# view and reconstructed — confirm against the full installer.
function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
# Delete the docker overlay network of stack $1 ("net<stack>").
function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
# Remove the DNAT PREROUTING rule that proxies port 17070 to the juju (VCA)
# controller of stack $1.  If OSM_VCA_HOST is unset, derive it from
# `juju show-controller`.
function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=$(sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    # -C checks whether the rule exists; only then delete it and persist.
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Remove docker stack $1 and wait (up to 30 polls) until all of its service
# tasks are gone; FATAL if some containers survive.
# NOTE(review): loop-counter initialisation and the sleep between polls are
# elided in this view and reconstructed — confirm against the full installer.
function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
    fi
}
#removes osm deployments and services
# NOTE(review): the body of this function is not visible in this view of the
# file; upstream it deletes the whole namespace — confirm against the full
# installer before relying on it.
function remove_k8s_namespace() {
    kubectl delete ns $1
}
#Uninstall lightweight OSM: remove dockers
# With INSTALL_ONLY set, only the requested addon stacks (ELK / PM) are torn
# down; otherwise the whole OSM deployment (k8s namespace or swarm stacks),
# its docker images, volumes, network, NAT rule, working dir and juju
# controller are removed.
# NOTE(review): several closers and the `remove_stack osm_elk` line are elided
# in this view and reconstructed — confirm against the full installer.
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
        if [ -n "$INSTALL_PERFMON" ]; then
            echo -e "\nUninstalling OSM Performance Monitoring stack"
            remove_stack osm_metrics
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_metrics
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_metrics
        fi
        echo "Now osm docker images and volumes will be deleted"
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG
        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
}
#Configure NAT rules, based on the current IP addresses of containers
# (Body of the elided nat() wrapper.)  Ensures iptables-persistent is present
# — the `A || ! msg || install` chain installs only when the dpkg check fails —
# then delegates the actual rule setup to the nat_osm helper.
echo -e "\nChecking required packages: iptables-persistent"
dpkg -l iptables-persistent &>/dev/null \
    || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" \
    || sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent
echo -e "\nConfiguring NAT rules"
echo -e " Required root privileges"
sudo $OSM_DEVOPS/installers/nat_osm
239 echo "FATAL error: Cannot install OSM due to \"$1\""
#Update RO, SO and UI:
# --- RO update section (part of the elided update() wrapper) ---
# Determines the branch/tag currently checked out inside the RO container,
# compares it against the requested refspec (COMMIT_ID from -b, or the branch
# itself, or the latest stable tag when detached), and pulls/checks out on
# mismatch.
echo -e "\nUpdating components"

echo -e " Updating RO"
CONTAINER="RO"    # NOTE(review): assignment elided in this view; inferred from this section — confirm
MDG="RO"          # NOTE(review): assignment elided in this view; referenced by the FATAL message — confirm
INSTALL_FOLDER="/opt/openmano"
echo -e " Fetching the repo"
lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
# "## master...origin/master" -> "master"; empty when the status is unreadable
BRANCH=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/')
[ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
CURRENT=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1)
CURRENT_COMMIT_ID=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD)
echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
# COMMIT_ID either was previously set with -b option, or is an empty string
CHECKOUT_ID=$COMMIT_ID
[ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
[ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
if [[ $CHECKOUT_ID == "tags/"* ]]; then
    REMOTE_COMMIT_ID=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID)
else
    REMOTE_COMMIT_ID=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID)
fi
echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
    echo " Nothing to be done."
else
    echo " Update required."
    # Stop the service, update the working tree preserving local changes,
    # migrate the DB schema, then restart.
    lxc exec $CONTAINER -- service osm-ro stop
    lxc exec $CONTAINER -- git -C /opt/openmano stash
    lxc exec $CONTAINER -- git -C /opt/openmano pull --rebase
    lxc exec $CONTAINER -- git -C /opt/openmano checkout $CHECKOUT_ID
    lxc exec $CONTAINER -- git -C /opt/openmano stash pop
    lxc exec $CONTAINER -- /opt/openmano/database_utils/migrate_mano_db.sh
    lxc exec $CONTAINER -- service osm-ro start
fi
# --- SO/UI update section (part of the elided update() wrapper) ---
# Same branch/tag comparison as the RO section; the actual update commands
# were never implemented upstream (see the trailing comments).
echo -e " Updating SO and UI"
CONTAINER="SO-ub"  # NOTE(review): assignment elided in this view; inferred from the commented command below — confirm
MDG="SO"           # NOTE(review): assignment elided in this view — confirm
INSTALL_FOLDER=""  # To be filled in
echo -e " Fetching the repo"
lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
BRANCH=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/')
[ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
CURRENT=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1)
CURRENT_COMMIT_ID=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD)
echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
# COMMIT_ID either was previously set with -b option, or is an empty string
CHECKOUT_ID=$COMMIT_ID
[ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
[ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
if [[ $CHECKOUT_ID == "tags/"* ]]; then
    REMOTE_COMMIT_ID=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID)
else
    REMOTE_COMMIT_ID=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID)
fi
echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
    echo " Nothing to be done."
else
    echo " Update required."
    # Instructions to be added
    # lxc exec SO-ub -- ...
fi
# --- MON update section (part of the elided update() wrapper) ---
# Same branch/tag comparison as the RO section; the update action itself is
# not visible in this view of the file.
echo -e "Updating MON Container"
CONTAINER="MON"    # NOTE(review): assignment elided in this view; inferred from this section — confirm
MDG="MON"          # NOTE(review): assignment elided in this view — confirm
INSTALL_FOLDER="/root/MON"
echo -e " Fetching the repo"
lxc exec $CONTAINER -- git -C $INSTALL_FOLDER fetch --all
BRANCH=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/')
[ -z "$BRANCH" ] && FATAL "Could not find the current branch in use in the '$MDG'"
CURRENT=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1)
CURRENT_COMMIT_ID=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD)
echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
# COMMIT_ID either was previously set with -b option, or is an empty string
CHECKOUT_ID=$COMMIT_ID
[ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID="tags/$LATEST_STABLE_DEVOPS"
[ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID="$BRANCH"
if [[ $CHECKOUT_ID == "tags/"* ]]; then
    REMOTE_COMMIT_ID=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID)
else
    REMOTE_COMMIT_ID=$(lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID)
fi
echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
    echo " Nothing to be done."
else
    echo " Update required."
fi
# Poll the SO REST API until the RW.Restconf component reports RUNNING,
# or FATAL after the timeout.  $1: optional SO IP; otherwise taken from the
# SO-ub lxd container.
# NOTE(review): the $1 branch and the polling constants/loop closers are
# elided in this view and reconstructed — confirm against the full installer.
function so_is_up() {
    if [ -n "$1" ]; then
        SO_IP=$1
    else
        SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=5           # TODO confirm polling interval
    timelength=300   # TODO confirm timeout
    while [ $time -le $timelength ]
    do
        if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \
                -H 'accept: application/vnd.yang.data+json' \
                -H 'authorization: Basic YWRtaW46YWRtaW4=' \
                -H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]]
        then
            echo "RW.Restconf running....SO is up"
            return 0
        fi
        sleep $step
        echo -n "."
        time=$((time+step))
    done
    FATAL "OSM Failed to startup. SO failed to startup"
}
# Check that the juju controller inside the VCA container reports the "osm"
# entry exactly once; FATAL otherwise.
function vca_is_up() {
    if [[ `lxc exec VCA -- juju status | grep "osm" | wc -l` -eq 1 ]]; then
        echo "VCA is up and running"
    else
        FATAL "OSM Failed to startup. VCA failed to startup"
    fi
}
# NOTE(review): this mirrors the original code, which probes the RO endpoint
# ($RO_IP:9090, set by ro_is_up) as a proxy for MON being reachable — confirm
# this is the intended check.
function mon_is_up() {
    if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
        echo "MON is up and running"
    else
        FATAL "OSM Failed to startup. MON failed to startup"
    fi
}
# Poll the RO (openmano) HTTP endpoint until it answers, or FATAL after the
# timeout.  $1: optional RO IP; otherwise taken from the RO lxd container.
# Sets RO_IP globally (read later by mon_is_up).
# NOTE(review): the $1 branch and the polling constants/loop closers are
# elided in this view and reconstructed — confirm against the full installer.
function ro_is_up() {
    if [ -n "$1" ]; then
        RO_IP=$1
    else
        RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=2          # TODO confirm polling interval
    timelength=20   # TODO confirm timeout
    while [ $time -le $timelength ]; do
        if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
            echo "RO is up and running"
            return 0
        fi
        sleep $step
        echo -n ","
        time=$((time+step))
    done
    FATAL "OSM Failed to startup. RO failed to startup"
}
# Configure the RO container: point its log socket at the SO container,
# restart the service, (re)create the "osm" tenant and export
# OPENMANO_TENANT in the container's .bashrc.
function configure_RO(){
    . $OSM_DEVOPS/installers/export_ips
    echo -e " Configuring RO"
    # Send RO logs to the SO container
    lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg
    lxc exec RO -- service osm-ro restart
    # Tenant re-creation is idempotent: delete (ignore output) then create
    lxc exec RO -- openmano tenant-delete -f osm >/dev/null
    lxc exec RO -- openmano tenant-create osm > /dev/null
    # Drop any stale export before re-adding it to .bashrc
    lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc
    lxc exec RO -- sed -i '$ i export OPENMANO_TENANT=osm' .bashrc
    lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc'
}
# Configure the VCA container: set a freshly generated juju admin password
# (JUJU_PASSWD stays global — configure_SOUI reads it later).
function configure_VCA(){
    echo -e " Configuring VCA"
    JUJU_PASSWD=$(generate_secret)
    # juju asks for the new password twice on stdin
    echo -e "$JUJU_PASSWD\n$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password
}
# Configure the SO/UI container: host routes towards the juju controller,
# persistent journaling, the launchpad external address, then register the
# config-agent (juju), the RO account and the UI redirect URIs through the
# SO REST API.
function configure_SOUI(){
    . $OSM_DEVOPS/installers/export_ips
    JUJU_CONTROLLER_IP=$(lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}')
    RO_TENANT_ID=$(lxc exec RO -- openmano tenant-list osm |awk '{print $1}')

    echo -e " Configuring MON"
    #Information to be added about SO socket for logging

    echo -e " Configuring SO"
    # Routes towards the juju controller nested inside the VCA container,
    # both live and persisted in /etc/rc.local
    sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP
    sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP
    sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local
    sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local
    # make journaling persistent
    lxc exec SO-ub -- mkdir -p /var/log/journal
    lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal
    lxc exec SO-ub -- systemctl restart systemd-journald

    echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad

    lxc exec SO-ub -- systemctl restart launchpad

    so_is_up $SO_CONTAINER_IP

    #delete existing config agent (could be there on reconfigure)
    curl -k --request DELETE \
        --url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' &> /dev/null

    result=$(curl -k --request POST \
        --url https://$SO_CONTAINER_IP:8008/api/config/config-agent \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result"

    #result=$(curl -k --request PUT \
    #    --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \
    #    --header 'accept: application/vnd.yang.data+json' \
    #    --header 'authorization: Basic YWRtaW46YWRtaW4=' \
    #    --header 'cache-control: no-cache' \
    #    --header 'content-type: application/vnd.yang.data+json' \
    #    --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }')

    result=$(curl -k --request PUT \
        --url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result"

    result=$(curl -k --request PATCH \
        --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed redirect-uri configuration: $result"

    result=$(curl -k --request PATCH \
        --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result"

    # NOTE(review): the "auto lo:1" and "address" lines of this interfaces
    # file are elided in this view and reconstructed — confirm against the
    # full installer.
    lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg <<EOF
auto lo:1
iface lo:1 inet static
    address $DEFAULT_IP
    netmask 255.255.255.255
EOF
    lxc exec SO-ub ifup lo:1
}
#Configure RO, VCA, and SO with the initial configuration:
# RO -> tenant:osm, logs to be sent to SO
# VCA -> juju-password
# SO -> route to Juju Controller, add RO account, add VCA account
function configure(){
    #Configure components
    echo -e "\nConfiguring components"
    # NOTE(review): the per-component calls are elided in this view and
    # reconstructed from the header comment above — confirm against the
    # full installer.
    configure_RO
    configure_VCA
    configure_SOUI
}
# Install LXD and create the lxdbr0 bridge (IPv4 NAT only), then align the
# default profile's eth0 MTU with the host's default-route interface so
# nested containers do not suffer fragmentation.
function install_lxd() {
    sudo apt-get install -y lxd
    # NOTE(review): the lxd init / group-membership steps that normally sit
    # here are elided in this view of the file — confirm against the full
    # installer.
    lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
    # Interface carrying the default route, and its MTU
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    lxc profile device set default eth0 mtu $DEFAULT_MTU
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
# NOTE(review): the function header and the while-loop closers are elided in
# this view and reconstructed — confirm against the full installer.
function ask_user(){
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
# Launch the build container for module $1 from lxd image $2 (if it does not
# already exist), honouring the privileged/nesting build options.
# Relies on project helpers: container_exists, create_container,
# wait_container_up.
# NOTE(review): the lines between the header and the OSM_BASE_IMAGE export
# are elided in this view — confirm against the full installer.
function launch_container_from_lxd(){
    export OSM_BASE_IMAGE=$2
    if ! container_exists $OSM_BUILD_CONTAINER; then
        CONTAINER_OPTS=""   # start from a clean option set
        [[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true"
        [[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true"
        create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS
        wait_container_up $OSM_BUILD_CONTAINER
    fi
}
# Install the OSM client from the configured apt repository, plus its python
# dependencies, and point it at the SO/RO containers when running the classic
# (non-lightweight) build.
function install_osmclient(){
    # Strip the option prefixes the CLI parser left in these variables
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Classic build: resolve the SO-ub / RO container addresses
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=$(lxc list | awk '($2=="SO-ub"){print $6}')
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=$(lxc list | awk '($2=="RO"){print $6}')
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
}
# Classic (Rel THREE) install from prebuilt lxd images: fetch (or reuse from
# LXD_REPOSITORY_PATH) the osm-ro / osm-vca / osm-soui tarballs, import them
# as lxd images, then launch the containers and wait for each service.
function install_from_lxdimages(){
    LXD_RELEASE=${RELEASE#"-R "}
    if [ -n "$LXD_REPOSITORY_PATH" ]; then
        LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH"
    else
        LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")"
        trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT
    fi
    echo -e "\nDeleting previous lxd images if they exist"
    lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
    lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
    lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
    echo -e "\nImporting osm-ro"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro
    rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz
    echo -e "\nImporting osm-vca"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca
    rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz
    echo -e "\nImporting osm-soui"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui
    rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz
    launch_container_from_lxd RO osm-ro
    # NOTE(review): the RO health-check/track line that presumably follows the
    # RO launch (mirroring the VCA/MON lines below) is elided in this view —
    # confirm against the full installer.
    launch_container_from_lxd VCA osm-vca
    vca_is_up && track VCA
    launch_container_from_lxd MON osm-mon
    mon_is_up && track MON
    launch_container_from_lxd SO osm-soui
    #so_is_up && track SOUI
}
# Install Docker CE from the upstream apt repository, add the invoking user
# to the docker group and verify the daemon answers; FATAL on failure.
function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sudo service docker restart
    echo "... restarted Docker service"
    # `sg docker` runs with the freshly granted group without re-login
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
}
# Download the docker-compose 1.18.0 binary matching the host kernel/arch
# into /usr/local/bin and make it executable.
function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
# Install juju from snap; when LXD was installed by this script, reconfigure
# the lxd package so juju can use it.
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic
    [ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
    echo "Finished installation of juju"
}
# Bootstrap the juju controller named $OSM_STACK_NAME on the local lxd cloud
# if it does not already exist, then verify it is listed exactly once.
# Fix: `$1` inside the double-quoted awk program was expanded by the shell
# (to the script's own first argument) instead of reaching awk — escaped it.
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
# Ensure iptables-persistent is installed, then add (if absent) a persistent
# DNAT PREROUTING rule proxying host port 17070 to the juju controller at
# $OSM_VCA_HOST.
function juju_createproxy() {
    echo -e "\nChecking required packages: iptables-persistent"
    dpkg -l iptables-persistent &>/dev/null \
        || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" \
        || sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install iptables-persistent
    # -C checks for the rule; add and persist only when it is missing
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Pull third-party images (kafka, mongo, prometheus, mariadb, mysql) and, per
# OSM module, either pull the prebuilt image (PULL_IMAGES) or clone the repo
# into ${LWTEMPDIR} and build it locally.  TO_REBUILD restricts the work to
# the listed modules; empty means "all".
# NOTE(review): ${LWTEMPDIR} is expected to be created by the caller — its
# creation is not visible in this view of the file; confirm.
function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient ; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    echo "Finished generation of docker images"
}
# Copy $1 over $2 unless they already have identical content.
# If $2 exists and differs, ask the user before overwriting (default: no).
# cp -b keeps a backup of the clobbered file.
function cmp_overwrite() {
    # NOTE(review): these two assignments were dropped in the garbled source;
    # the body clearly reads ${file1}/${file2} — confirm against upstream.
    file1="$1"
    file2="$2"
    # FIX: original used "if ! $(cmp ...)", which only works by accident
    # (empty command substitution propagating cmp's status). cmp -s is the
    # direct, documented way to compare silently.
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
# Generate (or refresh) the per-component env files under $OSM_DOCKER_WORK_DIR.
# Existing files are first backed up with a '~' suffix. Keys already present
# are updated in place via sed; missing keys/files are created via tee.
# NOTE(review): the source of this block was garbled (if/else/fi closers were
# dropped); branch boundaries reconstructed — verify against upstream devops.
function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    # On a first install these cp's fail harmlessly (files do not exist yet).
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose + Prometheus config for the swarm deployment
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=\"${OSM_VCA_PUBKEY}\"|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO (passwords are generated once and only written on first install,
    # so re-runs do not rotate credentials out from under running services)
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
# Write a small wrapper script ($OSM_DOCKER_WORK_DIR/osm) that runs the
# osmclient sidecar container attached to the stack network, and mark it
# executable. Echoes its location for the user.
function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    # Refresh the package index so the freshly added kubernetes repo is seen
    # (NOTE(review): this line was dropped in the garbled source — without it
    # the pinned installs below would not find the packages; confirm upstream).
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    # Versions are pinned so kubelet/kubeadm/kubectl stay in lockstep
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
# $1 - path to the kubeadm cluster-config YAML
function init_kubeadm() {
    sudo kubeadm init --config "$1"
    # brief settle delay before the caller starts using the cluster
    # NOTE(review): surrounding lines were dropped in the garbled source —
    # confirm the delay against the upstream installer.
    sleep 5
}
# Copy the kubeadm admin kubeconfig into the invoking user's ~/.kube/config
# so plain (non-sudo) kubectl works. Requires a prior successful kubeadm init.
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    # FIX: ensure the target directory exists before copying (this line was
    # dropped in the garbled source; without it the cp below fails on a
    # fresh account — confirm against upstream).
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    # NOTE(review): this EXIT trap replaces any earlier EXIT trap set by the
    # script (e.g. the LWTEMPDIR cleanup) — earlier temp dirs will no longer
    # be removed on exit; confirm whether that is intentional.
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # IDIOM: check the command directly instead of testing $? afterwards
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
# One <component>-secret per component, populated from the matching
# <component>.env file in $OSM_DOCKER_WORK_DIR, all inside the stack namespace.
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
#deploys osm pods and services
function deploy_osm_services() {
    # Find the master node, allow scheduling on it (single-node cluster),
    # then apply all OSM manifests into the stack namespace.
    # IDIOM: single awk does both the filter and the projection
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
# Rewrite the image tag of every opensourcemano/* image in the k8s pod
# manifests to the tag given in $1.
function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui keystone"
    # FIX: TAG assignment was dropped in the garbled source; the sed below
    # clearly consumes $TAG from the function's first argument.
    TAG=$1
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
# Point every stateful component's hostPath at the per-stack volume directory
# ($OSM_NAMESPACE_VOL) instead of the shared default /var/lib/osm.
function namespace_vol() {
    local svc
    for svc in nbi lcm ro pol mon kafka mongo mysql; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
# Initialize a single-node docker swarm advertised on $DEFAULT_IP.
# When the host MTU differs from 1500, pre-create docker_gwbridge with a
# matching MTU on the next free 172.x subnet so overlay traffic is not
# fragmented.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # IDIOM: $( ) instead of backticks
        DOCKER_NETS=$(sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s)
        # pick the highest 172.x subnet in use and bump its second octet
        DOCKER_GW_NET=$(sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}')
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
}
# Create the attachable overlay network net${OSM_STACK_NAME} used by all OSM
# services, honoring the detected host MTU.
function create_docker_network() {
    echo "creating network"
    local net_cmd="docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    sg docker -c "${net_cmd}"
    echo "creating network DONE"
}
# Deploy the lightweight OSM docker stack: export port mappings and image
# tags to osm_ports.sh, then "docker stack deploy" the compose file.
# With NO_HOST_PORTS set, services get container ports only (no host binding).
# NOTE(review): several default port assignments and the if/else/fi closers
# were dropped in the garbled source and are reconstructed here — confirm
# against the upstream installer.
function deploy_lightweight() {
    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_HOSTPORT=9091
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    [ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        [ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        # Prometheus is remapped on the host (9091) to avoid clashing with RO (9090)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        [ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    popd

    echo "Finished deployment of lightweight build"
}
# Pull the ELK images, deploy the osm_elk docker stack, then poll Kibana for
# up to ~40s; once up, create and default the filebeat-* index pattern.
# NOTE(review): the wait-loop initialization and if/else closers were dropped
# in the garbled source and are reconstructed — confirm against upstream.
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"

    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"

    echo "Waiting for ELK stack to be up and running"
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        # Kibana answers its /status endpoint with 200 once ready
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done

    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
}
# Deploy the performance-monitoring (Grafana) stack: pull the image, stage the
# compose/dashboard files into $OSM_DOCKER_WORK_DIR/osm_metrics, replace any
# previous osm_metrics stack, and deploy it on the OSM overlay network.
function deploy_perfmon() {
    echo "Pulling docker images for PM (Grafana)"
    sg docker -c "docker pull grafana/grafana" || FATAL "cannot get grafana docker image"
    echo "Finished pulling PM docker images"

    local metrics_dir=$OSM_DOCKER_WORK_DIR/osm_metrics
    $WORKDIR_SUDO mkdir -p $metrics_dir
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_metrics/*.yml $metrics_dir
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_metrics/*.json $metrics_dir

    remove_stack osm_metrics
    echo "Deploying PM stack (Grafana)"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $metrics_dir/docker-compose.yml osm_metrics"
    echo "Finished deployment of PM stack"
}
# Top-level driver of the lightweight install: prepare work dirs, confirm with
# the user, detect network defaults, set up lxd/juju (VCA), docker, then
# build/pull images and deploy via kubernetes or docker swarm.
# NOTE(review): the source of this block was heavily garbled; if/else/fi
# closers and several helper/track calls are reconstructed — confirm against
# the upstream devops installer before merging.
function install_lightweight() {
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1. Install and configure LXD
2. Install juju
3. Install docker CE
4. Disable swap space
5. Install and initialize Kubernetes
as pre-requirements.
Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    DEFAULT_IF=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=$(ip -o -4 a | grep ${DEFAULT_IF} | awk '{split($4,a,"/"); print a[1]}')
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
        need_packages_lw="lxd snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
            || sudo apt-get update \
            || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "Installing $need_packages_lw requires root privileges." \
            || sudo apt-get install -y $need_packages_lw \
            || FATAL "failed to install $need_packages_lw"
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju

    if [ -z "$OSM_VCA_HOST" ]; then
        juju_createcontroller
        OSM_VCA_HOST=$(sg lxd -c "juju show-controller $OSM_STACK_NAME" | grep api-endpoints | awk -F\' '{print $2}' | awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        OSM_VCA_CACERT=$(juju controllers --format json | jq -r '.controllers["osm"]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # FIX: original tested the literal string "OSM_DATABASE_COMMONKEY"
        # (missing $), so the failure branch could never trigger
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ ! $OSM_DOCKER_TAG == "latest" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        track deploy_osm_services_k8s
    else
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
    fi

    generate_osmclient_script

    [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
    [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    [ -n "$INSTALL_PERFMON" ] && deploy_perfmon && track perfmon

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient

    # best-effort availability ping; output and failures are discarded
    wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null
    track end
}
# Clone, build and start the vim-emu (emulated VIM) docker container, then
# print how to register it as a VIM in OSM.
function install_vimemu() {
    # FIX: plain echo printed the literal characters "\n"; -e interprets it
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    # NOTE(review): this EXIT trap replaces any earlier EXIT trap set by the
    # installer — confirm that is acceptable.
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "  export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "  osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
# Print the effective installer configuration, one VAR=value per line.
# Used by --showopts.
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_FROM_LXDIMAGES=$INSTALL_FROM_LXDIMAGES"
    echo "LXD_REPOSITORY_BASE=$LXD_REPOSITORY_BASE"
    echo "LXD_REPOSITORY_PATH=$LXD_REPOSITORY_PATH"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "NOCONFIGURE=$NOCONFIGURE"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # FIX: original printed $OSM_STACK_NAME under the OSM_WORK_DIR label
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
# Report an anonymized install-progress event ($1 = stage name) to the OSM
# telemetry endpoint. Duration is seconds elapsed since SESSION_ID (a start
# timestamp). Network failures are silent (wget -q to /dev/null).
# NOTE(review): the function header and ctime assignment were dropped in the
# garbled source and are reconstructed — confirm against upstream.
function track(){
    ctime=$(date +%s)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    # FIX: quote the URL — it contains '&', which would background the command
    # if the variable were ever expanded unquoted in a different context
    wget -q -O /dev/null "$url"
}
# Default installer configuration.
# NOTE(review): the source of this section was garbled; only the assignments
# visible in this chunk are reproduced, plus OSM_STACK_NAME, which the visible
# OSM_NAMESPACE_VOL default requires — confirm the full list against upstream.
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseSIX"
INSTALL_FROM_LXDIMAGES=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_NOHOSTCLIENT=""
# IDIOM: $( ) instead of backticks; install start time, used by track()
SESSION_ID=$(date +%s)
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
# NOTE(review): reconstructed — the OSM_NAMESPACE_VOL default below expands it
OSM_STACK_NAME=osm
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Valid k8s namespace names (DNS label): lowercase alphanumerics and '-'
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# Command-line parsing. Long options arrive through getopts as '-' with the
# option name in $OPTARG.
# NOTE(review): the case/esac/done skeleton and some arm labels were dropped
# in the garbled source and are reconstructed — verify every arm against the
# upstream installer.
while getopts ":hy-:b:r:c:k:u:R:l:p:D:o:m:H:S:s:w:t:U:P:A:" o; do
    case "${o}" in
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # FIX: the message referred to "-i" although this is the -c option
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        l)
            LXD_REPOSITORY_BASE="${OPTARG}"
            ;;
        p)
            LXD_REPOSITORY_PATH="${OPTARG}"
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
            ;;
        m)
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "nat" ] && NAT="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lxdimages" ] && INSTALL_FROM_LXDIMAGES="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "soui" ] && INSTALL_LIGHTWEIGHT="" && RELEASE="-R ReleaseTHREE" && REPOSITORY="-r stable" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "pm_stack" ] && INSTALL_PERFMON="y" && continue
            [ "${OPTARG}" == "noconfigure" ] && NOCONFIGURE="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "daily" ] && RELEASE_DAILY="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo -e "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
# Reject mutually-exclusive option combinations before doing any work.
[ -n "$INSTALL_FROM_LXDIMAGES" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --lxd can only be used with --soui"
[ -n "$NAT" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --nat can only be used with --soui"
[ -n "$NOCONFIGURE" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --noconfigure can only be used with --soui"
[ -n "$RELEASE_DAILY" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --daily can only be used with --soui"
[ -n "$INSTALL_NOLXD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nolxd cannot be used with --soui"
[ -n "$INSTALL_NODOCKER" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nodocker cannot be used with --soui"
[ -n "$TO_REBUILD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: -m cannot be used with --soui"
# "-m NONE" must be the only -m value given
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1538 if [ -n "$SHOWOPTS" ]; then
# --daily: switch to the daily build repository (ReleaseTHREE-daily /
# "testing") and build from the master branch.
[ -n "$RELEASE_DAILY" ] && echo -e "\nInstalling from daily build repo" && RELEASE="-R ReleaseTHREE-daily" && REPOSITORY="-r testing" && COMMIT_ID="master"

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
# Ensure base tooling is installed. $need_packages is deliberately unquoted
# so it word-splits into separate package arguments. Each "dpkg -l" probe
# short-circuits the rest of its chain when everything is present; the
# "! echo ..." trick prints the notice and (since echo succeeds, negated to
# false) lets the chain fall through to the apt-get step.
need_packages="git jq wget curl tar"
echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
# Locate (or fetch) the devops repo that drives the installation.
# NOTE(review): the else/fi structural lines fell in gaps of the mangled
# paste; this nesting is reconstructed from the visible statements and the
# embedded original line numbers — confirm against the full script.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        # Running from inside a devops checkout: use the repo containing
        # this script (two directories up from $0).
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        # Clean the temporary checkout up on any exit path.
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git "$OSM_DEVOPS"

        if [ -z "$COMMIT_ID" ]; then
            # No refspec requested: pick the newest stable tag by version
            # sort (vX.Y...).
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=$(git -C "$OSM_DEVOPS" tag -l 'v[0-9].*' | sort -V | tail -n1)
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C "$OSM_DEVOPS" checkout "$COMMIT_ID"
    fi
fi
# Load the shared helper functions from the devops repo (FATAL, track,
# *_is_up, install_* — presumably defined there; confirm in common/all_funcs).
. "$OSM_DEVOPS/common/all_funcs"
# Early-exit dispatch: uninstall / NAT / update / reconfigure / install-only
# modes finish here and never reach the full installation below.
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$UNINSTALL" ] && uninstall && echo -e "\nDONE" && exit 0
[ -n "$NAT" ] && nat && echo -e "\nDONE" && exit 0
[ -n "$UPDATE" ] && update && echo -e "\nDONE" && exit 0
[ -n "$RECONFIGURE" ] && configure && echo -e "\nDONE" && exit 0
# --install-only: deploy just the requested optional stacks, then stop.
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
#Installation starts here
# Fetch the release README, discarding output and errors — presumably a
# download-server ping / install counter; confirm intent upstream.
wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README.txt &> /dev/null

# The lightweight (container-based) install is self-contained: run it and
# stop here when requested.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
# Confirm the long source build with the user unless -y was given.
# "! ask_user ... && ... exit 1" implies ask_user succeeds on acceptance.
# NOTE(review): the closing "fi" fell in a gap of the mangled paste;
# reconstructed here — confirm against the full script.
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

# The classic (SO/UI) install builds LXD containers, so lxd must exist.
echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
# use local devops for containers: exported so the jenkins/host build and
# install scripts invoked below inherit it.
export OSM_USE_LOCAL_DEVOPS=true
# Deploy the OSM containers by one of three strategies: build from source,
# import prebuilt LXD images, or install binary packages.
# NOTE(review): the closing "fi" fell in a gap of the mangled paste;
# reconstructed here — confirm against the full script.
if [ -n "$INSTALL_FROM_SOURCE" ]; then #install from source
    echo -e "\nCreating the containers and building from source ..."
    $OSM_DEVOPS/jenkins/host/start_build RO --notest checkout $COMMIT_ID || FATAL "RO container build failed (refspec: '$COMMIT_ID')"
    ro_is_up && track RO
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA container build failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/start_build MON || FATAL "MON install failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/start_build SO checkout $COMMIT_ID || FATAL "SO container build failed (refspec: '$COMMIT_ID')"
    $OSM_DEVOPS/jenkins/host/start_build UI checkout $COMMIT_ID || FATAL "UI container build failed (refspec: '$COMMIT_ID')"
    #so_is_up && track SOUI
elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then #install from LXD images stored in OSM repo
    echo -e "\nInstalling from lxd images ..."
    install_from_lxdimages
else #install from binaries
    echo -e "\nCreating the containers and installing from binaries ..."
    # ${REPO_ARGS[@]} carries the -r/-R/-u/-k repository overrides; left
    # unquoted as in the original so each element becomes its own argument.
    $OSM_DEVOPS/jenkins/host/install RO ${REPO_ARGS[@]} || FATAL "RO install failed"
    ro_is_up && track RO
    # NOTE(review): VCA uses start_build here even on the binary path —
    # looks intentional (matches the source branch) but worth confirming.
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA install failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/install MON || FATAL "MON build failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/install SO ${REPO_ARGS[@]} || FATAL "SO install failed"
    $OSM_DEVOPS/jenkins/host/install UI ${REPO_ARGS[@]} || FATAL "UI install failed"
    #so_is_up && track SOUI
fi
#Install iptables-persistent and configure NAT rules
[ -z "$NOCONFIGURE" ] && nat

#Configure components
[ -z "$NOCONFIGURE" ] && configure

[ -z "$NOCONFIGURE" ] && install_osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
# Final fetch of README2.txt, output and errors discarded — mirrors the
# README.txt fetch at the start; presumably marks a completed install on
# the download server. Confirm intent upstream.
wget -q -O- https://osm-download.etsi.org/ftp/osm-6.0-six/README2.txt &> /dev/null