# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Help text of the installer (body of the usage message).
# Fixes: removed stray line-number prefixes left by extraction, corrected
# user-facing typos ("confifured" -> "configured", "moitoring" -> "monitoring")
# and the missing space after "--nodockerbuild:".
echo -e "usage: $0 [OPTIONS]"
echo -e "Install OSM from binaries or source code (by default, from binaries)"
echo -e " -r <repo>: use specified repository name for osm packages"
echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
echo -e " -u <repo base>: use specified repository url for osm packages"
echo -e " -k <repo key>: use specified repository public key url"
echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
echo -e " -b master (main dev branch)"
echo -e " -b v2.0 (v2.0 branch)"
echo -e " -b tags/v1.1.0 (a specific tag)"
echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
echo -e " -H <VCA host> use specific juju host controller IP"
echo -e " -S <VCA secret> use VCA/juju secret key"
echo -e " -P <VCA pubkey> use VCA/juju public key file"
echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack)"
echo -e " -D <devops path> use local devops installation path"
echo -e " -w <work dir> Location to store runtime installation"
echo -e " -t <docker tag> specify osm docker tag (default is latest)"
echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
echo -e " --nojuju: do not install juju, assumes already installed"
echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
echo -e " --nohostclient: do not install the osmclient"
echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
echo -e " --source: install OSM from source code using the latest stable tag"
echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
echo -e " --soui: install classic build of OSM (Rel THREE v3.1, based on LXD containers, with SO and UI)"
echo -e " --lxdimages: (only for Rel THREE with --soui) download lxd images from OSM repository instead of creating them from scratch"
echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
echo -e " -l <lxd_repo>: (only for Rel THREE with --soui) use specified repository url for lxd images"
echo -e " -p <path>: (only for Rel THREE with --soui) use specified repository path for lxd images"
# echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
echo -e " --nat: (only for Rel THREE with --soui) install only NAT rules"
echo -e " --noconfigure: (only for Rel THREE with --soui) DO NOT install osmclient, DO NOT install NAT rules, DO NOT configure modules"
# echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
echo -e " --showopts: print chosen options and exit (only for debugging)"
echo -e " -y: do not prompt for confirmation, assumes yes"
echo -e " -h / --help: print this help"
67 #Uninstall OSM: remove containers
69 echo -e "\nUninstalling OSM"
70 if [ $RC_CLONE ] ||
[ -n "$TEST_INSTALLER" ]; then
71 $OSM_DEVOPS/jenkins
/host
/clean_container RO
72 $OSM_DEVOPS/jenkins
/host
/clean_container VCA
73 $OSM_DEVOPS/jenkins
/host
/clean_container MON
74 $OSM_DEVOPS/jenkins
/host
/clean_container SO
75 #$OSM_DEVOPS/jenkins/host/clean_container UI
77 lxc stop RO
&& lxc delete RO
78 lxc stop VCA
&& lxc delete VCA
79 lxc stop MON
&& lxc delete MON
80 lxc stop SO-ub
&& lxc delete SO-ub
82 echo -e "\nDeleting imported lxd images if they exist"
83 lxc image show osm-ro
&>/dev
/null
&& lxc image delete osm-ro
84 lxc image show osm-vca
&>/dev
/null
&& lxc image delete osm-vca
85 lxc image show osm-soui
&>/dev
/null
&& lxc image delete osm-soui
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
#
# $1 - controller name; the password is printed on stdout.
# Reads ${HOME}/.local/share/juju/accounts.yaml.
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: YAML key characters,
    # fs: an improbable field separator (ASCII 0x1c)
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # Flatten the YAML into "<indent>FS<key>FS<value>" records, then rebuild
    # the key path from indentation and print the password of the matching
    # controller. (awk body reconstructed — TODO(review): verify upstream.)
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
# Print a random 32-character alphanumeric secret on stdout.
function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
# Remove the persistent storage of an OSM deployment.
# $1 - k8s volume directory (KUBERNETES mode) or docker stack name (swarm mode).
function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1   # TODO(review): reconstructed — line lost in extraction
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1       # TODO(review): reconstructed — line lost in extraction
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
# Remove the docker overlay network created for stack $1 ("net<stack>").
function remove_network() {
    stack=$1   # TODO(review): reconstructed — line lost in extraction
    sg docker -c "docker network rm net${stack}"
}
# Delete the DNAT rule that proxied the juju API port (17070) to the VCA host.
# $1 - stack/controller name used to look up the juju controller IP.
function remove_iptables() {
    stack=$1   # TODO(review): reconstructed — line lost in extraction
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi
    # -C checks that the rule exists before removing it with -D
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Remove docker stack $1 and wait (up to ~30s) until all its containers are gone.
# Calls FATAL (defined elsewhere in this file) if containers remain.
function remove_stack() {
    stack=$1   # TODO(review): reconstructed — line lost in extraction
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
    fi
}
#removes osm deployments and services
# $1 - kubernetes namespace to delete.
# NOTE(review): body reconstructed — original lines lost in extraction; verify upstream.
function remove_k8s_namespace() {
    kubectl delete ns $1
}
#Uninstall lightweight OSM: remove dockers
# Honors INSTALL_ONLY/INSTALL_ELK (addon-only uninstall) and KUBERNETES vs
# swarm deployment mode. Several control-flow lines were lost in extraction
# and are reconstructed here — TODO(review): verify against upstream devops.
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
        fi
        echo "Now osm docker images and volumes will be deleted"
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG
        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
}
234 #Configure NAT rules, based on the current IP addresses of containers
236 echo -e "\nChecking required packages: iptables-persistent"
237 dpkg
-l iptables-persistent
&>/dev
/null ||
! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
238 sudo apt-get
-yq install iptables-persistent
239 echo -e "\nConfiguring NAT rules"
240 echo -e " Required root privileges"
241 sudo
$OSM_DEVOPS/installers
/nat_osm
245 echo "FATAL error: Cannot install OSM due to \"$1\""
249 #Update RO, SO and UI:
251 echo -e "\nUpdating components"
253 echo -e " Updating RO"
256 INSTALL_FOLDER
="/opt/openmano"
257 echo -e " Fetching the repo"
258 lxc
exec $CONTAINER -- git
-C $INSTALL_FOLDER fetch
--all
260 BRANCH
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
261 [ -z "$BRANCH" ] && FATAL
"Could not find the current branch in use in the '$MDG'"
262 CURRENT
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
263 CURRENT_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
264 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
265 # COMMIT_ID either was previously set with -b option, or is an empty string
266 CHECKOUT_ID
=$COMMIT_ID
267 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID
="tags/$LATEST_STABLE_DEVOPS"
268 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID
="$BRANCH"
269 if [[ $CHECKOUT_ID == "tags/"* ]]; then
270 REMOTE_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
272 REMOTE_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
274 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
275 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
276 echo " Nothing to be done."
278 echo " Update required."
279 lxc
exec $CONTAINER -- service osm-ro stop
280 lxc
exec $CONTAINER -- git
-C /opt
/openmano stash
281 lxc
exec $CONTAINER -- git
-C /opt
/openmano pull
--rebase
282 lxc
exec $CONTAINER -- git
-C /opt
/openmano checkout
$CHECKOUT_ID
283 lxc
exec $CONTAINER -- git
-C /opt
/openmano stash pop
284 lxc
exec $CONTAINER -- /opt
/openmano
/database_utils
/migrate_mano_db.sh
285 lxc
exec $CONTAINER -- service osm-ro start
289 echo -e " Updating SO and UI"
292 INSTALL_FOLDER
="" # To be filled in
293 echo -e " Fetching the repo"
294 lxc
exec $CONTAINER -- git
-C $INSTALL_FOLDER fetch
--all
296 BRANCH
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
297 [ -z "$BRANCH" ] && FATAL
"Could not find the current branch in use in the '$MDG'"
298 CURRENT
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
299 CURRENT_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
300 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
301 # COMMIT_ID either was previously set with -b option, or is an empty string
302 CHECKOUT_ID
=$COMMIT_ID
303 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID
="tags/$LATEST_STABLE_DEVOPS"
304 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID
="$BRANCH"
305 if [[ $CHECKOUT_ID == "tags/"* ]]; then
306 REMOTE_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
308 REMOTE_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
310 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
311 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
312 echo " Nothing to be done."
314 echo " Update required."
315 # Instructions to be added
316 # lxc exec SO-ub -- ...
319 echo -e "Updating MON Container"
322 INSTALL_FOLDER
="/root/MON"
323 echo -e " Fetching the repo"
324 lxc
exec $CONTAINER -- git
-C $INSTALL_FOLDER fetch
--all
326 BRANCH
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status -sb | head -n1 | sed -n 's/^## \(.*\).*/\1/p'|awk '{print $1}' |sed 's/\(.*\)\.\.\..*/\1/'`
327 [ -z "$BRANCH" ] && FATAL
"Could not find the current branch in use in the '$MDG'"
328 CURRENT
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER status |head -n1`
329 CURRENT_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse HEAD`
330 echo " FROM: $CURRENT ($CURRENT_COMMIT_ID)"
331 # COMMIT_ID either was previously set with -b option, or is an empty string
332 CHECKOUT_ID
=$COMMIT_ID
333 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" == "HEAD" ] && CHECKOUT_ID
="tags/$LATEST_STABLE_DEVOPS"
334 [ -z "$CHECKOUT_ID" ] && [ "$BRANCH" != "HEAD" ] && CHECKOUT_ID
="$BRANCH"
335 if [[ $CHECKOUT_ID == "tags/"* ]]; then
336 REMOTE_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-list -n 1 $CHECKOUT_ID`
338 REMOTE_COMMIT_ID
=`lxc exec $CONTAINER -- git -C $INSTALL_FOLDER rev-parse origin/$CHECKOUT_ID`
340 echo " TO: $CHECKOUT_ID ($REMOTE_COMMIT_ID)"
341 if [ "$CURRENT_COMMIT_ID" == "$REMOTE_COMMIT_ID" ]; then
342 echo " Nothing to be done."
344 echo " Update required."
# Poll the SO REST API until RW.Restconf reports RUNNING, or fail via FATAL.
# $1 - optional SO IP; otherwise resolved from the SO-ub lxd container.
# Timing constants reconstructed — TODO(review): verify against upstream.
function so_is_up() {
    if [ -n "$1" ]; then
        SO_IP=$1
    else
        SO_IP=`lxc list SO-ub -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=5
    timelength=300
    while [ $time -le $timelength ]
    do
        if [[ `curl -k -X GET https://$SO_IP:8008/api/operational/vcs/info \
                -H 'accept: application/vnd.yang.data+json' \
                -H 'authorization: Basic YWRtaW46YWRtaW4=' \
                -H 'cache-control: no-cache' 2> /dev/null | jq '.[].components.component_info[] | select(.component_name=="RW.Restconf")' 2>/dev/null | grep "RUNNING" | wc -l` -eq 1 ]]
        then
            echo "RW.Restconf running....SO is up"
            return 0
        fi
        sleep $step
        echo -n "."
        time=$((time+step))
    done
    FATAL "OSM Failed to startup. SO failed to startup"
}
# Check that the juju controller inside the VCA container knows the "osm" model.
# Calls FATAL (defined elsewhere in this file) when VCA is not up.
function vca_is_up() {
    if [[ `lxc exec VCA -- juju status | grep "osm" | wc -l` -eq 1 ]]; then
        echo "VCA is up and running"
        return 0
    fi
    FATAL "OSM Failed to startup. VCA failed to startup"
}
# Check MON availability.
# NOTE(review): the original probes RO's openmano endpoint ($RO_IP:9090) here,
# mirroring ro_is_up — kept as-is from the source; verify upstream.
function mon_is_up() {
    if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
        echo "MON is up and running"
        return 0
    fi
    FATAL "OSM Failed to startup. MON failed to startup"
}
# Poll the RO openmano endpoint until it answers, or fail via FATAL.
# $1 - optional RO IP; otherwise resolved from the RO lxd container.
# Timing constants reconstructed — TODO(review): verify against upstream.
function ro_is_up() {
    if [ -n "$1" ]; then
        RO_IP=$1
    else
        RO_IP=`lxc list RO -c 4|grep eth0 |awk '{print $2}'`
    fi
    time=0
    step=2
    timelength=20
    while [ $time -le $timelength ]; do
        if [[ `curl http://$RO_IP:9090/openmano/ | grep "works" | wc -l` -eq 1 ]]; then
            echo "RO is up and running"
            return 0
        fi
        sleep $step
        echo -n "."
        time=$((time+step))
    done
    FATAL "OSM Failed to startup. RO failed to startup"
}
# Configure the RO container: point its log socket at SO, restart the service,
# create the "osm" tenant and export OPENMANO_TENANT in RO's .bashrc.
function configure_RO(){
    . $OSM_DEVOPS/installers/export_ips
    echo -e " Configuring RO"
    lxc exec RO -- sed -i -e "s/^\#\?log_socket_host:.*/log_socket_host: $SO_CONTAINER_IP/g" /etc/osm/openmanod.cfg
    lxc exec RO -- service osm-ro restart

    # NOTE(review): a couple of original lines (likely an informational echo)
    # were lost in extraction here — verify upstream.
    lxc exec RO -- openmano tenant-delete -f osm >/dev/null
    lxc exec RO -- openmano tenant-create osm > /dev/null
    # keep .bashrc idempotent: delete any previous export before re-adding it
    lxc exec RO -- sed -i '/export OPENMANO_TENANT=osm/d' .bashrc
    lxc exec RO -- sed -i '$ i export OPENMANO_TENANT=osm' .bashrc
    lxc exec RO -- sh -c 'echo "export OPENMANO_TENANT=osm" >> .bashrc'
}
# Set a fresh random password for the juju admin user inside the VCA container.
# The password is kept in JUJU_PASSWD for later use (config-agent setup).
function configure_VCA(){
    echo -e " Configuring VCA"
    JUJU_PASSWD=$(generate_secret)
    # juju prompts twice for the new password, hence the doubled line
    echo -e "$JUJU_PASSWD\n$JUJU_PASSWD" | lxc exec VCA -- juju change-user-password
}
# Configure SO and UI: host routes to the juju controller, persistent
# journaling, SO launchpad external address, juju config-agent account,
# RO account and web-UI redirect URIs.
# Relies on FATAL, so_is_up and variables exported by installers/export_ips.
function configure_SOUI(){
    . $OSM_DEVOPS/installers/export_ips
    JUJU_CONTROLLER_IP=`lxc exec VCA -- lxc list -c 4 |grep eth0 |awk '{print $2}'`
    RO_TENANT_ID=`lxc exec RO -- openmano tenant-list osm |awk '{print $1}'`

    echo -e " Configuring MON"
    #Information to be added about SO socket for logging

    echo -e " Configuring SO"
    sudo route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP
    sudo ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP
    # persist the routes across reboots by inserting them before rc.local's exit
    sudo sed -i "$ i route add -host $JUJU_CONTROLLER_IP gw $VCA_CONTAINER_IP" /etc/rc.local
    sudo sed -i "$ i ip route add 10.44.127.0/24 via $VCA_CONTAINER_IP" /etc/rc.local
    # make journaling persistent
    lxc exec SO-ub -- mkdir -p /var/log/journal
    lxc exec SO-ub -- systemd-tmpfiles --create --prefix /var/log/journal
    lxc exec SO-ub -- systemctl restart systemd-journald

    echo RIFT_EXTERNAL_ADDRESS=$DEFAULT_IP | lxc exec SO-ub -- tee -a /usr/rift/etc/default/launchpad

    lxc exec SO-ub -- systemctl restart launchpad

    so_is_up $SO_CONTAINER_IP

    #delete existing config agent (could be there on reconfigure)
    curl -k --request DELETE \
        --url https://$SO_CONTAINER_IP:8008/api/config/config-agent/account/osmjuju \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' &> /dev/null

    result=$(curl -k --request POST \
        --url https://$SO_CONTAINER_IP:8008/api/config/config-agent \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"account": [ { "name": "osmjuju", "account-type": "juju", "juju": { "ip-address": "'$JUJU_CONTROLLER_IP'", "port": "17070", "user": "admin", "secret": "'$JUJU_PASSWD'" } } ]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed config-agent configuration: $result"

    #result=$(curl -k --request PUT \
    #    --url https://$SO_CONTAINER_IP:8008/api/config/resource-orchestrator \
    #    --header 'accept: application/vnd.yang.data+json' \
    #    --header 'authorization: Basic YWRtaW46YWRtaW4=' \
    #    --header 'cache-control: no-cache' \
    #    --header 'content-type: application/vnd.yang.data+json' \
    #    --data '{ "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'" }, "name": "osmopenmano", "account-type": "openmano" }')

    result=$(curl -k --request PUT \
        --url https://$SO_CONTAINER_IP:8008/api/config/project/default/ro-account/account \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"rw-ro-account:account": [ { "openmano": { "host": "'$RO_CONTAINER_IP'", "port": "9090", "tenant-id": "'$RO_TENANT_ID'"}, "name": "osmopenmano", "ro-account-type": "openmano" }]}')
    [[ $result =~ .*success.* ]] || FATAL "Failed resource-orchestrator configuration: $result"

    result=$(curl -k --request PATCH \
        --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/redirect-uri \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"redirect-uri": "https://'$DEFAULT_IP':8443/callback" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed redirect-uri configuration: $result"

    result=$(curl -k --request PATCH \
        --url https://$SO_CONTAINER_IP:8008/v2/api/config/openidc-provider-config/rw-ui-client/post-logout-redirect-uri \
        --header 'accept: application/vnd.yang.data+json' \
        --header 'authorization: Basic YWRtaW46YWRtaW4=' \
        --header 'cache-control: no-cache' \
        --header 'content-type: application/vnd.yang.data+json' \
        --data '{"post-logout-redirect-uri": "https://'$DEFAULT_IP':8443/?api_server=https://'$DEFAULT_IP'" }')
    [[ $result =~ .*success.* ]] || FATAL "Failed post-logout-redirect-uri configuration: $result"

    # NOTE(review): heredoc body partially reconstructed (the auto/address
    # lines were lost in extraction) — verify against upstream devops repo.
    lxc exec SO-ub -- tee /etc/network/interfaces.d/60-rift.cfg <<EOF
auto lo:1
iface lo:1 inet static
        address $DEFAULT_IP
        netmask 255.255.255.255
EOF
    lxc exec SO-ub ifup lo:1
}
#Configure RO, VCA, and SO with the initial configuration:
#  RO -> tenant:osm, logs to be sent to SO
#  VCA -> juju-password
#  SO -> route to Juju Controller, add RO account, add VCA account
function configure(){
    #Configure components
    echo -e "\nConfiguring components"
    # NOTE(review): the per-component calls were lost in extraction and are
    # reconstructed from the header comment above — verify upstream.
    configure_RO
    configure_VCA
    configure_SOUI
}
# Install and initialize LXD, create the lxdbr0 bridge and align the default
# profile's MTU with the host's default-route interface.
function install_lxd() {
    # NOTE(review): the package-update/lxd-init lines were lost in extraction
    # and are reconstructed — verify against upstream devops repo.
    sudo apt-get update
    sudo apt-get install -y lxd
    lxd init --auto
    lxd waitready
    lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    # containers must not use a larger MTU than the host uplink
    lxc profile device set default eth0 mtu $DEFAULT_MTU
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
# NOTE(review): the function header and loop delimiters were lost in
# extraction and are reconstructed — verify upstream.
function ask_user(){
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # empty answer falls back to the default action given in $2
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
# Launch an OSM module container from an lxd image.
# $1 - module name (RO, VCA, MON, SO), $2 - lxd image alias to boot from.
# Relies on container_exists/create_container/wait_container_up helpers
# defined elsewhere in the devops tooling.
function launch_container_from_lxd(){
    export OSM_MDG=$1      # TODO(review): first lines reconstructed — lost in extraction
    OSM_load_config
    export OSM_BASE_IMAGE=$2
    if ! container_exists $OSM_BUILD_CONTAINER; then
        CONTAINER_OPTS=""
        [[ "$OSM_BUILD_CONTAINER_PRIVILEGED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.privileged=true"
        [[ "$OSM_BUILD_CONTAINER_ALLOW_NESTED" == yes ]] && CONTAINER_OPTS="$CONTAINER_OPTS -c security.nesting=true"
        create_container $OSM_BASE_IMAGE $OSM_BUILD_CONTAINER $CONTAINER_OPTS
        wait_container_up $OSM_BUILD_CONTAINER
    fi
}
# Install the OSM client (and IM) from the configured apt repository and print
# the environment variables the user may want to add to .bashrc.
# Strips the option prefixes ("-R ", "-r ", "-u ") that were kept in the
# RELEASE/REPOSITORY/REPOSITORY_BASE variables by the option parser.
function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update   # TODO(review): reconstructed — line lost in extraction
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # classic (non-lightweight) installs resolve host IPs from the lxd containers
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
# Install and activate the Prometheus Node Exporter as a systemd service,
# creating a dedicated no-login system user. No-op if already active.
# Uses $PROMETHEUS_NODE_EXPORTER_TAG and ${OSM_DEVOPS} (set elsewhere).
function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
        then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
# Stop, disable and fully remove the Prometheus Node Exporter service,
# its binary and its dedicated system user.
function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
# Classic (Rel THREE) install path: import the osm-ro/osm-vca/osm-soui lxd
# images (from a local path or by download) and launch the module containers.
# Relies on launch_container_from_lxd, *_is_up and track helpers.
function install_from_lxdimages(){
    LXD_RELEASE=${RELEASE#"-R "}
    if [ -n "$LXD_REPOSITORY_PATH" ]; then
        LXD_IMAGE_DIR="$LXD_REPOSITORY_PATH"
    else
        LXD_IMAGE_DIR="$(mktemp -d -q --tmpdir "osmimages.XXXXXX")"
        trap 'rm -rf "$LXD_IMAGE_DIR"' EXIT
    fi
    echo -e "\nDeleting previous lxd images if they exist"
    lxc image show osm-ro &>/dev/null && lxc image delete osm-ro
    lxc image show osm-vca &>/dev/null && lxc image delete osm-vca
    lxc image show osm-soui &>/dev/null && lxc image delete osm-soui
    echo -e "\nImporting osm-ro"
    # only download when no local repository path was given
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-ro.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-ro.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-ro.tar.gz --alias osm-ro
    rm -f $LXD_IMAGE_DIR/osm-ro.tar.gz
    echo -e "\nImporting osm-vca"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-vca.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-vca.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-vca.tar.gz --alias osm-vca
    rm -f $LXD_IMAGE_DIR/osm-vca.tar.gz
    echo -e "\nImporting osm-soui"
    [ -z "$LXD_REPOSITORY_PATH" ] && wget -O $LXD_IMAGE_DIR/osm-soui.tar.gz $LXD_REPOSITORY_BASE/$LXD_RELEASE/osm-soui.tar.gz
    lxc image import $LXD_IMAGE_DIR/osm-soui.tar.gz --alias osm-soui
    rm -f $LXD_IMAGE_DIR/osm-soui.tar.gz
    launch_container_from_lxd RO osm-ro
    ro_is_up && track RO   # TODO(review): reconstructed — line lost in extraction
    launch_container_from_lxd VCA osm-vca
    vca_is_up && track VCA
    launch_container_from_lxd MON osm-mon
    mon_is_up && track MON
    launch_container_from_lxd SO osm-soui
    #so_is_up && track SOUI
    track SOUI             # TODO(review): reconstructed — verify upstream
}
function install_docker_ce() {
    # installs and configures Docker CE from the official docker.com apt repo
    # and adds the current user to the "docker" group
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2   # TODO(review): reconstructed — line lost in extraction
    sudo service docker restart
    echo "... restarted Docker service"
    # verify the daemon answers before declaring success
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
function install_docker_compose() {
    # installs and configures docker-compose (pinned 1.18.0 binary release)
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
function install_juju() {
    # installs juju from snap and makes sure /snap/bin is on PATH
    echo "Installing juju"
    sudo snap install juju --classic
    [ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
    return 0
}
# Bootstrap a juju controller named $OSM_STACK_NAME on the local lxd cloud
# (idempotent: skipped when the controller already exists), then verify it.
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
    fi
    # \$1 must reach awk literally (inside double quotes the shell would
    # otherwise expand it to the script's own first argument)
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
# Add (if absent) a DNAT rule forwarding the host's juju API port 17070 to
# $OSM_VCA_HOST and persist it with netfilter-persistent.
function juju_createproxy() {
    echo -e "\nChecking required packages: iptables-persistent"
    dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
    sudo apt-get -yq install iptables-persistent

    # -C checks for the rule; -A appends it only when missing
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
function generate_docker_images() {
    # Pull third-party images and pull-or-build the OSM module images.
    # With $PULL_IMAGES set, module images are pulled from $DOCKER_USER's
    # registry; otherwise modules listed in $TO_REBUILD (or all, when empty)
    # are cloned from gerrit at $COMMIT_ID and built locally.
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"
    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    # NOTE(review): "grep -q PROMETHEUS" also matches "PROMETHEUS-CADVISOR",
    # so requesting only the cadvisor rebuild also pulls prometheus. Kept as-is.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        # keystone image is built from the NBI checkout
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient ; then
        # NOTE(review): unlike the other builds, this one has no "|| FATAL" guard.
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # NOTE(review): this cadvisor pull duplicates the one above (and is guarded
    # by PROMETHEUS, not PROMETHEUS-CADVISOR). Kept for behavior parity — confirm
    # whether it can be removed.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
function cmp_overwrite() {
    # Copy $1 over $2 unless their contents are already identical.
    # If $2 exists and differs, ask the user before overwriting.
    file1="$1"
    file2="$2"
    # Original used "if ! $(cmp ...)", which runs cmp's (empty) stdout as a
    # command; the condition must test cmp's exit status directly.
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
function generate_docker_env_files() {
    # Generate (or refresh) the per-service env files under $OSM_DOCKER_WORK_DIR.
    # Existing files are first backed up with a '~' suffix; keys already present
    # are updated in place, missing ones are appended.
    # NOTE(review): generated secrets are echoed to stdout via tee, as in the
    # original — they end up in installer logs.

    # Append "$2=$3" to env file $1 when key $2 is absent, otherwise replace
    # the whole "KEY=..." line. tee echoes to stdout like the original inline code.
    function _upsert_env() {
        if ! grep -Fq "$2" "$1"; then
            echo "$2=$3" | $WORKDIR_SUDO tee -a "$1"
        else
            $WORKDIR_SUDO sed -i "s|$2.*|$2=$3|g" "$1"
        fi
    }

    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # docker-compose
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        # Prometheus config
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml
        # Grafana & Prometheus Exporter files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    _upsert_env $OSM_DOCKER_WORK_DIR/lcm.env OSMLCM_VCA_HOST "${OSM_VCA_HOST}"
    _upsert_env $OSM_DOCKER_WORK_DIR/lcm.env OSMLCM_VCA_SECRET "${OSM_VCA_SECRET}"
    _upsert_env $OSM_DOCKER_WORK_DIR/lcm.env OSMLCM_VCA_PUBKEY "${OSM_VCA_PUBKEY}"
    _upsert_env $OSM_DOCKER_WORK_DIR/lcm.env OSMLCM_VCA_CACERT "${OSM_VCA_CACERT}"
    _upsert_env $OSM_DOCKER_WORK_DIR/lcm.env OSMLCM_VCA_APIPROXY "${OSM_VCA_APIPROXY}"
    # These two are written commented-out, as documentation for the operator.
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi
    _upsert_env $OSM_DOCKER_WORK_DIR/mon.env OS_NOTIFIER_URI "http://${DEFAULT_IP}:8662"
    _upsert_env $OSM_DOCKER_WORK_DIR/mon.env OSMMON_VCA_HOST "${OSM_VCA_HOST}"
    _upsert_env $OSM_DOCKER_WORK_DIR/mon.env OSMMON_VCA_SECRET "${OSM_VCA_SECRET}"
    _upsert_env $OSM_DOCKER_WORK_DIR/mon.env OSMMON_VCA_CACERT "${OSM_VCA_CACERT}"

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
function generate_osmclient_script() {
    # Write a tiny wrapper that launches the osmclient sidecar container on
    # the OSM overlay network, and mark it executable.
    local wrapper="$OSM_DOCKER_WORK_DIR/osm"
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee "$wrapper"
    $WORKDIR_SUDO chmod +x "$wrapper"
    echo "osmclient sidecar container can be found at: $wrapper"
}
#installs kubernetes packages
function install_kube() {
    # Add Google's apt key and the kubernetes-xenial repo, then install the
    # pinned 1.15.0 kubelet/kubeadm/kubectl packages.
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    # Refresh the package index after adding the new repository — without this
    # the pinned installs below cannot resolve (line lost in the paste; restored).
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
function init_kubeadm() {
    # $1: path to the kubeadm cluster-config yaml.
    # NOTE(review): interior lines of this function were lost in the paste;
    # 'swapoff' and the settle sleep are restored from context — kubelet
    # refuses to start with swap enabled. Confirm against upstream.
    sudo swapoff -a
    sudo kubeadm init --config $1
    sleep 5
}
function kube_config_dir() {
    # Copy the kubeadm-generated admin kubeconfig into the user's ~/.kube/config
    # and give the user ownership of it.
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p "$HOME/.kube"
    sudo cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
    sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Fetch the flannel manifest into a throw-away dir and apply it.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # Create the OSM namespace and one generic secret per service, each
    # populated from the matching env file in $OSM_DOCKER_WORK_DIR.
    kubectl create ns $OSM_STACK_NAME
    local component
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
#deploys osm pods and services
function deploy_osm_services() {
    # Allow scheduling on the master node (single-node cluster), then apply
    # all manifests from $OSM_K8S_WORK_DIR into the OSM namespace.
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
function parse_yaml() {
    # Rewrite the image tag of every OSM service manifest in $OSM_K8S_WORK_DIR
    # to the tag given in $1.
    osm_services="nbi lcm ro pol mon light-ui keystone"
    TAG=$1
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
function namespace_vol() {
    # Point each stateful service's hostPath volume at the per-namespace
    # directory $OSM_NAMESPACE_VOL instead of the default /var/lib/osm.
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
function init_docker_swarm() {
    # When the host MTU is non-standard, pre-create docker_gwbridge with a
    # matching MTU (and a free 172.x subnet derived from existing networks)
    # before initializing the swarm, so swarm traffic is not fragmented.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
}
function create_docker_network() {
    # Create the attachable overlay network used by all OSM services,
    # honouring the host MTU.
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
function deploy_lightweight() {
    # Compute the service port map (host-published or internal-only depending
    # on $NO_HOST_PORTS), persist it plus the image tags into osm_ports.sh,
    # and deploy the docker-compose stack.
    echo "Deploying lightweight build"
    # NOTE(review): the NBI/RO/UI/MON/PROM port defaults below were lost in the
    # paste and restored from the surrounding references — confirm values.
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # Internal ports only — nothing published on the host.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container publishing; prometheus is remapped to $OSM_PROM_HOSTPORT.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    # NOTE(review): matching popd lost in the paste; restored to balance pushd.
    popd

    echo "Finished deployment of lightweight build"
}
function deploy_elk() {
    # Pull the ELK images, deploy the osm_elk compose stack, wait for Kibana
    # to answer on 127.0.0.1:5601, then create the default filebeat index pattern.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"

    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"

    # NOTE(review): the wait-loop counters were lost in the paste; poll values
    # restored from the visible loop condition — confirm against upstream.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done

    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
}
function install_lightweight() {
    # Top-level driver for the lightweight (container-based) OSM install:
    # prepares work dirs, sets up lxd/juju (VCA), docker, then either a
    # Kubernetes namespace deployment or a docker swarm stack.
    # NOTE(review): several interior lines of this function were lost in the
    # paste (mostly `track` calls, `fi`s and a few deploy calls); the hedged
    # lines below are restored from the visible structure — confirm upstream.
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."

    if [ -n "$KUBERNETES" ]; then
        # NOTE(review): step "2." of this prompt was lost in the paste.
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1. Install and configure LXD
2. Install juju
3. Install docker CE
4. Disable swap space
5. Install and initialize Kubernetes
as pre-requirements.
Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
        need_packages_lw="lxd snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
            || sudo apt-get update \
            || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "Installing $need_packages_lw requires root privileges." \
            || sudo apt-get install -y $need_packages_lw \
            || FATAL "failed to install $need_packages_lw"
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju

    if [ -z "$OSM_VCA_HOST" ]; then
        juju_createcontroller
        OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        OSM_VCA_CACERT=$(juju controllers --format json | jq -r '.controllers["osm"]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy  # NOTE(review): call lost in paste; restored
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # BUGFIX: original tested the literal string "OSM_DATABASE_COMMONKEY"
        # (always non-empty), so this FATAL could never fire.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube        # NOTE(review): restored — the installer must install
        track install_k8s   # k8s before kubeadm init below
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider   # NOTE(review): calls in this region lost in paste;
        kube_secrets          # restored from the visible function set
        [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        track deploy_osm_services_k8s
        if [ -n "$K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        track docker_deploy
    fi

    generate_osmclient_script
    track osmclient

    install_prometheus_nodeexporter
    track nodeexporter
    [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
    [ -n "$INSTALL_ELK" ] && deploy_elk && track elk

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    # Fire-and-forget availability ping; output and errors are discarded.
    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
    track end
}
function install_vimemu() {
    # Clone, build and launch the vim-emu emulator container, then print
    # connection instructions.
    # BUGFIX: original "echo \"\nInstalling...\"" lacked -e and printed a literal \n.
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5  # NOTE(review): settle delay lost in paste; restored — confirm duration
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "  export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "  osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
function install_k8s_monitoring() {
    # install OSM monitoring: run the packaged k8s monitoring installer,
    # making the scripts executable first.
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring via the packaged uninstaller script.
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
function dump_vars(){
    # Print the effective installer configuration, one VAR=value per line.
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    # NOTE(review): one echo line between UNINSTALL and UPDATE was lost in the paste.
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_FROM_LXDIMAGES=$INSTALL_FROM_LXDIMAGES"
    echo "LXD_REPOSITORY_BASE=$LXD_REPOSITORY_BASE"
    echo "LXD_REPOSITORY_PATH=$LXD_REPOSITORY_PATH"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "NOCONFIGURE=$NOCONFIGURE"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # BUGFIX: original printed $OSM_STACK_NAME under the OSM_WORK_DIR label.
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
function track() {
    # Report an anonymous installer progress event ($1) to the telemetry
    # endpoint, tagged with the session cookie and elapsed seconds.
    # NOTE(review): the function header and the ctime/event_name initial
    # assignments were lost in the paste; restored from the visible body —
    # confirm against upstream.
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="default"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
# --- Installer defaults (overridden by command-line options below) ---

# Source / release selection
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseSEVEN"
INSTALL_FROM_LXDIMAGES=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_NOHOSTCLIENT=""

# Telemetry session id: installer start time in epoch seconds
SESSION_ID=`date +%s`

# Package repository
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"

# Work directories and volumes
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"

# Docker image coordinates and tags
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
# NOTE(review): a GRAFANA_TAG default appears to be missing in this region of
# the paste (it is referenced by deploy_lightweight / generate_docker_images).
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest

# Secrets / ELK / Kubernetes
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Valid k8s namespace names (RFC 1123 label)
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# NOTE(review): this whole option-parsing region is line-mangled: original
# file line numbers (1496, 1506, ...) are fused into the code, statements are
# split mid-token across physical lines, and the surrounding "case ${o} in",
# the individual case labels, "esac" and "done" are missing from this copy.
# It is left byte-identical; the comments below describe the apparent intent
# of each fragment so it can be reconciled against the upstream file.
# getopts string: h, y and '-' take no argument; b r c k u R l p D o m H S s
# w t U P A each take one (leading ':' selects silent error reporting).
1496 while getopts ":hy-:b:r:c:k:u:R:l:p:D:o:m:H:S:s:w:t:U:P:A:" o
; do
# -r <repo>: package repository name, also forwarded to component installs.
1506 REPOSITORY
="${OPTARG}"
1507 REPO_ARGS
+=(-r "$REPOSITORY")
# -c <orchestrator>: only "swarm" (no-op, default) and "k8s" are accepted.
1510 [ "${OPTARG}" == "swarm" ] && continue
1511 [ "${OPTARG}" == "k8s" ] && KUBERNETES
="y" && continue
# NOTE(review): message says "-i" but this fragment appears to belong to the
# -c arm — likely a stale copy/paste upstream; verify.
1512 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
# -R <release>: forwarded to component installs (the RELEASE="${OPTARG}"
# assignment itself falls in a gap of this copy).
1517 REPO_ARGS
+=(-R "$RELEASE")
# -k <repo key url>:
1520 REPOSITORY_KEY
="${OPTARG}"
1521 REPO_ARGS
+=(-k "$REPOSITORY_KEY")
# -u <repo base url>:
1524 REPOSITORY_BASE
="${OPTARG}"
1525 REPO_ARGS
+=(-u "$REPOSITORY_BASE")
# Docker Hub user override (flag letter not visible in this copy):
1528 DOCKER_USER
="${OPTARG}"
# LXD repository base/path overrides (presumably -l / -p; labels missing):
1531 LXD_REPOSITORY_BASE
="${OPTARG}"
1534 LXD_REPOSITORY_PATH
="${OPTARG}"
# -D <devops path>: use an existing devops checkout.
1537 OSM_DEVOPS
="${OPTARG}"
# -s <stack name|namespace>: with k8s, the value must match RE_CHECK.
1540 OSM_STACK_NAME
="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~
$RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
# -H <VCA host> / -S <VCA secret>:
1543 OSM_VCA_HOST
="${OPTARG}"
1546 OSM_VCA_SECRET
="${OPTARG}"
# -P <VCA pubkey file>: stores the file CONTENTS, not the path.
1549 OSM_VCA_PUBKEY
=$
(cat ${OPTARG})
# -A <VCA apiproxy>:
1552 OSM_VCA_APIPROXY
="${OPTARG}"
# -w <workdir> (inferred from the surviving comment below):
1555 # when specifying workdir, do not use sudo for access
1557 OSM_WORK_DIR
="${OPTARG}"
# -t <docker tag> (presumably; label missing):
1560 OSM_DOCKER_TAG
="${OPTARG}"
# -o <component>: install-only selector.
1564 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU
="y" && continue
1565 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK
="y" && continue
# -m <module>: accumulate (space-separated) modules to rebuild in TO_REBUILD.
1568 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD
="$TO_REBUILD LW-UI" && continue
1569 [ "${OPTARG}" == "NBI" ] && TO_REBUILD
="$TO_REBUILD NBI" && continue
1570 [ "${OPTARG}" == "LCM" ] && TO_REBUILD
="$TO_REBUILD LCM" && continue
1571 [ "${OPTARG}" == "RO" ] && TO_REBUILD
="$TO_REBUILD RO" && continue
1572 [ "${OPTARG}" == "MON" ] && TO_REBUILD
="$TO_REBUILD MON" && continue
1573 [ "${OPTARG}" == "POL" ] && TO_REBUILD
="$TO_REBUILD POL" && continue
1574 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD
="$TO_REBUILD KAFKA" && continue
1575 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD
="$TO_REBUILD MONGO" && continue
1576 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS" && continue
1577 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1578 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD
="$TO_REBUILD KEYSTONE-DB" && continue
1579 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD
="$TO_REBUILD GRAFANA" && continue
1580 [ "${OPTARG}" == "NONE" ] && TO_REBUILD
="$TO_REBUILD NONE" && continue
# Long options: getopts sees "-" and OPTARG holds the word after "--".
1583 [ "${OPTARG}" == "help" ] && usage
&& exit 0
1584 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE
="y" && PULL_IMAGES
="" && continue
1585 [ "${OPTARG}" == "develop" ] && DEVELOP
="y" && continue
1586 [ "${OPTARG}" == "uninstall" ] && UNINSTALL
="y" && continue
1587 [ "${OPTARG}" == "nat" ] && NAT
="y" && continue
1588 [ "${OPTARG}" == "update" ] && UPDATE
="y" && continue
1589 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE
="y" && continue
1590 [ "${OPTARG}" == "test" ] && TEST_INSTALLER
="y" && continue
1591 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD
="y" && continue
1592 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD
="y" && continue
1593 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER
="y" && continue
1594 [ "${OPTARG}" == "lxdimages" ] && INSTALL_FROM_LXDIMAGES
="y" && continue
1595 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT
="y" && continue
# --soui: legacy SO/UI install — disables lightweight mode and pins the old
# release/repository (values carry the "-R"/"-r" flags inline).
1596 [ "${OPTARG}" == "soui" ] && INSTALL_LIGHTWEIGHT
="" && RELEASE
="-R ReleaseTHREE" && REPOSITORY
="-r stable" && continue
1597 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU
="y" && continue
1598 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK
="y" && continue
1599 [ "${OPTARG}" == "noconfigure" ] && NOCONFIGURE
="y" && continue
1600 [ "${OPTARG}" == "showopts" ] && SHOWOPTS
="y" && continue
1601 [ "${OPTARG}" == "daily" ] && RELEASE_DAILY
="y" && continue
1602 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS
="y" && continue
1603 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU
="y" && continue
1604 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD
="y" && continue
1605 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT
="y" && continue
1606 [ "${OPTARG}" == "pullimages" ] && continue
1607 [ "${OPTARG}" == "k8s_monitor" ] && K8S_MONITOR
="y" && continue
# Unrecognized long option:
1608 echo -e "Invalid option: '--$OPTARG'\n" >&2
# Unrecognized short option (from a different case arm; the lines between
# are missing in this copy):
1612 echo -e "Invalid option: '-$OPTARG'\n" >&2
# Reject option combinations that mix the legacy SO/UI ("soui") install path
# with lightweight-only options, and enforce that -m NONE is exclusive.
# FATAL is presumably provided by the sourced devops function library — its
# definition is not in this chunk.
[ -n "$INSTALL_FROM_LXDIMAGES" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --lxd can only be used with --soui"
[ -n "$NAT" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --nat can only be used with --soui"
[ -n "$NOCONFIGURE" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --noconfigure can only be used with --soui"
[ -n "$RELEASE_DAILY" ] && [ -n "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible options: --daily can only be used with --soui"
[ -n "$INSTALL_NOLXD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nolxd cannot be used with --soui"
[ -n "$INSTALL_NODOCKER" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: --nodocker cannot be used with --soui"
[ -n "$TO_REBUILD" ] && [ -z "$INSTALL_LIGHTWEIGHT" ] && FATAL "Incompatible option: -m cannot be used with --soui"
# If NONE was requested, TO_REBUILD must be exactly " NONE" (a single -m);
# grep catches NONE mixed with other modules. $TO_REBUILD quoted for safety.
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo "$TO_REBUILD" | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
# NOTE(review): mangled region. The body and the closing "fi" of this
# SHOWOPTS branch (original lines 1634-1637 — presumably a variable dump and
# exit) are missing from this copy, so the "if" below is unterminated here.
1633 if [ -n "$SHOWOPTS" ]; then
# --daily: switch to the daily build repo and force the master refspec. Note
# RELEASE/REPOSITORY here receive values with the "-R"/"-r" flags embedded,
# unlike the plain defaults assigned earlier.
1638 [ -n "$RELEASE_DAILY" ] && echo -e "\nInstalling from daily build repo" && RELEASE
="-R ReleaseTHREE-daily" && REPOSITORY
="-r testing" && COMMIT_ID
="master"
1640 # if develop, we force master
1641 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID
="master"
# Ensure the tools this installer itself needs are present. The "|| ! echo"
# idiom: if dpkg -l fails (a package is missing), the echo prints the
# privilege warning and succeeds, so its negation is false and the chain
# falls through to the sudo command; a FATAL at the end catches real errors.
# $need_packages is intentionally unquoted so it word-splits into one
# argument per package.
need_packages="git jq wget curl tar"
echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
# Locate (or fetch) the devops repository that supplies the helper functions
# and per-component install scripts, then source the common function library.
# NOTE(review): reconstructed from a line-mangled region; the else/fi lines
# fell in gaps of this copy and are restored in the only syntactically
# coherent placement — verify against the upstream file.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        # Running from inside a devops checkout: use it directly.
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        # Clean the temporary clone up on any exit path.
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            # No refspec given: pick the newest stable tag (vX.Y...).
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=$(git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1)
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        fi

        echo -e "\nDEVOPS Using commit $COMMIT_ID"
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi
. $OSM_DEVOPS/common/all_funcs
# Top-level action dispatch: each special mode performs its action and exits;
# a plain install falls through. uninstall_lightweight / uninstall / nat /
# update / configure / deploy_elk / install_vimemu are presumably provided by
# the all_funcs library sourced just above.
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$UNINSTALL" ] && uninstall && echo -e "\nDONE" && exit 0
[ -n "$NAT" ] && nat && echo -e "\nDONE" && exit 0
[ -n "$UPDATE" ] && update && echo -e "\nDONE" && exit 0
[ -n "$RECONFIGURE" ] && configure && echo -e "\nDONE" && exit 0
# Install-only combinations: run the requested deployments, then stop.
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
#Installation starts here
# Fetch the release README as a lightweight "install started" beacon; output
# and errors are discarded. NOTE(review): one or more lines between these
# statements (original lines 1693-1694, likely tracking calls) are missing
# from this copy and have not been re-invented.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null

# Lightweight (container) install — the default path — then stop.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
# Classic (SO/UI) install path: confirm with the user when building from
# source, then make sure lxd is available.
# NOTE(review): reconstructed from a mangled region; the closing "fi" fell in
# a gap (original lines 1699-1700) and is restored right after the ask_user
# line, making the lxd check unconditional — verify against upstream.
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    # ask_user (from the sourced library) succeeds on "yes"; abort otherwise.
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true
# Build/install the classic OSM components into containers, choosing the
# source-build, LXD-image, or binary path. Helper scripts live under the
# devops checkout; ro_is_up / vca_is_up / mon_is_up / track / FATAL come from
# the sourced function library.
# NOTE(review): reconstructed from a line-mangled region; the closing "fi"
# (original line ~1733) was missing from this copy and is restored.
# REPO_ARGS is expanded quoted so arguments with spaces survive intact.
if [ -n "$INSTALL_FROM_SOURCE" ]; then #install from source
    echo -e "\nCreating the containers and building from source ..."
    $OSM_DEVOPS/jenkins/host/start_build RO --notest checkout $COMMIT_ID || FATAL "RO container build failed (refspec: '$COMMIT_ID')"
    ro_is_up && track RO
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA container build failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/start_build MON || FATAL "MON install failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/start_build SO checkout $COMMIT_ID || FATAL "SO container build failed (refspec: '$COMMIT_ID')"
    $OSM_DEVOPS/jenkins/host/start_build UI checkout $COMMIT_ID || FATAL "UI container build failed (refspec: '$COMMIT_ID')"
    #so_is_up && track SOUI
elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then #install from LXD images stored in OSM repo
    echo -e "\nInstalling from lxd images ..."
    install_from_lxdimages
else #install from binaries
    echo -e "\nCreating the containers and installing from binaries ..."
    $OSM_DEVOPS/jenkins/host/install RO "${REPO_ARGS[@]}" || FATAL "RO install failed"
    ro_is_up && track RO
    # NOTE(review): VCA uses start_build even on the binary path — mirrors
    # the original text; verify this is intentional upstream.
    $OSM_DEVOPS/jenkins/host/start_build VCA || FATAL "VCA install failed"
    vca_is_up && track VCA
    $OSM_DEVOPS/jenkins/host/install MON || FATAL "MON build failed"
    mon_is_up && track MON
    $OSM_DEVOPS/jenkins/host/install SO "${REPO_ARGS[@]}" || FATAL "SO install failed"
    $OSM_DEVOPS/jenkins/host/install UI "${REPO_ARGS[@]}" || FATAL "UI install failed"
    #so_is_up && track SOUI
fi
#Install iptables-persistent and configure NAT rules
[ -z "$NOCONFIGURE" ] && nat

#Configure components
[ -z "$NOCONFIGURE" ] && configure
[ -z "$NOCONFIGURE" ] && install_osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Final beacon marking installation completion; output and errors discarded.
# NOTE(review): a few blank/comment lines of the original fell in gaps of
# this copy; only the visible statements are reproduced here.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null