2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
# Print the installer help text.
# NOTE(review): the "function usage(){" header line was lost in the mangled
# source (gap in the original numbering); restored here.
function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -r <repo>:      use specified repository name for osm packages"
    echo -e "     -R <release>:   use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>:  use specified repository public key url"
    echo -e "     -b <refspec>:   install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "                     -b master          (main dev branch)"
    echo -e "                     -b v2.0            (v2.0 branch)"
    echo -e "                     -b tags/v1.1.0     (a specific tag)"
    echo -e "     -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>.  If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -s <stack name> or <namespace>  user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e "     -H <VCA host>   use specific juju host controller IP"
    echo -e "     -S <VCA secret> use VCA/juju secret key"
    echo -e "     -P <VCA pubkey> use VCA/juju public key file"
    echo -e "     -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e "     --vimemu:       additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack:    additionally deploy an ELK docker stack for event logging"
    echo -e "     -m <MODULE>:    install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e "     -o <ADDON>:     ONLY (un)installs one of the addons (vimemu, elk_stack)"
    echo -e "     -D <devops path> use local devops installation path"
    echo -e "     -w <work dir>   Location to store runtime installation"
    echo -e "     -t <docker tag> specify osm docker tag (default is latest)"
    echo -e "     --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "     --nodocker:     do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju:       do not install juju, assumes already installed"
    echo -e "     --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports:  do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall:    uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source:       install OSM from source code using the latest stable tag"
    echo -e "     --develop:      (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages:   pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor:  install the OSM kubernetes monitoring with prometheus and grafana"
    # echo -e "     --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
    # echo -e "     --update:       update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts:     print chosen options and exit (only for debugging)"
    echo -e "     -y:             do not prompt for confirmation, assumes yes"
    echo -e "     -h / --help:    print this help"
}
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
#
# Arguments: $1 - controller name to look up
# Outputs:   the controller's password on stdout (no trailing newline)
# NOTE(review): the inner awk lines that set vname[] and the closing braces
# were lost in the mangled source; restored to the standard yaml-flattening
# idiom this function is based on -- confirm against upstream.
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: yaml key chars, fs: unlikely field separator (\034)
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
         -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
         -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
# Emit a random 32-character alphanumeric secret on stdout.
function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
# Remove OSM storage.
# Arguments: $1 - k8s host volume path (KUBERNETES set) or stack name (swarm)
function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
# Remove the docker overlay network of a stack.
# Arguments: $1 - stack name
function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
# Remove the DNAT rule that proxies host:17070 to the juju (VCA) controller,
# then persist the iptables state.
# Arguments: $1 - stack (juju controller) name
function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Extract the controller IP from "juju show-controller" api-endpoints
        OSM_VCA_HOST=$(sg lxd -c "juju show-controller ${stack}" | grep api-endpoints | awk -F\' '{print $2}' | awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$DEFAULT_IP" ]; then
        # Interface carrying the default route, and its primary IPv4 address
        DEFAULT_IF=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=$(ip -o -4 a | grep ${DEFAULT_IF} | awk '{split($4,a,"/"); print a[1]}')
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi
    # Only delete the rule if it exists (-C checks without modifying)
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Remove a docker swarm stack and wait (up to ~30 iterations) until all of its
# tasks are gone; FATAL if some containers could not be removed.
# Arguments: $1 - stack name
function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
    fi
}
#removes osm deployments and services
# Arguments: $1 - kubernetes namespace to delete
function remove_k8s_namespace() {
    # NOTE(review): the function body was lost in the mangled source; deleting
    # the namespace removes every deployment/service inside it -- confirm
    # against upstream before relying on this reconstruction.
    kubectl delete ns $1
}
#Uninstall lightweight OSM: remove dockers
# Behavior depends on globals: INSTALL_ONLY/INSTALL_ELK (addon-only path),
# KUBERNETES (k8s vs swarm), INSTALL_K8S_MONITOR, OSM_STACK_NAME,
# OSM_DOCKER_WORK_DIR, DOCKER_USER, OSM_DOCKER_TAG, OSM_HOST_VOL.
# NOTE(review): several structural lines (else/fi) were lost in the mangled
# source; branch nesting reconstructed -- confirm against upstream.
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
        fi
        echo "Now osm docker images and volumes will be deleted"
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG
        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
}
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    # Fix: the condition must be negated -- the body installs the package, so
    # it only runs when "dpkg -l" reports it is NOT installed.
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        # Preseed debconf so the install is fully non-interactive
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
#Configure NAT rules, based on the current IP addresses of containers
# NOTE(review): the "function nat(){" header line was lost in the mangled
# source; restored here.
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
227 echo "FATAL error: Cannot install OSM due to \"$1\""
# Install and configure LXD from the 3.0/stable snap, replacing any
# deb-packaged LXD, and align the default profile MTU with the host.
function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system  # NOTE(review): reload step lost in mangled source -- confirm

    # Remove the deb-packaged LXD and install the snap distribution
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd --channel=3.0/stable

    # Let the current user talk to LXD and initialize it from the preseed
    sudo usermod -a -G lxd $(whoami)
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"

    # Propagate the MTU of the default-route interface to the default profile
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask;   $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
function ask_user(){
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # Empty answer falls back to the default action, when one is allowed
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        # Unrecognized answer: keep asking
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
# Install the OSM client (and IM) from the configured apt repository and
# print the environment variables the user may want to export.
# Reads globals: RELEASE, REPOSITORY, REPOSITORY_BASE, INSTALL_LIGHTWEIGHT.
function install_osmclient(){
    # Strip the option prefixes the CLI parser left in these globals
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update  # NOTE(review): refresh step lost in mangled source -- confirm
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=$(lxc list | awk '($2=="SO-ub"){print $6}')
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=$(lxc list | awk '($2=="RO"){print $6}')
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
}
# Install and activate the Prometheus node_exporter as a systemd service,
# unless it is already running. Reads: PROMETHEUS_NODE_EXPORTER_TAG, OSM_DEVOPS.
function install_prometheus_nodeexporter(){
    if systemctl -q is-active node_exporter; then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        # Dedicated, non-login system user for the service
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Fetch, unpack and install the pinned release binary
        sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        # Register and start the systemd unit
        sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
}
# Stop, disable and remove the Prometheus node_exporter service, its unit
# file, its dedicated user and its binary.
function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
}
function install_docker_ce() {
    # installs and configures Docker CE from the official apt repository and
    # adds the current user to the 'docker' group.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sudo service docker restart
    echo "... restarted Docker service"
    # Verify via 'sg docker' so the fresh group membership is effective
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
}
function install_docker_compose() {
    # installs and configures docker-compose (pinned 1.18.0 release binary)
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
# Install juju from its classic snap and make sure /snap/bin is on PATH for
# the remainder of this script.
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
}
# Bootstrap a local LXD juju controller named $OSM_STACK_NAME if it does not
# exist yet, then verify it is listed exactly once.
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
    fi
    # Fix: \$1 must be escaped so awk (not the shell) expands its first field
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}" | wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
# Add (idempotently) a DNAT rule forwarding host:17070 to the juju (VCA)
# controller, and persist the iptables state.
function juju_createproxy() {
    check_install_iptables_persistent

    # Only append the rule when it is not already present (-C checks first)
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Pull third-party docker images and either pull (PULL_IMAGES set) or build
# from source (per-module, filtered by TO_REBUILD) all OSM docker images.
# Reads: COMMIT_ID, TO_REBUILD, PULL_IMAGES, DOCKER_USER, OSM_DOCKER_TAG,
# LWTEMPDIR, and the *_TAG pins for third-party images.
function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"
    echo "OSM Docker images generated from $_build_from"

    # Common build arguments forwarded to locally built images
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # --- third-party images ---
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # --- OSM module images: pull from registry or build from gerrit source ---
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient ; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
# Copy $1 over $2 when their contents differ; if $2 already exists, ask the
# user for confirmation first. 'cp -b' keeps a backup of the overwritten file.
# Arguments: $1 - source file, $2 - destination file
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    # Fix: the original wrapped cmp in $(...), testing its (empty) output
    # instead of its exit status; test the command directly.
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
# Generate (or update in place) the per-module docker env files under
# $OSM_DOCKER_WORK_DIR, backing up any existing ones first. Secrets are
# freshly generated only when the corresponding env file does not exist yet.
# NOTE(review): several else/fi lines were lost in the mangled source; the
# grep-then-append / sed-replace pattern is reconstructed -- confirm upstream.
function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose deployment descriptor
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        # Prometheus configuration
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml
        # Grafana & Prometheus Exporter files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
    fi

    # --- LCM ---
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # --- RO ---
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # --- Keystone ---
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # --- NBI ---
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # --- MON ---
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # --- POL ---
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # --- LW-UI ---
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
# Generate a small launcher script that runs the osmclient sidecar container
# attached to the OSM stack network.
function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    echo "Installing Kubernetes Packages ..."
    # Pinned versions so kubeadm/kubelet/kubectl stay in lockstep
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
# Arguments: $1 - path to the kubeadm configuration file
function init_kubeadm() {
    # NOTE(review): lines surrounding 'kubeadm init' were lost in the mangled
    # source (likely swap handling) -- confirm against upstream.
    sudo kubeadm init --config $1
}
# Copy the kubernetes admin kubeconfig into the current user's ~/.kube and
# give the user ownership of it.
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube  # NOTE(review): restored lost line -- confirm
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}
#deploys osm pods and services
function deploy_osm_services() {
    # Allow scheduling on the master node (remove the NoSchedule taint)
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
# Rewrite the image tag of every OSM service manifest under $OSM_K8S_WORK_DIR.
# Arguments: $1 - docker tag to set
function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui keystone"
    TAG=$1  # NOTE(review): restored lost line -- confirm
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
# Point every service manifest's hostPath at the per-namespace volume dir
# ($OSM_NAMESPACE_VOL) instead of the default /var/lib/osm.
function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
# Initialize a single-node docker swarm; when the host MTU is non-standard,
# pre-create docker_gwbridge with a matching MTU and a free 172.x subnet.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # Pick a 172.x subnet one /16 above the highest one docker already uses
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 |  awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
}
# Create the attachable overlay network shared by all services of this OSM
# stack, honouring the MTU detected on the default interface.
function create_docker_network() {
    echo "creating network"
    local overlay_opts="--driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU}"
    sg docker -c "docker network create ${overlay_opts} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
# Deploy the lightweight (docker swarm) build: compute host/container port
# mappings, persist the compose environment, and run "docker stack deploy".
function deploy_lightweight() {

    echo "Deploying lightweight build"
    # NOTE(review): several port defaults were lost in extraction here
    # (original lines 740-745: presumably OSM_NBI_PORT, OSM_RO_PORT,
    # OSM_UI_PORT, OSM_MON_PORT, OSM_PROM_PORT); confirm upstream.
    OSM_KEYSTONE_PORT=5000
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # Ports are NOT published on the host: each compose variable carries
        # only the container-side port.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # NOTE(review): the "else" (original line 763) was lost in extraction;
        # restored here. Publish each service as host:container; Prometheus
        # intentionally maps a distinct host port (9091 -> container port).
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    # Persist the environment consumed by docker-compose at deploy time.
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    # osm_ports.sh is sourced in the same sub-shell so the deploy sees the
    # exported variables.
    sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    # NOTE(review): original lines 787-788 (likely popd and a sleep/track
    # call) were lost in extraction; confirm upstream.
    echo "Finished deployment of lightweight build"
}
# Pull and deploy the ELK (Elasticsearch/Logstash-beats/Kibana) stack as a
# separate swarm stack, then wait for Kibana and create the default index.
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    # Stage the ELK compose files into the work dir (cp -b keeps backups of
    # any previously staged copies).
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    # NOTE(review): original line 802 was lost in extraction (likely
    # "remove_stack osm_elk"); confirm upstream.
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # NOTE(review): loop initialisation (original lines 806-809: the time /
    # step / timelength counters and the elk_is_up flag) was lost in
    # extraction; confirm upstream.
    while [ $time -le $timelength ]; do
        # Poll Kibana's status endpoint until it answers "HTTP/1.1 200 OK".
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            # NOTE(review): success bookkeeping (set elk_is_up / break) and
            # the sleep + counter update (original lines 812-817) were lost in
            # extraction; loop closers restored -- confirm upstream.
            :
        fi
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never came up in time: print manual instructions instead.
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
  "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
  -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
  "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
  -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
}
# Main driver for the lightweight (container-based) install: prepares work
# dirs, confirms with the user, detects networking, sets up juju/VCA
# credentials, then delegates to the swarm or k8s deployment path.
# NOTE(review): several original lines are missing from this extraction
# (gaps in the embedded numbering); obvious if/else/fi closers are restored
# below and each gap is flagged -- confirm against upstream.
function install_lightweight() {
    # Per-stack work dir when a non-default stack name is used.
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."

    if [ -n "$KUBERNETES" ]; then
        # NOTE(review): items 2-3 of this confirmation prompt (original lines
        # 855-856) were lost in extraction.
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
    1. Install and configure LXD
    4. Disable swap space
    5. Install and initialize Kubernetes
Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Interface carrying the default route; needed to derive IP and MTU.
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        # Chain trick: if packages are present, the first dpkg succeeds and
        # nothing else runs; otherwise the negated echo forces the apt branch.
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
    fi
    # NOTE(review): original lines 888-891 lost in extraction (likely the
    # install_lxd call and a track step); confirm upstream.
    [ -z "$INSTALL_NOJUJU" ] && install_juju

    if [ -z "$OSM_VCA_HOST" ]; then
        juju_createcontroller
        # Extract the controller's first api-endpoint IP from juju's output.
        OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        # CA cert, base64-encoded on a single line for the env files.
        OSM_VCA_CACERT=$(juju controllers --format json | jq -r '.controllers["osm"]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # NOTE(review): BUG -- missing '$' before OSM_DATABASE_COMMONKEY
        # below: the literal string is never empty, so this FATAL can never
        # trigger; should read [ -z "$OSM_DATABASE_COMMONKEY" ].
        [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        # NOTE(review): follow-up k8s bootstrap lines (original 934-936) lost
        # in extraction; confirm upstream.
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        # Only rewrite manifest tags when a non-default docker tag was given.
        [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
        # NOTE(review): the actual service-deployment calls (original lines
        # 958-959, likely namespace_vol / deploy_osm_services) were lost in
        # extraction; confirm upstream.
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        remove_stack $OSM_STACK_NAME
        create_docker_network
        # NOTE(review): the deploy_lightweight call and its track step
        # (original line 970) were lost in extraction; confirm upstream.
    fi

    generate_osmclient_script
    install_prometheus_nodeexporter

    [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
    [ -n "$INSTALL_ELK" ] && deploy_elk && track elk

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient

    # Download beacon: silently fetch a README so the project can count installs.
    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
}
# Build and start the vim-emu (emulated VIM) docker container, attaching it to
# the OSM network in lightweight mode.
function install_vimemu() {
    # NOTE(review): plain 'echo' (no -e) prints the literal "\n" here; the
    # later messages use 'echo -e' -- likely should too.
    echo "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    # NOTE(review): this overwrites any EXIT trap set earlier in the script.
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    # NOTE(review): original line 1010 (likely a sleep) was lost in
    # extraction; confirm upstream.
    # Resolve the container's IP on its attached network.
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "  export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "  osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
# Deploy the OSM monitoring add-on onto the Kubernetes cluster by running the
# bundled installer script from the devops checkout.
function install_k8s_monitoring() {
    local k8s_installers=$OSM_DEVOPS/installers/k8s
    # Scripts in the checkout may not carry the execute bit; set it first.
    $WORKDIR_SUDO chmod +x $k8s_installers/*.sh
    $WORKDIR_SUDO $k8s_installers/install_osm_k8s_monitoring.sh
}
# Remove the OSM monitoring add-on by running the bundled uninstaller script.
function uninstall_k8s_monitoring() {
    local k8s_installers=$OSM_DEVOPS/installers/k8s
    $WORKDIR_SUDO $k8s_installers/uninstall_osm_k8s_monitoring.sh
}
# Print the effective installer configuration, one VAR=value per line
# (used by the --showopts flag so users can inspect what will be done).
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # Fixed: this line previously echoed $OSM_STACK_NAME under the
    # OSM_WORK_DIR label, mislabelling the reported value.
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1072 duration
=$
((ctime
- SESSION_ID
))
1073 url
="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1074 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1076 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name
="binsrc"
1077 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name
="lxd"
1078 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name
="lw"
1079 event_name
="${event_name}_$1"
1080 url
="${url}&event=${event_name}&ce_duration=${duration}"
1081 wget
-q -O /dev
/null
$url
1093 INSTALL_FROM_SOURCE
=""
1094 RELEASE
="ReleaseSEVEN"
1097 LXD_REPOSITORY_BASE
="https://osm-download.etsi.org/repository/osm/lxd"
1098 LXD_REPOSITORY_PATH
=""
1099 INSTALL_LIGHTWEIGHT
="y"
1107 INSTALL_K8S_MONITOR
=""
1108 INSTALL_NOHOSTCLIENT
=""
1109 SESSION_ID
=`date +%s`
1117 REPOSITORY_KEY
="OSM%20ETSI%20Release%20Key.gpg"
1118 REPOSITORY_BASE
="https://osm-download.etsi.org/repository/osm/debian"
1120 OSM_WORK_DIR
="/etc/osm"
1121 OSM_DOCKER_WORK_DIR
="/etc/osm/docker"
1122 OSM_K8S_WORK_DIR
="${OSM_DOCKER_WORK_DIR}/osm_pods"
1123 OSM_HOST_VOL
="/var/lib/osm"
1124 OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1125 OSM_DOCKER_TAG
=latest
1126 DOCKER_USER
=opensourcemano
1128 KAFKA_TAG
=2.11-1.0
.2
1129 PROMETHEUS_TAG
=v2.4
.3
1131 PROMETHEUS_NODE_EXPORTER_TAG
=0.18.1
1132 PROMETHEUS_CADVISOR_TAG
=latest
1134 OSM_DATABASE_COMMONKEY
=
1135 ELASTIC_VERSION
=6.4.2
1136 ELASTIC_CURATOR_VERSION
=5.5.4
1137 POD_NETWORK_CIDR
=10.244.0.0/16
1138 K8S_MANIFEST_DIR
="/etc/kubernetes/manifests"
1139 RE_CHECK
='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# ---- command-line parsing ----
# NOTE(review): extraction dropped the 'case "${o}" in' dispatcher, every
# case label (b), r), c), ...) with its ';;' terminator, and the closing
# esac; the statements below are annotated with the option each belongs to.
# Restore the case structure from upstream before running.
# (Also note the stray space inside the optstring before "hy", present in
# the original.)
while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:-: hy" o; do
        # r) repository name for osm packages
        REPOSITORY="${OPTARG}"
        REPO_ARGS+=(-r "$REPOSITORY")
        # c) container orchestrator: swarm (default) or k8s
        [ "${OPTARG}" == "swarm" ] && continue
        [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
        # NOTE(review): message says "-i" but this arm parses -c.
        echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
        # k) repository public key url
        REPOSITORY_KEY="${OPTARG}"
        REPO_ARGS+=(-k "$REPOSITORY_KEY")
        # u) repository base url
        REPOSITORY_BASE="${OPTARG}"
        REPO_ARGS+=(-u "$REPOSITORY_BASE")
        # R) release (NOTE(review): the RELEASE="${OPTARG}" assignment,
        # original line 1166, was lost in extraction)
        REPO_ARGS+=(-R "$RELEASE")
        # D) devops checkout path
        OSM_DEVOPS="${OPTARG}"
        # o) install only the given optional component
        [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
        [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
        [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
        # m) accumulate modules to rebuild from source
        [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
        [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
        [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
        [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
        [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
        [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
        [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
        [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
        [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
        [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
        [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
        [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
        [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
        # H) VCA/juju controller host IP
        OSM_VCA_HOST="${OPTARG}"
        # S) VCA/juju secret
        OSM_VCA_SECRET="${OPTARG}"
        # s) stack name / k8s namespace; must match RE_CHECK when k8s is used
        OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
        # when specifying workdir, do not use sudo for access
        # w) work dir (NOTE(review): the WORKDIR_SUDO reset, original line
        # 1204, was lost in extraction)
        OSM_WORK_DIR="${OPTARG}"
        # t) docker tag to deploy
        OSM_DOCKER_TAG="${OPTARG}"
        # U) docker registry user
        DOCKER_USER="${OPTARG}"
        # P) VCA public key: read the key material from the given file
        OSM_VCA_PUBKEY=$(cat ${OPTARG})
        # A) VCA/juju API proxy
        OSM_VCA_APIPROXY="${OPTARG}"
        # -) long options; getopts delivers the word after "--" in OPTARG
        [ "${OPTARG}" == "help" ] && usage && exit 0
        [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
        [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
        [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
        [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
        [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
        [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
        [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
        [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
        [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
        [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
        [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
        [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
        [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
        [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
        [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
        [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
        [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
        [ "${OPTARG}" == "pullimages" ] && continue
        [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
        echo -e "Invalid option: '--$OPTARG'\n" >&2
        # :) a short option was given without its required argument
        echo "Option -$OPTARG requires an argument" >&2
        # \?) unknown short option
        echo -e "Invalid option: '-$OPTARG'\n" >&2
done
# ---- main flow (top-level statements) ----
# NOTE(review): several original lines were lost in extraction (gaps in the
# embedded numbering); obvious if/else/fi closers are restored and each gap
# flagged -- confirm against upstream.

# -m NONE is exclusive: reject it when combined with other -m modules.
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"

if [ -n "$SHOWOPTS" ]; then
    # NOTE(review): branch body (original lines 1266-1268, presumably
    # "dump_vars && exit 0") was lost in extraction.
    :
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git jq wget curl tar"
echo -e "Checking required packages: $need_packages"
# Chain trick: if packages are present, the first dpkg succeeds and nothing
# else runs; otherwise the negated echo forces the apt-get branch.
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
  || sudo apt-get update \
  || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "Installing $need_packages requires root privileges." \
  || sudo apt-get install -y $need_packages \
  || FATAL "failed to install $need_packages"

if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        # devops root is the parent of the directory containing this script.
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            # Highest v<digit>* tag by version sort is the latest stable.
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

# Pull in shared helper functions (FATAL, ask_user, track, ...).
. $OSM_DEVOPS/common/all_funcs

# Dispatch: uninstall / install-only paths exit before the full install runs.
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
# Download beacon: silently fetch a README so the project can count installs.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Download beacon marking the end of the classic install path.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null