Fix for unattended install of iptables-persistent
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
28 echo -e " ..."
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -s <stack name> or <namespace> user-defined stack name when installed using swarm, or namespace when installed using k8s; default is osm"
31 echo -e " -H <VCA host> use specific juju host controller IP"
32 echo -e " -S <VCA secret> use VCA/juju secret key"
33 echo -e " -P <VCA pubkey> use VCA/juju public key file"
34 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
35 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
36 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
37 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
38 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
39 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack)"
40 echo -e " -D <devops path> use local devops installation path"
41 echo -e " -w <work dir> Location to store runtime installation"
42 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
43 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
44 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
45 echo -e " --nojuju: do not install juju, assumes it is already installed"
46 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
47 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
48 echo -e " --nohostclient: do not install the osmclient"
49 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
50 echo -e " --source: install OSM from source code using the latest stable tag"
51 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
52 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
53 echo -e " --k8s_monitor: install the OSM kubernetes monitoring stack with prometheus and grafana"
54 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
55 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
56 echo -e " --showopts: print chosen options and exit (only for debugging)"
57 echo -e " -y: do not prompt for confirmation, assumes yes"
58 echo -e " -h / --help: print this help"
59 }
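# Illustrative invocations (option values below are examples only, not defaults):
#   ./full_install_osm.sh                     # install from binaries with the default orchestrator
#   ./full_install_osm.sh -c k8s -s myosm     # deploy on Kubernetes under the "myosm" namespace
#   ./full_install_osm.sh --uninstall         # remove the OSM containers and NAT rules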
60
61 # takes a juju/accounts.yaml file and returns the password specific
62 # for a controller. I wrote this using only bash tools to minimize
63 # additions of other packages
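# The expected layout of accounts.yaml is roughly the following (illustrative, values invented):
#   controllers:
#     osm:
#       user: admin
#       password: <secret>
# The sed/awk pipeline below flattens that YAML and prints the value of the "password"
# field found under the requested controller name.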
64 function parse_juju_password {
65 password_file="${HOME}/.local/share/juju/accounts.yaml"
66 local controller_name=$1
67 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
68 sed -ne "s|^\($s\):|\1|" \
69 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
70 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
71 awk -F$fs -v controller=$controller_name '{
72 indent = length($1)/2;
73 vname[indent] = $2;
74 for (i in vname) {if (i > indent) {delete vname[i]}}
75 if (length($3) > 0) {
76 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
77 if (match(vn,controller) && match($2,"password")) {
78 printf("%s",$3);
79 }
80 }
81 }'
82 }
83
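# Prints a random 32-character alphanumeric string, used below for database and service passwords.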
84 function generate_secret() {
85 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
86 }
87
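# Deletes persistent data: on Kubernetes the namespace hostPath directory, otherwise the named docker volumes of the stack.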
88 function remove_volumes() {
89 if [ -n "$KUBERNETES" ]; then
90 k8_volume=$1
91 echo "Removing ${k8_volume}"
92 $WORKDIR_SUDO rm -rf ${k8_volume}
93 else
94 stack=$1
95 volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
96 for volume in $volumes; do
97 sg docker -c "docker volume rm ${stack}_${volume}"
98 done
99 fi
100 }
101
102 function remove_network() {
103 stack=$1
104 sg docker -c "docker network rm net${stack}"
105 }
106
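# Removes the PREROUTING DNAT rule that forwards port 17070 on the host's default-route IP
# to the Juju (VCA) controller, then persists the change with netfilter-persistent.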
107 function remove_iptables() {
108 stack=$1
109 if [ -z "$OSM_VCA_HOST" ]; then
110 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
111 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
112 fi
113
114 if [ -z "$DEFAULT_IP" ]; then
115 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
116 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
117 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
118 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
119 fi
120
121 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
122 sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
123 sudo netfilter-persistent save
124 fi
125 }
126
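# Removes a docker stack and waits up to 30 seconds for all of its containers to disappear.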
127 function remove_stack() {
128 stack=$1
129 if sg docker -c "docker stack ps ${stack}" ; then
130 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
131 COUNTER=0
132 result=1
133 while [ ${COUNTER} -lt 30 ]; do
134 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
135 #echo "Dockers running: $result"
136 if [ "${result}" == "0" ]; then
137 break
138 fi
139 let COUNTER=COUNTER+1
140 sleep 1
141 done
142 if [ "${result}" == "0" ]; then
143 echo "All dockers of the stack ${stack} were removed"
144 else
145 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
146 fi
147 sleep 5
148 fi
149 }
150
151 #removes osm deployments and services
152 function remove_k8s_namespace() {
153 kubectl delete ns $1
154 }
155
156 #Uninstall lightweight OSM: remove dockers
157 function uninstall_lightweight() {
158 if [ -n "$INSTALL_ONLY" ]; then
159 if [ -n "$INSTALL_ELK" ]; then
160 echo -e "\nUninstalling OSM ELK stack"
161 remove_stack osm_elk
162 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
163 fi
164 else
165 echo -e "\nUninstalling OSM"
166 if [ -n "$KUBERNETES" ]; then
167 if [ -n "$K8S_MONITOR" ]; then
168 # uninstall OSM MONITORING
169 uninstall_k8s_monitoring
170 fi
171 remove_k8s_namespace $OSM_STACK_NAME
172 else
173
174 remove_stack $OSM_STACK_NAME
175 remove_stack osm_elk
176 fi
177 echo "Now osm docker images and volumes will be deleted"
178 newgrp docker << EONG
179 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
180 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
181 docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
182 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
183 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
184 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
185 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
186 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
187 EONG
188
189 if [ -n "$KUBERNETES" ]; then
190 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
191 remove_volumes $OSM_NAMESPACE_VOL
192 else
193 remove_volumes $OSM_STACK_NAME
194 remove_network $OSM_STACK_NAME
195 fi
196 remove_iptables $OSM_STACK_NAME
197 echo "Removing $OSM_DOCKER_WORK_DIR"
198 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
199 sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
200 fi
201 echo "Some docker images will be kept in case they are used by other docker stacks"
202 echo "To remove them, just run 'docker image prune' in a terminal"
203 return 0
204 }
205
206 #Safe unattended install of iptables-persistent
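# debconf-set-selections preseeds the package's two "save current rules?" questions, so the
# "apt-get install" below never stops at an interactive dialog (hence the -yq flags).
# To check the preseeded answers afterwards you could run, for example:
#   sudo debconf-show iptables-persistent
# (debconf-show ships with debconf; shown here only as an illustrative verification step)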
207 function check_install_iptables_persistent(){
208 echo -e "\nChecking required packages: iptables-persistent"
209 if ! dpkg -l iptables-persistent &>/dev/null; then
210 echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
211 echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
212 echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
213 sudo apt-get -yq install iptables-persistent
214 fi
215 }
216
217 #Configure NAT rules, based on the current IP addresses of containers
218 function nat(){
219 check_install_iptables_persistent
220
221 echo -e "\nConfiguring NAT rules"
222 echo -e " Required root privileges"
223 sudo $OSM_DEVOPS/installers/nat_osm
224 }
225
226 function FATAL(){
227 echo "FATAL error: Cannot install OSM due to \"$1\""
228 exit 1
229 }
230
231 function install_lxd() {
232 sudo apt-get update
233 sudo apt-get install -y lxd
234 newgrp lxd
235 lxd init --auto
236 lxd waitready
237 lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
238 DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
239 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
240 lxc profile device set default eth0 mtu $DEFAULT_MTU
241 #sudo systemctl stop lxd-bridge
242 #sudo systemctl --system daemon-reload
243 #sudo systemctl enable lxd-bridge
244 #sudo systemctl start lxd-bridge
245 }
246
247 function ask_user(){
248 # ask the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
249 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
250 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
251 read -e -p "$1" USER_CONFIRMATION
252 while true ; do
253 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
254 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
255 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
256 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
257 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
258 done
259 }
260
261 function install_osmclient(){
262 CLIENT_RELEASE=${RELEASE#"-R "}
263 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
264 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
265 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
266 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
267 curl $key_location | sudo apt-key add -
268 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
269 sudo apt-get update
270 sudo apt-get install -y python3-pip
271 sudo -H LC_ALL=C python3 -m pip install -U pip
272 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
273 sudo apt-get install -y python3-osm-im python3-osmclient
274 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
275 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
276 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
277 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
278 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
279 echo -e "\nOSM client installed"
280 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
281 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
282 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
283 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
284 else
285 echo -e "OSM client assumes that the OSM host is running on localhost (127.0.0.1)."
286 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
287 echo " export OSM_HOSTNAME=<OSM_host>"
288 fi
289 return 0
290 }
291
292 function install_prometheus_nodeexporter(){
293 if (systemctl -q is-active node_exporter)
294 then
295 echo "Node Exporter is already running."
296 else
297 echo "Node Exporter is not active, installing..."
298 if getent passwd node_exporter > /dev/null 2>&1; then
299 echo "node_exporter user exists"
300 else
301 echo "Creating user node_exporter"
302 sudo useradd --no-create-home --shell /bin/false node_exporter
303 fi
304 sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
305 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
306 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
307 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
308 sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
309 sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
310 sudo systemctl daemon-reload
311 sudo systemctl restart node_exporter
312 sudo systemctl enable node_exporter
313 echo "Node Exporter has been activated in this host."
314 fi
315 return 0
316 }
317
318 function uninstall_prometheus_nodeexporter(){
319 sudo systemctl stop node_exporter
320 sudo systemctl disable node_exporter
321 sudo rm /etc/systemd/system/node_exporter.service
322 sudo systemctl daemon-reload
323 sudo userdel node_exporter
324 sudo rm /usr/local/bin/node_exporter
325 return 0
326 }
327
328 function install_docker_ce() {
329 # installs and configures Docker CE
330 echo "Installing Docker CE ..."
331 sudo apt-get -qq update
332 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
333 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
334 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
335 sudo apt-get -qq update
336 sudo apt-get install -y docker-ce
337 echo "Adding user to group 'docker'"
338 sudo groupadd -f docker
339 sudo usermod -aG docker $USER
340 sleep 2
341 sudo service docker restart
342 echo "... restarted Docker service"
343 sg docker -c "docker version" || FATAL "Docker installation failed"
344 echo "... Docker CE installation done"
345 return 0
346 }
347
348 function install_docker_compose() {
349 # installs and configures docker-compose
350 echo "Installing Docker Compose ..."
351 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
352 sudo chmod +x /usr/local/bin/docker-compose
353 echo "... Docker Compose installation done"
354 }
355
356 function install_juju() {
357 echo "Installing juju"
358 sudo snap install juju --classic
359 [ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
360 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
361 echo "Finished installation of juju"
362 return 0
363 }
364
365 function juju_createcontroller() {
366 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
367 # Controller not found, so create it
368 sudo usermod -a -G lxd ${USER}
369 sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
370 fi
371 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
372 }
373
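# Adds (idempotently, thanks to the "iptables -C" probe) the DNAT rule that exposes the Juju
# controller API on port 17070 of the host's default IP, and persists it.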
374 function juju_createproxy() {
375 check_install_iptables_persistent
376
377 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
378 sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
379 sudo netfilter-persistent save
380 fi
381 }
382
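# Pulls the third-party images (kafka, mongo, prometheus, ...) and either pulls the OSM module
# images from ${DOCKER_USER} (--pullimages) or builds them from source, honouring the
# -m/TO_REBUILD filter when only some modules have to be rebuilt.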
383 function generate_docker_images() {
384 echo "Pulling and generating docker images"
385 _build_from=$COMMIT_ID
386 [ -z "$_build_from" ] && _build_from="master"
387
388 echo "OSM Docker images generated from $_build_from"
389
390 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
391 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
392 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
393 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
394
395 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
396 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
397 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
398 fi
399
400 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
401 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
402 fi
403
404 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
405 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
406 fi
407
408 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
409 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
410 fi
411
412 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
413 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
414 fi
415
416 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
417 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
418 fi
419
420 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
421 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
422 fi
423
424 if [ -n "$PULL_IMAGES" ]; then
425 sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
426 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
427 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
428 git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
429 sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
430 fi
431
432 if [ -n "$PULL_IMAGES" ]; then
433 sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
434 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
435 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
436 git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
437 sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
438 fi
439
440 if [ -n "$PULL_IMAGES" ]; then
441 sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
442 sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
443 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
444 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
445 git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
446 sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
447 sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
448 fi
449
450 if [ -n "$PULL_IMAGES" ]; then
451 sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
452 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
453 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
454 git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
455 sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
456 fi
457
458 if [ -n "$PULL_IMAGES" ]; then
459 sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM docker image"
460 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
461 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
462 git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
463 sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
464 fi
465
466 if [ -n "$PULL_IMAGES" ]; then
467 sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
468 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
469 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
470 git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
471 sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
472 fi
473
474 if [ -n "$PULL_IMAGES" ]; then
475 sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
476 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
477 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
478 fi
479
480 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
481 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
482 fi
483
484 echo "Finished generation of docker images"
485 }
486
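# Copies $1 over $2 only when they differ; if $2 already exists the user is asked first,
# and cp -b keeps a backup of the previous file.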
487 function cmp_overwrite() {
488 file1="$1"
489 file2="$2"
490 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
491 if [ -f "${file2}" ]; then
492 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
493 else
494 cp -b ${file1} ${file2}
495 fi
496 fi
497 }
498
499 function generate_docker_env_files() {
500 echo "Doing a backup of existing env files"
501 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
502 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
503 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
504 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
505 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
506 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
507 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
508 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
509 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
510
511 echo "Generating docker env files"
512 if [ -n "$KUBERNETES" ]; then
513 #Kubernetes resources
514 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
515 else
516 # Docker-compose
517 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
518
519 # Prometheus
520 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml
521
522 # Grafana & Prometheus Exporter files
523 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
524 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
525 fi
526
527 # LCM
528 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
529 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
530 fi
531
532 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
533 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
534 else
535 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
536 fi
537
538 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
539 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
540 else
541 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
542 fi
543
544 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
545 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
546 else
547 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
548 fi
549
550 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
551 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
552 else
553 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
554 fi
555
556 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
557 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
558 else
559 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
560 fi
561
562 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
563 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
564 fi
565
566 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
567 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
568 fi
569
570 # RO
571 MYSQL_ROOT_PASSWORD=$(generate_secret)
572 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
573 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
574 fi
575 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
576 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
577 fi
578
579 # Keystone
580 KEYSTONE_DB_PASSWORD=$(generate_secret)
581 SERVICE_PASSWORD=$(generate_secret)
582 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
583 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
584 fi
585 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
586 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
587 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
588 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
589 fi
590
591 # NBI
592 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
593 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
594 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
595 fi
596
597 # MON
598 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
599 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
600 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
601 fi
602
603 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
604 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
605 else
606 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
607 fi
608
609 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
610 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
611 else
612 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
613 fi
614
615 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
616 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
617 else
618 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
619 fi
620
621 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
622 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
623 else
624 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
625 fi
626
627
628 # POL
629 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
630 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
631 fi
632
633 # LW-UI
634 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
635 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
636 fi
637
638 echo "Finished generation of docker env files"
639 }
640
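# Writes a one-line wrapper script ($OSM_DOCKER_WORK_DIR/osm) that runs the osmclient
# sidecar container attached to the stack network net${OSM_STACK_NAME}.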
641 function generate_osmclient_script () {
642 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
643 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
644 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
645 }
646
647 #installs kubernetes packages
648 function install_kube() {
649 sudo apt-get update && sudo apt-get install -y apt-transport-https
650 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
651 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
652 sudo apt-get update
653 echo "Installing Kubernetes Packages ..."
654 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
655 }
656
657 #initializes kubernetes control plane
658 function init_kubeadm() {
659 sudo swapoff -a
660 sudo kubeadm init --config $1
661 sleep 5
662 }
663
664 function kube_config_dir() {
665 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
666 mkdir -p $HOME/.kube
667 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
668 sudo chown $(id -u):$(id -g) $HOME/.kube/config
669 }
670
671 #deploys flannel as daemonsets
672 function deploy_cni_provider() {
673 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
674 trap 'rm -rf "${CNI_DIR}"' EXIT
675 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
676 kubectl apply -f $CNI_DIR
677 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
678 }
679
680 #creates secrets from env files which will be used by containers
681 function kube_secrets(){
682 kubectl create ns $OSM_STACK_NAME
683 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
684 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
685 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
686 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
687 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
688 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
689 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
690 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
691 }
692
693 #deploys osm pods and services
694 function deploy_osm_services() {
695 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
696 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
697 sleep 5
698 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
699 }
700
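# Rewrites the image tag of every opensourcemano/* image referenced in the k8s manifests
# so that the requested OSM docker tag is deployed.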
701 function parse_yaml() {
702 osm_services="nbi lcm ro pol mon light-ui keystone"
703 TAG=$1
704 for osm in $osm_services; do
705 $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
706 done
707 }
708
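# Points the hostPath volumes of the k8s manifests at the per-namespace directory
# ($OSM_NAMESPACE_VOL) instead of the default /var/lib/osm.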
709 function namespace_vol() {
710 osm_services="nbi lcm ro pol mon kafka mongo mysql"
711 for osm in $osm_services; do
712 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
713 done
714 }
715
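# If the host MTU is not the docker default (1500), a docker_gwbridge network with a matching
# MTU is created on the next free 172.x subnet before the swarm is initialized.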
716 function init_docker_swarm() {
717 if [ "${DEFAULT_MTU}" != "1500" ]; then
718 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
719 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
720 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
721 fi
722 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
723 return 0
724 }
725
726 function create_docker_network() {
727 echo "creating network"
728 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
729 echo "creating network DONE"
730 }
731
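# Writes osm_ports.sh with the port mappings and image tags, then deploys the docker-compose
# stack. A generated osm_ports.sh would look roughly like this (values are the defaults above):
#   export OSM_NBI_PORTS=9999:9999
#   export OSM_NETWORK=netosm
#   export TAG=latest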
732 function deploy_lightweight() {
733
734 echo "Deploying lightweight build"
735 OSM_NBI_PORT=9999
736 OSM_RO_PORT=9090
737 OSM_KEYSTONE_PORT=5000
738 OSM_UI_PORT=80
739 OSM_MON_PORT=8662
740 OSM_PROM_PORT=9090
741 OSM_PROM_CADVISOR_PORT=8080
742 OSM_PROM_HOSTPORT=9091
743 OSM_GRAFANA_PORT=3000
744 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
745 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
746
747 if [ -n "$NO_HOST_PORTS" ]; then
748 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
749 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
750 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
751 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
752 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
753 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
754 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
755 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
756 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
757 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
758 else
759 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
760 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
761 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
762 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
763 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
764 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
765 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
766 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
767 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
768 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
769 fi
770 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
771 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
772 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
773 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
774 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
775 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
776 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
777 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
778 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
779
780 pushd $OSM_DOCKER_WORK_DIR
781 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
782 popd
783
784 echo "Finished deployment of lightweight build"
785 }
786
787 function deploy_elk() {
788 echo "Pulling docker images for ELK"
789 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
790 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
791 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
792 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
793 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
794 echo "Finished pulling elk docker images"
795 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
796 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
797 remove_stack osm_elk
798 echo "Deploying ELK stack"
799 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
800 echo "Waiting for ELK stack to be up and running"
801 time=0
802 step=5
803 timelength=40
804 elk_is_up=1
805 while [ $time -le $timelength ]; do
806 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
807 elk_is_up=0
808 break
809 fi
810 sleep $step
811 time=$((time+step))
812 done
813 if [ $elk_is_up -eq 0 ]; then
814 echo "ELK is up and running. Trying to create index pattern..."
815 #Create index pattern
816 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
817 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
818 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
819 #Make it the default index
820 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
821 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
822 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
823 else
824 echo "Cannot connect to Kibana to create index pattern."
825 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
826 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
827 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
828 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
829 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
830 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
831 -d"{\"value\":\"filebeat-*\"}"'
832 fi
833 echo "Finished deployment of ELK stack"
834 return 0
835 }
836
837 function install_lightweight() {
838 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
839 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
840 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
841 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
842
843 track checkingroot
844 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
845 track noroot
846
847 if [ -n "$KUBERNETES" ]; then
848 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
849 1. Install and configure LXD
850 2. Install juju
851 3. Install docker CE
852 4. Disable swap space
853 5. Install and initialize Kubernetes
854 as pre-requirements.
855 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
856
857 else
858 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
859 fi
860 track proceed
861
862 echo "Installing lightweight build of OSM"
863 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
864 trap 'rm -rf "${LWTEMPDIR}"' EXIT
865 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
866 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
867 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
868 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
869 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
870
871 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
872 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
873 need_packages_lw="lxd snapd"
874 echo -e "Checking required packages: $need_packages_lw"
875 dpkg -l $need_packages_lw &>/dev/null \
876 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
877 || sudo apt-get update \
878 || FATAL "failed to run apt-get update"
879 dpkg -l $need_packages_lw &>/dev/null \
880 || ! echo -e "Installing $need_packages_lw requires root privileges." \
881 || sudo apt-get install -y $need_packages_lw \
882 || FATAL "failed to install $need_packages_lw"
883 fi
884 track prereqok
885
886 [ -z "$INSTALL_NOJUJU" ] && install_juju
887 track juju_install
888
889 if [ -z "$OSM_VCA_HOST" ]; then
890 juju_createcontroller
891 OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
892 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
893 fi
894 track juju_controller
895
896 if [ -z "$OSM_VCA_SECRET" ]; then
897 OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
898 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
899 fi
900 if [ -z "$OSM_VCA_PUBKEY" ]; then
901 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
902 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
903 fi
904 if [ -z "$OSM_VCA_CACERT" ]; then
905 OSM_VCA_CACERT=$(juju controllers --format json | jq -r '.controllers["osm"]["ca-cert"]' | base64 | tr -d \\n)
906 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
907 fi
908 if [ -z "$OSM_VCA_APIPROXY" ]; then
909 OSM_VCA_APIPROXY=$DEFAULT_IP
910 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
911 fi
912 juju_createproxy
913 track juju
914
915 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
916 OSM_DATABASE_COMMONKEY=$(generate_secret)
917 [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
918 fi
919
920 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
921 track docker_ce
922
923 #Installs Kubernetes and deploys osm services
924 if [ -n "$KUBERNETES" ]; then
925 install_kube
926 track install_k8s
927 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
928 kube_config_dir
929 track init_k8s
930 else
931 #install_docker_compose
932 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
933 track docker_swarm
934 fi
935
936 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
937 track docker_build
938
939 generate_docker_env_files
940
941 if [ -n "$KUBERNETES" ]; then
942 if [ -n "$K8S_MONITOR" ]; then
943 # uninstall OSM MONITORING
944 uninstall_k8s_monitoring
945 track uninstall_k8s_monitoring
946 fi
947 #remove old namespace
948 remove_k8s_namespace $OSM_STACK_NAME
949 deploy_cni_provider
950 kube_secrets
951 [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
952 namespace_vol
953 deploy_osm_services
954 track deploy_osm_services_k8s
955 if [ -n "$K8S_MONITOR" ]; then
956 # install OSM MONITORING
957 install_k8s_monitoring
958 track install_k8s_monitoring
959 fi
960 else
961 # remove old stack
962 remove_stack $OSM_STACK_NAME
963 create_docker_network
964 deploy_lightweight
965 generate_osmclient_script
966 track docker_deploy
967 install_prometheus_nodeexporter
968 track nodeexporter
969 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
970 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
971 fi
972
973 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
974 track osmclient
975
976 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
977 track end
978 return 0
979 }
980
981 function install_vimemu() {
982 echo -e "\nInstalling vim-emu"
983 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
984 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
985 # install prerequisites (OVS is a must for the emulator to work)
986 sudo apt-get install -y openvswitch-switch
987 # clone vim-emu repository (attention: branch is currently master only)
988 echo "Cloning vim-emu repository ..."
989 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
990 # build vim-emu docker
991 echo "Building vim-emu Docker container..."
992
993 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
994 # start vim-emu container as daemon
995 echo "Starting vim-emu Docker container 'vim-emu' ..."
996 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
997 # in lightweight mode, the emulator needs to be attached to netOSM
998 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
999 else
1000 # classic build mode
1001 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1002 fi
1003 echo "Waiting for 'vim-emu' container to start ..."
1004 sleep 5
1005 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1006 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1007 # print vim-emu connection info
1008 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1009 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1010 echo -e "To add the emulated VIM to OSM you should do:"
1011 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1012 }
1013
1014 function install_k8s_monitoring() {
1015 # install OSM monitoring
1016 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1017 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1018 }
1019
1020 function uninstall_k8s_monitoring() {
1021 # uninstall OSM monitoring
1022 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1023 }
1024
1025 function dump_vars(){
1026 echo "DEVELOP=$DEVELOP"
1027 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1028 echo "UNINSTALL=$UNINSTALL"
1029 echo "UPDATE=$UPDATE"
1030 echo "RECONFIGURE=$RECONFIGURE"
1031 echo "TEST_INSTALLER=$TEST_INSTALLER"
1032 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1033 echo "INSTALL_LXD=$INSTALL_LXD"
1034 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1035 echo "INSTALL_ONLY=$INSTALL_ONLY"
1036 echo "INSTALL_ELK=$INSTALL_ELK"
1037 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1038 echo "TO_REBUILD=$TO_REBUILD"
1039 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1040 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1041 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1042 echo "RELEASE=$RELEASE"
1043 echo "REPOSITORY=$REPOSITORY"
1044 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1045 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1046 echo "OSM_DEVOPS=$OSM_DEVOPS"
1047 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1048 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1049 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1050 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1051 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1052 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1053 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
1054 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1055 echo "DOCKER_USER=$DOCKER_USER"
1056 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1057 echo "PULL_IMAGES=$PULL_IMAGES"
1058 echo "KUBERNETES=$KUBERNETES"
1059 echo "SHOWOPTS=$SHOWOPTS"
1060 echo "Install from specific refspec (-b): $COMMIT_ID"
1061 }
1062
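# Anonymous install telemetry: reports the given phase name and the elapsed time since the
# installer started to the OSM project's tracking endpoint (woopra); output is discarded.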
1063 function track(){
1064 ctime=`date +%s`
1065 duration=$((ctime - SESSION_ID))
1066 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1067 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1068 event_name="bin"
1069 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1070 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1071 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1072 event_name="${event_name}_$1"
1073 url="${url}&event=${event_name}&ce_duration=${duration}"
1074 wget -q -O /dev/null $url
1075 }
1076
1077 UNINSTALL=""
1078 DEVELOP=""
1079 UPDATE=""
1080 RECONFIGURE=""
1081 TEST_INSTALLER=""
1082 INSTALL_LXD=""
1083 SHOWOPTS=""
1084 COMMIT_ID=""
1085 ASSUME_YES=""
1086 INSTALL_FROM_SOURCE=""
1087 RELEASE="ReleaseSEVEN"
1088 REPOSITORY="stable"
1089 INSTALL_VIMEMU=""
1090 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1091 LXD_REPOSITORY_PATH=""
1092 INSTALL_LIGHTWEIGHT="y"
1093 INSTALL_ONLY=""
1094 INSTALL_ELK=""
1095 TO_REBUILD=""
1096 INSTALL_NOLXD=""
1097 INSTALL_NODOCKER=""
1098 INSTALL_NOJUJU=""
1099 KUBERNETES=""
1100 K8S_MONITOR=""
1101 INSTALL_NOHOSTCLIENT=""
1102 SESSION_ID=`date +%s`
1103 OSM_DEVOPS=
1104 OSM_VCA_HOST=
1105 OSM_VCA_SECRET=
1106 OSM_VCA_PUBKEY=
1107 OSM_STACK_NAME=osm
1108 NO_HOST_PORTS=""
1109 DOCKER_NOBUILD=""
1110 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1111 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1112 WORKDIR_SUDO=sudo
1113 OSM_WORK_DIR="/etc/osm"
1114 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1115 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1116 OSM_HOST_VOL="/var/lib/osm"
1117 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1118 OSM_DOCKER_TAG=latest
1119 DOCKER_USER=opensourcemano
1120 PULL_IMAGES="y"
1121 KAFKA_TAG=2.11-1.0.2
1122 PROMETHEUS_TAG=v2.4.3
1123 GRAFANA_TAG=latest
1124 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1125 PROMETHEUS_CADVISOR_TAG=latest
1126 KEYSTONEDB_TAG=10
1127 OSM_DATABASE_COMMONKEY=
1128 ELASTIC_VERSION=6.4.2
1129 ELASTIC_CURATOR_VERSION=5.5.4
1130 POD_NETWORK_CIDR=10.244.0.0/16
1131 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1132 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1133
1134 while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:-: hy" o; do
1135 case "${o}" in
1136 b)
1137 COMMIT_ID=${OPTARG}
1138 PULL_IMAGES=""
1139 ;;
1140 r)
1141 REPOSITORY="${OPTARG}"
1142 REPO_ARGS+=(-r "$REPOSITORY")
1143 ;;
1144 c)
1145 [ "${OPTARG}" == "swarm" ] && continue
1146 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1147 echo -e "Invalid argument for -c : '$OPTARG'\n" >&2
1148 usage && exit 1
1149 ;;
1150 k)
1151 REPOSITORY_KEY="${OPTARG}"
1152 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1153 ;;
1154 u)
1155 REPOSITORY_BASE="${OPTARG}"
1156 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1157 ;;
1158 R)
1159 RELEASE="${OPTARG}"
1160 REPO_ARGS+=(-R "$RELEASE")
1161 ;;
1162 D)
1163 OSM_DEVOPS="${OPTARG}"
1164 ;;
1165 o)
1166 INSTALL_ONLY="y"
1167 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1168 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1169 ;;
1170 m)
1171 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1172 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1173 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1174 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1175 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1176 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1177 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1178 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1179 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1180 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1181 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1182 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1183 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1184 ;;
1185 H)
1186 OSM_VCA_HOST="${OPTARG}"
1187 ;;
1188 S)
1189 OSM_VCA_SECRET="${OPTARG}"
1190 ;;
1191 s)
1192 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1193 ;;
1194 w)
1195 # when specifying workdir, do not use sudo for access
1196 WORKDIR_SUDO=
1197 OSM_WORK_DIR="${OPTARG}"
1198 ;;
1199 t)
1200 OSM_DOCKER_TAG="${OPTARG}"
1201 ;;
1202 U)
1203 DOCKER_USER="${OPTARG}"
1204 ;;
1205 P)
1206 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1207 ;;
1208 A)
1209 OSM_VCA_APIPROXY="${OPTARG}"
1210 ;;
1211 -)
1212 [ "${OPTARG}" == "help" ] && usage && exit 0
1213 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1214 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1215 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1216 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1217 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1218 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1219 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1220 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1221 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1222 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1223 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1224 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1225 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1226 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1227 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1228 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1229 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1230 [ "${OPTARG}" == "pullimages" ] && continue
1231 [ "${OPTARG}" == "k8s_monitor" ] && K8S_MONITOR="y" && continue
1232 echo -e "Invalid option: '--$OPTARG'\n" >&2
1233 usage && exit 1
1234 ;;
1235 :)
1236 echo "Option -$OPTARG requires an argument" >&2
1237 usage && exit 1
1238 ;;
1239 \?)
1240 echo -e "Invalid option: '-$OPTARG'\n" >&2
1241 usage && exit 1
1242 ;;
1243 h)
1244 usage && exit 0
1245 ;;
1246 y)
1247 ASSUME_YES="y"
1248 ;;
1249 *)
1250 usage && exit 1
1251 ;;
1252 esac
1253 done
1254
1255 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1256
1257 if [ -n "$SHOWOPTS" ]; then
1258 dump_vars
1259 exit 0
1260 fi
1261
1262 # if develop, we force master
1263 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1264
1265 need_packages="git jq wget curl tar"
1266 echo -e "Checking required packages: $need_packages"
1267 dpkg -l $need_packages &>/dev/null \
1268 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1269 || sudo apt-get update \
1270 || FATAL "failed to run apt-get update"
1271 dpkg -l $need_packages &>/dev/null \
1272 || ! echo -e "Installing $need_packages requires root privileges." \
1273 || sudo apt-get install -y $need_packages \
1274 || FATAL "failed to install $need_packages"
1275
1276 if [ -z "$OSM_DEVOPS" ]; then
1277 if [ -n "$TEST_INSTALLER" ]; then
1278 echo -e "\nUsing local devops repo for OSM installation"
1279 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1280 else
1281 echo -e "\nCreating temporary dir for OSM installation"
1282 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1283 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1284
1285 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1286
1287 if [ -z "$COMMIT_ID" ]; then
1288 echo -e "\nGuessing the current stable release"
1289 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1290 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1291
1292 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1293 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1294 else
1295 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1296 fi
1297 git -C $OSM_DEVOPS checkout $COMMIT_ID
1298 fi
1299 fi
1300
1301 . $OSM_DEVOPS/common/all_funcs
1302
1303 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1304 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1305 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1306 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1307 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1308
1309 #Installation starts here
1310 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
1311 track start
1312
1313 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1314 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1315 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1316 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1317 fi
1318
1319 echo -e "Checking required packages: lxd"
1320 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1321 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1322
1323 # use local devops for containers
1324 export OSM_USE_LOCAL_DEVOPS=true
1325
1326 #Install osmclient
1327
1328 #Install vim-emu (optional)
1329 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1330
1331 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
1332 track end
1333 echo -e "\nDONE"