feature(prometheus): Configuration can be dynamically replaced
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
# Print command-line help for the installer: every supported flag and
# deployment option. Reads no state other than $0 (the script name).
function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " OPTIONS"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " ..."
    echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack)"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes moitoring with prometheus and grafana"
    # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -h / --help: print this help"
}
60
61 # takes a juju/accounts.yaml file and returns the password specific
62 # for a controller. I wrote this using only bash tools to minimize
63 # additions of other packages
64 function parse_juju_password {
65 password_file="${HOME}/.local/share/juju/accounts.yaml"
66 local controller_name=$1
67 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
68 sed -ne "s|^\($s\):|\1|" \
69 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
70 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
71 awk -F$fs -v controller=$controller_name '{
72 indent = length($1)/2;
73 vname[indent] = $2;
74 for (i in vname) {if (i > indent) {delete vname[i]}}
75 if (length($3) > 0) {
76 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
77 if (match(vn,controller) && match($2,"password")) {
78 printf("%s",$3);
79 }
80 }
81 }'
82 }
83
# Generate a 32-character alphanumeric random secret on stdout.
# Fix: the previous "head /dev/urandom | tr ..." form sampled a fixed
# number of "lines" of random bytes and could, in rare cases, produce
# fewer than 32 alphanumeric characters. Filtering the urandom stream
# directly and cutting after 32 characters always yields a full secret.
function generate_secret() {
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
87
# Delete OSM persistent storage.
# Kubernetes deployments keep their state in a host directory (passed as
# $1), swarm deployments use per-stack named docker volumes ($1 = stack).
function remove_volumes() {
    if [ -z "$KUBERNETES" ]; then
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    else
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    fi
}
101
# Remove the docker overlay network created for a stack.
# Networks follow the "net<stack name>" convention used at deploy time.
function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
106
# Delete the DNAT rule that exposed the juju controller API (port 17070)
# on the host's default-route IP, then persist the change.
# $1: stack (= juju controller) name, used only when OSM_VCA_HOST is unset.
function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Derive the controller IP from "juju show-controller" api-endpoints
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        # Resolve the interface holding the default route, then its IPv4
        DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # -C checks for the rule's existence; only delete (-D) if it is there
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
126
# Remove a docker swarm stack and wait (up to 30 s) until all of its
# service tasks have drained; abort the installer via FATAL otherwise.
# $1: stack name.
function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            # count remaining task lines; 0 means the stack is fully gone
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        # give the engine a moment to release networks/volumes
        sleep 5
    fi
}
150
#removes osm deployments and services
# Deleting the namespace cascades to every deployment, service and secret
# inside it. $1: namespace name.
# Fix: quote $1 so the argument is passed as a single word even if it is
# empty or would otherwise be subject to word-splitting/globbing.
function remove_k8s_namespace() {
    kubectl delete ns "$1"
}
155
#Uninstall lightweight OSM: remove dockers
# With INSTALL_ONLY set (-o), only the requested addon is removed.
# Otherwise the whole deployment is torn down: k8s namespace or swarm
# stacks, module images, volumes, NAT rules, workdir and juju controller.
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else

            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # Run the image removals with the docker group active via newgrp;
        # the heredoc body must stay unindented so EONG matches.
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
205
#Safe unattended install of iptables-persistent
# Fix: the condition was inverted — "dpkg -l <pkg>" succeeds when the
# package IS installed, so the original installed when already present
# and skipped when missing (the "Not installed" message shows the intent).
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
        # Preseed debconf so apt-get can run without interactive prompts
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
216
#Configure NAT rules, based on the current IP addresses of containers
# Delegates to the devops nat_osm helper; needs iptables-persistent so
# the rules survive reboots.
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
225
# Report an unrecoverable installer error ($1 = reason) and abort.
function FATAL(){
    printf 'FATAL error: Cannot install OSM due to "%s"\n' "$1"
    exit 1
}
230
# Install and configure LXD from the 3.0/stable snap, removing any
# conflicting deb packages first, and tune the default profile's MTU.
function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd --channel=3.0/stable

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    # Match the container MTU to the host's default interface to avoid
    # fragmentation problems inside the LXD bridge
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
252
# Ask the user a yes/no question and normalize the answer (case insensitive).
# $1: prompt text.
# $2: default for an empty answer — 'y' means yes, 'n' means no; anything
#     else makes an empty answer unacceptable and re-prompts.
# Returns 0 for yes ('y'/'yes'), 1 for no ('n'/'no').
function ask_user(){
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        case "${USER_CONFIRMATION,,}" in
            "")
                [ "$2" == 'y' ] && return 0
                [ "$2" == 'n' ] && return 1
                ;;
            y|yes) return 0 ;;
            n|no)  return 1 ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
266
# Install python3-osmclient and the OSM information model from the OSM
# apt repository, then print env-var hints for the user's .bashrc.
# The ${VAR#"-X "} expansions strip option prefixes in case RELEASE /
# REPOSITORY / REPOSITORY_BASE still carry their command-line form.
function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Non-lightweight installs resolve SO/RO container IPs from lxc list
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
297
# Install and activate the Prometheus node_exporter as a systemd service
# (version $PROMETHEUS_NODE_EXPORTER_TAG), unless it is already active.
# Creates a dedicated no-login node_exporter user when missing.
function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
        then
            echo "Node Exporter is already running."
    else
            echo "Node Exporter is not active, installing..."
            if getent passwd node_exporter > /dev/null 2>&1; then
                echo "node_exporter user exists"
            else
                echo "Creating user node_exporter"
                sudo useradd --no-create-home --shell /bin/false node_exporter
            fi
            sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
            sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
            sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
            sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
            # Fix: the tarball and unpacked tree live under /tmp (see wget -P
            # and tar -C above); the original cleanup used a relative path
            # and therefore never removed them.
            sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
            sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
            sudo systemctl daemon-reload
            sudo systemctl restart node_exporter
            sudo systemctl enable node_exporter
            echo "Node Exporter has been activated in this host."
    fi
    return 0
}
323
# Stop and completely remove the node_exporter service: unit file,
# service user and binary (mirror image of install_prometheus_nodeexporter).
function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
333
function install_docker_ce() {
    # installs and configures Docker CE
    # Adds Docker's official apt repository, installs docker-ce and makes
    # the current user a member of the docker group; verifies the daemon
    # answers "docker version" before declaring success.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # sg runs the check with the fresh docker group without re-login
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
353
function install_docker_compose() {
    # installs and configures docker-compose
    # NOTE(review): version 1.18.0 is pinned here; bump it deliberately.
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
361
# Install juju from its classic snap and make sure /snap/bin is on PATH
# for the remainder of this script's run.
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic
    case ":$PATH:" in
        *":/snap/bin:"*) ;;
        *) PATH="/snap/bin:${PATH}" ;;
    esac
    echo "Finished installation of juju"
    return 0
}
369
# Bootstrap a local LXD juju controller named $OSM_STACK_NAME if one does
# not exist yet, then sanity-check that it shows up in "juju controllers".
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
    fi
    # Fix: \$1 must reach awk, not the shell — inside the double-quoted
    # program, $1 expanded to this function's (empty) first argument, so
    # awk printed whole lines; the wc -l count only worked by accident.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
378
# Idempotently add a DNAT rule so the juju controller API (port 17070) is
# reachable on the host's default IP, then persist it across reboots.
# Requires DEFAULT_IP and OSM_VCA_HOST to be set by the caller.
function juju_createproxy() {
    check_install_iptables_persistent

    # -C tests for the rule; append (-A) only when it is not present yet
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
387
# Obtain every docker image OSM needs. Third-party images (zookeeper,
# kafka, mongo, prometheus, cadvisor, grafana, mariadb, mysql) are always
# pulled. OSM module images are pulled from ${DOCKER_USER} when
# --pullimages was given, otherwise built from a fresh git checkout of
# each module at ${COMMIT_ID}. $TO_REBUILD (-m <MODULE>) restricts the
# work to the listed modules; empty means "all".
function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    # Arguments forwarded to every "docker build" invocation below
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # Fix: a redundant cadvisor pull guarded by "grep -q PROMETHEUS" used
    # to sit here; it duplicated the PROMETHEUS-CADVISOR block above (the
    # default path pulled the same image twice) and has been removed.
    echo "Finished generation of docker images"
}
491
# Copy $1 over $2 unless both files already have identical content.
# If $2 exists and differs, ask the user before overwriting (default
# answer: no). cp -b keeps a backup of any clobbered destination.
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    # Fix: the original wrapped cmp in $( ), relying on an obscure bash
    # rule (an empty command's status is that of the last command
    # substitution). Call cmp directly; -s silences output like the old
    # >/dev/null 2>&1 redirects did.
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
503
# Generate (or refresh) the per-module env files and deployment
# descriptors under $OSM_DOCKER_WORK_DIR. Existing env files are backed
# up with a '~' suffix first. Each credential/endpoint entry follows an
# insert-or-update pattern: append the line if its key is absent,
# otherwise sed-replace it in place. Fresh secrets are generated only
# when a file does not exist yet, so re-runs keep existing passwords.
function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml

        # Prometheus files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

        # Grafana files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

        # Prometheus Exporters files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # commented-out defaults: written once so operators can enable them
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    # NOTE(review): this password is only persisted when the env files are
    # first created; on re-runs the freshly generated value is discarded.
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
653
# Write a one-line wrapper at $OSM_DOCKER_WORK_DIR/osm that runs the
# osmclient sidecar container attached to the stack network, and make
# the wrapper executable.
function generate_osmclient_script () {
    local wrapper_cmd="docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}"
    echo "${wrapper_cmd}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
659
#installs kubernetes packages
# Adds the upstream Kubernetes apt repository and installs kubelet,
# kubeadm and kubectl pinned to 1.15.0-00 for a reproducible install.
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
669
#initializes kubernetes control plane
# $1: path to a kubeadm configuration file. Swap must be disabled for
# kubelet to start; the sleep gives the control plane time to settle.
function init_kubeadm() {
    sudo swapoff -a
    sudo kubeadm init --config $1
    sleep 5
}
676
function kube_config_dir() {
    # Set up kubectl credentials for the invoking user by copying the
    # admin kubeconfig generated by kubeadm into ~/.kube/config.
    # Globals (read): K8S_MANIFEST_DIR — its absence means kubeadm never ran.
    [ ! -d "$K8S_MANIFEST_DIR" ] && FATAL "Cannot Install Kubernetes"
    mkdir -p "$HOME/.kube"
    sudo cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
    # admin.conf is root-owned; hand it to the current user
    sudo chown $(id -u):$(id -g) "$HOME/.kube/config"
}
683
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Deploy the flannel CNI plugin so pods get networking.
    # Downloads the upstream manifest into a temp dir and applies it.
    # CNI_DIR stays global on purpose: the EXIT trap string is evaluated
    # at shell exit, after any locals would have gone out of scope.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    # NOTE: overwrites any previously-set EXIT trap in this shell
    trap 'rm -rf "${CNI_DIR}"' EXIT
    # BUGFIX: a failed download used to go unnoticed and kubectl then
    # failed on an empty directory with a confusing error
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P "$CNI_DIR" \
        || FATAL "Cannot Install Flannel"
    kubectl apply -f "$CNI_DIR" || FATAL "Cannot Install Flannel"
}
692
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # Create the OSM namespace and one generic secret per component, each
    # populated from that component's env file in the docker work dir.
    # Secret naming follows the fixed pattern "<component>-secret" /
    # "<component>.env", so a single loop covers every component.
    # Globals (read): OSM_STACK_NAME, OSM_DOCKER_WORK_DIR
    local component
    kubectl create ns $OSM_STACK_NAME
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
705
#deploys osm pods and services
function deploy_osm_services() {
    # Make the master node schedulable, wait briefly, then apply every
    # manifest under OSM_K8S_WORK_DIR into the stack namespace.
    # Globals: OSM_STACK_NAME, OSM_K8S_WORK_DIR (read); K8S_MASTER (written)
    K8S_MASTER=$(kubectl get nodes | awk '$3 ~ /master/ { print $1 }')
    # Drop the NoSchedule taint so OSM pods may run on this single node
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
713
function parse_yaml() {
    # Pin every OSM service image in the k8s manifests to the given tag.
    # $1 - docker image tag to apply to all opensourcemano/* images
    # Globals (read): WORKDIR_SUDO, OSM_K8S_WORK_DIR
    # All working variables are local: the original leaked TAG, osm_services
    # and osm into the global shell namespace.
    local tag="$1"
    local osm_services="nbi lcm ro pol mon light-ui keystone"
    local osm
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$tag/g" "$OSM_K8S_WORK_DIR/$osm.yaml"
    done
}
721
function namespace_vol() {
    # Rewrite the hostPath volumes in the k8s manifests from the default
    # /var/lib/osm to the stack-specific volume directory, so multiple
    # stacks do not share state.
    # Globals (read): WORKDIR_SUDO, OSM_K8S_WORK_DIR, OSM_NAMESPACE_VOL
    # Loop variables are local (the original leaked them into the global
    # namespace); the manifest path is quoted.
    local osm_services="nbi lcm ro pol mon kafka mongo mysql"
    local osm
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" "$OSM_K8S_WORK_DIR/$osm.yaml"
    done
}
728
function init_docker_swarm() {
    # Initialize a single-node docker swarm advertising the default-route IP.
    # If the host MTU is non-standard, pre-create docker_gwbridge with a
    # matching MTU so swarm ingress traffic is not fragmented.
    # Globals (read): DEFAULT_MTU, DEFAULT_IP
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # All existing docker network names (drop the "ID" header token)
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        # Pick the next free 172.x.0.0 subnet after the highest one in use;
        # prints "-1" if the second octet would overflow past 255
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
738
function create_docker_network() {
    # Create the attachable overlay network shared by all OSM containers,
    # honouring the MTU of the host's default interface.
    # Globals (read): DEFAULT_MTU, OSM_STACK_NAME
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
744
function deploy_lightweight() {

    # Deploy the OSM services as a docker swarm stack.
    # 1) Choose the container (and optionally host) port of every exposed
    #    service, honouring --nohostports (publish no host ports).
    # 2) Persist ports, network name and image tags into osm_ports.sh so
    #    other tooling and redeployments can source the same values.
    # 3) Run "docker stack deploy" with the generated compose file.
    # Globals (read): NO_HOST_PORTS, INSTALL_ELK, WORKDIR_SUDO,
    #   OSM_DOCKER_WORK_DIR, OSM_STACK_NAME, OSM_DOCKER_TAG, DOCKER_USER,
    #   KAFKA_TAG, PROMETHEUS_TAG, KEYSTONEDB_TAG, PROMETHEUS_CADVISOR_TAG,
    #   GRAFANA_TAG
    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    # Prometheus is published on 9091 on the host because RO already uses 9090
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # Container-side ports only: reachable inside the overlay network
        # but not published on the host
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container mappings, published on every swarm node
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # Persist everything the compose file needs as shell exports
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    # Deploy from the work dir so relative paths in the compose file resolve
    pushd $OSM_DOCKER_WORK_DIR
    sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    popd

    echo "Finished deployment of lightweight build"
}
799
function deploy_elk() {
    # Deploy the ELK (Elasticsearch / beats / Kibana) monitoring stack as a
    # separate swarm stack ("osm_elk"), wait for Kibana to answer, then try
    # to pre-create the default filebeat index pattern.
    # Globals (read): ELASTIC_VERSION, ELASTIC_CURATOR_VERSION, WORKDIR_SUDO,
    #   OSM_DOCKER_WORK_DIR, OSM_DEVOPS, OSM_STACK_NAME
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    # Redeploy from scratch if a previous osm_elk stack exists
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # Poll Kibana's status endpoint every $step seconds, up to $timelength s
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never answered: print the manual commands instead of failing
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
849
function install_lightweight() {
    # Main entry point for the default ("lightweight") installation.
    # Installs prerequisites (LXD, juju, docker, optionally Kubernetes),
    # bootstraps the VCA (juju) controller, generates service configuration
    # and deploys OSM either on k8s or on a docker swarm.
    # Fixes vs. previous revision:
    #  - generate_secret failure detection (was testing a literal string)
    #  - CA-cert lookup uses $OSM_STACK_NAME instead of hard-coded "osm"
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine the default-route interface, its IP and MTU: they drive
    # the docker network configuration and the VCA api proxy
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi
    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Bootstrap (or reuse) the juju controller and collect the VCA
    # connection parameters needed by the OSM services
    if [ -z "$OSM_VCA_HOST" ]; then
        juju_createcontroller
        OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        # BUGFIX: the controller name was hard-coded to "osm", which broke
        # installations using a custom stack name (-s)
        OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg ctl "$OSM_STACK_NAME" '.controllers[$ctl]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # BUGFIX: was [ -z "OSM_DATABASE_COMMONKEY" ] (a literal string,
        # never empty), so a failed generate_secret went undetected
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        # NOTE(review): tag "7" manifests are presumably shipped already
        # pinned, so they are not re-tagged — confirm against osm_pods repo
        [ ! "$OSM_DOCKER_TAG" == "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    # README2 fetch doubles as a telemetry ping marking installation end
    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
    track end
    return 0
}
994
function install_vimemu() {
    # Install and start the vim-emu (emulated VIM) docker container:
    # clone the upstream repo, build the image, run it (attached to the
    # OSM overlay network in lightweight mode) and print connection info.
    # BUGFIX: was echo "\nInstalling vim-emu", printing a literal "\n"
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # BUGFIX: -y added so a scripted install does not hang on the apt prompt
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1027
function install_k8s_monitoring() {
    # install OSM monitoring
    # Deploy the k8s monitoring stack via the helper script shipped in the
    # devops repo; chmod first since the checkout may not preserve +x.
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
1033
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    # Tear down whatever install_osm_k8s_monitoring.sh deployed.
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
1038
function dump_vars(){
    # Print the effective value of every installer option/variable; used by
    # --showopts to debug an installation command line before running it.
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # BUGFIX: previously printed $OSM_STACK_NAME under the OSM_WORK_DIR label
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1077
function track(){
    # Report an anonymous installation-progress event ($1) to the OSM
    # telemetry endpoint, with the elapsed seconds since the installer
    # started (SESSION_ID holds the start epoch and doubles as the cookie).
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    # Event name prefix encodes the install flavour: bin/binsrc/lxd/lw
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    # Fire-and-forget; failures are silently ignored (-q, output discarded)
    wget -q -O /dev/null $url
}
1091
1092 UNINSTALL=""
1093 DEVELOP=""
1094 UPDATE=""
1095 RECONFIGURE=""
1096 TEST_INSTALLER=""
1097 INSTALL_LXD=""
1098 SHOWOPTS=""
1099 COMMIT_ID=""
1100 ASSUME_YES=""
1101 INSTALL_FROM_SOURCE=""
1102 RELEASE="ReleaseSEVEN"
1103 REPOSITORY="stable"
1104 INSTALL_VIMEMU=""
1105 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1106 LXD_REPOSITORY_PATH=""
1107 INSTALL_LIGHTWEIGHT="y"
1108 INSTALL_ONLY=""
1109 INSTALL_ELK=""
1110 TO_REBUILD=""
1111 INSTALL_NOLXD=""
1112 INSTALL_NODOCKER=""
1113 INSTALL_NOJUJU=""
1114 KUBERNETES=""
1115 INSTALL_K8S_MONITOR=""
1116 INSTALL_NOHOSTCLIENT=""
1117 SESSION_ID=`date +%s`
1118 OSM_DEVOPS=
1119 OSM_VCA_HOST=
1120 OSM_VCA_SECRET=
1121 OSM_VCA_PUBKEY=
1122 OSM_STACK_NAME=osm
1123 NO_HOST_PORTS=""
1124 DOCKER_NOBUILD=""
1125 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1126 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1127 WORKDIR_SUDO=sudo
1128 OSM_WORK_DIR="/etc/osm"
1129 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1130 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1131 OSM_HOST_VOL="/var/lib/osm"
1132 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1133 OSM_DOCKER_TAG=latest
1134 DOCKER_USER=opensourcemano
1135 PULL_IMAGES="y"
1136 KAFKA_TAG=2.11-1.0.2
1137 PROMETHEUS_TAG=v2.4.3
1138 GRAFANA_TAG=latest
1139 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1140 PROMETHEUS_CADVISOR_TAG=latest
1141 KEYSTONEDB_TAG=10
1142 OSM_DATABASE_COMMONKEY=
1143 ELASTIC_VERSION=6.4.2
1144 ELASTIC_CURATOR_VERSION=5.5.4
1145 POD_NETWORK_CIDR=10.244.0.0/16
1146 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1147 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1148
# Parse command-line options. Flags that only select sub-features use
# "&& continue" chains so an unrecognized value falls through to the error.
# NOTE(review): the optstring contains a stray space before "hy" — harmless
# for parsing but presumably unintended; left as-is to avoid behavior change.
while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:-: hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # BUGFIX: error message referred to a non-existent -i option
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        m)
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            # Stack/namespace name; validated against k8s naming rules
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        -)
            # Long options arrive as "-" with the name in OPTARG
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1270
# -m NONE is mutually exclusive with any other -m module selection
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"

# --showopts: print the effective configuration and stop
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

# Make sure the minimal tooling the installer itself needs is present;
# apt is only touched when a package is actually missing
need_packages="git jq wget curl tar"
echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
  || sudo apt-get update \
  || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "Installing $need_packages requires root privileges." \
  || sudo apt-get install -y $need_packages \
  || FATAL "failed to install $need_packages"

# Locate (or fetch) the devops repo that provides the common helpers
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        # --test: use the local checkout this script lives in
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            # Highest version-sorted v* tag is taken as the stable release
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

# Shared helper functions (FATAL, ask_user, install_* helpers, ...)
. $OSM_DEVOPS/common/all_funcs

# Dispatch: uninstall / install-only targets exit before a full install
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
# The README fetch doubles as a telemetry ping marking installation start
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
track start

# Default path: lightweight (containerized) install, then exit.
# Everything below only runs for the legacy (non-lightweight) path.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
track end
echo -e "\nDONE"