9e4132ca807f28cfd61b4aa14ddeff43d3d38ae9
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
# Print the command-line help for the installer.
# Fixes several typos in the user-facing text: "confifured" -> "configured",
# "moitoring" -> "monitoring", "do not juju" -> "do not install juju", and a
# missing space after "--nodockerbuild:".
function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -r <repo>:      use specified repository name for osm packages"
    echo -e "     -R <release>:   use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>:  use specified repository public key url"
    echo -e "     -b <refspec>:   install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "                     -b master          (main dev branch)"
    echo -e "                     -b v2.0            (v2.0 branch)"
    echo -e "                     -b tags/v1.1.0     (a specific tag)"
    echo -e "                     ..."
    echo -e "     -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>.  If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -s <stack name> or <namespace>  user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e "     -H <VCA host>   use specific juju host controller IP"
    echo -e "     -S <VCA secret> use VCA/juju secret key"
    echo -e "     -P <VCA pubkey> use VCA/juju public key file"
    echo -e "     -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e "     --vimemu:       additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack:    additionally deploy an ELK docker stack for event logging"
    echo -e "     -m <MODULE>:    install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e "     -o <ADDON>:     ONLY (un)installs one of the addons (vimemu, elk_stack)"
    echo -e "     -D <devops path> use local devops installation path"
    echo -e "     -w <work dir>   Location to store runtime installation"
    echo -e "     -t <docker tag> specify osm docker tag (default is latest)"
    echo -e "     --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "     --nodocker:     do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju:       do not install juju, assumes already installed"
    echo -e "     --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports:  do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall:    uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source:       install OSM from source code using the latest stable tag"
    echo -e "     --develop:      (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages:   pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor:  install the OSM kubernetes monitoring with prometheus and grafana"
#    echo -e "     --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
#    echo -e "     --update:       update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts:     print chosen options and exit (only for debugging)"
    echo -e "     -y:             do not prompt for confirmation, assumes yes"
    echo -e "     -h / --help:    print this help"
}
60
61 # takes a juju/accounts.yaml file and returns the password specific
62 # for a controller. I wrote this using only bash tools to minimize
63 # additions of other packages
64 function parse_juju_password {
65 password_file="${HOME}/.local/share/juju/accounts.yaml"
66 local controller_name=$1
67 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
68 sed -ne "s|^\($s\):|\1|" \
69 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
70 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
71 awk -F$fs -v controller=$controller_name '{
72 indent = length($1)/2;
73 vname[indent] = $2;
74 for (i in vname) {if (i > indent) {delete vname[i]}}
75 if (length($3) > 0) {
76 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
77 if (match(vn,controller) && match($2,"password")) {
78 printf("%s",$3);
79 }
80 }
81 }'
82 }
83
# Emit a random 32-character alphanumeric secret on stdout.
# FIX: read the random stream directly instead of "head /dev/urandom | tr";
# the original piped the first 10 "lines" of urandom through tr, which is not
# guaranteed to leave 32 alphanumeric characters, so the secret could
# (rarely) come out short. Reading until head collects 32 chars is exact.
function generate_secret() {
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
87
# Delete persistent data for a deployment: the per-namespace host directory
# ($1) when running on kubernetes, or the named docker volumes of stack $1
# when running on swarm.
# Globals read: KUBERNETES, WORKDIR_SUDO
function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        # one docker volume per OSM datastore, all prefixed with the stack name
        for volume in mongo_db mon_db osm_packages ro_db pol_db prom_db ro; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
101
# Remove the docker network that was created for stack $1 (named "net<stack>").
function remove_network() {
    stack=$1
    local network_name="net${stack}"
    sg docker -c "docker network rm ${network_name}"
}
106
# Delete the DNAT rule that exposed the juju/VCA API (port 17070) on the
# host's default-route IP, then persist the iptables state.
# Arguments: $1 - stack (juju controller) name
# Globals read/written: OSM_VCA_HOST, DEFAULT_IF, DEFAULT_IP
function remove_iptables() {
    stack=$1
    # derive the VCA host from the juju controller api-endpoints if unknown
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    # determine the IP address of the interface holding the default route
    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # -C only checks whether the rule exists; delete (-D) and save only then
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
126
# Remove a docker swarm stack and wait (polling up to ~30s) until all of its
# containers are gone; abort via FATAL if they are not gone in time.
# Arguments: $1 - stack name
function remove_stack() {
    stack=$1
    # 'docker stack ps' succeeds only if the stack exists
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        # poll once per second, at most 30 times, until no tasks remain
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        # grace period for docker to release networks/volumes after stack rm
        sleep 5
    fi
}
150
#removes osm deployments and services
# Deleting the namespace cascades to every resource created inside it.
function remove_k8s_namespace() {
    local namespace=$1
    kubectl delete ns ${namespace}
}
155
#Uninstall lightweight OSM: remove dockers
# With INSTALL_ONLY set, only the requested addon (ELK) is removed; otherwise
# the whole deployment is torn down: k8s namespace or swarm stacks, OSM
# images, volumes, NAT rules, the work dir and the juju controller.
# Globals read: INSTALL_ONLY, INSTALL_ELK, KUBERNETES, INSTALL_K8S_MONITOR,
#   OSM_STACK_NAME, OSM_HOST_VOL, OSM_DOCKER_WORK_DIR, WORKDIR_SUDO,
#   DOCKER_USER, OSM_DOCKER_TAG
# Returns: always 0
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else

            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # run the image removals with the 'docker' group active via newgrp
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            # on k8s, volumes live under a per-namespace host directory
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
205
#Safe unattended install of iptables-persistent
# Preseeds the debconf autosave questions so apt never prompts.
# BUG FIX: the condition was inverted - 'dpkg -l <pkg>' succeeds when the
# package is known/installed, so the original installed it when it was
# already present and skipped it when missing. Negate the check.
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
216
#Configure NAT rules, based on the current IP addresses of containers
# Ensures iptables-persistent is available first so the rules created by the
# nat_osm helper survive a reboot, then delegates to that helper (needs root).
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules\n    Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
225
# Print a fatal installation error and abort the whole script with status 1.
# Arguments: $1 - reason shown to the user
function FATAL(){
    local reason=$1
    echo "FATAL error: Cannot install OSM due to \"${reason}\""
    exit 1
}
230
# Install and initialize LXD from snap: apply sysctl tuning, purge the old
# deb-based LXD, preseed-init the snap, and align the default profile MTU
# with the host's default interface.
function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap (remove any distro-packaged LXD first to avoid conflicts)
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd --channel=3.0/stable

    # Configure LXD: add the current user to the lxd group and init via preseed
    sudo usermod -a -G lxd `whoami`
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    # match the container profile MTU to the default-route interface's MTU
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
252
function ask_user(){
    # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
    # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true(0) if user type 'yes'; false (1) if user type 'no'
    # The answer is kept in the global USER_CONFIRMATION.
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        case "${USER_CONFIRMATION,,}" in
            "")
                # empty answer: apply the default, if a valid one was given
                [ "$2" == 'y' ] && return 0
                [ "$2" == 'n' ] && return 1
                ;;
            y|yes) return 0 ;;
            n|no)  return 1 ;;
        esac
        # anything else: keep asking until a valid answer arrives
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
266
# Install python3-osmclient and python3-osm-im from the OSM apt repository
# and print guidance about the OSM_HOSTNAME environment variables.
# Globals read: RELEASE, REPOSITORY, REPOSITORY_BASE, INSTALL_LIGHTWEIGHT
# Returns: always 0
function install_osmclient(){
    # the ${VAR#"-X "} expansions strip a leading option flag if the values
    # were captured verbatim from the command line (e.g. "-R ReleaseSEVEN")
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # non-lightweight (classic lxc) installs: discover SO/RO container IPs
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
297
# Install and activate the Prometheus node_exporter as a systemd service,
# unless it is already running.
# Globals read: PROMETHEUS_NODE_EXPORTER_TAG, OSM_DEVOPS
# Returns: always 0
function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
        then
            echo "Node Exporter is already running."
        else
            echo "Node Exporter is not active, installing..."
            # dedicated non-login system user to run the service
            if getent passwd node_exporter > /dev/null 2>&1; then
                echo "node_exporter user exists"
            else
                echo "Creating user node_exporter"
                sudo useradd --no-create-home --shell /bin/false node_exporter
            fi
            sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz  -P /tmp/
            sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
            sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
            sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
            # BUG FIX: the downloaded tarball and extracted dir live in /tmp;
            # the original 'rm -rf node_exporter-...*' used a relative path
            # and therefore removed nothing.
            sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
            sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
            sudo systemctl daemon-reload
            sudo systemctl restart node_exporter
            sudo systemctl enable node_exporter
            echo "Node Exporter has been activated in this host."
    fi
    return 0
}
323
# Stop, disable and fully remove the node_exporter service, its binary and
# its dedicated user. Returns: always 0
function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
333
function install_docker_ce() {
    # installs and configures Docker CE
    # Adds Docker's official apt repository, installs docker-ce, puts the
    # current user in the 'docker' group and verifies the daemon responds.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # 'sg docker' runs with the new group without requiring re-login
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
353
function install_docker_compose() {
    # installs and configures docker-compose
    # NOTE(review): docker-compose version 1.18.0 is hard-coded here while
    # other components use *_TAG variables - consider parameterizing.
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
361
# Install the juju client from snap and make sure /snap/bin is on PATH for
# the remainder of this script run. Returns: always 0
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic
    # prepend /snap/bin only if it is not already part of PATH
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
    return 0
}
369
# Bootstrap a local-LXD juju controller named $OSM_STACK_NAME if it does not
# exist yet, then verify it shows up in 'juju controllers'.
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
    fi
    # BUG FIX: escape \$1 so awk (not the shell) expands the first field.
    # The unescaped $1 was substituted by the shell (normally empty), making
    # awk print whole lines; the line count only matched by accident.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
378
# Add (if missing) a DNAT rule that exposes the juju/VCA API port 17070 of
# $OSM_VCA_HOST on the host's $DEFAULT_IP, and persist it across reboots.
function juju_createproxy() {
    check_install_iptables_persistent

    # -C only checks for the rule; append (-A) and save only when absent
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
387
# Pull third-party docker images and pull or build the OSM component images.
# For each OSM module: pull the prebuilt image from docker.io when
# PULL_IMAGES is set; otherwise clone the module from gerrit at $COMMIT_ID
# and build it locally, when TO_REBUILD is empty or names the module.
# Globals read: COMMIT_ID, PULL_IMAGES, TO_REBUILD, DOCKER_USER,
#   OSM_DOCKER_TAG, LWTEMPDIR, BUILD_ARGS, OSM_DEVOPS, and the *_TAG vars.
function generate_docker_images() {
    echo "Pulling and generating docker images"
    # build from the requested refspec, defaulting to master
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # third-party images: always pulled, never built locally
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # OSM modules: pull prebuilt image, or clone from gerrit and build
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # NBI and its keystone companion image are built from the same repo
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # NOTE(review): this re-pulls cadvisor already pulled above ('grep -q
    # PROMETHEUS' also matches PROMETHEUS-CADVISOR); looks redundant - confirm
    # before removing.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
491
# Copy $1 over $2 unless they are already identical. If $2 exists and
# differs, ask the user before overwriting (default: no). 'cp -b' keeps a
# backup ("file~") of anything it overwrites.
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    # BUG FIX: the original wrapped cmp in $(...), which executed cmp's
    # (empty, redirected) output as a command and relied on bash returning
    # the substitution's status for an empty command line. Call cmp directly.
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
503
# Create/update the per-component env files under $OSM_DOCKER_WORK_DIR and
# copy the deployment descriptors (k8s manifests or docker-compose files).
# Secrets are generated only when a file does not exist yet, so re-running
# the installer preserves existing credentials; VCA settings are refreshed
# in place via sed when already present.
# Globals read: KUBERNETES, OSM_DEVOPS, OSM_DOCKER_WORK_DIR, WORKDIR_SUDO,
#   OSM_DATABASE_COMMONKEY, OSM_VCA_* , DEFAULT_IP
function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    # '{,~}' copies each env file to a '~'-suffixed backup alongside it
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml

        # Prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml

        # Grafana & Prometheus Exporter files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # each OSMLCM_VCA_* key: append when missing, update in place when present
    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # optional knobs are added commented-out so operators can enable them
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    # NOTE: the generated password is used only when the env files do not
    # exist yet; on re-runs the files (and old passwords) are kept.
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
645
# Write a one-line wrapper script that launches the osmclient sidecar
# container attached to the stack's docker network, and make it executable.
function generate_osmclient_script () {
    local script_path="$OSM_DOCKER_WORK_DIR/osm"
    local run_command="docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}"
    echo "${run_command}" | $WORKDIR_SUDO tee "${script_path}"
    $WORKDIR_SUDO chmod +x "${script_path}"
    echo "osmclient sidecar container can be found at: ${script_path}"
}
651
#installs kubernetes packages
# Adds the Google kubernetes apt repository and installs a pinned 1.15.0-00
# kubelet/kubeadm/kubectl toolchain.
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
661
#initializes kubernetes control plane
# Arguments: $1 - path to a kubeadm configuration file
# kubeadm refuses to run with swap enabled, hence the swapoff first.
function init_kubeadm() {
    sudo swapoff -a
    sudo kubeadm init --config $1
    sleep 5
}
668
# Copy the cluster admin kubeconfig into ~/.kube/config so kubectl works for
# the invoking (non-root) user; aborts if the manifest dir is missing.
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    # hand ownership back to the invoking user
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
675
#deploys flannel as daemonsets
# Downloads the flannel CNI manifest into a temp dir and applies it.
# NOTE: the EXIT trap is script-wide, so the temp dir is removed when the
# installer exits, not when this function returns.
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # BUG FIX: the previous '[ $? -ne 0 ] && FATAL ...' made this function
    # return 1 when kubectl apply SUCCEEDED (the test itself fails then);
    # '|| FATAL' keeps the abort-on-error semantics and returns 0 on success.
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
684
#creates secrets from env files which will be used by containers
# Creates the OSM namespace and one generic secret per component, sourced
# from the env files generated under $OSM_DOCKER_WORK_DIR.
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}
697
#deploys osm pods and services
function deploy_osm_services() {
    # Remove the NoSchedule taint from the master so a single-node cluster
    # can run the OSM pods, then apply every manifest in the pods dir.
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/ {print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    # Short grace period before scheduling the OSM workload.
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
705
function parse_yaml() {
    # Pin every opensourcemano/* image reference in the K8s manifests to
    # the requested docker tag.
    # $1 - docker tag to write into the manifests.
    local tag="$1"
    local svc
    for svc in nbi lcm ro pol mon light-ui keystone; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$svc:.*/opensourcemano\/$svc:$tag/g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
713
function namespace_vol() {
    # Redirect each component's hostPath volume from the default
    # /var/lib/osm to the per-stack directory in $OSM_NAMESPACE_VOL.
    local svc
    for svc in nbi lcm ro pol mon kafka mongo mysql; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
720
function init_docker_swarm() {
    # Initialize a single-node docker swarm advertising the default-route IP.
    # When the host MTU is non-standard, docker_gwbridge must be created
    # manually with the matching MTU *before* 'swarm init'; otherwise docker
    # creates it with MTU 1500 and overlay traffic gets fragmented/dropped.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
    # Collect all existing docker network names (skipping the header row),
    # inspect their subnets and pick the next free 172.x.0.0/16-style range
    # for the gateway bridge.
    DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
    DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 |  awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
    sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
730
function create_docker_network() {
    # Create the attachable overlay network shared by all OSM containers,
    # carrying over the MTU detected on the default-route interface.
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
736
function deploy_lightweight() {
    # Deploy the OSM services as a docker swarm stack: compute the service
    # port map, persist the deployment environment in osm_ports.sh and run
    # 'docker stack deploy' with the generated compose file.

    echo "Deploying lightweight build"
    # Container-side service ports.
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    # Prometheus is published on 9091 to avoid clashing with RO on 9090.
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    # With --nohostports only the container port is recorded; otherwise each
    # entry becomes a host:container published-port mapping.
    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # Persist the full deployment environment; osm_ports.sh is sourced just
    # before 'docker stack deploy' so the compose file sees these variables.
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    popd

    echo "Finished deployment of lightweight build"
}
791
function deploy_elk() {
    # Deploy the optional ELK monitoring stack (Elasticsearch, beats,
    # Kibana, curator) as a separate docker stack "osm_elk" attached to the
    # OSM overlay network, then wait for Kibana and pre-create the default
    # filebeat index pattern.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # Poll the Kibana status endpoint every $step seconds for up to
    # $timelength seconds; elk_is_up==0 means Kibana answered 200 OK.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
841
function install_lightweight() {
    # Main entry point for the lightweight (container-based) OSM install.
    # Prepares work dirs, installs prerequisites (LXD, juju, docker and
    # optionally Kubernetes), collects the VCA/juju credentials, and finally
    # deploys the OSM services on K8s or on a docker swarm.
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    # Confirm the pre-requirement changes with the user unless -y was given.
    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
    1. Install and configure LXD
    2. Install juju
    3. Install docker CE
    4. Disable swap space
    5. Install and initialize Kubernetes
    as pre-requirements.
    Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine interface/IP/MTU of the default route; these drive the
    # docker network configuration and the VCA API proxy address.
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi
    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Autodetect the juju/VCA connection details that were not supplied
    # on the command line.
    if [ -z "$OSM_VCA_HOST" ]; then
        juju_createcontroller
        OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        # FIX: query the controller created for this stack instead of the
        # hard-coded "osm" controller, so installs with -s <name> work.
        OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg c "$OSM_STACK_NAME" '.controllers[$c]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # FIX: the '$' was missing here, so an empty secret was never detected.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        # Manifests default to the "7" tag; rewrite only when another tag
        # was requested.
        [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    # Lightweight "installation finished" beacon.
    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
    track end
    return 0
}
986
function install_vimemu() {
    # Build and start the vim-emu (emulated VIM) docker container, either
    # attached to the OSM overlay network (lightweight mode) or standalone.
    # FIX: use 'echo -e' so "\n" is rendered as a newline (was printed
    # literally), consistent with every other echo in this installer.
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # FIX: add -y so an unattended install does not block on the apt prompt.
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "     export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "     osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1019
function install_k8s_monitoring() {
    # install OSM monitoring
    # Delegates to the helper script shipped in the devops repo; the chmod
    # is needed because the execute bit may not survive the checkout/copy.
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
1025
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    # Counterpart of install_k8s_monitoring(); delegates to the devops helper.
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
1030
function dump_vars(){
    # Print the effective value of every installer option (used by --showopts).
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # FIX: previously printed $OSM_STACK_NAME under the OSM_WORK_DIR label.
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1069
function track(){
    # Report an anonymized installation-progress event to the OSM tracking
    # endpoint. SESSION_ID is the epoch timestamp taken at startup: it is
    # reused both as the tracking cookie and as the reference point for the
    # elapsed-time figure sent with each event.
    # $1 - event name suffix (e.g. "start", "docker_ce", "end").
    local now elapsed event_name url
    now=$(date +%s)
    elapsed=$((now - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    # Prefix encodes the install flavour: binaries, source, lxd or lightweight.
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${elapsed}"
    wget -q -O /dev/null $url
}
1083
1084 UNINSTALL=""
1085 DEVELOP=""
1086 UPDATE=""
1087 RECONFIGURE=""
1088 TEST_INSTALLER=""
1089 INSTALL_LXD=""
1090 SHOWOPTS=""
1091 COMMIT_ID=""
1092 ASSUME_YES=""
1093 INSTALL_FROM_SOURCE=""
1094 RELEASE="ReleaseSEVEN"
1095 REPOSITORY="stable"
1096 INSTALL_VIMEMU=""
1097 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1098 LXD_REPOSITORY_PATH=""
1099 INSTALL_LIGHTWEIGHT="y"
1100 INSTALL_ONLY=""
1101 INSTALL_ELK=""
1102 TO_REBUILD=""
1103 INSTALL_NOLXD=""
1104 INSTALL_NODOCKER=""
1105 INSTALL_NOJUJU=""
1106 KUBERNETES=""
1107 INSTALL_K8S_MONITOR=""
1108 INSTALL_NOHOSTCLIENT=""
1109 SESSION_ID=`date +%s`
1110 OSM_DEVOPS=
1111 OSM_VCA_HOST=
1112 OSM_VCA_SECRET=
1113 OSM_VCA_PUBKEY=
1114 OSM_STACK_NAME=osm
1115 NO_HOST_PORTS=""
1116 DOCKER_NOBUILD=""
1117 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1118 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1119 WORKDIR_SUDO=sudo
1120 OSM_WORK_DIR="/etc/osm"
1121 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1122 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1123 OSM_HOST_VOL="/var/lib/osm"
1124 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1125 OSM_DOCKER_TAG=latest
1126 DOCKER_USER=opensourcemano
1127 PULL_IMAGES="y"
1128 KAFKA_TAG=2.11-1.0.2
1129 PROMETHEUS_TAG=v2.4.3
1130 GRAFANA_TAG=latest
1131 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1132 PROMETHEUS_CADVISOR_TAG=latest
1133 KEYSTONEDB_TAG=10
1134 OSM_DATABASE_COMMONKEY=
1135 ELASTIC_VERSION=6.4.2
1136 ELASTIC_CURATOR_VERSION=5.5.4
1137 POD_NETWORK_CIDR=10.244.0.0/16
1138 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1139 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1140
# Command-line parsing. Long options are handled through the pseudo-option
# "-:" (getopts hands the long name over in $OPTARG).
# NOTE(review): the stray space inside the optstring (before "hy") looks
# accidental — it declares ' ' as an option letter; confirm before removing.
while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:-: hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            # Container orchestrator: "swarm" (default behaviour) or "k8s".
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # FIX: the message referred to "-i" while the option is "-c".
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            # Install a single optional component instead of the full stack.
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        m)
            # Select which module images to rebuild (repeatable).
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            # Stack name (swarm) / namespace (k8s); namespaces must satisfy
            # the RFC-1123 pattern in RE_CHECK.
            # NOTE(review): invalid input exits with status 0 — confirm
            # whether a non-zero status was intended.
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1262
# -m NONE means "rebuild nothing" and is only valid on its own.
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"

if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

# Ensure the base packages the installer itself relies on are present.
need_packages="git jq wget curl tar"
echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
  || sudo apt-get update \
  || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "Installing $need_packages requires root privileges." \
  || sudo apt-get install -y $need_packages \
  || FATAL "failed to install $need_packages"

# Obtain the devops repo: either the local checkout (--test) or a fresh
# temporary clone at the requested refspec (default: latest stable tag).
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

# Shared helper functions (FATAL, ask_user, install_* helpers, ...).
. $OSM_DEVOPS/common/all_funcs

# Uninstall and the "-o <component>" shortcuts short-circuit a full install.
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
# The README fetch acts as a lightweight "installation started" beacon.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
track start

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# "installation finished" beacon.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
track end
echo -e "\nDONE"