Removed old code from full_install_osm.sh.
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
28 echo -e " ..."
29     echo -e "     -c <orchestrator>  deploy osm services using the container orchestrator <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30     echo -e "     -s <stack name> or <namespace>  user-defined stack name when installing with swarm, or namespace when installing with k8s (default: osm)"
31 echo -e " -H <VCA host> use specific juju host controller IP"
32 echo -e " -S <VCA secret> use VCA/juju secret key"
33 echo -e " -P <VCA pubkey> use VCA/juju public key file"
34 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
35 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
36 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
37 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
38     echo -e "     -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, GRAFANA, NONE)"
39 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack)"
40 echo -e " -D <devops path> use local devops installation path"
41 echo -e " -w <work dir> Location to store runtime installation"
42 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
43     echo -e "     --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
44 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
45     echo -e "     --nojuju:       do not install juju, assumes it is already installed"
46     echo -e "     --nodockerbuild: do not build docker images (use existing locally cached images)"
47 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
48 echo -e " --nohostclient: do not install the osmclient"
49 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
50 echo -e " --source: install OSM from source code using the latest stable tag"
51 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
52 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
53     echo -e "     --k8s_monitor:  install the OSM kubernetes monitoring with prometheus and grafana"
54 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
55 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
56 echo -e " --showopts: print chosen options and exit (only for debugging)"
57 echo -e " -y: do not prompt for confirmation, assumes yes"
58 echo -e " -h / --help: print this help"
59 }
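# Illustrative invocations (the option letters are those documented in usage()
# above; the concrete values shown here are hypothetical):
#   ./full_install_osm.sh                      # default install from binaries, default orchestrator
#   ./full_install_osm.sh -c k8s -s myosm      # deploy OSM services on Kubernetes in namespace "myosm"
#   ./full_install_osm.sh --uninstall          # remove the containers and delete NAT rules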
60
61 # reads the juju accounts.yaml file and returns the password for the
62 # given controller. It uses only bash tools to minimize the need for
63 # additional packages
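# A minimal sketch of the accounts.yaml layout this function expects
# (controller name and password below are purely illustrative):
#   controllers:
#     osm:
#       user: admin
#       password: s3cr3t-example
# With such a file, "parse_juju_password osm" would print "s3cr3t-example".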
64 function parse_juju_password {
65 password_file="${HOME}/.local/share/juju/accounts.yaml"
66 local controller_name=$1
67 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
68 sed -ne "s|^\($s\):|\1|" \
69 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
70 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
71 awk -F$fs -v controller=$controller_name '{
72 indent = length($1)/2;
73 vname[indent] = $2;
74 for (i in vname) {if (i > indent) {delete vname[i]}}
75 if (length($3) > 0) {
76 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
77 if (match(vn,controller) && match($2,"password")) {
78 printf("%s",$3);
79 }
80 }
81 }'
82 }
83
84 function generate_secret() {
85 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
86 }
87
88 function remove_volumes() {
89 if [ -n "$KUBERNETES" ]; then
90 k8_volume=$1
91 echo "Removing ${k8_volume}"
92 $WORKDIR_SUDO rm -rf ${k8_volume}
93 else
94 stack=$1
95 volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
96 for volume in $volumes; do
97 sg docker -c "docker volume rm ${stack}_${volume}"
98 done
99 fi
100 }
101
102 function remove_network() {
103 stack=$1
104 sg docker -c "docker network rm net${stack}"
105 }
106
107 function remove_iptables() {
108 stack=$1
109 if [ -z "$OSM_VCA_HOST" ]; then
110 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
111 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
112 fi
113
114 if [ -z "$DEFAULT_IP" ]; then
115 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
116 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
117 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
118 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
119 fi
120
121 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
122 sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
123 sudo netfilter-persistent save
124 fi
125 }
126
127 function remove_stack() {
128 stack=$1
129 if sg docker -c "docker stack ps ${stack}" ; then
130 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
131 COUNTER=0
132 result=1
133 while [ ${COUNTER} -lt 30 ]; do
134 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
135 #echo "Dockers running: $result"
136 if [ "${result}" == "0" ]; then
137 break
138 fi
139 let COUNTER=COUNTER+1
140 sleep 1
141 done
142 if [ "${result}" == "0" ]; then
143         echo "All containers of the stack ${stack} were removed"
144     else
145         FATAL "Some containers of the stack ${stack} could not be removed. The stack could not be cleaned up."
146 fi
147 sleep 5
148 fi
149 }
150
151 #removes osm deployments and services
152 function remove_k8s_namespace() {
153 kubectl delete ns $1
154 }
155
156 #Uninstall lightweight OSM: remove dockers
157 function uninstall_lightweight() {
158 if [ -n "$INSTALL_ONLY" ]; then
159 if [ -n "$INSTALL_ELK" ]; then
160 echo -e "\nUninstalling OSM ELK stack"
161 remove_stack osm_elk
162 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
163 fi
164 else
165 echo -e "\nUninstalling OSM"
166 if [ -n "$KUBERNETES" ]; then
167 if [ -n "$K8S_MONITOR" ]; then
168 # uninstall OSM MONITORING
169 uninstall_k8s_monitoring
170 fi
171 remove_k8s_namespace $OSM_STACK_NAME
172 else
173
174 remove_stack $OSM_STACK_NAME
175 remove_stack osm_elk
176 fi
177 echo "Now osm docker images and volumes will be deleted"
178 newgrp docker << EONG
179 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
180 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
181 docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
182 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
183 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
184 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
185 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
186 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
187 EONG
188
189 if [ -n "$KUBERNETES" ]; then
190 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
191 remove_volumes $OSM_NAMESPACE_VOL
192 else
193 remove_volumes $OSM_STACK_NAME
194 remove_network $OSM_STACK_NAME
195 fi
196 remove_iptables $OSM_STACK_NAME
197 echo "Removing $OSM_DOCKER_WORK_DIR"
198 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
199 sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
200 fi
201 echo "Some docker images will be kept in case they are used by other docker stacks"
202 echo "To remove them, just run 'docker image prune' in a terminal"
203 return 0
204 }
205
206 #Configure NAT rules, based on the current IP addresses of containers
207 function nat(){
208 echo -e "\nChecking required packages: iptables-persistent"
209 dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
210 sudo apt-get -yq install iptables-persistent
211 echo -e "\nConfiguring NAT rules"
212 echo -e " Required root privileges"
213 sudo $OSM_DEVOPS/installers/nat_osm
214 }
215
216 function FATAL(){
217 echo "FATAL error: Cannot install OSM due to \"$1\""
218 exit 1
219 }
220
221 function install_lxd() {
222 sudo apt-get update
223 sudo apt-get install -y lxd
224 newgrp lxd
225 lxd init --auto
226 lxd waitready
227 lxc network create lxdbr0 ipv4.address=auto ipv4.nat=true ipv6.address=none ipv6.nat=false
228 DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
229 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
230 lxc profile device set default eth0 mtu $DEFAULT_MTU
231 #sudo systemctl stop lxd-bridge
232 #sudo systemctl --system daemon-reload
233 #sudo systemctl enable lxd-bridge
234 #sudo systemctl start lxd-bridge
235 }
236
237 function ask_user(){
238     # asks the user a question and parses a response among 'y', 'yes', 'n' or 'no'. Case insensitive
239     # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no, other or empty for no default
240     # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
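    # Typical use, as elsewhere in this script:
    #   ask_user "Continue (Y/n)? " y || exit 1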
241 read -e -p "$1" USER_CONFIRMATION
242 while true ; do
243 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
244 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
245 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
246 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
247 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
248 done
249 }
250
251 function install_osmclient(){
252 CLIENT_RELEASE=${RELEASE#"-R "}
253 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
254 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
255 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
256 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
257 curl $key_location | sudo apt-key add -
258 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
259 sudo apt-get update
260 sudo apt-get install -y python3-pip
261 sudo -H LC_ALL=C python3 -m pip install -U pip
262 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
263 sudo apt-get install -y python3-osm-im python3-osmclient
264 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
265 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
266 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
267 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
268 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
269 echo -e "\nOSM client installed"
270 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
271 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
272 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
273 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
274 else
275         echo -e "OSM client assumes that the OSM host is running on localhost (127.0.0.1)."
276         echo -e "If you want to interact with a different OSM host, configure this env variable in your .bashrc file:"
277 echo " export OSM_HOSTNAME=<OSM_host>"
278 fi
279 return 0
280 }
281
282 function install_prometheus_nodeexporter(){
283 if (systemctl -q is-active node_exporter)
284 then
285 echo "Node Exporter is already running."
286 else
287 echo "Node Exporter is not active, installing..."
288 if getent passwd node_exporter > /dev/null 2>&1; then
289 echo "node_exporter user exists"
290 else
291 echo "Creating user node_exporter"
292 sudo useradd --no-create-home --shell /bin/false node_exporter
293 fi
294 sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
295 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
296 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
297 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
298         sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
299 sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
300 sudo systemctl daemon-reload
301 sudo systemctl restart node_exporter
302 sudo systemctl enable node_exporter
303         echo "Node Exporter has been activated on this host."
304 fi
305 return 0
306 }
307
308 function uninstall_prometheus_nodeexporter(){
309 sudo systemctl stop node_exporter
310 sudo systemctl disable node_exporter
311 sudo rm /etc/systemd/system/node_exporter.service
312 sudo systemctl daemon-reload
313 sudo userdel node_exporter
314 sudo rm /usr/local/bin/node_exporter
315 return 0
316 }
317
318 function install_docker_ce() {
319 # installs and configures Docker CE
320 echo "Installing Docker CE ..."
321 sudo apt-get -qq update
322 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
323 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
324 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
325 sudo apt-get -qq update
326 sudo apt-get install -y docker-ce
327 echo "Adding user to group 'docker'"
328 sudo groupadd -f docker
329 sudo usermod -aG docker $USER
330 sleep 2
331 sudo service docker restart
332 echo "... restarted Docker service"
333 sg docker -c "docker version" || FATAL "Docker installation failed"
334 echo "... Docker CE installation done"
335 return 0
336 }
337
338 function install_docker_compose() {
339 # installs and configures docker-compose
340 echo "Installing Docker Compose ..."
341 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
342 sudo chmod +x /usr/local/bin/docker-compose
343 echo "... Docker Compose installation done"
344 }
345
346 function install_juju() {
347 echo "Installing juju"
348 sudo snap install juju --classic
349 [ -z "$INSTALL_NOLXD" ] && sudo dpkg-reconfigure -p medium lxd
350 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
351 echo "Finished installation of juju"
352 return 0
353 }
354
355 function juju_createcontroller() {
356 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
357         # Controller not found, create it
358 sudo usermod -a -G lxd ${USER}
359 sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
360 fi
361 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
362 }
363
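# Adds (if not already present) a DNAT rule so that connections to
# ${DEFAULT_IP}:17070 on this host are forwarded to the juju/VCA controller at
# ${OSM_VCA_HOST}, and persists it with netfilter-persistent; remove_iptables
# deletes the same rule on uninstall.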
364 function juju_createproxy() {
365 echo -e "\nChecking required packages: iptables-persistent"
366 dpkg -l iptables-persistent &>/dev/null || ! echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" || \
367 sudo apt-get -yq install iptables-persistent
368
369 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
370 sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
371 sudo netfilter-persistent save
372 fi
373 }
374
375 function generate_docker_images() {
376 echo "Pulling and generating docker images"
377 _build_from=$COMMIT_ID
378 [ -z "$_build_from" ] && _build_from="master"
379
380 echo "OSM Docker images generated from $_build_from"
381
382 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
383 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
384 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
385 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
386
387 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
388 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
389 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
390 fi
391
392 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
393 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
394 fi
395
396 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
397 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
398 fi
399
400 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
401 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
402 fi
403
404 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
405 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
406 fi
407
408 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
409 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
410 fi
411
412 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
413 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
414 fi
415
416 if [ -n "$PULL_IMAGES" ]; then
417 sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
418 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
419 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
420 git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
421 sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
422 fi
423
424 if [ -n "$PULL_IMAGES" ]; then
425 sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
426 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
427 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
428 git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
429 sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
430 fi
431
432 if [ -n "$PULL_IMAGES" ]; then
433 sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
434 sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
435 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
436 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
437 git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
438 sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
439 sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
440 fi
441
442 if [ -n "$PULL_IMAGES" ]; then
443 sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
444 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
445 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
446 git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
447 sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
448 fi
449
450 if [ -n "$PULL_IMAGES" ]; then
451         sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM docker image"
452 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
453 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
454 git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
455 sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
456 fi
457
458 if [ -n "$PULL_IMAGES" ]; then
459 sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
460 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
461 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
462 git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
463 sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
464 fi
465
466 if [ -n "$PULL_IMAGES" ]; then
467 sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
468 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
469 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
470 fi
471
472 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
473 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
474 fi
475
476 echo "Finished generation of docker images"
477 }
478
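# Copies $1 over $2 only when they differ; if $2 already exists, the user is
# asked before overwriting it. Hypothetical example:
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml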
479 function cmp_overwrite() {
480 file1="$1"
481 file2="$2"
482     if ! cmp -s "${file1}" "${file2}"; then
483 if [ -f "${file2}" ]; then
484 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
485 else
486 cp -b ${file1} ${file2}
487 fi
488 fi
489 }
490
491 function generate_docker_env_files() {
492 echo "Doing a backup of existing env files"
493 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
494 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
495 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
496 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
497 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
498 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
499 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
500 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
501 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
502
503 echo "Generating docker env files"
504 if [ -n "$KUBERNETES" ]; then
505 #Kubernetes resources
506 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
507 else
508 # Docker-compose
509 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
510
511 # Prometheus
512 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml
513
514 # Grafana & Prometheus Exporter files
515 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
516 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
517 fi
518
519 # LCM
520 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
521 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
522 fi
523
524 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
525 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
526 else
527 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
528 fi
529
530 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
531 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
532 else
533 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
534 fi
535
536 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
537 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
538 else
539 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
540 fi
541
542 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
543 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
544 else
545 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
546 fi
547
548 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
549 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
550 else
551 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
552 fi
553
554 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
555 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
556 fi
557
558 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
559 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
560 fi
561
562 # RO
563 MYSQL_ROOT_PASSWORD=$(generate_secret)
564 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
565 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
566 fi
567 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
568 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
569 fi
570
571 # Keystone
572 KEYSTONE_DB_PASSWORD=$(generate_secret)
573 SERVICE_PASSWORD=$(generate_secret)
574 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
575 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
576 fi
577 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
578 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
579 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
580 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
581 fi
582
583 # NBI
584 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
585 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
586 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
587 fi
588
589 # MON
590 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
591 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
592 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
593 fi
594
595 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
596 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
597 else
598 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
599 fi
600
601 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
602 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
603 else
604 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
605 fi
606
607 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
608 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
609 else
610 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
611 fi
612
613 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
614 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
615 else
616 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
617 fi
618
619
620 # POL
621 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
622 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
623 fi
624
625 # LW-UI
626 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
627 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
628 fi
629
630 echo "Finished generation of docker env files"
631 }
632
633 function generate_osmclient_script () {
634 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
635 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
636 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
637 }
638
639 #installs kubernetes packages
640 function install_kube() {
641 sudo apt-get update && sudo apt-get install -y apt-transport-https
642 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
643 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
644 sudo apt-get update
645 echo "Installing Kubernetes Packages ..."
646 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
647 }
648
649 #initializes kubernetes control plane
650 function init_kubeadm() {
651 sudo swapoff -a
652 sudo kubeadm init --config $1
653 sleep 5
654 }
655
656 function kube_config_dir() {
657 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
658 mkdir -p $HOME/.kube
659 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
660 sudo chown $(id -u):$(id -g) $HOME/.kube/config
661 }
662
663 #deploys flannel as daemonsets
664 function deploy_cni_provider() {
665 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
666 trap 'rm -rf "${CNI_DIR}"' EXIT
667 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
668 kubectl apply -f $CNI_DIR
669 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
670 }
671
672 #creates secrets from env files which will be used by containers
673 function kube_secrets(){
674 kubectl create ns $OSM_STACK_NAME
675 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
676 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
677 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
678 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
679 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
680 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
681 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
682 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
683 }
684
685 #deploys osm pods and services
686 function deploy_osm_services() {
687 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
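    # the trailing '-' below removes the NoSchedule taint so that OSM pods can
    # be scheduled on the (single-node) master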
688 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
689 sleep 5
690 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
691 }
692
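# Rewrites the image tag of the OSM service manifests; e.g. "parse_yaml 7.0.0"
# (tag value illustrative) would turn opensourcemano/lcm:<old tag> into
# opensourcemano/lcm:7.0.0 in $OSM_K8S_WORK_DIR/lcm.yaml, and likewise for the
# other services listed below.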
693 function parse_yaml() {
694 osm_services="nbi lcm ro pol mon light-ui keystone"
695 TAG=$1
696 for osm in $osm_services; do
697 $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
698 done
699 }
700
701 function namespace_vol() {
702 osm_services="nbi lcm ro pol mon kafka mongo mysql"
703 for osm in $osm_services; do
704 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
705 done
706 }
707
708 function init_docker_swarm() {
709 if [ "${DEFAULT_MTU}" != "1500" ]; then
710 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
711 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
712 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
713 fi
714 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
715 return 0
716 }
717
718 function create_docker_network() {
719 echo "creating network"
720 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
721 echo "creating network DONE"
722 }
723
724 function deploy_lightweight() {
725
726 echo "Deploying lightweight build"
727 OSM_NBI_PORT=9999
728 OSM_RO_PORT=9090
729 OSM_KEYSTONE_PORT=5000
730 OSM_UI_PORT=80
731 OSM_MON_PORT=8662
732 OSM_PROM_PORT=9090
733 OSM_PROM_CADVISOR_PORT=8080
734 OSM_PROM_HOSTPORT=9091
735 OSM_GRAFANA_PORT=3000
736 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
737 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
738
739 if [ -n "$NO_HOST_PORTS" ]; then
740 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
741 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
742 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
743 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
744 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
745 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
746 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
747 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
748 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
749 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
750 else
751 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
752 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
753 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
754 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
755 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
756 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
757 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
758 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
759 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
760 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
761 fi
762 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
763 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
764 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
765 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
766 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
767 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
768 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
769 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
770 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
771
772 pushd $OSM_DOCKER_WORK_DIR
773 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
774 popd
775
776 echo "Finished deployment of lightweight build"
777 }
778
779 function deploy_elk() {
780 echo "Pulling docker images for ELK"
781 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
782 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
783 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
784 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
785 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
786 echo "Finished pulling elk docker images"
787 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
788 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
789 remove_stack osm_elk
790 echo "Deploying ELK stack"
791 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
792 echo "Waiting for ELK stack to be up and running"
793 time=0
794 step=5
795 timelength=40
796 elk_is_up=1
797 while [ $time -le $timelength ]; do
798 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
799 elk_is_up=0
800 break
801 fi
802 sleep $step
803 time=$((time+step))
804 done
805 if [ $elk_is_up -eq 0 ]; then
806 echo "ELK is up and running. Trying to create index pattern..."
807 #Create index pattern
808 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
809 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
810 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
811 #Make it the default index
812 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
813 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
814 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
815 else
816 echo "Cannot connect to Kibana to create index pattern."
817         echo "Once Kibana is running, you can use the following instructions to create the index pattern:"
818 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
819 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
820 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
821 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
822 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
823 -d"{\"value\":\"filebeat-*\"}"'
824 fi
825 echo "Finished deployment of ELK stack"
826 return 0
827 }
828
829 function install_lightweight() {
830 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
831 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
832 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
833 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
834
835 track checkingroot
836 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
837 track noroot
838
839 if [ -n "$KUBERNETES" ]; then
840 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
841 1. Install and configure LXD
842 2. Install juju
843 3. Install docker CE
844 4. Disable swap space
845 5. Install and initialize Kubernetes
846 as pre-requirements.
847 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
848
849 else
850 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
851 fi
852 track proceed
853
854 echo "Installing lightweight build of OSM"
855 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
856 trap 'rm -rf "${LWTEMPDIR}"' EXIT
857 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
858 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
859 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
860 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
861 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
862
863     # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
864 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
865 need_packages_lw="lxd snapd"
866 echo -e "Checking required packages: $need_packages_lw"
867 dpkg -l $need_packages_lw &>/dev/null \
868 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
869 || sudo apt-get update \
870 || FATAL "failed to run apt-get update"
871 dpkg -l $need_packages_lw &>/dev/null \
872 || ! echo -e "Installing $need_packages_lw requires root privileges." \
873 || sudo apt-get install -y $need_packages_lw \
874 || FATAL "failed to install $need_packages_lw"
875 fi
876 track prereqok
877
878 [ -z "$INSTALL_NOJUJU" ] && install_juju
879 track juju_install
880
881 if [ -z "$OSM_VCA_HOST" ]; then
882 juju_createcontroller
883 OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
884 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
885 fi
886 track juju_controller
887
888 if [ -z "$OSM_VCA_SECRET" ]; then
889 OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
890 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
891 fi
892 if [ -z "$OSM_VCA_PUBKEY" ]; then
893 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
894 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
895 fi
896 if [ -z "$OSM_VCA_CACERT" ]; then
897         OSM_VCA_CACERT=$(juju controllers --format json | jq -r ".controllers[\"$OSM_STACK_NAME\"][\"ca-cert\"]" | base64 | tr -d \\n)
898 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
899 fi
900 if [ -z "$OSM_VCA_APIPROXY" ]; then
901 OSM_VCA_APIPROXY=$DEFAULT_IP
902 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
903 fi
904 juju_createproxy
905 track juju
906
907 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
908 OSM_DATABASE_COMMONKEY=$(generate_secret)
909         [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
910 fi
911
912 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
913 track docker_ce
914
915 #Installs Kubernetes and deploys osm services
916 if [ -n "$KUBERNETES" ]; then
917 install_kube
918 track install_k8s
919 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
920 kube_config_dir
921 track init_k8s
922 else
923 #install_docker_compose
924 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
925 track docker_swarm
926 fi
927
928 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
929 track docker_build
930
931 generate_docker_env_files
932
933 if [ -n "$KUBERNETES" ]; then
934 if [ -n "$K8S_MONITOR" ]; then
935 # uninstall OSM MONITORING
936 uninstall_k8s_monitoring
937 track uninstall_k8s_monitoring
938 fi
939 #remove old namespace
940 remove_k8s_namespace $OSM_STACK_NAME
941 deploy_cni_provider
942 kube_secrets
943 [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
944 namespace_vol
945 deploy_osm_services
946 track deploy_osm_services_k8s
947 if [ -n "$K8S_MONITOR" ]; then
948 # install OSM MONITORING
949 install_k8s_monitoring
950 track install_k8s_monitoring
951 fi
952 else
953 # remove old stack
954 remove_stack $OSM_STACK_NAME
955 create_docker_network
956 deploy_lightweight
957 generate_osmclient_script
958 track docker_deploy
959 install_prometheus_nodeexporter
960 track nodeexporter
961 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
962 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
963 fi
964
965 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
966 track osmclient
967
968 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
969 track end
970 return 0
971 }
972
973 function install_vimemu() {
974     echo -e "\nInstalling vim-emu"
975 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
976 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
977 # clone vim-emu repository (attention: branch is currently master only)
978 echo "Cloning vim-emu repository ..."
979 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
980 # build vim-emu docker
981 echo "Building vim-emu Docker container..."
982
983 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
984 # start vim-emu container as daemon
985 echo "Starting vim-emu Docker container 'vim-emu' ..."
986 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
987 # in lightweight mode, the emulator needs to be attached to netOSM
988 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
989 else
990 # classic build mode
991 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
992 fi
993 echo "Waiting for 'vim-emu' container to start ..."
994 sleep 5
995 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
996 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
997 # print vim-emu connection info
998 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
999 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1000     echo -e "To add the emulated VIM to OSM, run:"
1001 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1002 }
1003
1004 function install_k8s_monitoring() {
1005 # install OSM monitoring
1006 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1007 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1008 }
1009
1010 function uninstall_k8s_monitoring() {
1011 # uninstall OSM monitoring
1012 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1013 }
1014
1015 function dump_vars(){
1016 echo "DEVELOP=$DEVELOP"
1017 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1018 echo "UNINSTALL=$UNINSTALL"
1019 echo "UPDATE=$UPDATE"
1020 echo "RECONFIGURE=$RECONFIGURE"
1021 echo "TEST_INSTALLER=$TEST_INSTALLER"
1022 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1023 echo "INSTALL_LXD=$INSTALL_LXD"
1024 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1025 echo "INSTALL_ONLY=$INSTALL_ONLY"
1026 echo "INSTALL_ELK=$INSTALL_ELK"
1027 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1028 echo "TO_REBUILD=$TO_REBUILD"
1029 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1030 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1031 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1032 echo "RELEASE=$RELEASE"
1033 echo "REPOSITORY=$REPOSITORY"
1034 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1035 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1036 echo "OSM_DEVOPS=$OSM_DEVOPS"
1037 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1038 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1039 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1040 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1041 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1042 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1043     echo "OSM_WORK_DIR=$OSM_WORK_DIR"
1044 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1045 echo "DOCKER_USER=$DOCKER_USER"
1046 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1047 echo "PULL_IMAGES=$PULL_IMAGES"
1048 echo "KUBERNETES=$KUBERNETES"
1049 echo "SHOWOPTS=$SHOWOPTS"
1050 echo "Install from specific refspec (-b): $COMMIT_ID"
1051 }
1052
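# Reports an installation milestone to the OSM telemetry endpoint. For the
# lightweight install, "track start" would request a URL of the form
# (values illustrative):
#   http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=<SESSION_ID>&event=lw_start&ce_duration=<seconds>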
1053 function track(){
1054 ctime=`date +%s`
1055 duration=$((ctime - SESSION_ID))
1056 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1057 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1058 event_name="bin"
1059 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1060 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1061 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1062 event_name="${event_name}_$1"
1063 url="${url}&event=${event_name}&ce_duration=${duration}"
1064 wget -q -O /dev/null $url
1065 }
1066
1067 UNINSTALL=""
1068 DEVELOP=""
1069 UPDATE=""
1070 RECONFIGURE=""
1071 TEST_INSTALLER=""
1072 INSTALL_LXD=""
1073 SHOWOPTS=""
1074 COMMIT_ID=""
1075 ASSUME_YES=""
1076 INSTALL_FROM_SOURCE=""
1077 RELEASE="ReleaseSEVEN"
1078 REPOSITORY="stable"
1079 INSTALL_VIMEMU=""
1080 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1081 LXD_REPOSITORY_PATH=""
1082 INSTALL_LIGHTWEIGHT="y"
1083 INSTALL_ONLY=""
1084 INSTALL_ELK=""
1085 TO_REBUILD=""
1086 INSTALL_NOLXD=""
1087 INSTALL_NODOCKER=""
1088 INSTALL_NOJUJU=""
1089 KUBERNETES=""
1090 K8S_MONITOR=""
1091 INSTALL_NOHOSTCLIENT=""
1092 SESSION_ID=`date +%s`
1093 OSM_DEVOPS=
1094 OSM_VCA_HOST=
1095 OSM_VCA_SECRET=
1096 OSM_VCA_PUBKEY=
1097 OSM_STACK_NAME=osm
1098 NO_HOST_PORTS=""
1099 DOCKER_NOBUILD=""
1100 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1101 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1102 WORKDIR_SUDO=sudo
1103 OSM_WORK_DIR="/etc/osm"
1104 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1105 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1106 OSM_HOST_VOL="/var/lib/osm"
1107 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1108 OSM_DOCKER_TAG=latest
1109 DOCKER_USER=opensourcemano
1110 PULL_IMAGES="y"
1111 KAFKA_TAG=2.11-1.0.2
1112 PROMETHEUS_TAG=v2.4.3
1113 GRAFANA_TAG=latest
1114 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1115 PROMETHEUS_CADVISOR_TAG=latest
1116 KEYSTONEDB_TAG=10
1117 OSM_DATABASE_COMMONKEY=
1118 ELASTIC_VERSION=6.4.2
1119 ELASTIC_CURATOR_VERSION=5.5.4
1120 POD_NETWORK_CIDR=10.244.0.0/16
1121 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1122 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1123
1124 while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:-:hy" o; do
1125 case "${o}" in
1126 b)
1127 COMMIT_ID=${OPTARG}
1128 PULL_IMAGES=""
1129 ;;
1130 r)
1131 REPOSITORY="${OPTARG}"
1132 REPO_ARGS+=(-r "$REPOSITORY")
1133 ;;
1134 c)
1135 [ "${OPTARG}" == "swarm" ] && continue
1136 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1137             echo -e "Invalid argument for -c: '$OPTARG'\n" >&2
1138 usage && exit 1
1139 ;;
1140 k)
1141 REPOSITORY_KEY="${OPTARG}"
1142 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1143 ;;
1144 u)
1145 REPOSITORY_BASE="${OPTARG}"
1146 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1147 ;;
1148 R)
1149 RELEASE="${OPTARG}"
1150 REPO_ARGS+=(-R "$RELEASE")
1151 ;;
1152 D)
1153 OSM_DEVOPS="${OPTARG}"
1154 ;;
1155 o)
1156 INSTALL_ONLY="y"
1157 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1158 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1159 ;;
1160 m)
1161 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1162 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1163 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1164 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1165 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1166 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1167 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1168 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1169 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1170 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1171 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1172 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1173 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1174 ;;
1175 H)
1176 OSM_VCA_HOST="${OPTARG}"
1177 ;;
1178 S)
1179 OSM_VCA_SECRET="${OPTARG}"
1180 ;;
1181 s)
1182 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1183 ;;
1184 w)
1185 # when specifying workdir, do not use sudo for access
1186 WORKDIR_SUDO=
1187 OSM_WORK_DIR="${OPTARG}"
1188 ;;
1189 t)
1190 OSM_DOCKER_TAG="${OPTARG}"
1191 ;;
1192 U)
1193 DOCKER_USER="${OPTARG}"
1194 ;;
1195 P)
1196 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1197 ;;
1198 A)
1199 OSM_VCA_APIPROXY="${OPTARG}"
1200 ;;
1201 -)
1202 [ "${OPTARG}" == "help" ] && usage && exit 0
1203 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1204 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1205 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1206 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1207 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1208 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1209 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1210 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1211 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1212 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1213 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1214 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1215 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1216 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1217 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1218 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1219 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1220 [ "${OPTARG}" == "pullimages" ] && continue
1221 [ "${OPTARG}" == "k8s_monitor" ] && K8S_MONITOR="y" && continue
1222 echo -e "Invalid option: '--$OPTARG'\n" >&2
1223 usage && exit 1
1224 ;;
1225 :)
1226 echo "Option -$OPTARG requires an argument" >&2
1227 usage && exit 1
1228 ;;
1229 \?)
1230 echo -e "Invalid option: '-$OPTARG'\n" >&2
1231 usage && exit 1
1232 ;;
1233 h)
1234 usage && exit 0
1235 ;;
1236 y)
1237 ASSUME_YES="y"
1238 ;;
1239 *)
1240 usage && exit 1
1241 ;;
1242 esac
1243 done
1244
1245 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1246
1247 if [ -n "$SHOWOPTS" ]; then
1248 dump_vars
1249 exit 0
1250 fi
1251
1252 # if develop, we force master
1253 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1254
1255 need_packages="git jq wget curl tar"
1256 echo -e "Checking required packages: $need_packages"
1257 dpkg -l $need_packages &>/dev/null \
1258 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1259 || sudo apt-get update \
1260 || FATAL "failed to run apt-get update"
1261 dpkg -l $need_packages &>/dev/null \
1262 || ! echo -e "Installing $need_packages requires root privileges." \
1263 || sudo apt-get install -y $need_packages \
1264 || FATAL "failed to install $need_packages"
1265
1266 if [ -z "$OSM_DEVOPS" ]; then
1267 if [ -n "$TEST_INSTALLER" ]; then
1268 echo -e "\nUsing local devops repo for OSM installation"
1269 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1270 else
1271 echo -e "\nCreating temporary dir for OSM installation"
1272 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1273 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1274
1275 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1276
1277 if [ -z "$COMMIT_ID" ]; then
1278 echo -e "\nGuessing the current stable release"
1279 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1280 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1281
1282 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1283 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1284 else
1285 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1286 fi
1287 git -C $OSM_DEVOPS checkout $COMMIT_ID
1288 fi
1289 fi
1290
1291 . $OSM_DEVOPS/common/all_funcs
1292
1293 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1294 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1295 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1296 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1297 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1298
1299 #Installation starts here
1300 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
1301 track start
1302
1303 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1304 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1305 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1306 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1307 fi
1308
1309 echo -e "Checking required packages: lxd"
1310 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1311 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1312
1313 # use local devops for containers
1314 export OSM_USE_LOCAL_DEVOPS=true
1315
1316 #Install osmclient
1317
1318 #Install vim-emu (optional)
1319 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1320
1321 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
1322 track end
1323 echo -e "\nDONE"