Merge "feature(prometheus): Configuration can be dynamically replaced"
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
28 echo -e " ..."
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -s <stack name> or <namespace> user-defined stack name when installed using swarm, or namespace when installed using k8s (default: osm)"
31 echo -e " -H <VCA host> use specific juju host controller IP"
32 echo -e " -S <VCA secret> use VCA/juju secret key"
33 echo -e " -P <VCA pubkey> use VCA/juju public key file"
34 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
35 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
36 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
37 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
38 echo -e " --pla: install the PLA module for placement support"
39 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, GRAFANA, PLA, NONE)"
40 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
41 echo -e " -D <devops path> use local devops installation path"
42 echo -e " -w <work dir> Location to store runtime installation"
43 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
44 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
45 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
46 echo -e " --nojuju: do not install juju, assumes it is already installed"
47 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
48 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
49 echo -e " --nohostclient: do not install the osmclient"
50 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
51 echo -e " --source: install OSM from source code using the latest stable tag"
52 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
53 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
54 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
55 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
56 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
57 echo -e " --showopts: print chosen options and exit (only for debugging)"
58 echo -e " -y: do not prompt for confirmation, assumes yes"
59 echo -e " -h / --help: print this help"
60 echo -e " --charmed: install OSM with charms"
61 echo -e " --bundle <bundle path>: Specify with which bundle to deploy OSM with charms (--charmed option)"
62 echo -e " --kubeconfig <kubeconfig path>: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
63 echo -e " --lxdendpoint <lxd endpoint ip>: Specify with which LXD to deploy OSM with charms (--charmed option)"
64 echo -e " --lxdcert <lxd cert path>: Specify external LXD cert to deploy OSM with charms (--charmed option)"
65 echo -e " --microstack: Installs microstack as a vim. (--charmed option)"
66
67 }
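# Illustrative invocations (editor's examples; values are placeholders and only use the options listed above):
#   ./full_install_osm.sh                      # install OSM from binaries with the default orchestrator
#   ./full_install_osm.sh -c k8s -s myosm      # deploy OSM on Kubernetes under the "myosm" namespace
#   ./full_install_osm.sh --uninstall          # remove a previous OSM installation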
68
69 # takes a juju/accounts.yaml file and returns the password specific
70 # to a controller. I wrote this using only bash tools to minimize
71 # the addition of other packages
72 function parse_juju_password {
73 password_file="${HOME}/.local/share/juju/accounts.yaml"
74 local controller_name=$1
75 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
76 sed -ne "s|^\($s\):|\1|" \
77 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
78 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
79 awk -F$fs -v controller=$controller_name '{
80 indent = length($1)/2;
81 vname[indent] = $2;
82 for (i in vname) {if (i > indent) {delete vname[i]}}
83 if (length($3) > 0) {
84 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
85 if (match(vn,controller) && match($2,"password")) {
86 printf("%s",$3);
87 }
88 }
89 }'
90 }
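# Example (this is how the function is used later in the installer):
#   OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)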
91
92 function generate_secret() {
93 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
94 }
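# Example: generate a random 32-character secret, e.g. for the common DB key:
#   OSM_DATABASE_COMMONKEY=$(generate_secret)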
95
96 function remove_volumes() {
97 if [ -n "$KUBERNETES" ]; then
98 k8_volume=$1
99 echo "Removing ${k8_volume}"
100 $WORKDIR_SUDO rm -rf ${k8_volume}
101 else
102 stack=$1
103 volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
104 for volume in $volumes; do
105 sg docker -c "docker volume rm ${stack}_${volume}"
106 done
107 fi
108 }
109
110 function remove_network() {
111 stack=$1
112 sg docker -c "docker network rm net${stack}"
113 }
114
115 function remove_iptables() {
116 stack=$1
117 if [ -z "$OSM_VCA_HOST" ]; then
118 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
119 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
120 fi
121
122 if [ -z "$DEFAULT_IP" ]; then
123 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
124 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
125 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
126 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
127 fi
128
129 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
130 sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
131 sudo netfilter-persistent save
132 fi
133 }
134
135 function remove_stack() {
136 stack=$1
137 if sg docker -c "docker stack ps ${stack}" ; then
138 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
139 COUNTER=0
140 result=1
141 while [ ${COUNTER} -lt 30 ]; do
142 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
143 #echo "Dockers running: $result"
144 if [ "${result}" == "0" ]; then
145 break
146 fi
147 let COUNTER=COUNTER+1
148 sleep 1
149 done
150 if [ "${result}" == "0" ]; then
151 echo "All dockers of the stack ${stack} were removed"
152 else
153 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
154 fi
155 sleep 5
156 fi
157 }
158
159 #removes osm deployments and services
160 function remove_k8s_namespace() {
161 kubectl delete ns $1
162 }
163
164 #Uninstall lightweight OSM: remove dockers
165 function uninstall_lightweight() {
166 if [ -n "$INSTALL_ONLY" ]; then
167 if [ -n "$INSTALL_ELK" ]; then
168 echo -e "\nUninstalling OSM ELK stack"
169 remove_stack osm_elk
170 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
171 fi
172 else
173 echo -e "\nUninstalling OSM"
174 if [ -n "$KUBERNETES" ]; then
175 if [ -n "$INSTALL_K8S_MONITOR" ]; then
176 # uninstall OSM MONITORING
177 uninstall_k8s_monitoring
178 fi
179 remove_k8s_namespace $OSM_STACK_NAME
180 else
181
182 remove_stack $OSM_STACK_NAME
183 remove_stack osm_elk
184 fi
185 echo "Now osm docker images and volumes will be deleted"
186 newgrp docker << EONG
187 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
188 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
189 docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
190 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
191 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
192 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
193 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
194 docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
195 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
196 EONG
197
198 if [ -n "$KUBERNETES" ]; then
199 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
200 remove_volumes $OSM_NAMESPACE_VOL
201 else
202 remove_volumes $OSM_STACK_NAME
203 remove_network $OSM_STACK_NAME
204 fi
205 remove_iptables $OSM_STACK_NAME
206 echo "Removing $OSM_DOCKER_WORK_DIR"
207 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
208 sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
209 fi
210 echo "Some docker images will be kept in case they are used by other docker stacks"
211 echo "To remove them, just run 'docker image prune' in a terminal"
212 return 0
213 }
214
215 #Safe unattended install of iptables-persistent
216 function check_install_iptables_persistent(){
217 echo -e "\nChecking required packages: iptables-persistent"
218 if ! dpkg -l iptables-persistent &>/dev/null; then
219 echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
220 echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
221 echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
222 sudo apt-get -yq install iptables-persistent
223 fi
224 }
225
226 #Configure NAT rules, based on the current IP addresses of containers
227 function nat(){
228 check_install_iptables_persistent
229
230 echo -e "\nConfiguring NAT rules"
231 echo -e " Required root privileges"
232 sudo $OSM_DEVOPS/installers/nat_osm
233 }
234
235 function FATAL(){
236 echo "FATAL error: Cannot install OSM due to \"$1\""
237 exit 1
238 }
239
240 function install_lxd() {
241 # Apply sysctl production values for optimal performance
242 sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
243 sudo sysctl --system
244
245 # Install LXD snap
246 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
247 sudo snap install lxd --channel=3.0/stable
248
249 # Configure LXD
250 sudo usermod -a -G lxd `whoami`
251 cat /usr/share/osm-devops/installers/lxd-preseed.conf | sg lxd -c "lxd init --preseed"
252 sg lxd -c "lxd waitready"
253 DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
254 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
255 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
256 #sudo systemctl stop lxd-bridge
257 #sudo systemctl --system daemon-reload
258 #sudo systemctl enable lxd-bridge
259 #sudo systemctl start lxd-bridge
260 }
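# A quick manual check of the resulting LXD setup (illustrative, not executed by the installer):
#   sg lxd -c "lxc profile show default"   # the default profile should show eth0 with the detected MTU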
261
262 function ask_user(){
263 # asks the user and parses a response among 'y', 'yes', 'n' or 'no'. Case insensitive
264 # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no, other or empty for no default
265 # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
266 read -e -p "$1" USER_CONFIRMATION
267 while true ; do
268 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
269 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
270 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
271 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
272 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
273 done
274 }
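# Example (default answer 'y'; returns non-zero when the user answers 'no'):
#   ask_user "Do you want to proceed (Y/n)? " y || exit 1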
275
276 function install_osmclient(){
277 CLIENT_RELEASE=${RELEASE#"-R "}
278 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
279 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
280 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
281 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
282 curl $key_location | sudo apt-key add -
283 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
284 sudo apt-get update
285 sudo apt-get install -y python3-pip
286 sudo -H LC_ALL=C python3 -m pip install -U pip
287 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
288 sudo apt-get install -y python3-osm-im python3-osmclient
289 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
290 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
291 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
292 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
293 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
294 echo -e "\nOSM client installed"
295 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
296 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
297 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
298 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
299 else
300 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
301 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
302 echo " export OSM_HOSTNAME=<OSM_host>"
303 fi
304 return 0
305 }
306
307 function install_prometheus_nodeexporter(){
308 if (systemctl -q is-active node_exporter)
309 then
310 echo "Node Exporter is already running."
311 else
312 echo "Node Exporter is not active, installing..."
313 if getent passwd node_exporter > /dev/null 2>&1; then
314 echo "node_exporter user exists"
315 else
316 echo "Creating user node_exporter"
317 sudo useradd --no-create-home --shell /bin/false node_exporter
318 fi
319 sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
320 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
321 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
322 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
323 sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
324 sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
325 sudo systemctl daemon-reload
326 sudo systemctl restart node_exporter
327 sudo systemctl enable node_exporter
328 echo "Node Exporter has been activated in this host."
329 fi
330 return 0
331 }
332
333 function uninstall_prometheus_nodeexporter(){
334 sudo systemctl stop node_exporter
335 sudo systemctl disable node_exporter
336 sudo rm /etc/systemd/system/node_exporter.service
337 sudo systemctl daemon-reload
338 sudo userdel node_exporter
339 sudo rm /usr/local/bin/node_exporter
340 return 0
341 }
342
343 function install_docker_ce() {
344 # installs and configures Docker CE
345 echo "Installing Docker CE ..."
346 sudo apt-get -qq update
347 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
348 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
349 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
350 sudo apt-get -qq update
351 sudo apt-get install -y docker-ce
352 echo "Adding user to group 'docker'"
353 sudo groupadd -f docker
354 sudo usermod -aG docker $USER
355 sleep 2
356 sudo service docker restart
357 echo "... restarted Docker service"
358 sg docker -c "docker version" || FATAL "Docker installation failed"
359 echo "... Docker CE installation done"
360 return 0
361 }
362
363 function install_docker_compose() {
364 # installs and configures docker-compose
365 echo "Installing Docker Compose ..."
366 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
367 sudo chmod +x /usr/local/bin/docker-compose
368 echo "... Docker Compose installation done"
369 }
370
371 function install_juju() {
372 echo "Installing juju"
373 sudo snap install juju --classic
374 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
375 echo "Finished installation of juju"
376 return 0
377 }
378
379 function juju_createcontroller() {
380 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
381 # Controller not found, create it
382 sudo usermod -a -G lxd ${USER}
383 sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
384 fi
385 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
386 }
387
388 function juju_createproxy() {
389 check_install_iptables_persistent
390
391 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
392 sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
393 sudo netfilter-persistent save
394 fi
395 }
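# The DNAT rule added above can be verified manually (illustrative):
#   sudo iptables -t nat -L PREROUTING -n | grep 17070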
396
397 function generate_docker_images() {
398 echo "Pulling and generating docker images"
399 _build_from=$COMMIT_ID
400 [ -z "$_build_from" ] && _build_from="master"
401
402 echo "OSM Docker images generated from $_build_from"
403
404 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
405 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
406 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
407 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
408
409 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
410 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
411 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
412 fi
413
414 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
415 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
416 fi
417
418 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
419 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
420 fi
421
422 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
423 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
424 fi
425
426 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
427 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
428 fi
429
430 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
431 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
432 fi
433
434 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
435 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
436 fi
437
438 if [ -n "$PULL_IMAGES" ]; then
439 sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
440 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
441 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
442 git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
443 sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
444 fi
445
446 if [ -n "$PULL_IMAGES" ]; then
447 sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
448 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
449 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
450 git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
451 sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
452 fi
453
454 if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
455 sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
456 elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
457 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
458 git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
459 sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
460 fi
461
462 if [ -n "$PULL_IMAGES" ]; then
463 sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
464 sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
465 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
466 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
467 git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
468 sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
469 sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
470 fi
471
472 if [ -n "$PULL_IMAGES" ]; then
473 sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
474 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
475 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
476 git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
477 sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
478 fi
479
480 if [ -n "$PULL_IMAGES" ]; then
481 sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
482 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
483 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
484 git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
485 sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
486 fi
487
488 if [ -n "$PULL_IMAGES" ]; then
489 sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
490 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
491 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
492 git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
493 sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
494 fi
495
496 if [ -n "$PULL_IMAGES" ]; then
497 sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
498 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
499 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
500 fi
501
502 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
503 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
504 fi
505
506 echo "Finished generation of docker images"
507 }
508
509 function cmp_overwrite() {
510 file1="$1"
511 file2="$2"
512 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
513 if [ -f "${file2}" ]; then
514 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
515 else
516 cp -b ${file1} ${file2}
517 fi
518 fi
519 }
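# Example with hypothetical paths: copy a new env file, asking before overwriting an existing one:
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/mon.env $OSM_DOCKER_WORK_DIR/mon.env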
520
521 function generate_docker_env_files() {
522 echo "Doing a backup of existing env files"
523 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
524 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
525 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
526 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
527 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
528 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
529 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
530 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
531 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
532
533 echo "Generating docker env files"
534 if [ -n "$KUBERNETES" ]; then
535 #Kubernetes resources
536 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
537 else
538 # Docker-compose
539 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
540 if [ -n "$INSTALL_PLA" ]; then
541 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
542 fi
543
544 # Prometheus files
545 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
546 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
547
548 # Grafana files
549 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
550 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
551 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
552 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
553 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
554
555 # Prometheus Exporters files
556 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
557 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
558 fi
559
560 # LCM
561 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
562 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
563 fi
564
565 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
566 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
567 else
568 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
569 fi
570
571 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
572 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
573 else
574 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
575 fi
576
577 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
578 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
579 else
580 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
581 fi
582
583 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
584 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
585 else
586 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
587 fi
588
589 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
590 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
591 else
592 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
593 fi
594
595 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
596 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
597 fi
598
599 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
600 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
601 fi
602
603 # RO
604 MYSQL_ROOT_PASSWORD=$(generate_secret)
605 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
606 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
607 fi
608 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
609 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
610 fi
611
612 # Keystone
613 KEYSTONE_DB_PASSWORD=$(generate_secret)
614 SERVICE_PASSWORD=$(generate_secret)
615 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
616 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
617 fi
618 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
619 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
620 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
621 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
622 fi
623
624 # NBI
625 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
626 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
627 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
628 fi
629
630 # MON
631 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
632 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
633 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
634 fi
635
636 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
637 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
638 else
639 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
640 fi
641
642 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
643 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
644 else
645 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
646 fi
647
648 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
649 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
650 else
651 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
652 fi
653
654 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
655 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
656 else
657 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
658 fi
659
660
661 # POL
662 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
663 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
664 fi
665
666 # LW-UI
667 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
668 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
669 fi
670
671 echo "Finished generation of docker env files"
672 }
673
674 function generate_osmclient_script () {
675 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
676 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
677 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
678 }
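# The generated wrapper can then be used like the regular osm client (illustrative command):
#   $OSM_DOCKER_WORK_DIR/osm ns-list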
679
680 #installs kubernetes packages
681 function install_kube() {
682 sudo apt-get update && sudo apt-get install -y apt-transport-https
683 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
684 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
685 sudo apt-get update
686 echo "Installing Kubernetes Packages ..."
687 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
688 }
689
690 #initializes kubernetes control plane
691 function init_kubeadm() {
692 sudo swapoff -a
693 sudo kubeadm init --config $1
694 sleep 5
695 }
696
697 function kube_config_dir() {
698 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
699 mkdir -p $HOME/.kube
700 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
701 sudo chown $(id -u):$(id -g) $HOME/.kube/config
702 }
703
704 #deploys flannel as daemonsets
705 function deploy_cni_provider() {
706 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
707 trap 'rm -rf "${CNI_DIR}"' EXIT
708 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
709 kubectl apply -f $CNI_DIR
710 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
711 }
712
713 #creates secrets from env files which will be used by containers
714 function kube_secrets(){
715 kubectl create ns $OSM_STACK_NAME
716 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
717 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
718 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
719 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
720 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
721 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
722 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
723 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
724 }
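# The created secrets can be inspected afterwards (illustrative):
#   kubectl get secrets -n $OSM_STACK_NAME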
725
726 #deploys osm pods and services
727 function deploy_osm_services() {
728 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
729 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
730 sleep 5
731 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
732 }
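# Deployment progress can be followed with (illustrative):
#   kubectl get pods -n $OSM_STACK_NAME -w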
733
734 function deploy_osm_pla_service() {
735 # corresponding to parse_yaml
736 [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
737 # corresponding to namespace_vol
738 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
739 # corresponding to deploy_osm_services
740 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
741 }
742
743 function parse_yaml() {
744 osm_services="nbi lcm ro pol mon light-ui keystone"
745 TAG=$1
746 for osm in $osm_services; do
747 $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
748 done
749 }
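# Example: "parse_yaml 7.0.1" would rewrite image references such as
# opensourcemano/lcm:<tag> to opensourcemano/lcm:7.0.1 in $OSM_K8S_WORK_DIR/lcm.yaml
# (the tag value is illustrative).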
750
751 function namespace_vol() {
752 osm_services="nbi lcm ro pol mon kafka mongo mysql"
753 for osm in $osm_services; do
754 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
755 done
756 }
757
758 function init_docker_swarm() {
759 if [ "${DEFAULT_MTU}" != "1500" ]; then
760 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
761 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
762 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
763 fi
764 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
765 return 0
766 }
767
768 function create_docker_network() {
769 echo "creating network"
770 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
771 echo "creating network DONE"
772 }
773
774 function deploy_lightweight() {
775
776 echo "Deploying lightweight build"
777 OSM_NBI_PORT=9999
778 OSM_RO_PORT=9090
779 OSM_KEYSTONE_PORT=5000
780 OSM_UI_PORT=80
781 OSM_MON_PORT=8662
782 OSM_PROM_PORT=9090
783 OSM_PROM_CADVISOR_PORT=8080
784 OSM_PROM_HOSTPORT=9091
785 OSM_GRAFANA_PORT=3000
786 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
787 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
788
789 if [ -n "$NO_HOST_PORTS" ]; then
790 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
791 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
792 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
793 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
794 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
795 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
796 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
797 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
798 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
799 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
800 else
801 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
802 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
803 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
804 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
805 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
806 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
807 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
808 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
809 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
810 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
811 fi
812 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
813 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
814 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
815 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
816 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
817 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
818 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
819 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
820 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
821
822 pushd $OSM_DOCKER_WORK_DIR
823 if [ -n "$INSTALL_PLA" ]; then
824 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
825 else
826 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
827 fi
828 popd
829
830 echo "Finished deployment of lightweight build"
831 }
832
833 function deploy_elk() {
834 echo "Pulling docker images for ELK"
835 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
836 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
837 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
838 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
839 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
840 echo "Finished pulling elk docker images"
841 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
842 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
843 remove_stack osm_elk
844 echo "Deploying ELK stack"
845 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
846 echo "Waiting for ELK stack to be up and running"
847 time=0
848 step=5
849 timelength=40
850 elk_is_up=1
851 while [ $time -le $timelength ]; do
852 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
853 elk_is_up=0
854 break
855 fi
856 sleep $step
857 time=$((time+step))
858 done
859 if [ $elk_is_up -eq 0 ]; then
860 echo "ELK is up and running. Trying to create index pattern..."
861 #Create index pattern
862 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
863 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
864 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
865 #Make it the default index
866 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
867 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
868 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
869 else
870 echo "Cannot connect to Kibana to create index pattern."
871 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
872 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
873 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
874 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
875 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
876 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
877 -d"{\"value\":\"filebeat-*\"}"'
878 fi
879 echo "Finished deployment of ELK stack"
880 return 0
881 }
882
883 function install_lightweight() {
884 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
885 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
886 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
887 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
888 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
889
890 track checkingroot
891 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
892 track noroot
893
894 if [ -n "$KUBERNETES" ]; then
895 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
896 1. Install and configure LXD
897 2. Install juju
898 3. Install docker CE
899 4. Disable swap space
900 5. Install and initialize Kubernetes
901 as pre-requirements.
902 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
903
904 else
905 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
906 fi
907 track proceed
908
909 echo "Installing lightweight build of OSM"
910 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
911 trap 'rm -rf "${LWTEMPDIR}"' EXIT
912 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
913 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
914 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
915 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
916 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
917
918 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
919 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
920 need_packages_lw="snapd"
921 echo -e "Checking required packages: $need_packages_lw"
922 dpkg -l $need_packages_lw &>/dev/null \
923 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
924 || sudo apt-get update \
925 || FATAL "failed to run apt-get update"
926 dpkg -l $need_packages_lw &>/dev/null \
927 || ! echo -e "Installing $need_packages_lw requires root privileges." \
928 || sudo apt-get install -y $need_packages_lw \
929 || FATAL "failed to install $need_packages_lw"
930 install_lxd
931 fi
932 track prereqok
933
934 [ -z "$INSTALL_NOJUJU" ] && install_juju
935 track juju_install
936
937 if [ -z "$OSM_VCA_HOST" ]; then
938 juju_createcontroller
939 OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
940 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
941 fi
942 track juju_controller
943
944 if [ -z "$OSM_VCA_SECRET" ]; then
945 OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
946 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
947 fi
948 if [ -z "$OSM_VCA_PUBKEY" ]; then
949 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
950 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
951 fi
952 if [ -z "$OSM_VCA_CACERT" ]; then
953 OSM_VCA_CACERT=$(juju controllers --format json | jq -r '.controllers["osm"]["ca-cert"]' | base64 | tr -d \\n)
954 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
955 fi
956 if [ -z "$OSM_VCA_APIPROXY" ]; then
957 OSM_VCA_APIPROXY=$DEFAULT_IP
958 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
959 fi
960 juju_createproxy
961 track juju
962
963 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
964 OSM_DATABASE_COMMONKEY=$(generate_secret)
965 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
966 fi
967
968 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
969 track docker_ce
970
971 #Installs Kubernetes and deploys osm services
972 if [ -n "$KUBERNETES" ]; then
973 install_kube
974 track install_k8s
975 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
976 kube_config_dir
977 track init_k8s
978 else
979 #install_docker_compose
980 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
981 track docker_swarm
982 fi
983
984 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
985 track docker_build
986
987 generate_docker_env_files
988
989 if [ -n "$KUBERNETES" ]; then
990 if [ -n "$INSTALL_K8S_MONITOR" ]; then
991 # uninstall OSM MONITORING
992 uninstall_k8s_monitoring
993 track uninstall_k8s_monitoring
994 fi
995 #remove old namespace
996 remove_k8s_namespace $OSM_STACK_NAME
997 deploy_cni_provider
998 kube_secrets
999 [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
1000 namespace_vol
1001 deploy_osm_services
1002 if [ -n "$INSTALL_PLA"]; then
1003 # optional PLA install
1004 deploy_osm_pla_service
1005 fi
1006 track deploy_osm_services_k8s
1007 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1008 # install OSM MONITORING
1009 install_k8s_monitoring
1010 track install_k8s_monitoring
1011 fi
1012 else
1013 # remove old stack
1014 remove_stack $OSM_STACK_NAME
1015 create_docker_network
1016 deploy_lightweight
1017 generate_osmclient_script
1018 track docker_deploy
1019 install_prometheus_nodeexporter
1020 track nodeexporter
1021 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1022 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1023 fi
1024
1025 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1026 track osmclient
1027
1028 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
1029 track end
1030 return 0
1031 }
1032
1033 function install_vimemu() {
1034 echo "\nInstalling vim-emu"
1035 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1036 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1037 # install prerequisites (OVS is a must for the emulator to work)
1038 sudo apt-get install openvswitch-switch
1039 # clone vim-emu repository (attention: branch is currently master only)
1040 echo "Cloning vim-emu repository ..."
1041 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1042 # build vim-emu docker
1043 echo "Building vim-emu Docker container..."
1044
1045 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1046 # start vim-emu container as daemon
1047 echo "Starting vim-emu Docker container 'vim-emu' ..."
1048 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1049 # in lightweight mode, the emulator needs to be attached to netOSM
1050 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1051 else
1052 # classic build mode
1053 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1054 fi
1055 echo "Waiting for 'vim-emu' container to start ..."
1056 sleep 5
1057 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1058 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1059 # print vim-emu connection info
1060 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1061 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1062 echo -e "To add the emulated VIM to OSM you should do:"
1063 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1064 }
1065
1066 function install_k8s_monitoring() {
1067 # install OSM monitoring
1068 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1069 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1070 }
1071
1072 function uninstall_k8s_monitoring() {
1073 # uninstall OSM monitoring
1074 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1075 }
1076
1077 function dump_vars(){
1078 echo "DEVELOP=$DEVELOP"
1079 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1080 echo "UNINSTALL=$UNINSTALL"
1081 echo "UPDATE=$UPDATE"
1082 echo "RECONFIGURE=$RECONFIGURE"
1083 echo "TEST_INSTALLER=$TEST_INSTALLER"
1084 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1085 echo "INSTALL_PLA=$INSTALL_PLA"
1086 echo "INSTALL_LXD=$INSTALL_LXD"
1087 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1088 echo "INSTALL_ONLY=$INSTALL_ONLY"
1089 echo "INSTALL_ELK=$INSTALL_ELK"
1090 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1091 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
1092 echo "TO_REBUILD=$TO_REBUILD"
1093 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1094 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1095 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1096 echo "RELEASE=$RELEASE"
1097 echo "REPOSITORY=$REPOSITORY"
1098 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1099 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1100 echo "OSM_DEVOPS=$OSM_DEVOPS"
1101 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1102 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1103 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1104 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1105 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1106 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1107 echo "OSM_WORK_DIR=$OSM_STACK_NAME"
1108 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1109 echo "DOCKER_USER=$DOCKER_USER"
1110 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1111 echo "PULL_IMAGES=$PULL_IMAGES"
1112 echo "KUBERNETES=$KUBERNETES"
1113 echo "SHOWOPTS=$SHOWOPTS"
1114 echo "Install from specific refspec (-b): $COMMIT_ID"
1115 }
1116
1117 function track(){
1118 ctime=`date +%s`
1119 duration=$((ctime - SESSION_ID))
1120 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1121 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1122 event_name="bin"
1123 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1124 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1125 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1126 event_name="${event_name}_$1"
1127 url="${url}&event=${event_name}&ce_duration=${duration}"
1128 wget -q -O /dev/null $url
1129 }
1130
1131 UNINSTALL=""
1132 DEVELOP=""
1133 UPDATE=""
1134 RECONFIGURE=""
1135 TEST_INSTALLER=""
1136 INSTALL_LXD=""
1137 SHOWOPTS=""
1138 COMMIT_ID=""
1139 ASSUME_YES=""
1140 INSTALL_FROM_SOURCE=""
1141 RELEASE="ReleaseSEVEN"
1142 REPOSITORY="stable"
1143 INSTALL_VIMEMU=""
1144 INSTALL_PLA=""
1145 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1146 LXD_REPOSITORY_PATH=""
1147 INSTALL_LIGHTWEIGHT="y"
1148 INSTALL_ONLY=""
1149 INSTALL_ELK=""
1150 TO_REBUILD=""
1151 INSTALL_NOLXD=""
1152 INSTALL_NODOCKER=""
1153 INSTALL_NOJUJU=""
1154 KUBERNETES=""
1155 INSTALL_K8S_MONITOR=""
1156 INSTALL_NOHOSTCLIENT=""
1157 SESSION_ID=`date +%s`
1158 OSM_DEVOPS=
1159 OSM_VCA_HOST=
1160 OSM_VCA_SECRET=
1161 OSM_VCA_PUBKEY=
1162 OSM_STACK_NAME=osm
1163 NO_HOST_PORTS=""
1164 DOCKER_NOBUILD=""
1165 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1166 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1167 WORKDIR_SUDO=sudo
1168 OSM_WORK_DIR="/etc/osm"
1169 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1170 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1171 OSM_HOST_VOL="/var/lib/osm"
1172 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1173 OSM_DOCKER_TAG=latest
1174 DOCKER_USER=opensourcemano
1175 PULL_IMAGES="y"
1176 KAFKA_TAG=2.11-1.0.2
1177 PROMETHEUS_TAG=v2.4.3
1178 GRAFANA_TAG=latest
1179 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1180 PROMETHEUS_CADVISOR_TAG=latest
1181 KEYSTONEDB_TAG=10
1182 OSM_DATABASE_COMMONKEY=
1183 ELASTIC_VERSION=6.4.2
1184 ELASTIC_CURATOR_VERSION=5.5.4
1185 POD_NETWORK_CIDR=10.244.0.0/16
1186 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1187 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1188
1189 while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:-: hy" o; do
1190 case "${o}" in
1191 b)
1192 COMMIT_ID=${OPTARG}
1193 PULL_IMAGES=""
1194 ;;
1195 r)
1196 REPOSITORY="${OPTARG}"
1197 REPO_ARGS+=(-r "$REPOSITORY")
1198 ;;
1199 c)
1200 [ "${OPTARG}" == "swarm" ] && continue
1201 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1202 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1203 usage && exit 1
1204 ;;
1205 k)
1206 REPOSITORY_KEY="${OPTARG}"
1207 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1208 ;;
1209 u)
1210 REPOSITORY_BASE="${OPTARG}"
1211 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1212 ;;
1213 R)
1214 RELEASE="${OPTARG}"
1215 REPO_ARGS+=(-R "$RELEASE")
1216 ;;
1217 D)
1218 OSM_DEVOPS="${OPTARG}"
1219 ;;
1220 o)
1221 INSTALL_ONLY="y"
1222 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1223 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1224 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1225 ;;
1226 m)
1227 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1228 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1229 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1230 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1231 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1232 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1233 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1234 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1235 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1236 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1237 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1238 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1239 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1240 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1241 ;;
1242 H)
1243 OSM_VCA_HOST="${OPTARG}"
1244 ;;
1245 S)
1246 OSM_VCA_SECRET="${OPTARG}"
1247 ;;
1248 s)
1249 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 1
1250 ;;
1251 w)
1252 # when specifying workdir, do not use sudo for access
1253 WORKDIR_SUDO=
1254 OSM_WORK_DIR="${OPTARG}"
1255 ;;
1256 t)
1257 OSM_DOCKER_TAG="${OPTARG}"
1258 ;;
1259 U)
1260 DOCKER_USER="${OPTARG}"
1261 ;;
1262 P)
1263 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1264 ;;
1265 A)
1266 OSM_VCA_APIPROXY="${OPTARG}"
1267 ;;
1268 -)
1269 [ "${OPTARG}" == "help" ] && usage && exit 0
1270 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1271 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1272 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1273 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1274 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1275 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1276 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1277 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1278 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1279 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1280 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1281 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1282 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1283 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1284 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1285 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1286 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1287 [ "${OPTARG}" == "pullimages" ] && continue
1288 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1289 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1290 [ "${OPTARG}" == "bundle" ] && continue
1291 [ "${OPTARG}" == "kubeconfig" ] && continue
1292 [ "${OPTARG}" == "lxdendpoint" ] && continue
1293 [ "${OPTARG}" == "lxdcert" ] && continue
1294 [ "${OPTARG}" == "microstack" ] && continue
1295 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1296 echo -e "Invalid option: '--$OPTARG'\n" >&2
1297 usage && exit 1
1298 ;;
1299 :)
1300 echo "Option -$OPTARG requires an argument" >&2
1301 usage && exit 1
1302 ;;
1303 \?)
1304 echo -e "Invalid option: '-$OPTARG'\n" >&2
1305 usage && exit 1
1306 ;;
1307 h)
1308 usage && exit 0
1309 ;;
1310 y)
1311 ASSUME_YES="y"
1312 ;;
1313 *)
1314 usage && exit 1
1315 ;;
1316 esac
1317 done
1318
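# Sanity checks on option combinations before doing any work.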
1319 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1320 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1321
1322 if [ -n "$SHOWOPTS" ]; then
1323 dump_vars
1324 exit 0
1325 fi
1326
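# Charmed (Juju-based) installation: delegate to the charmed_install/uninstall
# scripts shipped with osm-devops, print the osmclient configuration steps and exit.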
1327 if [ -n "$CHARMED" ]; then
1328 if [ -n "$UNINSTALL" ]; then
1329 /usr/share/osm-devops/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
1330 else
1331 /usr/share/osm-devops/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
1332 fi
1333
1334 echo "Your installation is now complete, follow these steps for configuring the osmclient:"
1335 echo
1336 echo "1. Get the NBI IP with the following command:"
1337 echo
1338 echo "juju status --format yaml | yq r - applications.nbi-k8s.address"
1339 echo
1340 echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
1341 echo
1342 echo "export OSM_HOSTNAME=<NBI-IP>"
1343 echo
1344 echo "3. Add the previous command to your .bashrc for other Shell sessions"
1345 echo
1346 echo "export OSM_HOSTNAME=<previous-IP> >> ~/.bashrc"
1347 echo
1348 echo "DONE"
1349
1350 exit 0
1351 fi
1352
1353 # if develop, we force master
1354 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1355
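# Ensure the basic tools needed by the installer itself are present. Each
# "|| ! echo ... || sudo ..." chain only runs the privileged command when the
# preceding dpkg check fails, printing a notice first.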
1356 need_packages="git jq wget curl tar"
1357 echo -e "Checking required packages: $need_packages"
1358 dpkg -l $need_packages &>/dev/null \
1359 || ! echo -e "One or more required packages are not installed. Updating the apt cache requires root privileges." \
1360 || sudo apt-get update \
1361 || FATAL "failed to run apt-get update"
1362 dpkg -l $need_packages &>/dev/null \
1363 || ! echo -e "Installing $need_packages requires root privileges." \
1364 || sudo apt-get install -y $need_packages \
1365 || FATAL "failed to install $need_packages"
1366
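# Locate the devops repository: with --test reuse the local checkout this
# script lives in; otherwise clone it into a temporary directory (removed on
# exit) and check out the requested refspec or, by default, the latest stable tag.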
1367 if [ -z "$OSM_DEVOPS" ]; then
1368 if [ -n "$TEST_INSTALLER" ]; then
1369 echo -e "\nUsing local devops repo for OSM installation"
1370 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1371 else
1372 echo -e "\nCreating temporary dir for OSM installation"
1373 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1374 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1375
1376 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1377
1378 if [ -z "$COMMIT_ID" ]; then
1379 echo -e "\nGuessing the current stable release"
1380 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1381 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the latest stable release" && exit 1
1382
1383 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1384 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1385 else
1386 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1387 fi
1388 git -C $OSM_DEVOPS checkout $COMMIT_ID
1389 fi
1390 fi
1391
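# Source the common helper functions from the devops repository.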
1392 . $OSM_DEVOPS/common/all_funcs
1393
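# Handle uninstall and addon-only (-o) requests before starting a full installation.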
1394 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1395 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1396 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1397 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1398 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1399 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1400
1401 #Installation starts here
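# Ping the OSM download server (output is discarded) and record the start of the install for tracking.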
1402 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
1403 track start
1404
1405 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1406 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1407 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1408 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1409 fi
1410
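# Classic (non-lightweight) install: check that LXD is present, then install
# and configure it when --lxdinstall was given.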
1411 echo -e "Checking required packages: lxd"
1412 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1413 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1414
1415 # use local devops for containers
1416 export OSM_USE_LOCAL_DEVOPS=true
1417
1418 #Install osmclient
1419
1420 #Install vim-emu (optional)
1421 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1422
1423 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
1424 track end
1425 echo -e "\nDONE"
1426