1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
28 echo -e " ..."
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -s <stack name> or <namespace> user-defined stack name when installing with swarm, or namespace when installing with k8s; default is osm"
31 echo -e " -H <VCA host> use specific juju host controller IP"
32 echo -e " -S <VCA secret> use VCA/juju secret key"
33 echo -e " -P <VCA pubkey> use VCA/juju public key file"
34 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
35 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
36 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
37 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
38 echo -e " --pla: install the PLA module for placement support"
39 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
40 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
41 echo -e " -D <devops path> use local devops installation path"
42 echo -e " -w <work dir> Location to store runtime installation"
43 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
44 echo -e " -l: LXD cloud yaml file"
45 echo -e " -L: LXD credentials yaml file"
46 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
47 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
48 echo -e " --nojuju: do not install juju, assumes it is already installed"
49 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
50 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
51 echo -e " --nohostclient: do not install the osmclient"
52 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
53 echo -e " --source: install OSM from source code using the latest stable tag"
54 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
55 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
56 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
57 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
58 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
59 echo -e " --showopts: print chosen options and exit (only for debugging)"
60 echo -e " -y: do not prompt for confirmation, assumes yes"
61 echo -e " -h / --help: print this help"
62 echo -e " --charmed: install OSM with charms"
63 echo -e " --bundle <bundle path>: Specify which bundle to use when deploying OSM with charms (--charmed option)"
64 echo -e " --kubeconfig <kubeconfig path>: Specify which kubernetes cluster to use when deploying OSM with charms (--charmed option)"
65 echo -e " --lxdendpoint <lxd endpoint ip>: Specify which LXD endpoint to use when deploying OSM with charms (--charmed option)"
66 echo -e " --lxdcert <lxd cert path>: Specify an external LXD certificate to use when deploying OSM with charms (--charmed option)"
67 echo -e " --microstack: Installs microstack as a VIM (--charmed option)"
68
69 }
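# A few illustrative invocations of the options above (values are placeholders,
# adjust to your environment):
#   ./full_install_osm.sh                      # install OSM from binaries with defaults
#   ./full_install_osm.sh -c k8s -s myosm      # deploy on Kubernetes, namespace "myosm"
#   ./full_install_osm.sh -b master            # build and install from the master branch
#   ./full_install_osm.sh --uninstall          # remove the containers and delete NAT rules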
70
71 # takes a juju/accounts.yaml file and returns the password for a specific
72 # controller. Written using only bash tools to minimize the need for
73 # additional packages
74 function parse_juju_password {
75 password_file="${HOME}/.local/share/juju/accounts.yaml"
76 local controller_name=$1
77 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
78 sed -ne "s|^\($s\):|\1|" \
79 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
80 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
81 awk -F$fs -v controller=$controller_name '{
82 indent = length($1)/2;
83 vname[indent] = $2;
84 for (i in vname) {if (i > indent) {delete vname[i]}}
85 if (length($3) > 0) {
86 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
87 if (match(vn,controller) && match($2,"password")) {
88 printf("%s",$3);
89 }
90 }
91 }'
92 }
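# Illustrative use, as done later by the installer (assumes the controller is
# named after the stack):
#   OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)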
93
94 function generate_secret() {
95 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
96 }
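# Illustrative use:
#   MYSQL_ROOT_PASSWORD=$(generate_secret)   # yields a 32-character alphanumeric string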
97
98 function remove_volumes() {
99 if [ -n "$KUBERNETES" ]; then
100 k8_volume=$1
101 echo "Removing ${k8_volume}"
102 $WORKDIR_SUDO rm -rf ${k8_volume}
103 else
104 stack=$1
105 volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
106 for volume in $volumes; do
107 sg docker -c "docker volume rm ${stack}_${volume}"
108 done
109 fi
110 }
111
112 function remove_network() {
113 stack=$1
114 sg docker -c "docker network rm net${stack}"
115 }
116
117 function remove_iptables() {
118 stack=$1
119 if [ -z "$OSM_VCA_HOST" ]; then
120 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
121 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
122 fi
123
124 if [ -z "$DEFAULT_IP" ]; then
125 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
126 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
127 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
128 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
129 fi
130
131 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
132 sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
133 sudo netfilter-persistent save
134 fi
135 }
136
137 function remove_stack() {
138 stack=$1
139 if sg docker -c "docker stack ps ${stack}" ; then
140 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
141 COUNTER=0
142 result=1
143 while [ ${COUNTER} -lt 30 ]; do
144 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
145 #echo "Dockers running: $result"
146 if [ "${result}" == "0" ]; then
147 break
148 fi
149 let COUNTER=COUNTER+1
150 sleep 1
151 done
152 if [ "${result}" == "0" ]; then
153 echo "All dockers of the stack ${stack} were removed"
154 else
155 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
156 fi
157 sleep 5
158 fi
159 }
160
161 #removes osm deployments and services
162 function remove_k8s_namespace() {
163 kubectl delete ns $1
164 }
165
166 #Uninstall lightweight OSM: remove dockers
167 function uninstall_lightweight() {
168 if [ -n "$INSTALL_ONLY" ]; then
169 if [ -n "$INSTALL_ELK" ]; then
170 echo -e "\nUninstalling OSM ELK stack"
171 remove_stack osm_elk
172 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
173 fi
174 else
175 echo -e "\nUninstalling OSM"
176 if [ -n "$KUBERNETES" ]; then
177 if [ -n "$INSTALL_K8S_MONITOR" ]; then
178 # uninstall OSM MONITORING
179 uninstall_k8s_monitoring
180 fi
181 remove_k8s_namespace $OSM_STACK_NAME
182 else
183
184 remove_stack $OSM_STACK_NAME
185 remove_stack osm_elk
186 fi
187 echo "Now osm docker images and volumes will be deleted"
188 newgrp docker << EONG
189 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
190 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
191 docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
192 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
193 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
194 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
195 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
196 docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
197 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
198 EONG
199
200 if [ -n "$KUBERNETES" ]; then
201 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
202 remove_volumes $OSM_NAMESPACE_VOL
203 else
204 remove_volumes $OSM_STACK_NAME
205 remove_network $OSM_STACK_NAME
206 fi
207 remove_iptables $OSM_STACK_NAME
208 echo "Removing $OSM_DOCKER_WORK_DIR"
209 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
210 sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
211 fi
212 echo "Some docker images will be kept in case they are used by other docker stacks"
213 echo "To remove them, just run 'docker image prune' in a terminal"
214 return 0
215 }
216
217 #Safe unattended install of iptables-persistent
218 function check_install_iptables_persistent(){
219 echo -e "\nChecking required packages: iptables-persistent"
220 if ! dpkg -l iptables-persistent &>/dev/null; then
221 echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
222 echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
223 echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
224 sudo apt-get -yq install iptables-persistent
225 fi
226 }
227
228 #Configure NAT rules, based on the current IP addresses of containers
229 function nat(){
230 check_install_iptables_persistent
231
232 echo -e "\nConfiguring NAT rules"
233 echo -e " Required root privileges"
234 sudo $OSM_DEVOPS/installers/nat_osm
235 }
236
237 function FATAL(){
238 echo "FATAL error: Cannot install OSM due to \"$1\""
239 exit 1
240 }
241
242 function install_lxd() {
243 # Apply sysctl production values for optimal performance
244 sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
245 sudo sysctl --system
246
247 # Install LXD snap
248 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
249 sudo snap install lxd --channel=3.0/stable
250
251 # Configure LXD
252 sudo usermod -a -G lxd `whoami`
253 cat /usr/share/osm-devops/installers/lxd-preseed.conf | sg lxd -c "lxd init --preseed"
254 sg lxd -c "lxd waitready"
255 DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
256 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
257 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
258 #sudo systemctl stop lxd-bridge
259 #sudo systemctl --system daemon-reload
260 #sudo systemctl enable lxd-bridge
261 #sudo systemctl start lxd-bridge
262 }
263
264 function ask_user(){
265 # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
266 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
267 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
268 read -e -p "$1" USER_CONFIRMATION
269 while true ; do
270 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
271 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
272 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
273 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
274 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
275 done
276 }
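# Illustrative use (second argument is the default answer for an empty reply):
#   ask_user "Do you want to proceed (Y/n)? " y || exit 1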
277
278 function install_osmclient(){
279 CLIENT_RELEASE=${RELEASE#"-R "}
280 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
281 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
282 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
283 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
284 curl $key_location | sudo apt-key add -
285 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
286 sudo apt-get update
287 sudo apt-get install -y python3-pip
288 sudo -H LC_ALL=C python3 -m pip install -U pip
289 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
290 sudo apt-get install -y python3-osm-im python3-osmclient
291 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
292 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
293 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
294 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
295 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
296 echo -e "\nOSM client installed"
297 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
298 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
299 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
300 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
301 else
302 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
303 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
304 echo " export OSM_HOSTNAME=<OSM_host>"
305 fi
306 return 0
307 }
308
309 function install_prometheus_nodeexporter(){
310 if (systemctl -q is-active node_exporter)
311 then
312 echo "Node Exporter is already running."
313 else
314 echo "Node Exporter is not active, installing..."
315 if getent passwd node_exporter > /dev/null 2>&1; then
316 echo "node_exporter user exists"
317 else
318 echo "Creating user node_exporter"
319 sudo useradd --no-create-home --shell /bin/false node_exporter
320 fi
321 sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
322 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
323 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
324 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
325 sudo rm -rf node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
326 sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
327 sudo systemctl daemon-reload
328 sudo systemctl restart node_exporter
329 sudo systemctl enable node_exporter
330 echo "Node Exporter has been activated in this host."
331 fi
332 return 0
333 }
334
335 function uninstall_prometheus_nodeexporter(){
336 sudo systemctl stop node_exporter
337 sudo systemctl disable node_exporter
338 sudo rm /etc/systemd/system/node_exporter.service
339 sudo systemctl daemon-reload
340 sudo userdel node_exporter
341 sudo rm /usr/local/bin/node_exporter
342 return 0
343 }
344
345 function install_docker_ce() {
346 # installs and configures Docker CE
347 echo "Installing Docker CE ..."
348 sudo apt-get -qq update
349 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
350 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
351 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
352 sudo apt-get -qq update
353 sudo apt-get install -y docker-ce
354 echo "Adding user to group 'docker'"
355 sudo groupadd -f docker
356 sudo usermod -aG docker $USER
357 sleep 2
358 sudo service docker restart
359 echo "... restarted Docker service"
360 sg docker -c "docker version" || FATAL "Docker installation failed"
361 echo "... Docker CE installation done"
362 return 0
363 }
364
365 function install_docker_compose() {
366 # installs and configures docker-compose
367 echo "Installing Docker Compose ..."
368 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
369 sudo chmod +x /usr/local/bin/docker-compose
370 echo "... Docker Compose installation done"
371 }
372
373 function install_juju() {
374 echo "Installing juju"
375 sudo snap install juju --classic
376 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
377 echo "Finished installation of juju"
378 return 0
379 }
380
381 function juju_createcontroller() {
382 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
383 # Controller not found, create it
384 [ -n "$LXD_CLOUD_FILE" ] && OSM_VCA_CLOUDNAME="lxd-cloud"
385 sudo usermod -a -G lxd ${USER}
386 sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
387 fi
388 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
389 }
390
391 function juju_createproxy() {
392 check_install_iptables_persistent
393
394 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
395 sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
396 sudo netfilter-persistent save
397 fi
398 }
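# The rule added above DNATs host port 17070 (the juju/VCA API port) on the default
# IP to the VCA controller. Illustrative check of the resulting rule:
#   sudo iptables -t nat -L PREROUTING -n | grep 17070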
399
400 function generate_docker_images() {
401 echo "Pulling and generating docker images"
402 _build_from=$COMMIT_ID
403 [ -z "$_build_from" ] && _build_from="master"
404
405 echo "OSM Docker images generated from $_build_from"
406
407 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
408 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
409 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
410 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
411
412 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
413 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
414 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
415 fi
416
417 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
418 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
419 fi
420
421 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
422 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
423 fi
424
425 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
426 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
427 fi
428
429 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
430 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
431 fi
432
433 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
434 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
435 fi
436
437 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
438 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
439 fi
440
441 if [ -n "$PULL_IMAGES" ]; then
442 sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
443 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
444 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
445 git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
446 sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
447 fi
448
449 if [ -n "$PULL_IMAGES" ]; then
450 sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
451 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
452 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
453 git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
454 sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
455 fi
456
457 if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
458 sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
459 elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
460 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
461 git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
462 sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
463 fi
464
465 if [ -n "$PULL_IMAGES" ]; then
466 sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
467 sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
468 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
469 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
470 git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
471 sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
472 sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
473 fi
474
475 if [ -n "$PULL_IMAGES" ]; then
476 sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
477 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
478 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
479 git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
480 sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
481 fi
482
483 if [ -n "$PULL_IMAGES" ]; then
484 sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM docker image"
485 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
486 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
487 git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
488 sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
489 fi
490
491 if [ -n "$PULL_IMAGES" ]; then
492 sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
493 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
494 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
495 git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
496 sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
497 fi
498
499 if [ -n "$PULL_IMAGES" ]; then
500 sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
501 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
502 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
503 fi
504
505 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
506 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
507 fi
508
509 echo "Finished generation of docker images"
510 }
511
512 function cmp_overwrite() {
513 file1="$1"
514 file2="$2"
515 if ! $(cmp "${file1}" "${file2}" >/dev/null 2>&1); then
516 if [ -f "${file2}" ]; then
517 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
518 else
519 cp -b ${file1} ${file2}
520 fi
521 fi
522 }
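# Illustrative use (hypothetical paths): refresh a config file, asking before
# overwriting a pre-existing one:
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml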
523
524 function generate_docker_env_files() {
525 echo "Doing a backup of existing env files"
526 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
527 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
528 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
529 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
530 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
531 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
532 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
533 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
534 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
535
536 echo "Generating docker env files"
537 if [ -n "$KUBERNETES" ]; then
538 #Kubernetes resources
539 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
540 else
541 # Docker-compose
542 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
543 if [ -n "$INSTALL_PLA" ]; then
544 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
545 fi
546
547 # Prometheus files
548 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
549 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
550
551 # Grafana files
552 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
553 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
554 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
555 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
556 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
557
558 # Prometheus Exporters files
559 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
560 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
561 fi
562
563 # LCM
564 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
565 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
566 fi
567
568 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
569 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
570 else
571 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
572 fi
573
574 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
575 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
576 else
577 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
578 fi
579
580 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
581 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
582 else
583 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
584 fi
585
586 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
587 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
588 else
589 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
590 fi
591
592 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
593 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
594 else
595 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
596 fi
597
598 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
599 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
600 fi
601
602 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
603 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
604 fi
605
606 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
607 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
608 else
609 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
610 fi
611
612 # RO
613 MYSQL_ROOT_PASSWORD=$(generate_secret)
614 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
615 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
616 fi
617 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
618 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
619 fi
620
621 # Keystone
622 KEYSTONE_DB_PASSWORD=$(generate_secret)
623 SERVICE_PASSWORD=$(generate_secret)
624 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
625 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
626 fi
627 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
628 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
629 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
630 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
631 fi
632
633 # NBI
634 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
635 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
636 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
637 fi
638
639 # MON
640 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
641 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
642 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
643 fi
644
645 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
646 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
647 else
648 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
649 fi
650
651 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
652 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
653 else
654 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
655 fi
656
657 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
658 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
659 else
660 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
661 fi
662
663 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
664 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
665 else
666 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
667 fi
668
669
670 # POL
671 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
672 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
673 fi
674
675 # LW-UI
676 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
677 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
678 fi
679
680 echo "Finished generation of docker env files"
681 }
682
683 function generate_osmclient_script () {
684 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
685 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
686 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
687 }
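# Running the generated wrapper (illustrative) starts an interactive osmclient
# sidecar container attached to the OSM docker network:
#   $OSM_DOCKER_WORK_DIR/osm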
688
689 #installs kubernetes packages
690 function install_kube() {
691 sudo apt-get update && sudo apt-get install -y apt-transport-https
692 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
693 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
694 sudo apt-get update
695 echo "Installing Kubernetes Packages ..."
696 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
697 }
698
699 #initializes kubernetes control plane
700 function init_kubeadm() {
701 sudo swapoff -a
702 sudo kubeadm init --config $1
703 sleep 5
704 }
705
706 function kube_config_dir() {
707 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
708 mkdir -p $HOME/.kube
709 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
710 sudo chown $(id -u):$(id -g) $HOME/.kube/config
711 }
712
713 #deploys flannel as daemonsets
714 function deploy_cni_provider() {
715 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
716 trap 'rm -rf "${CNI_DIR}"' EXIT
717 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
718 kubectl apply -f $CNI_DIR
719 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
720 }
721
722 #creates secrets from env files which will be used by containers
723 function kube_secrets(){
724 kubectl create ns $OSM_STACK_NAME
725 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
726 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
727 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
728 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
729 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
730 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
731 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
732 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
733 }
734
735 #deploys osm pods and services
736 function deploy_osm_services() {
737 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
738 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
739 sleep 5
740 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
741 }
742
743 function deploy_osm_pla_service() {
744 # corresponding to parse_yaml
745 [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
746 # corresponding to namespace_vol
747 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
748 # corresponding to deploy_osm_services
749 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
750 }
751
752 function parse_yaml() {
753 osm_services="nbi lcm ro pol mon light-ui keystone"
754 TAG=$1
755 for osm in $osm_services; do
756 $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
757 done
758 }
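# Illustrative effect on a manifest such as $OSM_K8S_WORK_DIR/lcm.yaml:
#   image: opensourcemano/lcm:latest   ->   image: opensourcemano/lcm:<TAG>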
759
760 function namespace_vol() {
761 osm_services="nbi lcm ro pol mon kafka mongo mysql"
762 for osm in $osm_services; do
763 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
764 done
765 }
766
767 function init_docker_swarm() {
768 if [ "${DEFAULT_MTU}" != "1500" ]; then
769 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
770 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
771 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
772 fi
773 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
774 return 0
775 }
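# When the host MTU is not 1500, docker_gwbridge is pre-created with the host MTU
# before the swarm is initialized. Illustrative check:
#   sg docker -c "docker network inspect docker_gwbridge" | grep mtu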
776
777 function create_docker_network() {
778 echo "creating network"
779 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
780 echo "creating network DONE"
781 }
782
783 function deploy_lightweight() {
784
785 echo "Deploying lightweight build"
786 OSM_NBI_PORT=9999
787 OSM_RO_PORT=9090
788 OSM_KEYSTONE_PORT=5000
789 OSM_UI_PORT=80
790 OSM_MON_PORT=8662
791 OSM_PROM_PORT=9090
792 OSM_PROM_CADVISOR_PORT=8080
793 OSM_PROM_HOSTPORT=9091
794 OSM_GRAFANA_PORT=3000
795 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
796 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
797
798 if [ -n "$NO_HOST_PORTS" ]; then
799 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
800 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
801 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
802 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
803 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
804 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
805 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
806 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
807 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
808 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
809 else
810 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
811 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
812 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
813 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
814 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
815 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
816 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
817 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
818 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
819 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
820 fi
821 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
822 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
823 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
824 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
825 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
826 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
827 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
828 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
829 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
830
831 pushd $OSM_DOCKER_WORK_DIR
832 if [ -n "$INSTALL_PLA" ]; then
833 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
834 else
835 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
836 fi
837 popd
838
839 echo "Finished deployment of lightweight build"
840 }
841
842 function deploy_elk() {
843 echo "Pulling docker images for ELK"
844 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
845 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
846 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
847 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
848 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
849 echo "Finished pulling elk docker images"
850 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
851 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
852 remove_stack osm_elk
853 echo "Deploying ELK stack"
854 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
855 echo "Waiting for ELK stack to be up and running"
856 time=0
857 step=5
858 timelength=40
859 elk_is_up=1
860 while [ $time -le $timelength ]; do
861 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
862 elk_is_up=0
863 break
864 fi
865 sleep $step
866 time=$((time+step))
867 done
868 if [ $elk_is_up -eq 0 ]; then
869 echo "ELK is up and running. Trying to create index pattern..."
870 #Create index pattern
871 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
872 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
873 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
874 #Make it the default index
875 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
876 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
877 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
878 else
879 echo "Cannot connect to Kibana to create index pattern."
880 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
881 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
882 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
883 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
884 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
885 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
886 -d"{\"value\":\"filebeat-*\"}"'
887 fi
888 echo "Finished deployment of ELK stack"
889 return 0
890 }
891
892 function install_lightweight() {
893 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
894 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
895 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
896 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
897 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
898
899 track checkingroot
900 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
901 track noroot
902
903 if [ -n "$KUBERNETES" ]; then
904 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
905 1. Install and configure LXD
906 2. Install juju
907 3. Install docker CE
908 4. Disable swap space
909 5. Install and initialize Kubernetes
910 as pre-requirements.
911 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
912
913 else
914 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
915 fi
916 track proceed
917
918 echo "Installing lightweight build of OSM"
919 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
920 trap 'rm -rf "${LWTEMPDIR}"' EXIT
921 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
922 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
923 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
924 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
925 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
926
927 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
928 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
929 need_packages_lw="snapd"
930 echo -e "Checking required packages: $need_packages_lw"
931 dpkg -l $need_packages_lw &>/dev/null \
932 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
933 || sudo apt-get update \
934 || FATAL "failed to run apt-get update"
935 dpkg -l $need_packages_lw &>/dev/null \
936 || ! echo -e "Installing $need_packages_lw requires root privileges." \
937 || sudo apt-get install -y $need_packages_lw \
938 || FATAL "failed to install $need_packages_lw"
939 install_lxd
940 fi
941
942 track prereqok
943
944 [ -z "$INSTALL_NOJUJU" ] && install_juju
945 track juju_install
946
947 if [ -n "$LXD_CLOUD_FILE" ]; then
948 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
949 juju add-cloud lxd-cloud $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud --client -f $LXD_CLOUD_FILE
950 juju add-credential lxd-cloud -f $LXD_CRED_FILE || juju update-credential lxd-cloud lxd-cloud-creds -f $LXD_CRED_FILE
951 fi
952
953 if [ -z "$OSM_VCA_HOST" ]; then
954 juju_createcontroller
955 OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
956 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
957 fi
958 track juju_controller
959
960 if [ -z "$OSM_VCA_SECRET" ]; then
961 OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
962 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
963 fi
964 if [ -z "$OSM_VCA_PUBKEY" ]; then
965 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
966 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
967 fi
968 if [ -z "$OSM_VCA_CACERT" ]; then
969 OSM_VCA_CACERT=$(juju controllers --format json | jq -r '.controllers["osm"]["ca-cert"]' | base64 | tr -d \\n)
970 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
971 fi
972 if [ -z "$OSM_VCA_APIPROXY" ]; then
973 OSM_VCA_APIPROXY=$DEFAULT_IP
974 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
975 fi
976 juju_createproxy
977 track juju
978
979 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
980 OSM_DATABASE_COMMONKEY=$(generate_secret)
981 [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
982 fi
983
984 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
985 track docker_ce
986
987 #Installs Kubernetes and deploys osm services
988 if [ -n "$KUBERNETES" ]; then
989 install_kube
990 track install_k8s
991 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
992 kube_config_dir
993 track init_k8s
994 else
995 #install_docker_compose
996 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
997 track docker_swarm
998 fi
999
1000 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1001 track docker_build
1002
1003 generate_docker_env_files
1004
1005 if [ -n "$KUBERNETES" ]; then
1006 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1007 # uninstall OSM MONITORING
1008 uninstall_k8s_monitoring
1009 track uninstall_k8s_monitoring
1010 fi
1011 #remove old namespace
1012 remove_k8s_namespace $OSM_STACK_NAME
1013 deploy_cni_provider
1014 kube_secrets
1015 [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
1016 namespace_vol
1017 deploy_osm_services
1018 if [ -n "$INSTALL_PLA" ]; then
1019 # optional PLA install
1020 deploy_osm_pla_service
1021 fi
1022 track deploy_osm_services_k8s
1023 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1024 # install OSM MONITORING
1025 install_k8s_monitoring
1026 track install_k8s_monitoring
1027 fi
1028 else
1029 # remove old stack
1030 remove_stack $OSM_STACK_NAME
1031 create_docker_network
1032 deploy_lightweight
1033 generate_osmclient_script
1034 track docker_deploy
1035 install_prometheus_nodeexporter
1036 track nodeexporter
1037 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1038 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1039 fi
1040
1041 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1042 track osmclient
1043
1044 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
1045 track end
1046 return 0
1047 }
1048
1049 function install_vimemu() {
1050 echo -e "\nInstalling vim-emu"
1051 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1052 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1053 # install prerequisites (OVS is a must for the emulator to work)
1054 sudo apt-get install -y openvswitch-switch
1055 # clone vim-emu repository (attention: branch is currently master only)
1056 echo "Cloning vim-emu repository ..."
1057 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1058 # build vim-emu docker
1059 echo "Building vim-emu Docker container..."
1060
1061 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1062 # start vim-emu container as daemon
1063 echo "Starting vim-emu Docker container 'vim-emu' ..."
1064 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1065 # in lightweight mode, the emulator needs to be attached to netOSM
1066 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1067 else
1068 # classic build mode
1069 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1070 fi
1071 echo "Waiting for 'vim-emu' container to start ..."
1072 sleep 5
1073 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1074 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1075 # print vim-emu connection info
1076 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1077 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1078 echo -e "To add the emulated VIM to OSM you should do:"
1079 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1080 }
1081
1082 function install_k8s_monitoring() {
1083 # install OSM monitoring
1084 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1085 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1086 }
1087
1088 function uninstall_k8s_monitoring() {
1089 # uninstall OSM monitoring
1090 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1091 }
1092
1093 function dump_vars(){
1094 echo "DEVELOP=$DEVELOP"
1095 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1096 echo "UNINSTALL=$UNINSTALL"
1097 echo "UPDATE=$UPDATE"
1098 echo "RECONFIGURE=$RECONFIGURE"
1099 echo "TEST_INSTALLER=$TEST_INSTALLER"
1100 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1101 echo "INSTALL_PLA=$INSTALL_PLA"
1102 echo "INSTALL_LXD=$INSTALL_LXD"
1103 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1104 echo "INSTALL_ONLY=$INSTALL_ONLY"
1105 echo "INSTALL_ELK=$INSTALL_ELK"
1106 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1107 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
1108 echo "TO_REBUILD=$TO_REBUILD"
1109 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1110 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1111 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1112 echo "RELEASE=$RELEASE"
1113 echo "REPOSITORY=$REPOSITORY"
1114 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1115 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1116 echo "OSM_DEVOPS=$OSM_DEVOPS"
1117 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1118 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1119 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1120 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1121 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1122 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1123 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
1124 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1125 echo "DOCKER_USER=$DOCKER_USER"
1126 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1127 echo "PULL_IMAGES=$PULL_IMAGES"
1128 echo "KUBERNETES=$KUBERNETES"
1129 echo "SHOWOPTS=$SHOWOPTS"
1130 echo "Install from specific refspec (-b): $COMMIT_ID"
1131 }
1132
1133 function track(){
1134 ctime=`date +%s`
1135 duration=$((ctime - SESSION_ID))
1136 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1137 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1138 event_name="bin"
1139 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1140 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1141 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1142 event_name="${event_name}_$1"
1143 url="${url}&event=${event_name}&ce_duration=${duration}"
1144 wget -q -O /dev/null $url
1145 }
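# Illustrative resulting request for a lightweight install at step "prereqok":
#   .../track/ce?project=osm.etsi.org&cookie=<SESSION_ID>&event=lw_prereqok&ce_duration=<seconds>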
1146
1147 UNINSTALL=""
1148 DEVELOP=""
1149 UPDATE=""
1150 RECONFIGURE=""
1151 TEST_INSTALLER=""
1152 INSTALL_LXD=""
1153 SHOWOPTS=""
1154 COMMIT_ID=""
1155 ASSUME_YES=""
1156 INSTALL_FROM_SOURCE=""
1157 RELEASE="ReleaseSEVEN"
1158 REPOSITORY="stable"
1159 INSTALL_VIMEMU=""
1160 INSTALL_PLA=""
1161 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1162 LXD_REPOSITORY_PATH=""
1163 INSTALL_LIGHTWEIGHT="y"
1164 INSTALL_ONLY=""
1165 INSTALL_ELK=""
1166 TO_REBUILD=""
1167 INSTALL_NOLXD=""
1168 INSTALL_NODOCKER=""
1169 INSTALL_NOJUJU=""
1170 KUBERNETES=""
1171 INSTALL_K8S_MONITOR=""
1172 INSTALL_NOHOSTCLIENT=""
1173 SESSION_ID=`date +%s`
1174 OSM_DEVOPS=
1175 OSM_VCA_HOST=
1176 OSM_VCA_SECRET=
1177 OSM_VCA_PUBKEY=
1178 OSM_VCA_CLOUDNAME="localhost"
1179 OSM_STACK_NAME=osm
1180 NO_HOST_PORTS=""
1181 DOCKER_NOBUILD=""
1182 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1183 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1184 WORKDIR_SUDO=sudo
1185 OSM_WORK_DIR="/etc/osm"
1186 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1187 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1188 OSM_HOST_VOL="/var/lib/osm"
1189 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1190 OSM_DOCKER_TAG=latest
1191 DOCKER_USER=opensourcemano
1192 PULL_IMAGES="y"
1193 KAFKA_TAG=2.11-1.0.2
1194 PROMETHEUS_TAG=v2.4.3
1195 GRAFANA_TAG=latest
1196 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1197 PROMETHEUS_CADVISOR_TAG=latest
1198 KEYSTONEDB_TAG=10
1199 OSM_DATABASE_COMMONKEY=
1200 ELASTIC_VERSION=6.4.2
1201 ELASTIC_CURATOR_VERSION=5.5.4
1202 POD_NETWORK_CIDR=10.244.0.0/16
1203 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1204 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1205
1206 while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:l:L:-: hy" o; do
1207 case "${o}" in
1208 b)
1209 COMMIT_ID=${OPTARG}
1210 PULL_IMAGES=""
1211 ;;
1212 r)
1213 REPOSITORY="${OPTARG}"
1214 REPO_ARGS+=(-r "$REPOSITORY")
1215 ;;
1216 c)
1217 [ "${OPTARG}" == "swarm" ] && continue
1218 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1219 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1220 usage && exit 1
1221 ;;
1222 k)
1223 REPOSITORY_KEY="${OPTARG}"
1224 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1225 ;;
1226 u)
1227 REPOSITORY_BASE="${OPTARG}"
1228 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1229 ;;
1230 R)
1231 RELEASE="${OPTARG}"
1232 REPO_ARGS+=(-R "$RELEASE")
1233 ;;
1234 D)
1235 OSM_DEVOPS="${OPTARG}"
1236 ;;
1237 o)
1238 INSTALL_ONLY="y"
1239 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1240 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1241 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1242 ;;
1243 m)
1244 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1245 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1246 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1247 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1248 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1249 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1250 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1251 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1252 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1253 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1254 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1255 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1256 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1257 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1258 ;;
1259 H)
1260 OSM_VCA_HOST="${OPTARG}"
1261 ;;
1262 S)
1263 OSM_VCA_SECRET="${OPTARG}"
1264 ;;
1265 s)
1266 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 1
1267 ;;
1268 w)
1269 # when specifying workdir, do not use sudo for access
1270 WORKDIR_SUDO=
1271 OSM_WORK_DIR="${OPTARG}"
1272 ;;
1273 t)
1274 OSM_DOCKER_TAG="${OPTARG}"
1275 ;;
1276 U)
1277 DOCKER_USER="${OPTARG}"
1278 ;;
1279 P)
1280 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1281 ;;
1282 A)
1283 OSM_VCA_APIPROXY="${OPTARG}"
1284 ;;
1285 l)
1286 LXD_CLOUD_FILE="${OPTARG}"
1287 ;;
1288 L)
1289 LXD_CRED_FILE="${OPTARG}"
1290 ;;
1291 -)
1292 [ "${OPTARG}" == "help" ] && usage && exit 0
1293 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1294 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1295 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1296 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1297 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1298 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1299 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1300 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1301 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1302 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1303 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1304 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1305 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1306 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1307 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1308 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1309 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1310 [ "${OPTARG}" == "pullimages" ] && continue
1311 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1312 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1313 [ "${OPTARG}" == "bundle" ] && continue
1314 [ "${OPTARG}" == "kubeconfig" ] && continue
1315 [ "${OPTARG}" == "lxdendpoint" ] && continue
1316 [ "${OPTARG}" == "lxdcert" ] && continue
1317 [ "${OPTARG}" == "microstack" ] && continue
1318 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1319 echo -e "Invalid option: '--$OPTARG'\n" >&2
1320 usage && exit 1
1321 ;;
1322 :)
1323 echo "Option -$OPTARG requires an argument" >&2
1324 usage && exit 1
1325 ;;
1326 \?)
1327 echo -e "Invalid option: '-$OPTARG'\n" >&2
1328 usage && exit 1
1329 ;;
1330 h)
1331 usage && exit 0
1332 ;;
1333 y)
1334 ASSUME_YES="y"
1335 ;;
1336 *)
1337 usage && exit 1
1338 ;;
1339 esac
1340 done
1341
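# Sanity checks on -m: NONE is mutually exclusive with any other module, and PLA
# requires the --pla flag.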
1342 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1343 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1344
1345 if [ -n "$SHOWOPTS" ]; then
1346 dump_vars
1347 exit 0
1348 fi
1349
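# Charmed (Juju bundle) installations are delegated entirely to the charmed_install.sh /
# charmed_uninstall.sh scripts under /usr/share/osm-devops; the remaining command-line
# arguments are passed through unchanged.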
1350 if [ -n "$CHARMED" ]; then
1351 if [ -n "$UNINSTALL" ]; then
1352 /usr/share/osm-devops/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1353 else
1354 /usr/share/osm-devops/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1355 fi
1356
1357 echo "Your installation is now complete, follow these steps for configuring the osmclient:"
1358 echo
1359 echo "1. Get the NBI IP with the following command:"
1360 echo
1361 echo "juju status --format yaml | yq r - applications.nbi-k8s.address"
1362 echo
1363 echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
1364 echo
1365 echo "export OSM_HOSTNAME=<NBI-IP>"
1366 echo
1367 echo "3. Add the previous command to your .bashrc for other Shell sessions"
1368 echo
1369 echo "export OSM_HOSTNAME=<previous-IP> >> ~/.bashrc"
1370 echo
1371 echo "DONE"
1372
1373 exit 0
1374 fi
1375
1376 # if develop, we force master
1377 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1378
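# Bootstrap dependencies: each "dpkg -l ... || ! echo ... || sudo ..." chain only
# reaches the sudo command when dpkg reports one of the listed packages as missing.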
1379 need_packages="git jq wget curl tar"
1380 echo -e "Checking required packages: $need_packages"
1381 dpkg -l $need_packages &>/dev/null \
1382 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1383 || sudo apt-get update \
1384 || FATAL "failed to run apt-get update"
1385 dpkg -l $need_packages &>/dev/null \
1386 || ! echo -e "Installing $need_packages requires root privileges." \
1387 || sudo apt-get install -y $need_packages \
1388 || FATAL "failed to install $need_packages"
1389
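# Locate the devops repository: with --test, the local checkout containing this
# script is reused; otherwise osm/devops is cloned into a temporary directory
# (removed on exit) and the requested refspec, or the newest stable tag, is checked out.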
1390 if [ -z "$OSM_DEVOPS" ]; then
1391 if [ -n "$TEST_INSTALLER" ]; then
1392 echo -e "\nUsing local devops repo for OSM installation"
1393 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1394 else
1395 echo -e "\nCreating temporary dir for OSM installation"
1396 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1397 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1398
1399 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1400
1401 if [ -z "$COMMIT_ID" ]; then
1402 echo -e "\nGuessing the current stable release"
1403 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1404 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1405
1406 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1407 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1408 else
1409 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1410 fi
1411 git -C $OSM_DEVOPS checkout $COMMIT_ID
1412 fi
1413 fi
1414
1415 . $OSM_DEVOPS/common/all_funcs
1416
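# Shortcut paths: with --uninstall the lightweight deployment is torn down, and with
# -o only the selected addon (vimemu, elk_stack or k8s_monitor) is deployed, before exiting.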
1417 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1418 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1419 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1420 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1421 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1422 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1423
1424 #Installation starts here
1425 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
1426 track start
1427
1428 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1429 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1430 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1431 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1432 fi
1433
1434 echo -e "Checking required packages: lxd"
1435 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1436 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1437
1438 # use local devops for containers
1439 export OSM_USE_LOCAL_DEVOPS=true
1440
1441 #Install osmclient
1442
1443 #Install vim-emu (optional)
1444 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1445
1446 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
1447 track end
1448 echo -e "\nDONE"
1449