Add -K option to specify an external juju controller
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
#######################################
# Print the installer's command-line help to stdout.
# Fixes in help text: "confifured"->"configured", "moitoring"->"monitoring",
# "do not juju"->"do not install juju", spacing after --nodockerbuild.
#######################################
function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -r <repo>:      use specified repository name for osm packages"
    echo -e "     -R <release>:   use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>:  use specified repository public key url"
    echo -e "     -b <refspec>:   install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "                     -b master          (main dev branch)"
    echo -e "                     -b v2.0            (v2.0 branch)"
    echo -e "                     -b tags/v1.1.0     (a specific tag)"
    echo -e "                     ..."
    echo -e "     -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>.  If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -s <stack name> or <namespace>  user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e "     -H <VCA host>   use specific juju host controller IP"
    echo -e "     -S <VCA secret> use VCA/juju secret key"
    echo -e "     -P <VCA pubkey> use VCA/juju public key file"
    echo -e "     -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e "     --vimemu:       additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack:    additionally deploy an ELK docker stack for event logging"
    echo -e "     --pla:          install the PLA module for placement support"
    echo -e "     -m <MODULE>:    install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
    echo -e "     -o <ADDON>:     ONLY (un)installs one of the addons (vimemu, elk_stack)"
    echo -e "     -D <devops path> use local devops installation path"
    echo -e "     -w <work dir>   Location to store runtime installation"
    echo -e "     -t <docker tag> specify osm docker tag (default is latest)"
    echo -e "     -l:             LXD cloud yaml file"
    echo -e "     -L:             LXD credentials yaml file"
    echo -e "     -K:             Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e "     --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "     --nodocker:     do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju:       do not install juju, assumes already installed"
    echo -e "     --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports:  do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall:    uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source:       install OSM from source code using the latest stable tag"
    echo -e "     --develop:      (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages:   pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor:  install the OSM kubernetes monitoring with prometheus and grafana"
    # echo -e "     --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
    # echo -e "     --update:       update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts:     print chosen options and exit (only for debugging)"
    echo -e "     -y:             do not prompt for confirmation, assumes yes"
    echo -e "     -h / --help:    print this help"
    echo -e "     --charmed:                   install OSM with charms"
    echo -e "     --bundle <bundle path>:      Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "     --kubeconfig <kubeconfig path>: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "     --controller <name>:         Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e "     --lxd-cloud <yaml path>:     Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "     --lxd-credentials <yaml path>: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "     --microstack:                Installs microstack as a vim. (--charmed option)"

}
72
73 # takes a juju/accounts.yaml file and returns the password specific
74 # for a controller. I wrote this using only bash tools to minimize
75 # additions of other packages
76 function parse_juju_password {
77 password_file="${HOME}/.local/share/juju/accounts.yaml"
78 local controller_name=$1
79 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
80 sed -ne "s|^\($s\):|\1|" \
81 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
82 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
83 awk -F$fs -v controller=$controller_name '{
84 indent = length($1)/2;
85 vname[indent] = $2;
86 for (i in vname) {if (i > indent) {delete vname[i]}}
87 if (length($3) > 0) {
88 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
89 if (match(vn,controller) && match($2,"password")) {
90 printf("%s",$3);
91 }
92 }
93 }'
94 }
95
#######################################
# Print a 32-character alphanumeric random secret to stdout.
# Reading the filter's input straight from /dev/urandom guarantees exactly
# 32 characters; the previous `head /dev/urandom` stopped after 10 random
# newline bytes, so the filtered output could occasionally be shorter.
#######################################
function generate_secret() {
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
99
#######################################
# Delete the persistent storage behind an OSM deployment: the host volume
# directory (k8s mode) or the stack's named docker volumes (swarm mode).
# Arguments: $1 - host volume path (k8s) or stack name (swarm)
#######################################
function remove_volumes() {
    if [ -z "$KUBERNETES" ]; then
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    else
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    fi
}
113
#######################################
# Remove the per-stack docker network (named "net<stack name>").
# Arguments: $1 - stack name
#######################################
function remove_network() {
    stack=$1
    local network_name="net${stack}"
    sg docker -c "docker network rm ${network_name}"
}
118
function remove_iptables() {
    # Delete the PREROUTING DNAT rule that exposed the juju/VCA API
    # (port 17070) on the host's default-route IP, if the rule exists.
    # Arguments: $1 - stack / juju controller name
    # Globals: OSM_VCA_HOST, DEFAULT_IP, DEFAULT_IF (resolved here if unset)
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # ask juju for the controller's api-endpoint and keep only the IP part
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        # interface carrying the default route, then its first IPv4 address
        DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # -C only checks for the rule; delete (-D) and persist when present
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
138
#######################################
# Remove a docker swarm stack and wait (up to 30s) until every one of its
# containers is gone; aborts through FATAL if some remain.
# Arguments: $1 - stack name
#######################################
function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            # number of containers still listed for the stack
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            [ "${result}" == "0" ] && break
            COUNTER=$((COUNTER+1))
            sleep 1
        done
        if [ "${result}" != "0" ]; then
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        echo "All dockers of the stack ${stack} were removed"
        sleep 5
    fi
}
162
#removes osm deployments and services
function remove_k8s_namespace() {
    # Arguments: $1 - namespace name; deleting the namespace removes every
    # resource deployed inside it.
    kubectl delete ns $1
}
167
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # Tear down an OSM deployment: swarm stack or k8s namespace, module
    # docker images, volumes, NAT rules and the local juju controller.
    # With INSTALL_ONLY + INSTALL_ELK set, only the ELK addon is removed.
    # Globals: INSTALL_ONLY, INSTALL_ELK, KUBERNETES, INSTALL_K8S_MONITOR,
    #   OSM_STACK_NAME, OSM_DOCKER_WORK_DIR, DOCKER_USER, OSM_DOCKER_TAG,
    #   OSM_HOST_VOL, CONTROLLER_NAME, WORKDIR_SUDO
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else

            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # newgrp so the docker group membership applies without re-login
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        # NAT rules and the juju controller are only local when OSM
        # bootstrapped its own controller (no external -K controller given)
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
218
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    # Preseed debconf so the install never prompts, then install the
    # package only when it is actually missing.
    echo -e "\nChecking required packages: iptables-persistent"
    # dpkg -l exits 0 when the package is present; the original test was
    # inverted and (re)installed exactly when the package already existed.
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
229
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # Runs the devops-provided nat_osm helper; requires root and
    # iptables-persistent (installed here if missing).
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
238
#######################################
# Report an unrecoverable error ($1 = reason) and abort the installer
# with exit status 1.
#######################################
function FATAL(){
    printf '%s\n' "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}
243
function install_lxd() {
    # Install the LXD snap (removing any deb-packaged LXC/LXD first),
    # preseed-initialize it listening on $DEFAULT_IP:8443, and align the
    # default profile's MTU with the host's default-route interface.
    # Globals read: DEFAULT_IP
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    # inject the HTTPS listen address into the preseed before lxd init
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
266
function ask_user(){
    # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
    # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true(0) if user type 'yes'; false (1) if user type 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        case "${USER_CONFIRMATION,,}" in
            "")
                # empty answer: honour the default, otherwise re-prompt
                [ "$2" == 'y' ] && return 0
                [ "$2" == 'n' ] && return 1
                ;;
            y|yes) return 0 ;;
            n|no)  return 1 ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
280
function install_osmclient(){
    # Install python3-osmclient and python3-osm-im from the configured
    # OSM apt repository, plus the pip-installed prerequisites.
    # Globals read: RELEASE, REPOSITORY, REPOSITORY_BASE, INSTALL_LIGHTWEIGHT
    # strip the option prefixes the arg parser left in these globals
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # non-lightweight installs run in LXC containers; resolve their IPs
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
311
function install_prometheus_nodeexporter(){
    # Install prometheus node_exporter as a systemd service, unless an
    # instance is already active.
    # Globals read: PROMETHEUS_NODE_EXPORTER_TAG, OSM_DEVOPS
    if systemctl -q is-active node_exporter; then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        # clean up the downloaded tarball and unpacked tree; the original
        # used a CWD-relative path, so the /tmp artifacts were never removed
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
337
function uninstall_prometheus_nodeexporter(){
    # Stop, disable and fully remove the node_exporter service, its
    # dedicated user and its binary (reverse of install_prometheus_nodeexporter).
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
347
function install_docker_ce() {
    # installs and configures Docker CE
    # Adds Docker's apt repository + GPG key, installs docker-ce, adds the
    # current user to the "docker" group and verifies the daemon responds.
    # Exits via FATAL on failure.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # group membership only applies to new sessions, hence sg
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
367
function install_docker_compose() {
    # installs and configures docker-compose
    # Downloads the pinned 1.18.0 release binary for this host's
    # OS/architecture and makes it executable.
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
375
function install_juju() {
    # Install the juju snap (classic confinement) and make sure the snap
    # binary directory is on PATH for the remainder of this script.
    echo "Installing juju"
    sudo snap install juju --classic
    case ":$PATH:" in
        *":/snap/bin:"*) ;;
        *) PATH="/snap/bin:${PATH}" ;;
    esac
    echo "Finished installation of juju"
    return 0
}
383
function juju_createcontroller() {
    # Bootstrap a juju controller named $OSM_STACK_NAME on the
    # $OSM_VCA_CLOUDNAME cloud unless one with that name already exists,
    # then verify exactly one such controller is registered.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # \$1 is escaped so awk (not this shell) expands it; the unescaped
    # original let the shell substitute its own empty positional parameter
    # into the awk program.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
392
function juju_createproxy() {
    # Add (idempotently) a DNAT rule forwarding connections to the host's
    # default IP on juju's API port 17070 to the VCA controller, and
    # persist it. Globals read: DEFAULT_IP, OSM_VCA_HOST
    check_install_iptables_persistent

    # -C checks whether the rule already exists; -A appends it only then
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
401
#######################################
# Helper: pull a prebuilt OSM image from the configured registry.
# Arguments: $1 - image name, $2 - label used in the error message
#######################################
function _pull_osm_image() {
    sg docker -c "docker pull ${DOCKER_USER}/$1:${OSM_DOCKER_TAG}" || FATAL "cannot pull $2 docker image"
}

#######################################
# Helper: clone an OSM module at $COMMIT_ID and build its docker image.
# Arguments: $1 - module repo name, $2 - Dockerfile path inside the repo,
#            $3 - image name, $4 - label used in the error message
#######################################
function _build_osm_image() {
    git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$1
    git -C ${LWTEMPDIR}/$1 checkout ${COMMIT_ID}
    sg docker -c "docker build ${LWTEMPDIR}/$1 -f ${LWTEMPDIR}/$1/$2 -t ${DOCKER_USER}/$3 --no-cache" || FATAL "cannot build $4 docker image"
}

#######################################
# Pull third-party base images and pull ($PULL_IMAGES) or build each OSM
# module image, honouring the $TO_REBUILD module filter.
# Globals read: COMMIT_ID, TO_REBUILD, PULL_IMAGES, INSTALL_PLA, LWTEMPDIR,
#   DOCKER_USER, OSM_DOCKER_TAG, OSM_DEVOPS, BUILD_ARGS and *_TAG variables
#######################################
function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # Third-party base images
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # OSM module images: pulled prebuilt when PULL_IMAGES is set,
    # otherwise built from a fresh checkout of the module repository.
    if [ -n "$PULL_IMAGES" ]; then
        _pull_osm_image mon MON
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        _build_osm_image MON docker/Dockerfile mon MON
    fi

    if [ -n "$PULL_IMAGES" ]; then
        _pull_osm_image pol POL
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        _build_osm_image POL docker/Dockerfile pol POL
    fi

    # PLA is optional; only handled when INSTALL_PLA is set (or explicitly
    # requested through TO_REBUILD)
    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        _pull_osm_image pla PLA
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        _build_osm_image PLA docker/Dockerfile pla PLA
    fi

    if [ -n "$PULL_IMAGES" ]; then
        _pull_osm_image nbi NBI
        _pull_osm_image keystone KEYSTONE
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        _build_osm_image NBI Dockerfile.local nbi NBI
        # the keystone image is built from a subdirectory of the NBI checkout
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        _pull_osm_image ro RO
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        _build_osm_image RO Dockerfile-local ro RO
    fi

    if [ -n "$PULL_IMAGES" ]; then
        _pull_osm_image lcm "LCM RO"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        _build_osm_image LCM Dockerfile.local lcm LCM
    fi

    if [ -n "$PULL_IMAGES" ]; then
        _pull_osm_image light-ui light-ui
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        _build_osm_image LW-UI docker/Dockerfile light-ui LW-UI
    fi

    if [ -n "$PULL_IMAGES" ]; then
        _pull_osm_image osmclient osmclient
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # NOTE: a second, identical cadvisor pull used to live here; it was a
    # duplicate of the PROMETHEUS-CADVISOR block above and has been removed.

    echo "Finished generation of docker images"
}
513
#######################################
# Copy $1 to $2 unless both files already have identical content; if $2
# exists and differs, ask the user before overwriting (default: keep).
#######################################
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    # cmp -s replaces the original `! $(cmp ... >/dev/null)` form, which
    # executed cmp's empty output as a command and only worked through
    # bash's empty-command-substitution status propagation.
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
525
#######################################
# Helper: set KEY=VALUE in an env file — append the line when the key is
# absent, rewrite it in place when already present.
# Arguments: $1 - env file path, $2 - key, $3 - value
# (tee's stdout is left visible on purpose, matching the original output)
#######################################
function _upsert_env() {
    local env_file=$1 key=$2 value=$3
    if ! grep -Fq "${key}" $env_file; then
        echo "${key}=${value}" | $WORKDIR_SUDO tee -a $env_file
    else
        $WORKDIR_SUDO sed -i "s|${key}.*|${key}=${value}|g" $env_file
    fi
}

#######################################
# Back up and (re)generate the per-module env files and the supporting
# compose/k8s/prometheus/grafana resources under $OSM_DOCKER_WORK_DIR.
# Globals read: KUBERNETES, INSTALL_PLA, OSM_DEVOPS, OSM_DOCKER_WORK_DIR,
#   WORKDIR_SUDO, OSM_DATABASE_COMMONKEY, OSM_VCA_* and DEFAULT_IP
#######################################
function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        if [ -n "$INSTALL_PLA" ]; then
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
        fi

        # Prometheus files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

        # Grafana files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

        # Prometheus Exporters files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    _upsert_env $OSM_DOCKER_WORK_DIR/lcm.env OSMLCM_VCA_HOST "${OSM_VCA_HOST}"
    _upsert_env $OSM_DOCKER_WORK_DIR/lcm.env OSMLCM_VCA_SECRET "${OSM_VCA_SECRET}"
    _upsert_env $OSM_DOCKER_WORK_DIR/lcm.env OSMLCM_VCA_PUBKEY "${OSM_VCA_PUBKEY}"
    _upsert_env $OSM_DOCKER_WORK_DIR/lcm.env OSMLCM_VCA_CACERT "${OSM_VCA_CACERT}"
    _upsert_env $OSM_DOCKER_WORK_DIR/lcm.env OSMLCM_VCA_APIPROXY "${OSM_VCA_APIPROXY}"

    # these two are only seeded (commented out) when absent, never rewritten
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    _upsert_env $OSM_DOCKER_WORK_DIR/lcm.env OSMLCM_VCA_CLOUD "${OSM_VCA_CLOUDNAME}"

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    _upsert_env $OSM_DOCKER_WORK_DIR/mon.env OS_NOTIFIER_URI "http://${DEFAULT_IP}:8662"
    _upsert_env $OSM_DOCKER_WORK_DIR/mon.env OSMMON_VCA_HOST "${OSM_VCA_HOST}"
    _upsert_env $OSM_DOCKER_WORK_DIR/mon.env OSMMON_VCA_SECRET "${OSM_VCA_SECRET}"
    _upsert_env $OSM_DOCKER_WORK_DIR/mon.env OSMMON_VCA_CACERT "${OSM_VCA_CACERT}"

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
684
function generate_osmclient_script () {
    # Drop a small wrapper script into the work dir that runs the osmclient
    # sidecar container attached to the stack's overlay network, and make it
    # executable so users can invoke it directly.
    $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm <<< "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}"
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
690
#installs kubernetes packages
function install_kube() {
    # Install kubelet/kubeadm/kubectl from the official Kubernetes apt repo.
    # Optional $1 selects the package version; defaults to the previously
    # hard-coded 1.15.0-00 so existing callers are unaffected.
    local kube_version="${1:-1.15.0-00}"
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=${kube_version} kubeadm=${kube_version} kubectl=${kube_version}
}
700
#initializes kubernetes control plane
function init_kubeadm() {
    # kubeadm refuses to run with swap enabled, so turn it off first;
    # $1 is the kubeadm cluster configuration file.
    local config_file="$1"
    sudo swapoff -a
    sudo kubeadm init --config "$config_file"
    sleep 5
}
707
function kube_config_dir() {
    # Copy the admin kubeconfig into the invoking user's ~/.kube so kubectl
    # works without sudo; bail out if kubernetes was not actually installed.
    if [ ! -d $K8S_MANIFEST_DIR ]; then
        FATAL "Cannot Install Kubernetes"
    fi
    mkdir -p "$HOME/.kube"
    sudo cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
    sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
}
714
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Fetch the flannel CNI manifest into a throw-away directory and apply it;
    # the temp dir is removed on script exit by the trap.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # Idiomatic error check (replaces the old "[ $? -ne 0 ] &&" pattern)
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
723
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # Each OSM service gets its env file wrapped in a k8s secret named
    # "<service>-secret" inside the stack namespace.
    local svc
    kubectl create ns $OSM_STACK_NAME
    for svc in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${svc}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${svc}.env
    done
}
736
#deploys osm pods and services
function deploy_osm_services() {
    # Locate the master node and remove its NoSchedule taint so that OSM
    # workloads can run on a single-node cluster, then apply all manifests
    # into the stack namespace.
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
744
function deploy_osm_pla_service() {
    # Deploy the optional placement (PLA) module.
    # corresponding to parse_yaml: pin the PLA image tag unless it is the
    # default release tag "7".
    # BUGFIX: $OSM_DOCKER_TAG is now quoted; unquoted it made the test
    # malformed when the variable is empty.
    [ ! "$OSM_DOCKER_TAG" == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol: point hostPath volumes at the stack dir
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
753
function parse_yaml() {
    # Rewrite the opensourcemano image tag in every OSM service manifest to
    # the tag given in $1.
    osm_services="nbi lcm ro pol mon light-ui keystone"
    TAG=$1
    for module in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$module:.*/opensourcemano\/$module:$TAG/g" $OSM_K8S_WORK_DIR/$module.yaml
    done
}
761
function namespace_vol() {
    # Replace the default hostPath (/var/lib/osm) with the per-stack volume
    # directory in every manifest that mounts persistent storage.
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    for module in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$module.yaml
    done
}
768
function init_docker_swarm() {
    # If the default interface MTU is non-standard, pre-create docker_gwbridge
    # with a matching MTU before "swarm init" would create it with defaults.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # next free 172.x subnet after the highest one already used by docker
        DOCKER_NETS=$(sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s)
        DOCKER_GW_NET=$(sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}')
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
778
function create_docker_network() {
    echo "creating network"
    # Attachable overlay network shared by all OSM stack containers; the MTU
    # matches the host's default interface so encapsulated traffic fits.
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
784
function deploy_lightweight() {

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    # With --nohostports only the container-side ports are declared; otherwise
    # each service is also published on the host as host:container (Prometheus
    # is remapped to $OSM_PROM_HOSTPORT to avoid clashing with the RO port).
    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    # osm_ports.sh is sourced by the stack deploy command below; the first
    # tee truncates the file, the heredoc appends the remaining exports.
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh <<EOF
export OSM_NETWORK=net${OSM_STACK_NAME}
export TAG=${OSM_DOCKER_TAG}
export DOCKER_USER=${DOCKER_USER}
export KAFKA_TAG=${KAFKA_TAG}
export PROMETHEUS_TAG=${PROMETHEUS_TAG}
export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}
export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}
export GRAFANA_TAG=${GRAFANA_TAG}
EOF

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
843
function deploy_elk() {
    # Deploy the optional ELK (Elasticsearch/Kibana + beats/curator) stack as
    # a separate docker swarm stack named "osm_elk", attached to the OSM
    # network, then try to create a default Kibana index pattern for the
    # filebeat indices.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    # Copy the compose files shipped with devops into the stack work dir
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # Poll Kibana's status endpoint every $step seconds, up to $timelength s.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never came up: print the manual commands for the user instead
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
893
function install_lightweight() {
    # Main entry point for the "lightweight" (containerized) OSM install.
    # Prepares the per-stack work dirs, installs prerequisites (LXD, juju,
    # docker, optionally kubernetes), bootstraps or reuses a juju (VCA)
    # controller (-K selects an external one), then deploys the OSM stack.
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Determine the VCA (juju controller) endpoint: bootstrap a local one, or
    # register the local LXD as a cloud on the external controller given by -K.
    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                OSM_VCA_CLOUDNAME="lxd-cloud"
                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
            fi
            juju_createcontroller
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                # No cloud/credential files given: generate a client cert, trust
                # it in the local LXD, and register it on the external controller.
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                # sed indents the PEM blocks to fit the YAML block scalars below
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # BUGFIX: the original tested the literal string "OSM_DATABASE_COMMONKEY"
        # (missing $), so this FATAL could never trigger.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ ! "$OSM_DOCKER_TAG" == "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        # BUGFIX: the original read '[ -n "$INSTALL_PLA"]' (missing space
        # before "]"), which malformed the test and misfired the condition.
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
    track end
    return 0
}
1092
function install_vimemu() {
    # Build and launch the vim-emu (emulated VIM) docker container for testing.
    # BUGFIX: the original used plain echo, printing a literal "\n".
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # -y keeps this non-interactive, consistent with every other apt-get
    # install in this script
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1125
function install_k8s_monitoring() {
    # install OSM monitoring
    # Runs the monitoring installer bundled in the devops tree against the
    # current cluster; the scripts need the execute bit set first.
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
1131
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    # Delegates to the uninstaller bundled in the devops tree.
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
1136
function dump_vars(){
    # Print the effective value of every installer option (used by --showopts).
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # BUGFIX: previously echoed $OSM_STACK_NAME under the OSM_WORK_DIR label
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1176
function track(){
    # Anonymous install telemetry: report a progress event (named in $1,
    # prefixed by the install mode) together with the time elapsed since the
    # session started.
    local ctime duration url event_name
    ctime=$(date +%s)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    # Default prefix is "bin"; source / lxd / lightweight installs override it.
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
1190
1191 UNINSTALL=""
1192 DEVELOP=""
1193 UPDATE=""
1194 RECONFIGURE=""
1195 TEST_INSTALLER=""
1196 INSTALL_LXD=""
1197 SHOWOPTS=""
1198 COMMIT_ID=""
1199 ASSUME_YES=""
1200 INSTALL_FROM_SOURCE=""
1201 RELEASE="ReleaseSEVEN"
1202 REPOSITORY="stable"
1203 INSTALL_VIMEMU=""
1204 INSTALL_PLA=""
1205 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1206 LXD_REPOSITORY_PATH=""
1207 INSTALL_LIGHTWEIGHT="y"
1208 INSTALL_ONLY=""
1209 INSTALL_ELK=""
1210 TO_REBUILD=""
1211 INSTALL_NOLXD=""
1212 INSTALL_NODOCKER=""
1213 INSTALL_NOJUJU=""
1214 KUBERNETES=""
1215 INSTALL_K8S_MONITOR=""
1216 INSTALL_NOHOSTCLIENT=""
1217 SESSION_ID=`date +%s`
1218 OSM_DEVOPS=
1219 OSM_VCA_HOST=
1220 OSM_VCA_SECRET=
1221 OSM_VCA_PUBKEY=
1222 OSM_VCA_CLOUDNAME="localhost"
1223 OSM_STACK_NAME=osm
1224 NO_HOST_PORTS=""
1225 DOCKER_NOBUILD=""
1226 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1227 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1228 WORKDIR_SUDO=sudo
1229 OSM_WORK_DIR="/etc/osm"
1230 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1231 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1232 OSM_HOST_VOL="/var/lib/osm"
1233 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1234 OSM_DOCKER_TAG=latest
1235 DOCKER_USER=opensourcemano
1236 PULL_IMAGES="y"
1237 KAFKA_TAG=2.11-1.0.2
1238 PROMETHEUS_TAG=v2.4.3
1239 GRAFANA_TAG=latest
1240 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1241 PROMETHEUS_CADVISOR_TAG=latest
1242 KEYSTONEDB_TAG=10
1243 OSM_DATABASE_COMMONKEY=
1244 ELASTIC_VERSION=6.4.2
1245 ELASTIC_CURATOR_VERSION=5.5.4
1246 POD_NETWORK_CIDR=10.244.0.0/16
1247 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1248 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1249
# Parse command-line options; long options are funneled through the "-" arm.
while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:l:L:K:-: hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # BUGFIX: error message referred to a non-existent -i option
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        m)
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            # -K: name of an external juju controller to use instead of
            # bootstrapping a local one
            CONTROLLER_NAME="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "kubeconfig" ] && continue
            [ "${OPTARG}" == "lxdendpoint" ] && continue
            [ "${OPTARG}" == "lxdcert" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1388
# Validate -m (module rebuild) combinations:
#  - "-m NONE" is exclusive: it cannot be mixed with any other -m value.
#    TO_REBUILD accumulates values with a leading space, so a pure NONE
#    selection is exactly " NONE"; anything longer containing NONE is a mix.
#  - "-m PLA" only makes sense when PLA is being installed (--pla).
# $TO_REBUILD is quoted to avoid word-splitting/globbing before grep (SC2086).
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo "$TO_REBUILD" | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1391
# --showopts: print the resolved configuration variables and stop before
# doing any real work.
if [[ -n "$SHOWOPTS" ]]; then
    dump_vars
    exit 0
fi
1396
# --charmed: delegate the whole (un)installation to the charmed installers
# shipped with the osm-devops package, print osmclient setup steps, and exit.
# Remaining CLI args are forwarded via "$@" so charm-specific long options
# (bundle, kubeconfig, lxdendpoint, ...) reach the charmed installer.
# Variable expansions are quoted so empty/spaced values cannot break the
# argument list (SC2086).
if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        /usr/share/osm-devops/installers/charmed_uninstall.sh -R "$RELEASE" -r "$REPOSITORY" -u "$REPOSITORY_BASE" -D /usr/share/osm-devops -t "$DOCKER_TAG" "$@"
    else
        /usr/share/osm-devops/installers/charmed_install.sh -R "$RELEASE" -r "$REPOSITORY" -u "$REPOSITORY_BASE" -D /usr/share/osm-devops -t "$DOCKER_TAG" "$@"
    fi

    echo "Your installation is now complete, follow these steps for configuring the osmclient:"
    echo
    echo "1. Get the NBI IP with the following command:"
    echo
    echo "juju status --format yaml | yq r - applications.nbi-k8s.address"
    echo
    echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
    echo
    echo "export OSM_HOSTNAME=<NBI-IP>"
    echo
    echo "3. Add the previous command to your .bashrc for other Shell sessions"
    echo
    # Fixed instruction: the command must echo the export line into .bashrc;
    # running 'export VAR=... >> ~/.bashrc' would append nothing.
    echo "echo \"export OSM_HOSTNAME=<NBI-IP>\" >> ~/.bashrc"
    echo
    echo "DONE"

    exit 0
fi
1422
1423 # if develop, we force master
1424 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1425
1426 need_packages="git wget curl tar"
1427 echo -e "Checking required packages: $need_packages"
1428 dpkg -l $need_packages &>/dev/null \
1429 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1430 || sudo apt-get update \
1431 || FATAL "failed to run apt-get update"
1432 dpkg -l $need_packages &>/dev/null \
1433 || ! echo -e "Installing $need_packages requires root privileges." \
1434 || sudo apt-get install -y $need_packages \
1435 || FATAL "failed to install $need_packages"
1436 sudo snap install jq
# Locate the devops repo that drives the installation. Precedence:
#   1. OSM_DEVOPS already set by the caller: use it as-is.
#   2. --test: use the local checkout this script lives in.
#   3. Otherwise: clone osm/devops into a temp dir (removed on EXIT) and
#      check out either the requested COMMIT_ID or the latest stable tag.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        # Script lives in <devops>/installers/, so devops root is one level up
        # from the script's resolved directory.
        OSM_DEVOPS="$(dirname "$(realpath "$(dirname "$0")")")"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git "$OSM_DEVOPS"

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            # The tag pattern is quoted so the shell cannot glob-expand it
            # against files in the current directory; git does the matching.
            LATEST_STABLE_DEVOPS=$(git -C "$OSM_DEVOPS" tag -l 'v[0-9].*' | sort -V | tail -n1)
            # NOTE(review): exits with status 0 even though no release was
            # found — kept as-is for backward compatibility with callers.
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C "$OSM_DEVOPS" checkout "$COMMIT_ID"
    fi
fi
1461
1462 . $OSM_DEVOPS/common/all_funcs
1463
1464 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1465 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1466 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1467 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1468 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1469 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1470
#Installation starts here
# The README fetch appears to be an anonymous "install started" ping to the
# OSM download server (output discarded) — presumably used for install
# statistics; confirm against osm-download server-side tooling.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
track start

# Default (lightweight/k8s-or-swarm) install path: does everything and exits.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
# Source installs are long; ask for confirmation unless -y was given.
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

# Classic (non-lightweight) install requires LXD on the host.
echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional); needs docker on the host.
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Second ping marks "install finished" (see note on the first wget above).
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
track end
echo -e "\nDONE"
1496