#!/bin/bash
# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " OPTIONS"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " ..."
    echo -e " -c <orchestrator>: deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e " -n <ui>: install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified, osm will be installed with light-ui. When used with --uninstall, osm along with the specified UI will be uninstalled"
    echo -e " -s <stack name> or <namespace>: user-defined stack name when installed using swarm, or namespace when installed using k8s; default is osm"
    echo -e " -H <VCA host>: use specific juju host controller IP"
    echo -e " -S <VCA secret>: use VCA/juju secret key"
    echo -e " -P <VCA pubkey>: use VCA/juju public key file"
    echo -e " -C <VCA cacert>: use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy>: use VCA/juju API proxy"
    echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e " -O <openrc file/cloud name>: install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e " -N <openstack public network name/ID>: public network name required to setup OSM to OpenStack"
    echo -e " -D <devops path>: use local devops installation path"
    echo -e " -w <work dir>: location to store runtime installation"
    echo -e " -t <docker tag>: specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: specifies the name of the controller to use - the controller must be already bootstrapped"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not install juju, assumes it is already installed"
    echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " --volume: create a VM volume when installing to OpenStack"
    # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -h / --help: print this help"
    echo -e " --charmed: deploy and operate OSM with Charms on k8s"
    echo -e " [--bundle <bundle path>]: specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " [--k8s <kubeconfig path>]: specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " [--vca <name>]: specifies the name of the controller to use - the controller must be already bootstrapped (--charmed option)"
    echo -e " [--lxd <yaml path>]: takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " [--lxd-cred <yaml path>]: takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " [--microstack]: installs microstack as a vim (--charmed option)"
    echo -e " [--ha]: installs High Availability bundle (--charmed option)"
    echo -e " [--tag]: Docker image tag"
}
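
# Illustrative invocations (flag combinations are examples, not the only valid ones;
# all flags are documented in usage() above):
#   ./full_install_osm.sh                          # default install from binaries
#   ./full_install_osm.sh -c k8s -n ngui --pla     # k8s deployment with Next Gen UI and PLA
#   ./full_install_osm.sh --uninstall              # remove containers and NAT rules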

# Takes a juju accounts.yaml file and returns the password for a specific
# controller. I wrote this using only bash tools to minimize additions of
# other packages.
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
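
# Illustrative usage (mirrors the call made later by install_lightweight):
#   OSM_VCA_SECRET=$(parse_juju_password "$OSM_STACK_NAME")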

function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
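
# Illustrative usage: prints a random 32-character alphanumeric string to stdout, e.g.
#   MYSQL_ROOT_PASSWORD=$(generate_secret)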

function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}

function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}

function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}

function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}

#removes osm deployments and services
function remove_k8s_namespace() {
    kubectl delete ns $1
}

#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}

#Uninstall osmclient
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}

#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "OSM docker images and volumes will now be deleted"
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        # The UI image is removed according to the UI flavour in use
        if [ -n "$NGUI" ]; then
            newgrp docker << EONG
docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}
EONG
        else
            newgrp docker << EONG
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
EONG
        fi

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}

#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}

#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}

function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}

function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}

function ask_user(){
    # Asks the user a question and parses a response among 'y', 'yes', 'n' or 'no'. Case insensitive.
    # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
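
# Illustrative usage (the installer uses this pattern before destructive steps,
# with 'y' as the default answer):
#   ask_user "Do you want to proceed (Y/n)? " y || exit 1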

function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "   export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "   export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "   export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}

function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}

function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}

function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}

function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}

function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.8/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
    return 0
}

function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Controller not found, create it
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
    juju controller-config features=[k8s-operators]
}

function juju_addk8s() {
    cat .kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
}

function juju_createproxy() {
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
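
# Illustrative check (run manually): the DNAT rule added by juju_createproxy can be
# inspected with:
#   sudo iptables -t nat -L PREROUTING -n | grep 17070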

function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$NGUI" ]; then
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull ng-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NG-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NG-UI
            git -C ${LWTEMPDIR}/NG-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL "cannot build NG-UI docker image"
        fi
    else
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
            git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
        fi
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient ; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
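
# Illustrative example (hypothetical invocation): the TO_REBUILD checks above are
# per-module greps, so a space-separated list passed via -m should rebuild just
# those images, assuming the option parser stores the argument verbatim:
#   ./full_install_osm.sh -m "NBI LCM"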

function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
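
# Illustrative usage (hypothetical paths): install a template only when it differs,
# asking before overwriting an existing file:
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml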

function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
        [ -n "$NGUI" ] && $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pods/ng-ui.yaml $OSM_K8S_WORK_DIR/ng-ui.yaml && $WORKDIR_SUDO rm $OSM_K8S_WORK_DIR/light-ui.yaml
    else
        if [ -n "$NGUI" ]; then
            # For NG-UI
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        else
            # Docker-compose
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        fi
        if [ -n "$INSTALL_PLA" ]; then
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
        fi

        # Prometheus files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

        # Grafana files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

        # Prometheus Exporters files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}

function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}

#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
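
# Illustrative check (run manually): confirm the pinned 1.15.0 tool versions were
# installed:
#   kubectl version --client && kubeadm version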

#initializes kubernetes control plane
function init_kubeadm() {
    sudo swapoff -a
    sudo kubeadm init --config $1
    sleep 5
}

function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}

function install_k8s_storageclass() {
    kubectl apply -f https://openebs.github.io/charts/openebs-operator-1.6.0.yaml
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}
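
# Illustrative check (run manually): after the patch above, openebs-hostpath should
# be flagged as the default StorageClass:
#   kubectl get storageclass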

#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}

#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}

#removes the NoSchedule taint from the K8s master node, so that pods can also be scheduled on it
function taint_master_node() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}

#deploys osm pods and services
function deploy_osm_services() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}

function deploy_osm_pla_service() {
    # corresponding to parse_yaml
    [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

#Install helm and tiller
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has been configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120
        counter=0
        tiller_status=""
        while (( counter < tiller_timeout ))
        do
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
            counter=$((counter + 5))
            sleep 5
        done
        [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
    fi
}

function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
    TAG=$1
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/$DOCKER_USER\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
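
# Illustrative effect (assuming DOCKER_USER=opensourcemano and TAG=8): a pod
# manifest line such as
#   image: opensourcemano/lcm:latest
# is rewritten by the sed above to
#   image: opensourcemano/lcm:8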

function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}

function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
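
# Illustrative check (run manually): after init, the host should report itself as a
# swarm manager (Leader):
#   docker node ls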

function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}

function deploy_lightweight() {

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}

function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}

function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
        --name _system-osm-vim \
        --account_type dummy \
        --auth_url http://dummy \
        --user osm --password osm --tenant osm \
        --description "dummy" \
        --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
        --creds ${HOME}/.kube/config \
        --vim _system-osm-vim \
        --k8s-nets '{"net1": null}' \
        --version '1.15' \
        --description "OSM Internal Cluster" \
        _system-osm-k8s
}
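
# Illustrative check (run manually; assumes the osmclient exposes k8scluster-list,
# the listing counterpart of the k8scluster-add call above):
#   osm --all-projects k8scluster-list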

function install_lightweight() {
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1. Install and configure LXD
2. Install juju
3. Install docker CE
4. Disable swap space
5. Install and initialize Kubernetes
as pre-requirements.
Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
            || sudo apt-get update \
            || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "Installing $need_packages_lw requires root privileges." \
            || sudo apt-get install -y $need_packages_lw \
            || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                OSM_VCA_CLOUDNAME="lxd-cloud"
                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
            fi
            juju_createcontroller
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    # Install Kubernetes and deploy OSM services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi
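    # At this point the container backend is ready: a kubeadm-managed
    # Kubernetes cluster when "-c k8s" was given, a docker swarm otherwise.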

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM monitoring
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        # remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ "$OSM_DOCKER_TAG" != "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        taint_master_node
        deploy_osm_services
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
        fi
        track deploy_osm_services_k8s
        install_k8s_storageclass
        track k8s_storageclass
        juju_addk8s
        track juju_addk8s
        install_helm
        track install_helm
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM monitoring
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || {
            echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
            echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all"
            track osm_unhealthy
        }
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || {
            echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
            echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}"
            track osm_unhealthy
        }
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
    track end
    return 0
}

function install_to_openstack() {

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install pip for Python 3
    $WORKDIR_SUDO apt install -y python3-pip
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Install Ansible, the OpenStack client and the OpenStack SDK
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    # Execute the Ansible playbook, based on either an openrc file or a clouds.yaml cloud name
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
            -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
            -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    return 0
}
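
# Example invocation of install_to_openstack (illustrative values only):
#   install_to_openstack ~/openrc-myproject.sh public true
# $1 is an openrc file path (sourced if it exists) or a clouds.yaml cloud
# name, $2 is the external/public network, and $3 controls volume attachment.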

function install_vimemu() {
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install -y openvswitch-switch
    # clone the vim-emu repository (attention: currently the master branch only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build the vim-emu docker image
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start the vim-emu container as a daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
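    # If the container does not come up, it can be inspected with, e.g.:
    #   sg docker -c "docker logs vim-emu"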
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "  export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "  osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}

function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}

function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}

function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "NGUI=$NGUI"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}

function track(){
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
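
# track() fires an anonymous usage beacon; a resulting URL looks like, e.g.
# (illustrative values):
#   http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=1600000000&event=lw_start&ce_duration=42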

UNINSTALL=""
DEVELOP=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseEIGHT"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_PLA=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES=""
NGUI=""
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
SESSION_ID=`date +%s`
OSM_DEVOPS=
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
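# RE_CHECK enforces RFC 1123 label syntax for k8s namespaces; e.g. "osm" and
# "my-osm2" match, while "Osm", "osm_test" and "-osm" do not.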

while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-:hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -c : '$OPTARG'\n" >&2
            usage && exit 1
            ;;
        n)
            [ "${OPTARG}" == "lwui" ] && continue
            [ "${OPTARG}" == "ngui" ] && NGUI="y" && continue
            echo -e "Invalid argument for -n : '$OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : '$OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" >&2 && exit 1
            ;;
        w)
            # when specifying the workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done

[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"

        echo "Your installation is now complete, follow these steps for configuring the osmclient:"
        echo
        echo "1. Get the NBI IP with the following command:"
        echo
        echo NBI_IP='`juju status --format json | jq -rc '"'"'.applications."nbi-k8s".address'"'"'`'
        echo
        echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP:"
        echo
        echo "export OSM_HOSTNAME=\$NBI_IP"
        echo
        echo "3. Add the previous command to your .bashrc for other shell sessions:"
        echo
        echo "echo \"export OSM_HOSTNAME=\$NBI_IP\" >> ~/.bashrc"
        echo
        echo "DONE"
    fi

    exit 0
fi

# if --develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
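# The "cmd || ! echo msg || fallback" chains above rely on "! echo" to print
# the message and still return failure, so execution falls through to the
# apt-get step only when the dpkg check fails.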
sudo snap install jq
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

. $OSM_DEVOPS/common/all_funcs

[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

# Installation starts here
wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README.txt &> /dev/null
track start

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

# Install osmclient

# Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
track end
echo -e "\nDONE"