f268618dc7c282503d26b0ba08dac117a41870ad
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
28 echo -e " ..."
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
31 echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
32 echo -e " -H <VCA host> use specific juju host controller IP"
33 echo -e " -S <VCA secret> use VCA/juju secret key"
34 echo -e " -P <VCA pubkey> use VCA/juju public key file"
35 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
36 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
37 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
38 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
41 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
42 echo -e " -O <openrc file/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
43 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
44 echo -e " -D <devops path> use local devops installation path"
45 echo -e " -w <work dir> Location to store runtime installation"
46 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
47 echo -e " -l: LXD cloud yaml file"
48 echo -e " -L: LXD credentials yaml file"
49 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
50 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
51 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
52 echo -e " --nojuju: do not juju, assumes already installed"
53 echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
54 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
55 echo -e " --nohostclient: do not install the osmclient"
56 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
57 echo -e " --source: install OSM from source code using the latest stable tag"
58 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
59 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
60 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
61 echo -e " --volume: create a VM volume when installing to OpenStack"
62 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
63 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
64 echo -e " --showopts: print chosen options and exit (only for debugging)"
65 echo -e " -y: do not prompt for confirmation, assumes yes"
66 echo -e " -h / --help: print this help"
67 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
68 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
69 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
70 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
71 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
72 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
73 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
74 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
75 echo -e " [--tag]: Docker image tag"
76
77 }
78
79 # takes a juju/accounts.yaml file and returns the password specific
80 # for a controller. I wrote this using only bash tools to minimize
81 # additions of other packages
82 function parse_juju_password {
83 password_file="${HOME}/.local/share/juju/accounts.yaml"
84 local controller_name=$1
85 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
86 sed -ne "s|^\($s\):|\1|" \
87 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
88 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
89 awk -F$fs -v controller=$controller_name '{
90 indent = length($1)/2;
91 vname[indent] = $2;
92 for (i in vname) {if (i > indent) {delete vname[i]}}
93 if (length($3) > 0) {
94 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
95 if (match(vn,controller) && match($2,"password")) {
96 printf("%s",$3);
97 }
98 }
99 }'
100 }
101
102 function generate_secret() {
103 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
104 }
105
106 function remove_volumes() {
107 if [ -n "$KUBERNETES" ]; then
108 k8_volume=$1
109 echo "Removing ${k8_volume}"
110 $WORKDIR_SUDO rm -rf ${k8_volume}
111 else
112 stack=$1
113 volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
114 for volume in $volumes; do
115 sg docker -c "docker volume rm ${stack}_${volume}"
116 done
117 fi
118 }
119
120 function remove_network() {
121 stack=$1
122 sg docker -c "docker network rm net${stack}"
123 }
124
125 function remove_iptables() {
126 stack=$1
127 if [ -z "$OSM_VCA_HOST" ]; then
128 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
129 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
130 fi
131
132 if [ -z "$DEFAULT_IP" ]; then
133 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
134 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
135 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
136 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
137 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
138 fi
139
140 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
141 sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
142 sudo netfilter-persistent save
143 fi
144 }
145
146 function remove_stack() {
147 stack=$1
148 if sg docker -c "docker stack ps ${stack}" ; then
149 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
150 COUNTER=0
151 result=1
152 while [ ${COUNTER} -lt 30 ]; do
153 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
154 #echo "Dockers running: $result"
155 if [ "${result}" == "0" ]; then
156 break
157 fi
158 let COUNTER=COUNTER+1
159 sleep 1
160 done
161 if [ "${result}" == "0" ]; then
162 echo "All dockers of the stack ${stack} were removed"
163 else
164 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
165 fi
166 sleep 5
167 fi
168 }
169
170 #removes osm deployments and services
171 function remove_k8s_namespace() {
172 kubectl delete ns $1
173 }
174
175 #removes helm only if there is nothing deployed in helm
176 function remove_helm() {
177 if [ "$(helm ls -q)" == "" ] ; then
178 sudo helm reset --force
179 kubectl delete --namespace kube-system serviceaccount tiller
180 kubectl delete clusterrolebinding tiller-cluster-rule
181 sudo rm /usr/local/bin/helm
182 rm -rf $HOME/.helm
183 fi
184 }
185
186 #Uninstall osmclient
187 function uninstall_osmclient() {
188 sudo apt-get remove --purge -y python-osmclient
189 sudo apt-get remove --purge -y python3-osmclient
190 }
191
192 #Uninstall lightweight OSM: remove dockers
193 function uninstall_lightweight() {
194 if [ -n "$INSTALL_ONLY" ]; then
195 if [ -n "$INSTALL_ELK" ]; then
196 echo -e "\nUninstalling OSM ELK stack"
197 remove_stack osm_elk
198 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
199 fi
200 else
201 echo -e "\nUninstalling OSM"
202 if [ -n "$KUBERNETES" ]; then
203 if [ -n "$INSTALL_K8S_MONITOR" ]; then
204 # uninstall OSM MONITORING
205 uninstall_k8s_monitoring
206 fi
207 remove_k8s_namespace $OSM_STACK_NAME
208 else
209 remove_stack $OSM_STACK_NAME
210 remove_stack osm_elk
211 fi
212 echo "Now osm docker images and volumes will be deleted"
213 newgrp docker << EONG
214 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
215 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
216 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
217 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
218 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
219 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
220 docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
221 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
222 EONG
223
224 if [ -n "$NGUI" ]; then
225 sg docker -c "docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
226 else
227 sg docker -c "docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
228 fi
229
230 if [ -n "$KUBERNETES" ]; then
231 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
232 remove_volumes $OSM_NAMESPACE_VOL
233 else
234 remove_volumes $OSM_STACK_NAME
235 remove_network $OSM_STACK_NAME
236 fi
237 [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
238 echo "Removing $OSM_DOCKER_WORK_DIR"
239 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
240 [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
241 fi
242 [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
243 echo "Some docker images will be kept in case they are used by other docker stacks"
244 echo "To remove them, just run 'docker image prune' in a terminal"
245 return 0
246 }
247
248 #Safe unattended install of iptables-persistent
249 function check_install_iptables_persistent(){
250 echo -e "\nChecking required packages: iptables-persistent"
251 if ! dpkg -l iptables-persistent &>/dev/null; then
252 echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
253 echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
254 echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
255 sudo apt-get -yq install iptables-persistent
256 fi
257 }
258
259 #Configure NAT rules, based on the current IP addresses of containers
260 function nat(){
261 check_install_iptables_persistent
262
263 echo -e "\nConfiguring NAT rules"
264 echo -e " Required root privileges"
265 sudo $OSM_DEVOPS/installers/nat_osm
266 }
267
268 function FATAL(){
269 echo "FATAL error: Cannot install OSM due to \"$1\""
270 exit 1
271 }
272
273 function install_lxd() {
274 # Apply sysctl production values for optimal performance
275 sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
276 sudo sysctl --system
277
278 # Install LXD snap
279 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
280 sudo snap install lxd
281 sudo apt-get install zfsutils-linux -y
282
283 # Configure LXD
284 sudo usermod -a -G lxd `whoami`
285 cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
286 sg lxd -c "lxd waitready"
287 DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
288 [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
289 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
290 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
291 sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
292 #sudo systemctl stop lxd-bridge
293 #sudo systemctl --system daemon-reload
294 #sudo systemctl enable lxd-bridge
295 #sudo systemctl start lxd-bridge
296 }
297
298 function ask_user(){
299 # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
300 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
301 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
302 read -e -p "$1" USER_CONFIRMATION
303 while true ; do
304 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
305 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
306 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
307 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
308 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
309 done
310 }
311
312 function install_osmclient(){
313 CLIENT_RELEASE=${RELEASE#"-R "}
314 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
315 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
316 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
317 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
318 curl $key_location | sudo apt-key add -
319 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
320 sudo apt-get update
321 sudo apt-get install -y python3-pip
322 sudo -H LC_ALL=C python3 -m pip install -U pip
323 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
324 sudo apt-get install -y python3-osm-im python3-osmclient
325 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
326 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
327 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
328 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
329 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
330 echo -e "\nOSM client installed"
331 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
332 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
333 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
334 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
335 else
336 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
337 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
338 echo " export OSM_HOSTNAME=<OSM_host>"
339 fi
340 return 0
341 }
342
343 function install_prometheus_nodeexporter(){
344 if (systemctl -q is-active node_exporter)
345 then
346 echo "Node Exporter is already running."
347 else
348 echo "Node Exporter is not active, installing..."
349 if getent passwd node_exporter > /dev/null 2>&1; then
350 echo "node_exporter user exists"
351 else
352 echo "Creating user node_exporter"
353 sudo useradd --no-create-home --shell /bin/false node_exporter
354 fi
355 wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
356 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
357 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
358 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
359 sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
360 sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
361 sudo systemctl daemon-reload
362 sudo systemctl restart node_exporter
363 sudo systemctl enable node_exporter
364 echo "Node Exporter has been activated in this host."
365 fi
366 return 0
367 }
368
369 function uninstall_prometheus_nodeexporter(){
370 sudo systemctl stop node_exporter
371 sudo systemctl disable node_exporter
372 sudo rm /etc/systemd/system/node_exporter.service
373 sudo systemctl daemon-reload
374 sudo userdel node_exporter
375 sudo rm /usr/local/bin/node_exporter
376 return 0
377 }
378
379 function install_docker_ce() {
380 # installs and configures Docker CE
381 echo "Installing Docker CE ..."
382 sudo apt-get -qq update
383 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
384 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
385 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
386 sudo apt-get -qq update
387 sudo apt-get install -y docker-ce
388 echo "Adding user to group 'docker'"
389 sudo groupadd -f docker
390 sudo usermod -aG docker $USER
391 sleep 2
392 sudo service docker restart
393 echo "... restarted Docker service"
394 sg docker -c "docker version" || FATAL "Docker installation failed"
395 echo "... Docker CE installation done"
396 return 0
397 }
398
399 function install_docker_compose() {
400 # installs and configures docker-compose
401 echo "Installing Docker Compose ..."
402 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
403 sudo chmod +x /usr/local/bin/docker-compose
404 echo "... Docker Compose installation done"
405 }
406
407 function install_juju() {
408 echo "Installing juju"
409 sudo snap install juju --classic --channel=2.8/stable
410 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
411 echo "Finished installation of juju"
412 return 0
413 }
414
415 function juju_createcontroller() {
416 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
417 # Not found created, create the controller
418 sudo usermod -a -G lxd ${USER}
419 sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
420 fi
421 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
422 juju controller-config features=[k8s-operators]
423 }
424
425 function juju_addk8s() {
426 cat .kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
427 }
428
429 function juju_createproxy() {
430 check_install_iptables_persistent
431
432 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
433 sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
434 sudo netfilter-persistent save
435 fi
436 }
437
438 function generate_docker_images() {
439 echo "Pulling and generating docker images"
440 _build_from=$COMMIT_ID
441 [ -z "$_build_from" ] && _build_from="master"
442
443 echo "OSM Docker images generated from $_build_from"
444
445 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
446 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
447 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
448 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
449
450 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
451 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
452 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
453 fi
454
455 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
456 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
457 fi
458
459 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
460 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
461 fi
462
463 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
464 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
465 fi
466
467 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
468 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
469 fi
470
471 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
472 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
473 fi
474
475 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
476 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
477 fi
478
479 if [ -n "$PULL_IMAGES" ]; then
480 sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
481 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
482 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
483 git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
484 sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
485 fi
486
487 if [ -n "$PULL_IMAGES" ]; then
488 sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
489 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
490 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
491 git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
492 sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
493 fi
494
495 if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
496 sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
497 elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
498 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
499 git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
500 sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
501 fi
502
503 if [ -n "$PULL_IMAGES" ]; then
504 sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
505 sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
506 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
507 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
508 git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
509 sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
510 sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
511 fi
512
513 if [ -n "$PULL_IMAGES" ]; then
514 sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
515 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
516 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
517 git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
518 sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
519 fi
520
521 if [ -n "$PULL_IMAGES" ]; then
522 sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
523 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
524 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
525 git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
526 sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
527 fi
528
529 if [ -n "$NGUI" ]; then
530 if [ -n "$PULL_IMAGES" ]; then
531 sg docker -c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull ng-ui docker image"
532 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NG-UI ; then
533 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NG-UI
534 git -C ${LWTEMPDIR}/NG-UI checkout ${COMMIT_ID}
535 sg docker -c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL "cannot build NG-UI docker image"
536 fi
537 else
538 if [ -n "$PULL_IMAGES" ]; then
539 sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
540 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
541 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
542 git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
543 sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
544 fi
545 fi
546
547 if [ -n "$PULL_IMAGES" ]; then
548 sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
549 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
550 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
551 fi
552
553 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
554 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
555 fi
556
557 echo "Finished generation of docker images"
558 }
559
560 function cmp_overwrite() {
561 file1="$1"
562 file2="$2"
563 if ! $(cmp "${file1}" "${file2}" >/dev/null 2>&1); then
564 if [ -f "${file2}" ]; then
565 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
566 else
567 cp -b ${file1} ${file2}
568 fi
569 fi
570 }
571
572 function generate_docker_env_files() {
573 echo "Doing a backup of existing env files"
574 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
575 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
576 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
577 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
578 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
579 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
580 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
581 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
582 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
583
584 echo "Generating docker env files"
585 if [ -n "$KUBERNETES" ]; then
586 #Kubernetes resources
587 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
588 [ -n "$NGUI" ] && $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pods/ng-ui.yaml $OSM_K8S_WORK_DIR/ng-ui.yaml && $WORKDIR_SUDO rm $OSM_K8S_WORK_DIR/light-ui.yaml
589 else
590 if [ -n "$NGUI" ]; then
591 # For NG-UI
592 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
593 else
594 # Docker-compose
595 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
596 fi
597 if [ -n "$INSTALL_PLA" ]; then
598 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
599 fi
600
601 # Prometheus files
602 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
603 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
604
605 # Grafana files
606 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
607 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
608 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
609 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
610 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
611
612 # Prometheus Exporters files
613 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
614 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
615 fi
616
617 # LCM
618 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
619 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
620 fi
621
622 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
623 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
624 else
625 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
626 fi
627
628 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
629 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
630 else
631 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
632 fi
633
634 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
635 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
636 else
637 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
638 fi
639
640 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
641 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
642 else
643 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
644 fi
645
646 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
647 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
648 else
649 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
650 fi
651
652 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
653 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
654 fi
655
656 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
657 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
658 fi
659
660 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
661 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
662 else
663 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
664 fi
665
666 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
667 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
668 else
669 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
670 fi
671
672 # RO
673 MYSQL_ROOT_PASSWORD=$(generate_secret)
674 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
675 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
676 fi
677 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
678 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
679 fi
680
681 # Keystone
682 KEYSTONE_DB_PASSWORD=$(generate_secret)
683 SERVICE_PASSWORD=$(generate_secret)
684 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
685 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
686 fi
687 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
688 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
689 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
690 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
691 fi
692
693 # NBI
694 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
695 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
696 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
697 fi
698
699 # MON
700 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
701 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
702 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
703 fi
704
705 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
706 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
707 else
708 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
709 fi
710
711 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
712 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
713 else
714 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
715 fi
716
717 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
718 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
719 else
720 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
721 fi
722
723 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
724 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
725 else
726 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
727 fi
728
729
730 # POL
731 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
732 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
733 fi
734
735 # LW-UI
736 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
737 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
738 fi
739
740 echo "Finished generation of docker env files"
741 }
742
743 function generate_osmclient_script () {
744 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
745 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
746 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
747 }
748
749 #installs kubernetes packages
750 function install_kube() {
751 sudo apt-get update && sudo apt-get install -y apt-transport-https
752 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
753 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
754 sudo apt-get update
755 echo "Installing Kubernetes Packages ..."
756 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
757 }
758
759 #initializes kubernetes control plane
760 function init_kubeadm() {
761 sudo swapoff -a
762 sudo kubeadm init --config $1
763 sleep 5
764 }
765
766 function kube_config_dir() {
767 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
768 mkdir -p $HOME/.kube
769 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
770 sudo chown $(id -u):$(id -g) $HOME/.kube/config
771 }
772
773 function install_k8s_storageclass() {
774 kubectl apply -f https://openebs.github.io/charts/openebs-operator-1.6.0.yaml
775 kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
776 }
777
778 #deploys flannel as daemonsets
779 function deploy_cni_provider() {
780 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
781 trap 'rm -rf "${CNI_DIR}"' EXIT
782 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
783 kubectl apply -f $CNI_DIR
784 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
785 }
786
787 #creates secrets from env files which will be used by containers
788 function kube_secrets(){
789 kubectl create ns $OSM_STACK_NAME
790 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
791 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
792 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
793 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
794 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
795 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
796 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
797 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
798 }
799
800 #taints K8s master node
801 function taint_master_node() {
802 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
803 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
804 sleep 5
805 }
806
807 #deploys osm pods and services
808 function deploy_osm_services() {
809 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
810 }
811
812 function deploy_osm_pla_service() {
813 # corresponding to parse_yaml
814 [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
815 # corresponding to namespace_vol
816 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
817 # corresponding to deploy_osm_services
818 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
819 }
820
821 #Install helm and tiller
822 function install_helm() {
823 helm > /dev/null 2>&1
824 if [ $? != 0 ] ; then
825 # Helm is not installed. Install helm
826 echo "Helm is not installed, installing ..."
827 curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
828 tar -zxvf helm-v2.15.2.tar.gz
829 sudo mv linux-amd64/helm /usr/local/bin/helm
830 rm -r linux-amd64
831 rm helm-v2.15.2.tar.gz
832 fi
833
834 # Checking if tiller has being configured
835 kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
836 if [ $? == 1 ] ; then
837 # tiller account for kubernetes
838 kubectl --namespace kube-system create serviceaccount tiller
839 kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
840 # HELM initialization
841 helm init --service-account tiller
842
843 # Wait for Tiller to be up and running. If timeout expires, continue installing
844 tiller_timeout=120;
845 counter=0;
846 tiller_status=""
847 while (( counter < tiller_timeout ))
848 do
849 tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2'}`
850 ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
851 counter=$((counter + 5))
852 sleep 5
853 done
854 [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
855 fi
856 }
857
858 function parse_yaml() {
859 osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
860 TAG=$1
861 for osm in $osm_services; do
862 $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/$DOCKER_USER\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
863 done
864 }
865
866 function namespace_vol() {
867 osm_services="nbi lcm ro pol mon kafka mongo mysql"
868 for osm in $osm_services; do
869 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
870 done
871 }
872
873 function init_docker_swarm() {
874 if [ "${DEFAULT_MTU}" != "1500" ]; then
875 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
876 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
877 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
878 fi
879 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
880 return 0
881 }
882
883 function create_docker_network() {
884 echo "creating network"
885 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
886 echo "creating network DONE"
887 }
888
889 function deploy_lightweight() {
890
891 echo "Deploying lightweight build"
892 OSM_NBI_PORT=9999
893 OSM_RO_PORT=9090
894 OSM_KEYSTONE_PORT=5000
895 OSM_UI_PORT=80
896 OSM_MON_PORT=8662
897 OSM_PROM_PORT=9090
898 OSM_PROM_CADVISOR_PORT=8080
899 OSM_PROM_HOSTPORT=9091
900 OSM_GRAFANA_PORT=3000
901 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
902 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
903
904 if [ -n "$NO_HOST_PORTS" ]; then
905 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
906 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
907 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
908 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
909 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
910 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
911 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
912 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
913 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
914 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
915 else
916 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
917 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
918 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
919 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
920 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
921 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
922 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
923 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
924 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
925 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
926 fi
927 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
928 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
929 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
930 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
931 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
932 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
933 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
934 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
935 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
936
937 pushd $OSM_DOCKER_WORK_DIR
938 if [ -n "$INSTALL_PLA" ]; then
939 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
940 else
941 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
942 fi
943 popd
944
945 echo "Finished deployment of lightweight build"
946 }
947
948 function deploy_elk() {
949 echo "Pulling docker images for ELK"
950 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
951 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
952 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
953 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
954 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
955 echo "Finished pulling elk docker images"
956 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
957 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
958 remove_stack osm_elk
959 echo "Deploying ELK stack"
960 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
961 echo "Waiting for ELK stack to be up and running"
962 time=0
963 step=5
964 timelength=40
965 elk_is_up=1
966 while [ $time -le $timelength ]; do
967 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
968 elk_is_up=0
969 break
970 fi
971 sleep $step
972 time=$((time+step))
973 done
974 if [ $elk_is_up -eq 0 ]; then
975 echo "ELK is up and running. Trying to create index pattern..."
976 #Create index pattern
977 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
978 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
979 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
980 #Make it the default index
981 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
982 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
983 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
984 else
985 echo "Cannot connect to Kibana to create index pattern."
986 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
987 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
988 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
989 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
990 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
991 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
992 -d"{\"value\":\"filebeat-*\"}"'
993 fi
994 echo "Finished deployment of ELK stack"
995 return 0
996 }
997
998 function add_local_k8scluster() {
999 /usr/bin/osm --all-projects vim-create \
1000 --name _system-osm-vim \
1001 --account_type dummy \
1002 --auth_url http://dummy \
1003 --user osm --password osm --tenant osm \
1004 --description "dummy" \
1005 --config '{management_network_name: mgmt}'
1006 /usr/bin/osm --all-projects k8scluster-add \
1007 --creds ${HOME}/.kube/config \
1008 --vim _system-osm-vim \
1009 --k8s-nets '{"net1": null}' \
1010 --version '1.15' \
1011 --description "OSM Internal Cluster" \
1012 _system-osm-k8s
1013 }
1014
1015 function install_lightweight() {
1016 track checkingroot
1017 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1018 track noroot
1019
1020 if [ -n "$KUBERNETES" ]; then
1021 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1022 1. Install and configure LXD
1023 2. Install juju
1024 3. Install docker CE
1025 4. Disable swap space
1026 5. Install and initialize Kubernetes
1027 as pre-requirements.
1028 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1029
1030 else
1031 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1032 fi
1033 track proceed
1034
1035 echo "Installing lightweight build of OSM"
1036 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1037 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1038 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1039 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1040 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1041 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1042 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1043 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1044
1045 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
1046 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1047 need_packages_lw="snapd"
1048 echo -e "Checking required packages: $need_packages_lw"
1049 dpkg -l $need_packages_lw &>/dev/null \
1050 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1051 || sudo apt-get update \
1052 || FATAL "failed to run apt-get update"
1053 dpkg -l $need_packages_lw &>/dev/null \
1054 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1055 || sudo apt-get install -y $need_packages_lw \
1056 || FATAL "failed to install $need_packages_lw"
1057 install_lxd
1058 fi
1059
1060 track prereqok
1061
1062 [ -z "$INSTALL_NOJUJU" ] && install_juju
1063 track juju_install
1064
1065 if [ -z "$OSM_VCA_HOST" ]; then
1066 if [ -z "$CONTROLLER_NAME" ]; then
1067 if [ -n "$LXD_CLOUD_FILE" ]; then
1068 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1069 OSM_VCA_CLOUDNAME="lxd-cloud"
1070 juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
1071 juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
1072 fi
1073 juju_createcontroller
1074 else
1075 OSM_VCA_CLOUDNAME="lxd-cloud"
1076 if [ -n "$LXD_CLOUD_FILE" ]; then
1077 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1078 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
1079 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
1080 else
1081 mkdir -p ~/.osm
1082 cat << EOF > ~/.osm/lxd-cloud.yaml
1083 clouds:
1084 lxd-cloud:
1085 type: lxd
1086 auth-types: [certificate]
1087 endpoint: "https://$DEFAULT_IP:8443"
1088 config:
1089 ssl-hostname-verification: false
1090 EOF
1091 openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
1092 local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
1093 local client_cert=`cat ~/.osm/client.crt | sed 's/^/ /'`
1094 local client_key=`cat ~/.osm/client.key | sed 's/^/ /'`
1095 cat << EOF > ~/.osm/lxd-credentials.yaml
1096 credentials:
1097 lxd-cloud:
1098 lxd-cloud:
1099 auth-type: certificate
1100 server-cert: |
1101 $server_cert
1102 client-cert: |
1103 $client_cert
1104 client-key: |
1105 $client_key
1106 EOF
1107 lxc config trust add local: ~/.osm/client.crt
1108 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
1109 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
1110 fi
1111 fi
1112 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1113 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1114 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
1115 fi
1116 track juju_controller
1117
1118 if [ -z "$OSM_VCA_SECRET" ]; then
1119 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
1120 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
1121 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
1122 fi
1123 if [ -z "$OSM_VCA_PUBKEY" ]; then
1124 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
1125 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
1126 fi
1127 if [ -z "$OSM_VCA_CACERT" ]; then
1128 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1129 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1130 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
1131 fi
1132 if [ -z "$OSM_VCA_APIPROXY" ]; then
1133 OSM_VCA_APIPROXY=$DEFAULT_IP
1134 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
1135 fi
1136 juju_createproxy
1137 track juju
1138
1139 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1140 OSM_DATABASE_COMMONKEY=$(generate_secret)
1141 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
1142 fi
1143
1144 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
1145 track docker_ce
1146
1147 echo "Creating folders for installation"
1148 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1149 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
1150 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1151
1152 #Installs Kubernetes and deploys osm services
1153 if [ -n "$KUBERNETES" ]; then
1154 install_kube
1155 track install_k8s
1156 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1157 kube_config_dir
1158 track init_k8s
1159 else
1160 #install_docker_compose
1161 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1162 track docker_swarm
1163 fi
1164
1165 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1166 track docker_build
1167
1168 generate_docker_env_files
1169
1170 if [ -n "$KUBERNETES" ]; then
1171 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1172 # uninstall OSM MONITORING
1173 uninstall_k8s_monitoring
1174 track uninstall_k8s_monitoring
1175 fi
1176 #remove old namespace
1177 remove_k8s_namespace $OSM_STACK_NAME
1178 deploy_cni_provider
1179 kube_secrets
1180 [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
1181 namespace_vol
1182 taint_master_node
1183 deploy_osm_services
1184 if [ -n "$INSTALL_PLA"]; then
1185 # optional PLA install
1186 deploy_osm_pla_service
1187 fi
1188 track deploy_osm_services_k8s
1189 install_k8s_storageclass
1190 track k8s_storageclass
1191 juju_addk8s
1192 track juju_addk8s
1193 install_helm
1194 track install_helm
1195 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1196 # install OSM MONITORING
1197 install_k8s_monitoring
1198 track install_k8s_monitoring
1199 fi
1200 else
1201 # remove old stack
1202 remove_stack $OSM_STACK_NAME
1203 create_docker_network
1204 deploy_lightweight
1205 generate_osmclient_script
1206 track docker_deploy
1207 install_prometheus_nodeexporter
1208 track nodeexporter
1209 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1210 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1211 fi
1212
1213 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1214 track osmclient
1215
1216 echo -e "Checking OSM health state..."
1217 if [ -n "$KUBERNETES" ]; then
1218 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
1219 echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
1220 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
1221 track osm_unhealthy
1222 else
1223 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
1224 echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
1225 echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
1226 track osm_unhealthy
1227 fi
1228 track after_healthcheck
1229
1230 [ -n "$KUBERNETES" ] && add_local_k8scluster
1231 track add_local_k8scluster
1232
1233 wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
1234 track end
1235 return 0
1236 }
1237
1238 function install_to_openstack() {
1239
1240 if [ -z "$2" ]; then
1241 FATAL "OpenStack installer requires a valid external network name"
1242 fi
1243
1244 # Install Pip for Python3
1245 $WORKDIR_SUDO apt install -y python3-pip
1246 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip
1247
1248 # Install Ansible, OpenStack client and SDK
1249 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"
1250
1251 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
1252
1253 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
1254
1255 # Execute the Ansible playbook based on openrc or clouds.yaml
1256 if [ -e "$1" ]; then
1257 . $1
1258 ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
1259 -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
1260 else
1261 ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
1262 -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
1263 fi
1264
1265 return 0
1266 }
1267
1268 function install_vimemu() {
1269 echo "\nInstalling vim-emu"
1270 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1271 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1272 # install prerequisites (OVS is a must for the emulator to work)
1273 sudo apt-get install openvswitch-switch
1274 # clone vim-emu repository (attention: branch is currently master only)
1275 echo "Cloning vim-emu repository ..."
1276 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1277 # build vim-emu docker
1278 echo "Building vim-emu Docker container..."
1279
1280 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1281 # start vim-emu container as daemon
1282 echo "Starting vim-emu Docker container 'vim-emu' ..."
1283 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1284 # in lightweight mode, the emulator needs to be attached to netOSM
1285 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1286 else
1287 # classic build mode
1288 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1289 fi
1290 echo "Waiting for 'vim-emu' container to start ..."
1291 sleep 5
1292 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1293 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1294 # print vim-emu connection info
1295 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1296 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1297 echo -e "To add the emulated VIM to OSM you should do:"
1298 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1299 }
1300
1301 function install_k8s_monitoring() {
1302 # install OSM monitoring
1303 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1304 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1305 }
1306
1307 function uninstall_k8s_monitoring() {
1308 # uninstall OSM monitoring
1309 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1310 }
1311
1312 function dump_vars(){
1313 echo "DEVELOP=$DEVELOP"
1314 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1315 echo "UNINSTALL=$UNINSTALL"
1316 echo "UPDATE=$UPDATE"
1317 echo "RECONFIGURE=$RECONFIGURE"
1318 echo "TEST_INSTALLER=$TEST_INSTALLER"
1319 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1320 echo "INSTALL_PLA=$INSTALL_PLA"
1321 echo "INSTALL_LXD=$INSTALL_LXD"
1322 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1323 echo "INSTALL_ONLY=$INSTALL_ONLY"
1324 echo "INSTALL_ELK=$INSTALL_ELK"
1325 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1326 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
1327 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
1328 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
1329 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
1330 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
1331 echo "TO_REBUILD=$TO_REBUILD"
1332 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1333 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1334 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1335 echo "RELEASE=$RELEASE"
1336 echo "REPOSITORY=$REPOSITORY"
1337 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1338 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1339 echo "OSM_DEVOPS=$OSM_DEVOPS"
1340 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1341 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1342 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1343 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1344 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1345 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1346 echo "OSM_WORK_DIR=$OSM_STACK_NAME"
1347 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1348 echo "DOCKER_USER=$DOCKER_USER"
1349 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1350 echo "PULL_IMAGES=$PULL_IMAGES"
1351 echo "KUBERNETES=$KUBERNETES"
1352 echo "NGUI=$NGUI"
1353 echo "SHOWOPTS=$SHOWOPTS"
1354 echo "Install from specific refspec (-b): $COMMIT_ID"
1355 }
1356
1357 function track(){
1358 ctime=`date +%s`
1359 duration=$((ctime - SESSION_ID))
1360 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1361 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1362 event_name="bin"
1363 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1364 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1365 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1366 event_name="${event_name}_$1"
1367 url="${url}&event=${event_name}&ce_duration=${duration}"
1368 wget -q -O /dev/null $url
1369 }
1370
1371 UNINSTALL=""
1372 DEVELOP=""
1373 UPDATE=""
1374 RECONFIGURE=""
1375 TEST_INSTALLER=""
1376 INSTALL_LXD=""
1377 SHOWOPTS=""
1378 COMMIT_ID=""
1379 ASSUME_YES=""
1380 INSTALL_FROM_SOURCE=""
1381 RELEASE="ReleaseEIGHT"
1382 REPOSITORY="stable"
1383 INSTALL_VIMEMU=""
1384 INSTALL_PLA=""
1385 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1386 LXD_REPOSITORY_PATH=""
1387 INSTALL_LIGHTWEIGHT="y"
1388 INSTALL_TO_OPENSTACK=""
1389 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
1390 OPENSTACK_PUBLIC_NET_NAME=""
1391 OPENSTACK_ATTACH_VOLUME="false"
1392 INSTALL_ONLY=""
1393 INSTALL_ELK=""
1394 TO_REBUILD=""
1395 INSTALL_NOLXD=""
1396 INSTALL_NODOCKER=""
1397 INSTALL_NOJUJU=""
1398 KUBERNETES=""
1399 NGUI=""
1400 INSTALL_K8S_MONITOR=""
1401 INSTALL_NOHOSTCLIENT=""
1402 SESSION_ID=`date +%s`
1403 OSM_DEVOPS=
1404 OSM_VCA_HOST=
1405 OSM_VCA_SECRET=
1406 OSM_VCA_PUBKEY=
1407 OSM_VCA_CLOUDNAME="localhost"
1408 OSM_VCA_K8S_CLOUDNAME="k8scloud"
1409 OSM_STACK_NAME=osm
1410 NO_HOST_PORTS=""
1411 DOCKER_NOBUILD=""
1412 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1413 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1414 WORKDIR_SUDO=sudo
1415 OSM_WORK_DIR="/etc/osm"
1416 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1417 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1418 OSM_HOST_VOL="/var/lib/osm"
1419 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1420 OSM_DOCKER_TAG=latest
1421 DOCKER_USER=opensourcemano
1422 PULL_IMAGES="y"
1423 KAFKA_TAG=2.11-1.0.2
1424 PROMETHEUS_TAG=v2.4.3
1425 GRAFANA_TAG=latest
1426 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1427 PROMETHEUS_CADVISOR_TAG=latest
1428 KEYSTONEDB_TAG=10
1429 OSM_DATABASE_COMMONKEY=
1430 ELASTIC_VERSION=6.4.2
1431 ELASTIC_CURATOR_VERSION=5.5.4
1432 POD_NETWORK_CIDR=10.244.0.0/16
1433 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1434 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1435
1436 while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-: hy" o; do
1437 case "${o}" in
1438 b)
1439 COMMIT_ID=${OPTARG}
1440 PULL_IMAGES=""
1441 ;;
1442 r)
1443 REPOSITORY="${OPTARG}"
1444 REPO_ARGS+=(-r "$REPOSITORY")
1445 ;;
1446 c)
1447 [ "${OPTARG}" == "swarm" ] && continue
1448 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1449 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1450 usage && exit 1
1451 ;;
1452 n)
1453 [ "${OPTARG}" == "lwui" ] && continue
1454 [ "${OPTARG}" == "ngui" ] && NGUI="y" && continue
1455 echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
1456 usage && exit 1
1457 ;;
1458 k)
1459 REPOSITORY_KEY="${OPTARG}"
1460 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1461 ;;
1462 u)
1463 REPOSITORY_BASE="${OPTARG}"
1464 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1465 ;;
1466 R)
1467 RELEASE="${OPTARG}"
1468 REPO_ARGS+=(-R "$RELEASE")
1469 ;;
1470 D)
1471 OSM_DEVOPS="${OPTARG}"
1472 ;;
1473 o)
1474 INSTALL_ONLY="y"
1475 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1476 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1477 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1478 ;;
1479 O)
1480 INSTALL_TO_OPENSTACK="y"
1481 if [ -n "${OPTARG}" ]; then
1482 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
1483 else
1484 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
1485 usage && exit 1
1486 fi
1487 ;;
1488 N)
1489 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
1490 ;;
1491 m)
1492 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1493 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1494 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1495 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1496 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1497 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1498 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1499 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1500 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1501 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1502 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1503 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1504 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1505 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1506 ;;
1507 H)
1508 OSM_VCA_HOST="${OPTARG}"
1509 ;;
1510 S)
1511 OSM_VCA_SECRET="${OPTARG}"
1512 ;;
1513 s)
1514 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1515 ;;
1516 w)
1517 # when specifying workdir, do not use sudo for access
1518 WORKDIR_SUDO=
1519 OSM_WORK_DIR="${OPTARG}"
1520 ;;
1521 t)
1522 OSM_DOCKER_TAG="${OPTARG}"
1523 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
1524 ;;
1525 U)
1526 DOCKER_USER="${OPTARG}"
1527 ;;
1528 P)
1529 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1530 ;;
1531 A)
1532 OSM_VCA_APIPROXY="${OPTARG}"
1533 ;;
1534 l)
1535 LXD_CLOUD_FILE="${OPTARG}"
1536 ;;
1537 L)
1538 LXD_CRED_FILE="${OPTARG}"
1539 ;;
1540 K)
1541 CONTROLLER_NAME="${OPTARG}"
1542 ;;
1543 -)
1544 [ "${OPTARG}" == "help" ] && usage && exit 0
1545 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1546 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1547 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1548 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1549 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1550 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1551 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1552 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1553 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1554 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1555 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1556 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1557 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1558 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1559 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1560 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1561 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1562 [ "${OPTARG}" == "pullimages" ] && continue
1563 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1564 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1565 [ "${OPTARG}" == "bundle" ] && continue
1566 [ "${OPTARG}" == "k8s" ] && continue
1567 [ "${OPTARG}" == "lxd" ] && continue
1568 [ "${OPTARG}" == "lxd-cred" ] && continue
1569 [ "${OPTARG}" == "microstack" ] && continue
1570 [ "${OPTARG}" == "ha" ] && continue
1571 [ "${OPTARG}" == "tag" ] && continue
1572 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1573 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
1574 echo -e "Invalid option: '--$OPTARG'\n" >&2
1575 usage && exit 1
1576 ;;
1577 :)
1578 echo "Option -$OPTARG requires an argument" >&2
1579 usage && exit 1
1580 ;;
1581 \?)
1582 echo -e "Invalid option: '-$OPTARG'\n" >&2
1583 usage && exit 1
1584 ;;
1585 h)
1586 usage && exit 0
1587 ;;
1588 y)
1589 ASSUME_YES="y"
1590 ;;
1591 *)
1592 usage && exit 1
1593 ;;
1594 esac
1595 done
1596
1597 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1598 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1599
1600 if [ -n "$SHOWOPTS" ]; then
1601 dump_vars
1602 exit 0
1603 fi
1604
1605 if [ -n "$CHARMED" ]; then
1606 if [ -n "$UNINSTALL" ]; then
1607 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
1608 else
1609 ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
1610
1611 echo "Your installation is now complete, follow these steps for configuring the osmclient:"
1612 echo
1613 echo "1. Get the NBI IP with the following command:"
1614 echo
1615 echo NBI_IP='`juju status --format json | jq -rc '"'"'.applications."nbi-k8s".address'"'"'`'
1616 echo
1617 echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
1618 echo
1619 echo "export OSM_HOSTNAME=\$NBI_IP"
1620 echo
1621 echo "3. Add the previous command to your .bashrc for other Shell sessions"
1622 echo
1623 echo "echo \"export OSM_HOSTNAME=\$NBI_IP\" >> ~/.bashrc"
1624 echo
1625 echo "DONE"
1626 fi
1627
1628 exit 0
1629 fi
1630
1631 # if develop, we force master
1632 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1633
1634 need_packages="git wget curl tar"
1635
1636 [ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0
1637
1638 echo -e "Checking required packages: $need_packages"
1639 dpkg -l $need_packages &>/dev/null \
1640 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1641 || sudo apt-get update \
1642 || FATAL "failed to run apt-get update"
1643 dpkg -l $need_packages &>/dev/null \
1644 || ! echo -e "Installing $need_packages requires root privileges." \
1645 || sudo apt-get install -y $need_packages \
1646 || FATAL "failed to install $need_packages"
1647 sudo snap install jq
1648 if [ -z "$OSM_DEVOPS" ]; then
1649 if [ -n "$TEST_INSTALLER" ]; then
1650 echo -e "\nUsing local devops repo for OSM installation"
1651 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1652 else
1653 echo -e "\nCreating temporary dir for OSM installation"
1654 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1655 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1656
1657 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1658
1659 if [ -z "$COMMIT_ID" ]; then
1660 echo -e "\nGuessing the current stable release"
1661 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1662 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1663
1664 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1665 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1666 else
1667 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1668 fi
1669 git -C $OSM_DEVOPS checkout $COMMIT_ID
1670 fi
1671 fi
1672
1673 . $OSM_DEVOPS/common/all_funcs
1674
1675 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1676 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1677 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1678 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1679 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1680 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1681 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1682 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1683
1684 #Installation starts here
1685 wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README.txt &> /dev/null
1686 track start
1687
1688 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1689 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1690 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1691 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1692 fi
1693
1694 echo -e "Checking required packages: lxd"
1695 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1696 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1697
1698 # use local devops for containers
1699 export OSM_USE_LOCAL_DEVOPS=true
1700
1701 #Install osmclient
1702
1703 #Install vim-emu (optional)
1704 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1705
1706 wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
1707 track end
1708 echo -e "\nDONE"
1709
1710