Feature 9912: Kubernetes VCA in K8s installation
osm/devops.git: installers/full_install_osm.sh
#!/bin/bash
# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -r <repo>: use specified repository name for osm packages"
    echo -e "     -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>: use specified repository public key url"
    echo -e "     -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "        -b master          (main dev branch)"
    echo -e "        -b v2.0            (v2.0 branch)"
    echo -e "        -b tags/v1.1.0     (a specific tag)"
    echo -e "        ..."
    echo -e "     -c <orchestrator>: deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -n <ui>: install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified, osm will be installed with light-ui. When used with --uninstall, osm along with the specified UI will be uninstalled"
    echo -e "     -s <stack name> or <namespace>: user-defined stack name when installed using swarm, or namespace when installed using k8s; default is osm"
    echo -e "     -H <VCA host>: use specific juju host controller IP"
    echo -e "     -S <VCA secret>: use VCA/juju secret key"
    echo -e "     -P <VCA pubkey>: use VCA/juju public key file"
    echo -e "     -C <VCA cacert>: use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy>: use VCA/juju API proxy"
    echo -e "     --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e "     --pla: install the PLA module for placement support"
    echo -e "     -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
    echo -e "     -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "     -O <openrc file/cloud name>: install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e "     -N <openstack public network name/ID>: public network name required to set up OSM on OpenStack"
    echo -e "     -D <devops path>: use local devops installation path"
    echo -e "     -w <work dir>: location to store runtime installation"
    echo -e "     -t <docker tag>: specify osm docker tag (default is latest)"
    echo -e "     -l: LXD cloud yaml file"
    echo -e "     -L: LXD credentials yaml file"
    echo -e "     -K: specifies the name of the controller to use - the controller must be already bootstrapped"
    echo -e "     --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "     --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju: do not install juju, assumes it is already installed"
    echo -e "     --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source: install OSM from source code using the latest stable tag"
    echo -e "     --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e "     --volume: create a VM volume when installing to OpenStack"
#    echo -e "     --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
#    echo -e "     --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts: print chosen options and exit (only for debugging)"
    echo -e "     -y: do not prompt for confirmation, assumes yes"
    echo -e "     -h / --help: print this help"
    echo -e "     --charmed: deploy and operate OSM with Charms on k8s"
    echo -e "     [--bundle <bundle path>]: specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "     [--k8s <kubeconfig path>]: specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "     [--vca <name>]: specifies the name of the controller to use - the controller must be already bootstrapped (--charmed option)"
    echo -e "     [--lxd <yaml path>]: takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "     [--lxd-cred <yaml path>]: takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "     [--microstack]: installs microstack as a vim (--charmed option)"
    echo -e "     [--ha]: installs High Availability bundle (--charmed option)"
    echo -e "     [--tag]: docker image tag"

}
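
# Example invocations (illustrative only; all flags are documented in usage() above):
#   ./full_install_osm.sh                      # default install from binaries
#   ./full_install_osm.sh -c k8s -n ngui       # deploy on Kubernetes with the Next Gen UI
#   ./full_install_osm.sh -c k8s --uninstall   # remove an OSM instance deployed on k8s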

# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
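
# Typical use, as done later in this installer:
#   OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)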

function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
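
# Used below to create one-off credentials, e.g.:
#   MYSQL_ROOT_PASSWORD=$(generate_secret)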

function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}

function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}

function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}

function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}

#removes osm deployments and services
function remove_k8s_namespace() {
    kubectl delete ns $1
}

#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}

function remove_crontab_job() {
    # double quotes so that ${OSM_DEVOPS} is expanded before grep runs
    crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}

#Uninstall osmclient
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}

#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$NGUI" ]; then
            sg docker -c "docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
        else
            sg docker -c "docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
        fi

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
            [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        fi
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
    fi
    remove_crontab_job
    [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}

#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}

#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}

function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}

function update_juju_images(){
    # Note: user crontabs take no user field; the job line starts directly with the command
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}

function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}

function ask_user(){
    # Ask the user a question and parse the response: 'y', 'yes', 'n' or 'no'. Case insensitive.
    # Params: $1 text to ask; $2 default action, 'y' for yes, 'n' for no; other or empty means no default
    # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
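
# Example (the second argument is the default taken on an empty answer):
#   ask_user "Do you want to proceed (Y/n)? " y || exit 1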

function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}

function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}

function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}

function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}

function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}

function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.8/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    update_juju_images
    echo "Finished installation of juju"
    return 0
}

function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Controller not found; create it
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
    juju controller-config features=[k8s-operators]
}

function juju_addk8s() {
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
}

function juju_createcontroller_k8s(){
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client
    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
        --config controller-service-type=loadbalancer \
        --agent-version=$JUJU_AGENT_VERSION
}


function juju_addlxd_cloud(){
    mkdir -p /tmp/.osm
    OSM_VCA_CLOUDNAME="lxd-cloud"
    LXDENDPOINT=$DEFAULT_IP
    LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
    LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml

    cat << EOF > $LXD_CLOUD
clouds:
  $OSM_VCA_CLOUDNAME:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
    openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
    local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
    local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/        /'`
    local client_key=`cat /tmp/.osm/client.key | sed 's/^/        /'`

    cat << EOF > $LXD_CREDENTIALS
credentials:
  $OSM_VCA_CLOUDNAME:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
    lxc config trust add local: /tmp/.osm/client.crt
    juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
    juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}


function juju_createproxy() {
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}

function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$NGUI" ]; then
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull ng-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NG-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NG-UI
            git -C ${LWTEMPDIR}/NG-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL "cannot build NG-UI docker image"
        fi
    else
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
            git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
        fi
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}

function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
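
# Illustrative use: install a file but ask before overwriting an existing copy, e.g.:
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml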

function generate_docker_compose_files() {
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    if [ -n "$NGUI" ]; then
        # For NG-UI
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    else
        # Docker-compose
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-lightui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    fi
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}

function generate_k8s_manifest_files() {
    #Kubernetes resources
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    if [ -n "$NGUI" ]; then
        $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/light-ui.yaml
    else
        $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/ng-ui.yaml
    fi
}

function generate_prometheus_grafana_files() {
    [ -n "$KUBERNETES" ] && return
    # Prometheus files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
}

function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}

function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
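
# The generated wrapper takes no arguments; running it opens an interactive
# osmclient container attached to the OSM network:
#   $OSM_DOCKER_WORK_DIR/osm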

#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}

#initializes kubernetes control plane
function init_kubeadm() {
    sudo swapoff -a
    sudo kubeadm init --config $1
    sleep 5
}

function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}

function install_k8s_storageclass() {
    kubectl apply -f https://openebs.github.io/charts/openebs-operator-1.6.0.yaml
    local storageclass_timeout=300
    local counter=0
    echo "Waiting for storageclass"
    while (( counter < storageclass_timeout ))
    do
        kubectl get storageclass openebs-hostpath &> /dev/null

        if [ $? -eq 0 ] ; then
            echo "Storageclass available"
            break
        else
            counter=$((counter + 15))
            sleep 15
        fi
    done
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}
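
# Manual check: openebs-hostpath should now appear as the default storageclass:
#   kubectl get storageclass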

function install_k8s_metallb() {
    METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
    cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
    echo "apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE" | kubectl apply -f -
}
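
# Manual check: MetalLB pods should be Running and the ConfigMap present:
#   kubectl -n metallb-system get pods,configmap
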
#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}

#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}

#taints K8s master node
function taint_master_node() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}

#deploys osm pods and services
function deploy_osm_services() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}

function deploy_osm_pla_service() {
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

#Install helm and tiller
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has been configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120;
        counter=0;
        tiller_status=""
        while (( counter < tiller_timeout ))
        do
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
            counter=$((counter + 5))
            sleep 5
        done
        [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
    fi
}

function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
    TAG=$1
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/$DOCKER_USER\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
    $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/$DOCKER_USER\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
}
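
# Example: retag every manifest to the docker images built/pulled above:
#   parse_yaml $OSM_DOCKER_TAG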

function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql prometheus"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}

function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}

function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}

function deploy_lightweight() {

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        track deploy_osm_pla
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}

function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}

function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
      --name _system-osm-vim \
      --account_type dummy \
      --auth_url http://dummy \
      --user osm --password osm --tenant osm \
      --description "dummy" \
      --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
      --creds ${HOME}/.kube/config \
      --vim _system-osm-vim \
      --k8s-nets '{"net1": null}' \
      --version '1.15' \
      --description "OSM Internal Cluster" \
      _system-osm-k8s
}
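
# After this, the local cluster should be visible from the client, e.g.:
#   osm k8scluster-list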
1117
1118 function install_lightweight() {
1119 track checkingroot
1120 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1121 track noroot
1122
1123 if [ -n "$KUBERNETES" ]; then
1124 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1125 1. Install and configure LXD
1126 2. Install juju
1127 3. Install docker CE
1128 4. Disable swap space
1129 5. Install and initialize Kubernetes
1130 as pre-requirements.
1131 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1132
1133 else
1134 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1135 fi
1136 track proceed
1137
1138 echo "Installing lightweight build of OSM"
1139 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1140 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1141 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1142 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1143 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1144 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1145 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1146 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1147
1148 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
1149 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1150 need_packages_lw="snapd"
1151 echo -e "Checking required packages: $need_packages_lw"
1152 dpkg -l $need_packages_lw &>/dev/null \
1153 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1154 || sudo apt-get update \
1155 || FATAL "failed to run apt-get update"
1156 dpkg -l $need_packages_lw &>/dev/null \
1157 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1158 || sudo apt-get install -y $need_packages_lw \
1159 || FATAL "failed to install $need_packages_lw"
1160 install_lxd
1161 fi
1162
1163 track prereqok
1164
1165 [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)
1166
1167 echo "Creating folders for installation"
1168 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1169 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
1170 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1171
1172 #Installs Kubernetes
1173 if [ -n "$KUBERNETES" ]; then
1174 install_kube
1175 track install_k8s
1176 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1177 kube_config_dir
1178 track init_k8s
1179 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1180 # uninstall OSM MONITORING
1181 uninstall_k8s_monitoring
1182 track uninstall_k8s_monitoring
1183 fi
1184 #remove old namespace
1185 remove_k8s_namespace $OSM_STACK_NAME
1186 deploy_cni_provider
1187 taint_master_node
1188 install_k8s_storageclass
1189 track k8s_storageclass
1190 install_k8s_metallb
1191 track k8s_metallb
1192 else
1193 #install_docker_compose
1194 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1195 track docker_swarm
1196 fi
1197
1198 [ -z "$INSTALL_NOJUJU" ] && install_juju
1199 track juju_install
1200
1201 if [ -z "$OSM_VCA_HOST" ]; then
1202 if [ -z "$CONTROLLER_NAME" ]; then
1203
1204 if [ -n "$KUBERNETES" ]; then
1205 juju_createcontroller_k8s
1206 juju_addlxd_cloud
1207 else
1208 if [ -n "$LXD_CLOUD_FILE" ]; then
1209 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1210 OSM_VCA_CLOUDNAME="lxd-cloud"
1211 juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
1212 juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
1213 fi
1214 juju_createcontroller
1215 juju_createproxy
1216 fi
1217 else
1218 OSM_VCA_CLOUDNAME="lxd-cloud"
1219 if [ -n "$LXD_CLOUD_FILE" ]; then
1220 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1221 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
1222 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
1223 else
1224 mkdir -p ~/.osm
1225 cat << EOF > ~/.osm/lxd-cloud.yaml
1226 clouds:
1227 lxd-cloud:
1228 type: lxd
1229 auth-types: [certificate]
1230 endpoint: "https://$DEFAULT_IP:8443"
1231 config:
1232 ssl-hostname-verification: false
1233 EOF
1234 openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
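# The openssl call above creates a throwaway self-signed client certificate/key pair,
# valid for 365 days, which is registered with the local LXD daemon below via
# "lxc config trust add" so that juju can authenticate against LXD with it.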
1235 local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
1236 local client_cert=`cat ~/.osm/client.crt | sed 's/^/ /'`
1237 local client_key=`cat ~/.osm/client.key | sed 's/^/ /'`
1238 cat << EOF > ~/.osm/lxd-credentials.yaml
1239 credentials:
1240 lxd-cloud:
1241 lxd-cloud:
1242 auth-type: certificate
1243 server-cert: |
1244 $server_cert
1245 client-cert: |
1246 $client_cert
1247 client-key: |
1248 $client_key
1249 EOF
1250 lxc config trust add local: ~/.osm/client.crt
1251 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
1252 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
1253 fi
1254 fi
1255 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1256 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1257 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
1258 fi
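# Illustrative manual equivalent of the discovery above (assuming the default stack
# name "osm"):
#   juju show-controller osm | grep api-endpoints
# which prints something like: api-endpoints: ['10.0.1.10:17070']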
1259 track juju_controller
1260
1261 if [ -z "$OSM_VCA_SECRET" ]; then
1262 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
1263 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
1264 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
1265 fi
1266 if [ -z "$OSM_VCA_PUBKEY" ]; then
1267 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
1268 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
1269 fi
1270 if [ -z "$OSM_VCA_CACERT" ]; then
1271 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1272 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1273 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
1274 fi
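# Equivalent one-off query (hypothetical controller name "osm"):
#   juju controllers --format json | jq -r '.controllers["osm"]["ca-cert"]' | base64 | tr -d '\n'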
1275
1276 # Set OSM_VCA_APIPROXY only when it is not a k8s installation
1277 if [ -z "$KUBERNETES" ]; then
1278 if [ -z "$OSM_VCA_APIPROXY" ]; then
1279 OSM_VCA_APIPROXY=$DEFAULT_IP
1280 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
1281 fi
1282 juju_createproxy
1283 fi
1284 track juju
1285
1286 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1287 OSM_DATABASE_COMMONKEY=$(generate_secret)
1288 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
1289 fi
1290
1291 # Deploy OSM services
1292 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1293 track docker_build
1294
1295 if [ -n "$KUBERNETES" ]; then
1296 generate_k8s_manifest_files
1297 else
1298 generate_docker_compose_files
1299 fi
1300 track manifest_files
1301 generate_prometheus_grafana_files
1302 generate_docker_env_files
1303 track env_files
1304
1305 if [ -n "$KUBERNETES" ]; then
1306 kube_secrets
1307 [ ! "$OSM_DOCKER_TAG" == "8" ] && parse_yaml $OSM_DOCKER_TAG
1308 namespace_vol
1309 deploy_osm_services
1310 if [ -n "$INSTALL_PLA"]; then
1311 # optional PLA install
1312 deploy_osm_pla_service
1313 track deploy_osm_pla
1314 fi
1315 track deploy_osm_services_k8s
1316 install_helm
1317 track install_helm
1318 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1319 # install OSM MONITORING
1320 install_k8s_monitoring
1321 track install_k8s_monitoring
1322 fi
1323 else
1324 # remove old stack
1325 remove_stack $OSM_STACK_NAME
1326 create_docker_network
1327 deploy_lightweight
1328 generate_osmclient_script
1329 track docker_deploy
1330 install_prometheus_nodeexporter
1331 track nodeexporter
1332 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1333 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1334 fi
1335
1336 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1337 track osmclient
1338
1339 echo -e "Checking OSM health state..."
1340 if [ -n "$KUBERNETES" ]; then
1341 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || {
1342 echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
1343 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all"
1344 track osm_unhealthy; }
1345 else
1346 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || {
1347 echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
1348 echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}"
1349 track osm_unhealthy; }
1350 fi
1351 track after_healthcheck
1352
1353 [ -n "$KUBERNETES" ] && add_local_k8scluster
1354 track add_local_k8scluster
1355
1356 wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
1357 track end
1358 return 0
1359 }
1360
1361 function install_to_openstack() {
1362
1363 if [ -z "$2" ]; then
1364 FATAL "OpenStack installer requires a valid external network name"
1365 fi
1366
1367 # Install Pip for Python3
1368 $WORKDIR_SUDO apt install -y python3-pip
1369 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip
1370
1371 # Install Ansible, OpenStack client and SDK
1372 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"
1373
1374 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
1375
1376 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
1377
1378 # Execute the Ansible playbook based on openrc or clouds.yaml
1379 if [ -e "$1" ]; then
1380 . $1
1381 ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
1382 -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
1383 else
1384 ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
1385 -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
1386 fi
1387
1388 return 0
1389 }
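# Example invocation (hypothetical cloud and network names), as wired from the option
# parsing below:
#   ./full_install_osm.sh -O mycloud -N ext-net --volume
# The first argument may be either an openrc file path or a clouds.yaml cloud name,
# as handled by the playbook dispatch above.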
1390
1391 function install_vimemu() {
1392 echo "\nInstalling vim-emu"
1393 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1394 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1395 # install prerequisites (OVS is a must for the emulator to work)
1396 sudo apt-get install -y openvswitch-switch
1397 # clone vim-emu repository (attention: branch is currently master only)
1398 echo "Cloning vim-emu repository ..."
1399 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1400 # build vim-emu docker
1401 echo "Building vim-emu Docker container..."
1402
1403 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1404 # start vim-emu container as daemon
1405 echo "Starting vim-emu Docker container 'vim-emu' ..."
1406 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1407 # in lightweight mode, the emulator needs to be attached to netOSM
1408 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1409 else
1410 # classic build mode
1411 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1412 fi
1413 echo "Waiting for 'vim-emu' container to start ..."
1414 sleep 5
1415 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1416 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1417 # print vim-emu connection info
1418 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1419 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1420 echo -e "To add the emulated VIM to OSM you should do:"
1421 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1422 }
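# Note: install_vimemu is also reachable on its own via "-o vimemu" (see the option
# parsing below). The emulator exposes an OpenStack-like API on port 6001, which is
# why the vim-create hint above uses --account_type openstack.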
1423
1424 function install_k8s_monitoring() {
1425 # install OSM monitoring
1426 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1427 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1428 }
1429
1430 function uninstall_k8s_monitoring() {
1431 # uninstall OSM monitoring
1432 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1433 }
1434
1435 function dump_vars(){
1436 echo "DEVELOP=$DEVELOP"
1437 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1438 echo "UNINSTALL=$UNINSTALL"
1439 echo "UPDATE=$UPDATE"
1440 echo "RECONFIGURE=$RECONFIGURE"
1441 echo "TEST_INSTALLER=$TEST_INSTALLER"
1442 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1443 echo "INSTALL_PLA=$INSTALL_PLA"
1444 echo "INSTALL_LXD=$INSTALL_LXD"
1445 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1446 echo "INSTALL_ONLY=$INSTALL_ONLY"
1447 echo "INSTALL_ELK=$INSTALL_ELK"
1448 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1449 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
1450 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
1451 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
1452 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
1453 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
1454 echo "TO_REBUILD=$TO_REBUILD"
1455 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1456 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1457 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1458 echo "RELEASE=$RELEASE"
1459 echo "REPOSITORY=$REPOSITORY"
1460 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1461 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1462 echo "OSM_DEVOPS=$OSM_DEVOPS"
1463 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1464 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1465 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1466 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1467 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1468 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1469 echo "OSM_WORK_DIR=$OSM_STACK_NAME"
1470 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1471 echo "DOCKER_USER=$DOCKER_USER"
1472 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1473 echo "PULL_IMAGES=$PULL_IMAGES"
1474 echo "KUBERNETES=$KUBERNETES"
1475 echo "NGUI=$NGUI"
1476 echo "SHOWOPTS=$SHOWOPTS"
1477 echo "Install from specific refspec (-b): $COMMIT_ID"
1478 }
1479
1480 function track(){
1481 ctime=`date +%s`
1482 duration=$((ctime - SESSION_ID))
1483 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1484 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1485 event_name="bin"
1486 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1487 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1488 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1489 event_name="${event_name}_$1"
1490 url="${url}&event=${event_name}&ce_duration=${duration}"
1491 wget -q -O /dev/null $url
1492 }
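# Illustrative example: during a lightweight install, "track start" issues a GET to
#   ${url}&event=lw_start&ce_duration=<seconds elapsed since SESSION_ID>
# i.e. only the event name and timing are appended to the base URL built above.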
1493
1494 JUJU_AGENT_VERSION=2.8.1
1495 UNINSTALL=""
1496 DEVELOP=""
1497 UPDATE=""
1498 RECONFIGURE=""
1499 TEST_INSTALLER=""
1500 INSTALL_LXD=""
1501 SHOWOPTS=""
1502 COMMIT_ID=""
1503 ASSUME_YES=""
1504 INSTALL_FROM_SOURCE=""
1505 RELEASE="ReleaseEIGHT"
1506 REPOSITORY="stable"
1507 INSTALL_VIMEMU=""
1508 INSTALL_PLA=""
1509 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1510 LXD_REPOSITORY_PATH=""
1511 INSTALL_LIGHTWEIGHT="y"
1512 INSTALL_TO_OPENSTACK=""
1513 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
1514 OPENSTACK_PUBLIC_NET_NAME=""
1515 OPENSTACK_ATTACH_VOLUME="false"
1516 INSTALL_ONLY=""
1517 INSTALL_ELK=""
1518 TO_REBUILD=""
1519 INSTALL_NOLXD=""
1520 INSTALL_NODOCKER=""
1521 INSTALL_NOJUJU=""
1522 KUBERNETES=""
1523 NGUI=""
1524 INSTALL_K8S_MONITOR=""
1525 INSTALL_NOHOSTCLIENT=""
1526 SESSION_ID=`date +%s`
1527 OSM_DEVOPS=
1528 OSM_VCA_HOST=
1529 OSM_VCA_SECRET=
1530 OSM_VCA_PUBKEY=
1531 OSM_VCA_CLOUDNAME="localhost"
1532 OSM_VCA_K8S_CLOUDNAME="k8scloud"
1533 OSM_STACK_NAME=osm
1534 NO_HOST_PORTS=""
1535 DOCKER_NOBUILD=""
1536 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1537 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1538 WORKDIR_SUDO=sudo
1539 OSM_WORK_DIR="/etc/osm"
1540 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1541 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1542 OSM_HOST_VOL="/var/lib/osm"
1543 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1544 OSM_DOCKER_TAG=latest
1545 DOCKER_USER=opensourcemano
1546 PULL_IMAGES="y"
1547 KAFKA_TAG=2.11-1.0.2
1548 PROMETHEUS_TAG=v2.4.3
1549 GRAFANA_TAG=latest
1550 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1551 PROMETHEUS_CADVISOR_TAG=latest
1552 KEYSTONEDB_TAG=10
1553 OSM_DATABASE_COMMONKEY=
1554 ELASTIC_VERSION=6.4.2
1555 ELASTIC_CURATOR_VERSION=5.5.4
1556 POD_NETWORK_CIDR=10.244.0.0/16
1557 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1558 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
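# Example: with RE_CHECK above, "-s osm-dev1" is accepted as a Kubernetes namespace,
# while "-s Osm_dev" is rejected (uppercase letters and "_" do not match the regex).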
1559
1560 while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-:hy" o; do
1561 case "${o}" in
1562 b)
1563 COMMIT_ID=${OPTARG}
1564 PULL_IMAGES=""
1565 ;;
1566 r)
1567 REPOSITORY="${OPTARG}"
1568 REPO_ARGS+=(-r "$REPOSITORY")
1569 ;;
1570 c)
1571 [ "${OPTARG}" == "swarm" ] && continue
1572 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1573 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1574 usage && exit 1
1575 ;;
1576 n)
1577 [ "${OPTARG}" == "lwui" ] && continue
1578 [ "${OPTARG}" == "ngui" ] && NGUI="y" && continue
1579 echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
1580 usage && exit 1
1581 ;;
1582 k)
1583 REPOSITORY_KEY="${OPTARG}"
1584 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1585 ;;
1586 u)
1587 REPOSITORY_BASE="${OPTARG}"
1588 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1589 ;;
1590 R)
1591 RELEASE="${OPTARG}"
1592 REPO_ARGS+=(-R "$RELEASE")
1593 ;;
1594 D)
1595 OSM_DEVOPS="${OPTARG}"
1596 ;;
1597 o)
1598 INSTALL_ONLY="y"
1599 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1600 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1601 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1602 ;;
1603 O)
1604 INSTALL_TO_OPENSTACK="y"
1605 if [ -n "${OPTARG}" ]; then
1606 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
1607 else
1608 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
1609 usage && exit 1
1610 fi
1611 ;;
1612 N)
1613 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
1614 ;;
1615 m)
1616 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1617 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1618 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1619 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1620 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1621 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1622 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1623 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1624 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1625 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1626 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1627 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1628 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1629 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1630 ;;
1631 H)
1632 OSM_VCA_HOST="${OPTARG}"
1633 ;;
1634 S)
1635 OSM_VCA_SECRET="${OPTARG}"
1636 ;;
1637 s)
1638 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" >&2 && exit 1
1639 ;;
1640 w)
1641 # when specifying workdir, do not use sudo for access
1642 WORKDIR_SUDO=
1643 OSM_WORK_DIR="${OPTARG}"
1644 ;;
1645 t)
1646 OSM_DOCKER_TAG="${OPTARG}"
1647 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
1648 ;;
1649 U)
1650 DOCKER_USER="${OPTARG}"
1651 ;;
1652 P)
1653 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1654 ;;
1655 A)
1656 OSM_VCA_APIPROXY="${OPTARG}"
1657 ;;
1658 l)
1659 LXD_CLOUD_FILE="${OPTARG}"
1660 ;;
1661 L)
1662 LXD_CRED_FILE="${OPTARG}"
1663 ;;
1664 K)
1665 CONTROLLER_NAME="${OPTARG}"
1666 ;;
1667 -)
1668 [ "${OPTARG}" == "help" ] && usage && exit 0
1669 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1670 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1671 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1672 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1673 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1674 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1675 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1676 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1677 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1678 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1679 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1680 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1681 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1682 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1683 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1684 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1685 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1686 [ "${OPTARG}" == "pullimages" ] && continue
1687 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1688 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1689 [ "${OPTARG}" == "bundle" ] && continue
1690 [ "${OPTARG}" == "k8s" ] && continue
1691 [ "${OPTARG}" == "lxd" ] && continue
1692 [ "${OPTARG}" == "lxd-cred" ] && continue
1693 [ "${OPTARG}" == "microstack" ] && continue
1694 [ "${OPTARG}" == "vca" ] && continue
1695 [ "${OPTARG}" == "ha" ] && continue
1696 [ "${OPTARG}" == "tag" ] && continue
1697 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1698 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
1699 echo -e "Invalid option: '--$OPTARG'\n" >&2
1700 usage && exit 1
1701 ;;
1702 :)
1703 echo "Option -$OPTARG requires an argument" >&2
1704 usage && exit 1
1705 ;;
1706 \?)
1707 echo -e "Invalid option: '-$OPTARG'\n" >&2
1708 usage && exit 1
1709 ;;
1710 h)
1711 usage && exit 0
1712 ;;
1713 y)
1714 ASSUME_YES="y"
1715 ;;
1716 *)
1717 usage && exit 1
1718 ;;
1719 esac
1720 done
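# Example command lines (hypothetical) accepted by the parser above:
#   ./full_install_osm.sh -c k8s -n ngui --pla     # OSM over Kubernetes, Next Gen UI, with PLA
#   ./full_install_osm.sh -c swarm -s mystack -t 8 # OSM over docker swarm, stack "mystack", docker tag 8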
1721
1722 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1723 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1724
1725 if [ -n "$SHOWOPTS" ]; then
1726 dump_vars
1727 exit 0
1728 fi
1729
1730 if [ -n "$CHARMED" ]; then
1731 if [ -n "$UNINSTALL" ]; then
1732 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1733 else
1734 ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1735 fi
1736
1737 exit 0
1738 fi
1739
1740 # if develop, we force master
1741 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1742
1743 need_packages="git wget curl tar"
1744
1745 [ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0
1746
1747 echo -e "Checking required packages: $need_packages"
1748 dpkg -l $need_packages &>/dev/null \
1749 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1750 || sudo apt-get update \
1751 || FATAL "failed to run apt-get update"
1752 dpkg -l $need_packages &>/dev/null \
1753 || ! echo -e "Installing $need_packages requires root privileges." \
1754 || sudo apt-get install -y $need_packages \
1755 || FATAL "failed to install $need_packages"
1756 sudo snap install jq
1757 if [ -z "$OSM_DEVOPS" ]; then
1758 if [ -n "$TEST_INSTALLER" ]; then
1759 echo -e "\nUsing local devops repo for OSM installation"
1760 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1761 else
1762 echo -e "\nCreating temporary dir for OSM installation"
1763 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1764 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1765
1766 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1767
1768 if [ -z "$COMMIT_ID" ]; then
1769 echo -e "\nGuessing the current stable release"
1770 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1771 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1772
1773 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1774 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1775 else
1776 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1777 fi
1778 git -C $OSM_DEVOPS checkout $COMMIT_ID
1779 fi
1780 fi
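# Example: if the newest tag matching "v[0-9].*" were v8.0.4, COMMIT_ID would become
# "tags/v8.0.4" (illustrative value) and the local devops clone is checked out there.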
1781
1782 . $OSM_DEVOPS/common/all_funcs
1783
1784 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1785 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1786 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1787 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1788 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1789 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1790 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1791 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1792
1793 #Installation starts here
1794 wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README.txt &> /dev/null
1795 track start
1796
1797 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1798 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1799 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1800 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1801 fi
1802
1803 echo -e "Checking required packages: lxd"
1804 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1805 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1806
1807 # use local devops for containers
1808 export OSM_USE_LOCAL_DEVOPS=true
1809
1810 #Install osmclient
1811
1812 #Install vim-emu (optional)
1813 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1814
1815 wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
1816 track end
1817 echo -e "\nDONE"
1818
1819