Default installation with K8s instead of swarm
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
28 echo -e " ..."
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
31 echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
32 echo -e " -H <VCA host> use specific juju host controller IP"
33 echo -e " -S <VCA secret> use VCA/juju secret key"
34 echo -e " -P <VCA pubkey> use VCA/juju public key file"
35 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
36 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
37 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
38 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
41 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
42 echo -e " -O <openrc file/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
43 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
44 echo -e " -D <devops path> use local devops installation path"
45 echo -e " -w <work dir> Location to store runtime installation"
46 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
47 echo -e " -l: LXD cloud yaml file"
48 echo -e " -L: LXD credentials yaml file"
49 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
50 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
51 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
52 echo -e " --nojuju: do not juju, assumes already installed"
53 echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
54 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
55 echo -e " --nohostclient: do not install the osmclient"
56 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
57 echo -e " --source: install OSM from source code using the latest stable tag"
58 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
59 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
60 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
61 echo -e " --volume: create a VM volume when installing to OpenStack"
62 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
63 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
64 echo -e " --showopts: print chosen options and exit (only for debugging)"
65 echo -e " -y: do not prompt for confirmation, assumes yes"
66 echo -e " -h / --help: print this help"
67 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
68 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
69 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
70 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
71 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
72 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
73 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
74 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
75 echo -e " [--tag]: Docker image tag. (--charmed option)"
76 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
77
78 }
79
80 # takes a juju/accounts.yaml file and returns the password specific
81 # for a controller. I wrote this using only bash tools to minimize
82 # additions of other packages
83 function parse_juju_password {
84 password_file="${HOME}/.local/share/juju/accounts.yaml"
85 local controller_name=$1
86 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
87 sed -ne "s|^\($s\):|\1|" \
88 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
89 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
90 awk -F$fs -v controller=$controller_name '{
91 indent = length($1)/2;
92 vname[indent] = $2;
93 for (i in vname) {if (i > indent) {delete vname[i]}}
94 if (length($3) > 0) {
95 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
96 if (match(vn,controller) && match($2,"password")) {
97 printf("%s",$3);
98 }
99 }
100 }'
101 }
102
103 function generate_secret() {
104 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
105 }
106
107 function remove_volumes() {
108 if [ -n "$KUBERNETES" ]; then
109 k8_volume=$1
110 echo "Removing ${k8_volume}"
111 $WORKDIR_SUDO rm -rf ${k8_volume}
112 else
113 stack=$1
114 volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
115 for volume in $volumes; do
116 sg docker -c "docker volume rm ${stack}_${volume}"
117 done
118 fi
119 }
120
121 function remove_network() {
122 stack=$1
123 sg docker -c "docker network rm net${stack}"
124 }
125
126 function remove_iptables() {
127 stack=$1
128 if [ -z "$OSM_VCA_HOST" ]; then
129 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
130 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
131 fi
132
133 if [ -z "$DEFAULT_IP" ]; then
134 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
135 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
136 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
137 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
138 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
139 fi
140
141 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
142 sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
143 sudo netfilter-persistent save
144 fi
145 }
146
147 function remove_stack() {
148 stack=$1
149 if sg docker -c "docker stack ps ${stack}" ; then
150 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
151 COUNTER=0
152 result=1
153 while [ ${COUNTER} -lt 30 ]; do
154 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
155 #echo "Dockers running: $result"
156 if [ "${result}" == "0" ]; then
157 break
158 fi
159 let COUNTER=COUNTER+1
160 sleep 1
161 done
162 if [ "${result}" == "0" ]; then
163 echo "All dockers of the stack ${stack} were removed"
164 else
165 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
166 fi
167 sleep 5
168 fi
169 }
170
171 #removes osm deployments and services
172 function remove_k8s_namespace() {
173 kubectl delete ns $1
174 }
175
176 #removes helm only if there is nothing deployed in helm
177 function remove_helm() {
178 if [ "$(helm ls -q)" == "" ] ; then
179 sudo helm reset --force
180 kubectl delete --namespace kube-system serviceaccount tiller
181 kubectl delete clusterrolebinding tiller-cluster-rule
182 sudo rm /usr/local/bin/helm
183 rm -rf $HOME/.helm
184 fi
185 }
186
187 function remove_crontab_job() {
188 crontab -l | grep -v '${OSM_DEVOPS}/installers/update-juju-lxc-images' | crontab -
189 }
190
191 #Uninstall osmclient
192 function uninstall_osmclient() {
193 sudo apt-get remove --purge -y python-osmclient
194 sudo apt-get remove --purge -y python3-osmclient
195 }
196
197 #Uninstall lightweight OSM: remove dockers
198 function uninstall_lightweight() {
199 if [ -n "$INSTALL_ONLY" ]; then
200 if [ -n "$INSTALL_ELK" ]; then
201 echo -e "\nUninstalling OSM ELK stack"
202 remove_stack osm_elk
203 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
204 fi
205 else
206 echo -e "\nUninstalling OSM"
207 if [ -n "$KUBERNETES" ]; then
208 if [ -n "$INSTALL_K8S_MONITOR" ]; then
209 # uninstall OSM MONITORING
210 uninstall_k8s_monitoring
211 fi
212 remove_k8s_namespace $OSM_STACK_NAME
213 else
214 remove_stack $OSM_STACK_NAME
215 remove_stack osm_elk
216 fi
217 echo "Now osm docker images and volumes will be deleted"
218 newgrp docker << EONG
219 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
220 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
221 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
222 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
223 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
224 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
225 docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
226 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
227 EONG
228
229 if [ -n "$NGUI" ]; then
230 sg docker -c "docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
231 else
232 sg docker -c "docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
233 fi
234
235 if [ -n "$KUBERNETES" ]; then
236 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
237 remove_volumes $OSM_NAMESPACE_VOL
238 else
239 remove_volumes $OSM_STACK_NAME
240 remove_network $OSM_STACK_NAME
241 [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
242 fi
243 echo "Removing $OSM_DOCKER_WORK_DIR"
244 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
245 [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
246 fi
247 remove_crontab_job
248 [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
249 echo "Some docker images will be kept in case they are used by other docker stacks"
250 echo "To remove them, just run 'docker image prune' in a terminal"
251 return 0
252 }
253
254 #Safe unattended install of iptables-persistent
255 function check_install_iptables_persistent(){
256 echo -e "\nChecking required packages: iptables-persistent"
257 if ! dpkg -l iptables-persistent &>/dev/null; then
258 echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
259 echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
260 echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
261 sudo apt-get -yq install iptables-persistent
262 fi
263 }
264
265 #Configure NAT rules, based on the current IP addresses of containers
266 function nat(){
267 check_install_iptables_persistent
268
269 echo -e "\nConfiguring NAT rules"
270 echo -e " Required root privileges"
271 sudo $OSM_DEVOPS/installers/nat_osm
272 }
273
274 function FATAL(){
275 echo "FATAL error: Cannot install OSM due to \"$1\""
276 exit 1
277 }
278
279 function update_juju_images(){
280 crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
281 ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
282 }
283
284 function install_lxd() {
285 # Apply sysctl production values for optimal performance
286 sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
287 sudo sysctl --system
288
289 # Install LXD snap
290 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
291 sudo snap install lxd
292 sudo apt-get install zfsutils-linux -y
293
294 # Configure LXD
295 sudo usermod -a -G lxd `whoami`
296 cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
297 sg lxd -c "lxd waitready"
298 DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
299 [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
300 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
301 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
302 sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
303 #sudo systemctl stop lxd-bridge
304 #sudo systemctl --system daemon-reload
305 #sudo systemctl enable lxd-bridge
306 #sudo systemctl start lxd-bridge
307 }
308
309 function ask_user(){
310 # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
311 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
312 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
313 read -e -p "$1" USER_CONFIRMATION
314 while true ; do
315 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
316 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
317 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
318 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
319 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
320 done
321 }
322
323 function install_osmclient(){
324 CLIENT_RELEASE=${RELEASE#"-R "}
325 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
326 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
327 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
328 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
329 curl $key_location | sudo apt-key add -
330 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
331 sudo apt-get update
332 sudo apt-get install -y python3-pip
333 sudo -H LC_ALL=C python3 -m pip install -U pip
334 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
335 sudo apt-get install -y python3-osm-im python3-osmclient
336 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
337 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
338 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
339 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
340 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
341 echo -e "\nOSM client installed"
342 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
343 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
344 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
345 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
346 else
347 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
348 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
349 echo " export OSM_HOSTNAME=<OSM_host>"
350 fi
351 return 0
352 }
353
354 function install_prometheus_nodeexporter(){
355 if (systemctl -q is-active node_exporter)
356 then
357 echo "Node Exporter is already running."
358 else
359 echo "Node Exporter is not active, installing..."
360 if getent passwd node_exporter > /dev/null 2>&1; then
361 echo "node_exporter user exists"
362 else
363 echo "Creating user node_exporter"
364 sudo useradd --no-create-home --shell /bin/false node_exporter
365 fi
366 wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
367 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
368 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
369 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
370 sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
371 sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
372 sudo systemctl daemon-reload
373 sudo systemctl restart node_exporter
374 sudo systemctl enable node_exporter
375 echo "Node Exporter has been activated in this host."
376 fi
377 return 0
378 }
379
380 function uninstall_prometheus_nodeexporter(){
381 sudo systemctl stop node_exporter
382 sudo systemctl disable node_exporter
383 sudo rm /etc/systemd/system/node_exporter.service
384 sudo systemctl daemon-reload
385 sudo userdel node_exporter
386 sudo rm /usr/local/bin/node_exporter
387 return 0
388 }
389
390 function install_docker_ce() {
391 # installs and configures Docker CE
392 echo "Installing Docker CE ..."
393 sudo apt-get -qq update
394 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
395 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
396 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
397 sudo apt-get -qq update
398 sudo apt-get install -y docker-ce
399 echo "Adding user to group 'docker'"
400 sudo groupadd -f docker
401 sudo usermod -aG docker $USER
402 sleep 2
403 sudo service docker restart
404 echo "... restarted Docker service"
405 sg docker -c "docker version" || FATAL "Docker installation failed"
406 echo "... Docker CE installation done"
407 return 0
408 }
409
410 function install_docker_compose() {
411 # installs and configures docker-compose
412 echo "Installing Docker Compose ..."
413 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
414 sudo chmod +x /usr/local/bin/docker-compose
415 echo "... Docker Compose installation done"
416 }
417
418 function install_juju() {
419 echo "Installing juju"
420 sudo snap install juju --classic --channel=2.8/stable
421 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
422 update_juju_images
423 echo "Finished installation of juju"
424 return 0
425 }
426
427 function juju_createcontroller() {
428 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
429 # Not found created, create the controller
430 sudo usermod -a -G lxd ${USER}
431 sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
432 fi
433 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
434 juju controller-config features=[k8s-operators]
435 }
436
437 function juju_addk8s() {
438 cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
439 }
440
441 function juju_createcontroller_k8s(){
442 cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client
443 juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
444 --config controller-service-type=loadbalancer \
445 --agent-version=$JUJU_AGENT_VERSION
446 }
447
448
449 function juju_addlxd_cloud(){
450 mkdir -p /tmp/.osm
451 OSM_VCA_CLOUDNAME="lxd-cloud"
452 LXDENDPOINT=$DEFAULT_IP
453 LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
454 LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml
455
456 cat << EOF > $LXD_CLOUD
457 clouds:
458 $OSM_VCA_CLOUDNAME:
459 type: lxd
460 auth-types: [certificate]
461 endpoint: "https://$LXDENDPOINT:8443"
462 config:
463 ssl-hostname-verification: false
464 EOF
465 openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
466 local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
467 local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/ /'`
468 local client_key=`cat /tmp/.osm/client.key | sed 's/^/ /'`
469
470 cat << EOF > $LXD_CREDENTIALS
471 credentials:
472 $OSM_VCA_CLOUDNAME:
473 lxd-cloud:
474 auth-type: certificate
475 server-cert: |
476 $server_cert
477 client-cert: |
478 $client_cert
479 client-key: |
480 $client_key
481 EOF
482 lxc config trust add local: /tmp/.osm/client.crt
483 juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
484 juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
485 sg lxd -c "lxd waitready"
486 juju controller-config features=[k8s-operators]
487 }
488
489
490 function juju_createproxy() {
491 check_install_iptables_persistent
492
493 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
494 sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
495 sudo netfilter-persistent save
496 fi
497 }
498
499 function generate_docker_images() {
500 echo "Pulling and generating docker images"
501 _build_from=$COMMIT_ID
502 [ -z "$_build_from" ] && _build_from="master"
503
504 echo "OSM Docker images generated from $_build_from"
505
506 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
507 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
508 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
509 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
510
511 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
512 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
513 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
514 fi
515
516 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
517 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
518 fi
519
520 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
521 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
522 fi
523
524 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
525 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
526 fi
527
528 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
529 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
530 fi
531
532 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
533 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
534 fi
535
536 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
537 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
538 fi
539
540 if [ -n "$PULL_IMAGES" ]; then
541 sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
542 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
543 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
544 git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
545 sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
546 fi
547
548 if [ -n "$PULL_IMAGES" ]; then
549 sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
550 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
551 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
552 git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
553 sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
554 fi
555
556 if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
557 sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
558 elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
559 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
560 git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
561 sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
562 fi
563
564 if [ -n "$PULL_IMAGES" ]; then
565 sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
566 sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
567 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
568 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
569 git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
570 sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
571 sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
572 fi
573
574 if [ -n "$PULL_IMAGES" ]; then
575 sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
576 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
577 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
578 git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
579 sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
580 fi
581
582 if [ -n "$PULL_IMAGES" ]; then
583 sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
584 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
585 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
586 git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
587 sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
588 fi
589
590 if [ -n "$NGUI" ]; then
591 if [ -n "$PULL_IMAGES" ]; then
592 sg docker -c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull ng-ui docker image"
593 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NG-UI ; then
594 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NG-UI
595 git -C ${LWTEMPDIR}/NG-UI checkout ${COMMIT_ID}
596 sg docker -c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL "cannot build NG-UI docker image"
597 fi
598 else
599 if [ -n "$PULL_IMAGES" ]; then
600 sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
601 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
602 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
603 git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
604 sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
605 fi
606 fi
607
608 if [ -n "$PULL_IMAGES" ]; then
609 sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
610 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
611 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
612 fi
613
614 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
615 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
616 fi
617
618 echo "Finished generation of docker images"
619 }
620
621 function cmp_overwrite() {
622 file1="$1"
623 file2="$2"
624 if ! $(cmp "${file1}" "${file2}" >/dev/null 2>&1); then
625 if [ -f "${file2}" ]; then
626 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
627 else
628 cp -b ${file1} ${file2}
629 fi
630 fi
631 }
632
633 function generate_docker_compose_files() {
634 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
635 if [ -n "$NGUI" ]; then
636 # For NG-UI
637 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
638 else
639 # Docker-compose
640 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-lightui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
641 fi
642 if [ -n "$INSTALL_PLA" ]; then
643 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
644 fi
645 }
646
647 function generate_k8s_manifest_files() {
648 #Kubernetes resources
649 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
650 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
651 if [ -n "$NGUI" ]; then
652 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/light-ui.yaml
653 else
654 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/ng-ui.yaml
655 fi
656 }
657
658 function generate_prometheus_grafana_files() {
659 [ -n "$KUBERNETES" ] && return
660 # Prometheus files
661 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
662 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
663
664 # Grafana files
665 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
666 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
667 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
668 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
669 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
670
671 # Prometheus Exporters files
672 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
673 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
674 }
675
676 function generate_docker_env_files() {
677 echo "Doing a backup of existing env files"
678 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
679 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
680 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
681 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
682 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
683 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
684 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
685 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
686 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
687
688 echo "Generating docker env files"
689 # LCM
690 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
691 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
692 fi
693
694 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
695 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
696 else
697 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
698 fi
699
700 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
701 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
702 else
703 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
704 fi
705
706 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
707 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
708 else
709 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
710 fi
711
712 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
713 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
714 else
715 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
716 fi
717
718 if [ -n "$OSM_VCA_APIPROXY" ]; then
719 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
720 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
721 else
722 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
723 fi
724 fi
725
726 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
727 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
728 fi
729
730 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
731 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
732 fi
733
734 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
735 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
736 else
737 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
738 fi
739
740 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
741 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
742 else
743 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
744 fi
745
746 # RO
747 MYSQL_ROOT_PASSWORD=$(generate_secret)
748 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
749 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
750 fi
751 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
752 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
753 fi
754 if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
755 echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
756 fi
757
758 # Keystone
759 KEYSTONE_DB_PASSWORD=$(generate_secret)
760 SERVICE_PASSWORD=$(generate_secret)
761 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
762 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
763 fi
764 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
765 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
766 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
767 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
768 fi
769
770 # NBI
771 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
772 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
773 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
774 fi
775
776 # MON
777 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
778 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
779 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
780 fi
781
782 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
783 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
784 else
785 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
786 fi
787
788 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
789 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
790 else
791 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
792 fi
793
794 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
795 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
796 else
797 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
798 fi
799
800 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
801 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
802 else
803 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
804 fi
805
806
807 # POL
808 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
809 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
810 fi
811
812 # LW-UI
813 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
814 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
815 fi
816
817 echo "Finished generation of docker env files"
818 }
819
820 function generate_osmclient_script () {
821 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
822 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
823 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
824 }
825
826 #installs kubernetes packages
827 function install_kube() {
828 sudo apt-get update && sudo apt-get install -y apt-transport-https
829 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
830 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
831 sudo apt-get update
832 echo "Installing Kubernetes Packages ..."
833 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
834 }
835
836 #initializes kubernetes control plane
837 function init_kubeadm() {
838 sudo swapoff -a
839 sudo kubeadm init --config $1
840 sleep 5
841 }
842
843 function kube_config_dir() {
844 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
845 mkdir -p $HOME/.kube
846 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
847 sudo chown $(id -u):$(id -g) $HOME/.kube/config
848 }
849
850 function install_k8s_storageclass() {
851 kubectl apply -f https://openebs.github.io/charts/openebs-operator-1.6.0.yaml
852 local storageclass_timeout=300
853 local counter=0
854 echo "Waiting for storageclass"
855 while (( counter < storageclass_timeout ))
856 do
857 kubectl get storageclass openebs-hostpath &> /dev/null
858
859 if [ $? -eq 0 ] ; then
860 echo "Storageclass available"
861 break
862 else
863 counter=$((counter + 15))
864 sleep 15
865 fi
866 done
867 kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
868 }
869
870 function install_k8s_metallb() {
871 METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
872 cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
873 echo "apiVersion: v1
874 kind: ConfigMap
875 metadata:
876 namespace: metallb-system
877 name: config
878 data:
879 config: |
880 address-pools:
881 - name: default
882 protocol: layer2
883 addresses:
884 - $METALLB_IP_RANGE" | kubectl apply -f -
885 }
886 #deploys flannel as daemonsets
887 function deploy_cni_provider() {
888 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
889 trap 'rm -rf "${CNI_DIR}"' EXIT
890 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
891 kubectl apply -f $CNI_DIR
892 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
893 }
894
895 #creates secrets from env files which will be used by containers
896 function kube_secrets(){
897 kubectl create ns $OSM_STACK_NAME
898 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
899 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
900 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
901 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
902 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
903 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
904 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
905 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
906 }
907
908 #taints K8s master node
909 function taint_master_node() {
910 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
911 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
912 sleep 5
913 }
914
915 #deploys osm pods and services
916 function deploy_osm_services() {
917 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
918 }
919
920 #deploy charmed services
921 function deploy_charmed_services() {
922 juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
923 # deploy mongodb charm
924 namespace=$OSM_STACK_NAME
925 juju deploy cs:~charmed-osm/mongodb-k8s \
926 --config enable-sidecar=true \
927 --config replica-set=rs0 \
928 --config namespace=$namespace \
929 -m $namespace
930 }
931
932 function deploy_osm_pla_service() {
933 # corresponding to namespace_vol
934 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
935 # corresponding to deploy_osm_services
936 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
937 }
938
939 #Install helm and tiller
940 function install_helm() {
941 helm > /dev/null 2>&1
942 if [ $? != 0 ] ; then
943 # Helm is not installed. Install helm
944 echo "Helm is not installed, installing ..."
945 curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
946 tar -zxvf helm-v2.15.2.tar.gz
947 sudo mv linux-amd64/helm /usr/local/bin/helm
948 rm -r linux-amd64
949 rm helm-v2.15.2.tar.gz
950 fi
951
952 # Checking if tiller has being configured
953 kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
954 if [ $? == 1 ] ; then
955 # tiller account for kubernetes
956 kubectl --namespace kube-system create serviceaccount tiller
957 kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
958 # HELM initialization
959 helm init --service-account tiller
960
961 # Wait for Tiller to be up and running. If timeout expires, continue installing
962 tiller_timeout=120;
963 counter=0;
964 tiller_status=""
965 while (( counter < tiller_timeout ))
966 do
967 tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2'}`
968 ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
969 counter=$((counter + 5))
970 sleep 5
971 done
972 [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
973 fi
974 }
975
976 function parse_yaml() {
977 osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
978 TAG=$1
979 for osm in $osm_services; do
980 $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/$DOCKER_USER\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
981 done
982 $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/$DOCKER_USER\/\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
983 }
984
985 function namespace_vol() {
986 osm_services="nbi lcm ro pol mon kafka mysql prometheus"
987 for osm in $osm_services; do
988 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
989 done
990 }
991
992 function init_docker_swarm() {
993 if [ "${DEFAULT_MTU}" != "1500" ]; then
994 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
995 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
996 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
997 fi
998 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
999 return 0
1000 }
1001
1002 function create_docker_network() {
1003 echo "creating network"
1004 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
1005 echo "creating network DONE"
1006 }
1007
1008 function deploy_lightweight() {
1009
1010 echo "Deploying lightweight build"
1011 OSM_NBI_PORT=9999
1012 OSM_RO_PORT=9090
1013 OSM_KEYSTONE_PORT=5000
1014 OSM_UI_PORT=80
1015 OSM_MON_PORT=8662
1016 OSM_PROM_PORT=9090
1017 OSM_PROM_CADVISOR_PORT=8080
1018 OSM_PROM_HOSTPORT=9091
1019 OSM_GRAFANA_PORT=3000
1020 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
1021 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
1022
1023 if [ -n "$NO_HOST_PORTS" ]; then
1024 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
1025 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
1026 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
1027 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
1028 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
1029 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
1030 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
1031 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
1032 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
1033 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
1034 else
1035 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
1036 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
1037 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
1038 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
1039 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
1040 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
1041 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
1042 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
1043 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
1044 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
1045 fi
1046 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
1047 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1048 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1049 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1050 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1051 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1052 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1053 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1054 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1055
1056 pushd $OSM_DOCKER_WORK_DIR
1057 if [ -n "$INSTALL_PLA" ]; then
1058 track deploy_osm_pla
1059 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
1060 else
1061 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
1062 fi
1063 popd
1064
1065 echo "Finished deployment of lightweight build"
1066 }
1067
1068 function deploy_elk() {
1069 echo "Pulling docker images for ELK"
1070 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
1071 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
1072 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
1073 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
1074 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
1075 echo "Finished pulling elk docker images"
1076 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
1077 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
1078 remove_stack osm_elk
1079 echo "Deploying ELK stack"
1080 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
1081 echo "Waiting for ELK stack to be up and running"
1082 time=0
1083 step=5
1084 timelength=40
1085 elk_is_up=1
1086 while [ $time -le $timelength ]; do
1087 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
1088 elk_is_up=0
1089 break
1090 fi
1091 sleep $step
1092 time=$((time+step))
1093 done
1094 if [ $elk_is_up -eq 0 ]; then
1095 echo "ELK is up and running. Trying to create index pattern..."
1096 #Create index pattern
1097 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1098 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1099 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
1100 #Make it the default index
1101 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1102 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1103 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
1104 else
1105 echo "Cannot connect to Kibana to create index pattern."
1106 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
1107 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1108 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1109 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
1110 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1111 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1112 -d"{\"value\":\"filebeat-*\"}"'
1113 fi
1114 echo "Finished deployment of ELK stack"
1115 return 0
1116 }
1117
1118 function add_local_k8scluster() {
1119 /usr/bin/osm --all-projects vim-create \
1120 --name _system-osm-vim \
1121 --account_type dummy \
1122 --auth_url http://dummy \
1123 --user osm --password osm --tenant osm \
1124 --description "dummy" \
1125 --config '{management_network_name: mgmt}'
1126 /usr/bin/osm --all-projects k8scluster-add \
1127 --creds ${HOME}/.kube/config \
1128 --vim _system-osm-vim \
1129 --k8s-nets '{"net1": null}' \
1130 --version '1.15' \
1131 --description "OSM Internal Cluster" \
1132 _system-osm-k8s
1133 }
1134
1135 function install_lightweight() {
1136 track checkingroot
1137 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1138 track noroot
1139
1140 if [ -n "$KUBERNETES" ]; then
1141 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1142 1. Install and configure LXD
1143 2. Install juju
1144 3. Install docker CE
1145 4. Disable swap space
1146 5. Install and initialize Kubernetes
1147 as pre-requirements.
1148 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1149
1150 else
1151 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1152 fi
1153 track proceed
1154
1155 echo "Installing lightweight build of OSM"
1156 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1157 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1158 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1159 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1160 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1161 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1162 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1163 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1164
1165 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
1166 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1167 need_packages_lw="snapd"
1168 echo -e "Checking required packages: $need_packages_lw"
1169 dpkg -l $need_packages_lw &>/dev/null \
1170 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1171 || sudo apt-get update \
1172 || FATAL "failed to run apt-get update"
1173 dpkg -l $need_packages_lw &>/dev/null \
1174 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1175 || sudo apt-get install -y $need_packages_lw \
1176 || FATAL "failed to install $need_packages_lw"
1177 install_lxd
1178 fi
1179
1180 track prereqok
1181
1182 [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)
1183
1184 echo "Creating folders for installation"
1185 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1186 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
1187 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1188
1189 #Installs Kubernetes
1190 if [ -n "$KUBERNETES" ]; then
1191 install_kube
1192 track install_k8s
1193 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1194 kube_config_dir
1195 track init_k8s
1196 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1197 # uninstall OSM MONITORING
1198 uninstall_k8s_monitoring
1199 track uninstall_k8s_monitoring
1200 fi
1201 #remove old namespace
1202 remove_k8s_namespace $OSM_STACK_NAME
1203 deploy_cni_provider
1204 taint_master_node
1205 install_k8s_storageclass
1206 track k8s_storageclass
1207 install_k8s_metallb
1208 track k8s_metallb
1209 else
1210 #install_docker_compose
1211 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1212 track docker_swarm
1213 fi
1214
1215 [ -z "$INSTALL_NOJUJU" ] && install_juju
1216 track juju_install
1217
1218 if [ -z "$OSM_VCA_HOST" ]; then
1219 if [ -z "$CONTROLLER_NAME" ]; then
1220
1221 if [ -n "$KUBERNETES" ]; then
1222 juju_createcontroller_k8s
1223 juju_addlxd_cloud
1224 else
1225 if [ -n "$LXD_CLOUD_FILE" ]; then
1226 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1227 OSM_VCA_CLOUDNAME="lxd-cloud"
1228 juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
1229 juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
1230 fi
1231 juju_createcontroller
1232 juju_createproxy
1233 fi
1234 else
1235 OSM_VCA_CLOUDNAME="lxd-cloud"
1236 if [ -n "$LXD_CLOUD_FILE" ]; then
1237 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1238 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
1239 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
1240 else
1241 mkdir -p ~/.osm
1242 cat << EOF > ~/.osm/lxd-cloud.yaml
1243 clouds:
1244 lxd-cloud:
1245 type: lxd
1246 auth-types: [certificate]
1247 endpoint: "https://$DEFAULT_IP:8443"
1248 config:
1249 ssl-hostname-verification: false
1250 EOF
1251 openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
1252 local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
1253 local client_cert=`cat ~/.osm/client.crt | sed 's/^/ /'`
1254 local client_key=`cat ~/.osm/client.key | sed 's/^/ /'`
1255 cat << EOF > ~/.osm/lxd-credentials.yaml
1256 credentials:
1257 lxd-cloud:
1258 lxd-cloud:
1259 auth-type: certificate
1260 server-cert: |
1261 $server_cert
1262 client-cert: |
1263 $client_cert
1264 client-key: |
1265 $client_key
1266 EOF
1267 lxc config trust add local: ~/.osm/client.crt
1268 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
1269 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
1270 fi
1271 fi
1272 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1273 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1274 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
1275 fi
1276 track juju_controller
1277
1278 if [ -z "$OSM_VCA_SECRET" ]; then
1279 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
1280 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
1281 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
1282 fi
1283 if [ -z "$OSM_VCA_PUBKEY" ]; then
1284 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
1285 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
1286 fi
1287 if [ -z "$OSM_VCA_CACERT" ]; then
1288 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1289 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1290 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
1291 fi
1292
1293 # Set OSM_VCA_APIPROXY only when it is not a k8s installation
1294 if [ -z "$KUBERNETES" ]; then
1295 if [ -z "$OSM_VCA_APIPROXY" ]; then
1296 OSM_VCA_APIPROXY=$DEFAULT_IP
1297 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
1298 fi
1299 juju_createproxy
1300 fi
1301 track juju
1302
1303 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1304 OSM_DATABASE_COMMONKEY=$(generate_secret)
1305 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
1306 fi
1307
1308 # Deploy OSM services
1309 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1310 track docker_build
1311
1312 if [ -n "$KUBERNETES" ]; then
1313 generate_k8s_manifest_files
1314 else
1315 generate_docker_compose_files
1316 fi
1317 track manifest_files
1318 generate_prometheus_grafana_files
1319 generate_docker_env_files
1320 track env_files
1321
1322 if [ -n "$KUBERNETES" ]; then
1323 deploy_charmed_services
1324 kube_secrets
1325 [ ! $OSM_DOCKER_TAG == "8" ] && parse_yaml $OSM_DOCKER_TAG
1326 namespace_vol
1327 deploy_osm_services
1328 if [ -n "$INSTALL_PLA"]; then
1329 # optional PLA install
1330 deploy_osm_pla_service
1331 track deploy_osm_pla
1332 fi
1333 track deploy_osm_services_k8s
1334 install_helm
1335 track install_helm
1336 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1337 # install OSM MONITORING
1338 install_k8s_monitoring
1339 track install_k8s_monitoring
1340 fi
1341 else
1342 # remove old stack
1343 remove_stack $OSM_STACK_NAME
1344 create_docker_network
1345 deploy_lightweight
1346 generate_osmclient_script
1347 track docker_deploy
1348 install_prometheus_nodeexporter
1349 track nodeexporter
1350 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1351 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1352 fi
1353
1354 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1355 track osmclient
1356
1357 echo -e "Checking OSM health state..."
1358 if [ -n "$KUBERNETES" ]; then
1359 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
1360 echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
1361 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
1362 track osm_unhealthy
1363 else
1364 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
1365 echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
1366 echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
1367 track osm_unhealthy
1368 fi
1369 track after_healthcheck
1370
1371 [ -n "$KUBERNETES" ] && add_local_k8scluster
1372 track add_local_k8scluster
1373
1374 wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
1375 track end
1376 return 0
1377 }
1378
1379 function install_to_openstack() {
1380
1381 if [ -z "$2" ]; then
1382 FATAL "OpenStack installer requires a valid external network name"
1383 fi
1384
1385 # Install Pip for Python3
1386 $WORKDIR_SUDO apt install -y python3-pip
1387 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip
1388
1389 # Install Ansible, OpenStack client and SDK
1390 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"
1391
1392 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
1393
1394 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
1395
1396 # Execute the Ansible playbook based on openrc or clouds.yaml
1397 if [ -e "$1" ]; then
1398 . $1
1399 ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
1400 -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
1401 else
1402 ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
1403 -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
1404 fi
1405
1406 return 0
1407 }
1408
1409 function install_vimemu() {
1410 echo "\nInstalling vim-emu"
1411 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1412 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1413 # install prerequisites (OVS is a must for the emulator to work)
1414 sudo apt-get install openvswitch-switch
1415 # clone vim-emu repository (attention: branch is currently master only)
1416 echo "Cloning vim-emu repository ..."
1417 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1418 # build vim-emu docker
1419 echo "Building vim-emu Docker container..."
1420
1421 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1422 # start vim-emu container as daemon
1423 echo "Starting vim-emu Docker container 'vim-emu' ..."
1424 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1425 # in lightweight mode, the emulator needs to be attached to netOSM
1426 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1427 else
1428 # classic build mode
1429 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1430 fi
1431 echo "Waiting for 'vim-emu' container to start ..."
1432 sleep 5
1433 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1434 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1435 # print vim-emu connection info
1436 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1437 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1438 echo -e "To add the emulated VIM to OSM you should do:"
1439 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1440 }
1441
1442 function install_k8s_monitoring() {
1443 # install OSM monitoring
1444 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1445 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1446 }
1447
1448 function uninstall_k8s_monitoring() {
1449 # uninstall OSM monitoring
1450 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1451 }
1452
1453 function dump_vars(){
1454 echo "DEVELOP=$DEVELOP"
1455 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1456 echo "UNINSTALL=$UNINSTALL"
1457 echo "UPDATE=$UPDATE"
1458 echo "RECONFIGURE=$RECONFIGURE"
1459 echo "TEST_INSTALLER=$TEST_INSTALLER"
1460 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1461 echo "INSTALL_PLA=$INSTALL_PLA"
1462 echo "INSTALL_LXD=$INSTALL_LXD"
1463 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1464 echo "INSTALL_ONLY=$INSTALL_ONLY"
1465 echo "INSTALL_ELK=$INSTALL_ELK"
1466 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1467 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
1468 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
1469 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
1470 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
1471 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
1472 echo "TO_REBUILD=$TO_REBUILD"
1473 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1474 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1475 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1476 echo "RELEASE=$RELEASE"
1477 echo "REPOSITORY=$REPOSITORY"
1478 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1479 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1480 echo "OSM_DEVOPS=$OSM_DEVOPS"
1481 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1482 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1483 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1484 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1485 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1486 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1487 echo "OSM_WORK_DIR=$OSM_STACK_NAME"
1488 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1489 echo "DOCKER_USER=$DOCKER_USER"
1490 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1491 echo "PULL_IMAGES=$PULL_IMAGES"
1492 echo "KUBERNETES=$KUBERNETES"
1493 echo "NGUI=$NGUI"
1494 echo "SHOWOPTS=$SHOWOPTS"
1495 echo "Install from specific refspec (-b): $COMMIT_ID"
1496 }
1497
1498 function track(){
1499 ctime=`date +%s`
1500 duration=$((ctime - SESSION_ID))
1501 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1502 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1503 event_name="bin"
1504 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1505 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1506 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1507 event_name="${event_name}_$1"
1508 url="${url}&event=${event_name}&ce_duration=${duration}"
1509 wget -q -O /dev/null $url
1510 }
1511
1512 JUJU_AGENT_VERSION=2.8.6
1513 UNINSTALL=""
1514 DEVELOP=""
1515 UPDATE=""
1516 RECONFIGURE=""
1517 TEST_INSTALLER=""
1518 INSTALL_LXD=""
1519 SHOWOPTS=""
1520 COMMIT_ID=""
1521 ASSUME_YES=""
1522 INSTALL_FROM_SOURCE=""
1523 RELEASE="ReleaseNINE"
1524 REPOSITORY="stable"
1525 INSTALL_VIMEMU=""
1526 INSTALL_PLA=""
1527 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1528 LXD_REPOSITORY_PATH=""
1529 INSTALL_LIGHTWEIGHT="y"
1530 INSTALL_TO_OPENSTACK=""
1531 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
1532 OPENSTACK_PUBLIC_NET_NAME=""
1533 OPENSTACK_ATTACH_VOLUME="false"
1534 INSTALL_ONLY=""
1535 INSTALL_ELK=""
1536 TO_REBUILD=""
1537 INSTALL_NOLXD=""
1538 INSTALL_NODOCKER=""
1539 INSTALL_NOJUJU=""
1540 KUBERNETES="y"
1541 NGUI="y"
1542 INSTALL_K8S_MONITOR=""
1543 INSTALL_NOHOSTCLIENT=""
1544 SESSION_ID=`date +%s`
1545 OSM_DEVOPS=
1546 OSM_VCA_HOST=
1547 OSM_VCA_SECRET=
1548 OSM_VCA_PUBKEY=
1549 OSM_VCA_CLOUDNAME="localhost"
1550 OSM_VCA_K8S_CLOUDNAME="k8scloud"
1551 OSM_STACK_NAME=osm
1552 NO_HOST_PORTS=""
1553 DOCKER_NOBUILD=""
1554 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1555 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1556 WORKDIR_SUDO=sudo
1557 OSM_WORK_DIR="/etc/osm"
1558 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1559 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1560 OSM_HOST_VOL="/var/lib/osm"
1561 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1562 OSM_DOCKER_TAG=latest
1563 DOCKER_USER=opensourcemano
1564 PULL_IMAGES="y"
1565 KAFKA_TAG=2.11-1.0.2
1566 PROMETHEUS_TAG=v2.4.3
1567 GRAFANA_TAG=latest
1568 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1569 PROMETHEUS_CADVISOR_TAG=latest
1570 KEYSTONEDB_TAG=10
1571 OSM_DATABASE_COMMONKEY=
1572 ELASTIC_VERSION=6.4.2
1573 ELASTIC_CURATOR_VERSION=5.5.4
1574 POD_NETWORK_CIDR=10.244.0.0/16
1575 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1576 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1577
1578 while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-: hy" o; do
1579 case "${o}" in
1580 b)
1581 COMMIT_ID=${OPTARG}
1582 PULL_IMAGES=""
1583 ;;
1584 r)
1585 REPOSITORY="${OPTARG}"
1586 REPO_ARGS+=(-r "$REPOSITORY")
1587 ;;
1588 c)
1589 [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && continue
1590 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1591 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1592 usage && exit 1
1593 ;;
1594 n)
1595 [ "${OPTARG}" == "lwui" ] && NGUI="" && continue
1596 [ "${OPTARG}" == "ngui" ] && continue
1597 echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
1598 usage && exit 1
1599 ;;
1600 k)
1601 REPOSITORY_KEY="${OPTARG}"
1602 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1603 ;;
1604 u)
1605 REPOSITORY_BASE="${OPTARG}"
1606 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1607 ;;
1608 R)
1609 RELEASE="${OPTARG}"
1610 REPO_ARGS+=(-R "$RELEASE")
1611 ;;
1612 D)
1613 OSM_DEVOPS="${OPTARG}"
1614 ;;
1615 o)
1616 INSTALL_ONLY="y"
1617 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1618 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1619 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1620 ;;
1621 O)
1622 INSTALL_TO_OPENSTACK="y"
1623 if [ -n "${OPTARG}" ]; then
1624 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
1625 else
1626 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
1627 usage && exit 1
1628 fi
1629 ;;
1630 N)
1631 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
1632 ;;
1633 m)
1634 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1635 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1636 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1637 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1638 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1639 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1640 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1641 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1642 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1643 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1644 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1645 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1646 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1647 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1648 ;;
1649 H)
1650 OSM_VCA_HOST="${OPTARG}"
1651 ;;
1652 S)
1653 OSM_VCA_SECRET="${OPTARG}"
1654 ;;
1655 s)
1656 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1657 ;;
1658 w)
1659 # when specifying workdir, do not use sudo for access
1660 WORKDIR_SUDO=
1661 OSM_WORK_DIR="${OPTARG}"
1662 ;;
1663 t)
1664 OSM_DOCKER_TAG="${OPTARG}"
1665 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
1666 ;;
1667 U)
1668 DOCKER_USER="${OPTARG}"
1669 ;;
1670 P)
1671 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1672 ;;
1673 A)
1674 OSM_VCA_APIPROXY="${OPTARG}"
1675 ;;
1676 l)
1677 LXD_CLOUD_FILE="${OPTARG}"
1678 ;;
1679 L)
1680 LXD_CRED_FILE="${OPTARG}"
1681 ;;
1682 K)
1683 CONTROLLER_NAME="${OPTARG}"
1684 ;;
1685 -)
1686 [ "${OPTARG}" == "help" ] && usage && exit 0
1687 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1688 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1689 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1690 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1691 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1692 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1693 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1694 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1695 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1696 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1697 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1698 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1699 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1700 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1701 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1702 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1703 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1704 [ "${OPTARG}" == "pullimages" ] && continue
1705 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1706 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1707 [ "${OPTARG}" == "bundle" ] && continue
1708 [ "${OPTARG}" == "k8s" ] && continue
1709 [ "${OPTARG}" == "lxd" ] && continue
1710 [ "${OPTARG}" == "lxd-cred" ] && continue
1711 [ "${OPTARG}" == "microstack" ] && continue
1712 [ "${OPTARG}" == "vca" ] && continue
1713 [ "${OPTARG}" == "ha" ] && continue
1714 [ "${OPTARG}" == "tag" ] && continue
1715 [ "${OPTARG}" == "registry" ] && continue
1716 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1717 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
1718 echo -e "Invalid option: '--$OPTARG'\n" >&2
1719 usage && exit 1
1720 ;;
1721 :)
1722 echo "Option -$OPTARG requires an argument" >&2
1723 usage && exit 1
1724 ;;
1725 \?)
1726 echo -e "Invalid option: '-$OPTARG'\n" >&2
1727 usage && exit 1
1728 ;;
1729 h)
1730 usage && exit 0
1731 ;;
1732 y)
1733 ASSUME_YES="y"
1734 ;;
1735 *)
1736 usage && exit 1
1737 ;;
1738 esac
1739 done
1740
1741 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1742 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1743
1744 if [ -n "$SHOWOPTS" ]; then
1745 dump_vars
1746 exit 0
1747 fi
1748
1749 if [ -n "$CHARMED" ]; then
1750 if [ -n "$UNINSTALL" ]; then
1751 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
1752 else
1753 ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
1754 fi
1755
1756 exit 0
1757 fi
1758
1759 # if develop, we force master
1760 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1761
1762 need_packages="git wget curl tar"
1763
1764 [ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0
1765
1766 echo -e "Checking required packages: $need_packages"
1767 dpkg -l $need_packages &>/dev/null \
1768 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1769 || sudo apt-get update \
1770 || FATAL "failed to run apt-get update"
1771 dpkg -l $need_packages &>/dev/null \
1772 || ! echo -e "Installing $need_packages requires root privileges." \
1773 || sudo apt-get install -y $need_packages \
1774 || FATAL "failed to install $need_packages"
1775 sudo snap install jq
1776 if [ -z "$OSM_DEVOPS" ]; then
1777 if [ -n "$TEST_INSTALLER" ]; then
1778 echo -e "\nUsing local devops repo for OSM installation"
1779 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1780 else
1781 echo -e "\nCreating temporary dir for OSM installation"
1782 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1783 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1784
1785 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1786
1787 if [ -z "$COMMIT_ID" ]; then
1788 echo -e "\nGuessing the current stable release"
1789 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1790 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1791
1792 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1793 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1794 else
1795 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1796 fi
1797 git -C $OSM_DEVOPS checkout $COMMIT_ID
1798 fi
1799 fi
1800
1801 . $OSM_DEVOPS/common/all_funcs
1802
1803 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1804 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1805 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1806 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1807 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1808 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1809 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1810 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1811
1812 #Installation starts here
1813 wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README.txt &> /dev/null
1814 track start
1815
1816 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1817 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1818 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1819 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1820 fi
1821
1822 echo -e "Checking required packages: lxd"
1823 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1824 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1825
1826 # use local devops for containers
1827 export OSM_USE_LOCAL_DEVOPS=true
1828
1829 #Install osmclient
1830
1831 #Install vim-emu (optional)
1832 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1833
1834 wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
1835 track end
1836 echo -e "\nDONE"
1837
1838