Use mongodb charm in k8s installer
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
28 echo -e " ..."
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
31 echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
32 echo -e " -H <VCA host> use specific juju host controller IP"
33 echo -e " -S <VCA secret> use VCA/juju secret key"
34 echo -e " -P <VCA pubkey> use VCA/juju public key file"
35 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
36 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
37 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
38 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
41 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
42 echo -e " -O <openrc file/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
43 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
44 echo -e " -D <devops path> use local devops installation path"
45 echo -e " -w <work dir> Location to store runtime installation"
46 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
47 echo -e " -l: LXD cloud yaml file"
48 echo -e " -L: LXD credentials yaml file"
49 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
50 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
51 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
52 echo -e " --nojuju: do not juju, assumes already installed"
53 echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
54 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
55 echo -e " --nohostclient: do not install the osmclient"
56 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
57 echo -e " --source: install OSM from source code using the latest stable tag"
58 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
59 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
60 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
61 echo -e " --volume: create a VM volume when installing to OpenStack"
62 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
63 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
64 echo -e " --showopts: print chosen options and exit (only for debugging)"
65 echo -e " -y: do not prompt for confirmation, assumes yes"
66 echo -e " -h / --help: print this help"
67 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
68 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
69 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
70 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
71 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
72 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
73 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
74 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
75 echo -e " [--tag]: Docker image tag. (--charmed option)"
76 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
77
78 }
79
80 # takes a juju/accounts.yaml file and returns the password specific
81 # for a controller. I wrote this using only bash tools to minimize
82 # additions of other packages
83 function parse_juju_password {
84 password_file="${HOME}/.local/share/juju/accounts.yaml"
85 local controller_name=$1
86 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
87 sed -ne "s|^\($s\):|\1|" \
88 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
89 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
90 awk -F$fs -v controller=$controller_name '{
91 indent = length($1)/2;
92 vname[indent] = $2;
93 for (i in vname) {if (i > indent) {delete vname[i]}}
94 if (length($3) > 0) {
95 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
96 if (match(vn,controller) && match($2,"password")) {
97 printf("%s",$3);
98 }
99 }
100 }'
101 }
102
103 function generate_secret() {
104 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
105 }
106
107 function remove_volumes() {
108 if [ -n "$KUBERNETES" ]; then
109 k8_volume=$1
110 echo "Removing ${k8_volume}"
111 $WORKDIR_SUDO rm -rf ${k8_volume}
112 else
113 stack=$1
114 volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
115 for volume in $volumes; do
116 sg docker -c "docker volume rm ${stack}_${volume}"
117 done
118 fi
119 }
120
121 function remove_network() {
122 stack=$1
123 sg docker -c "docker network rm net${stack}"
124 }
125
126 function remove_iptables() {
127 stack=$1
128 if [ -z "$OSM_VCA_HOST" ]; then
129 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
130 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
131 fi
132
133 if [ -z "$DEFAULT_IP" ]; then
134 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
135 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
136 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
137 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
138 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
139 fi
140
141 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
142 sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
143 sudo netfilter-persistent save
144 fi
145 }
146
147 function remove_stack() {
148 stack=$1
149 if sg docker -c "docker stack ps ${stack}" ; then
150 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
151 COUNTER=0
152 result=1
153 while [ ${COUNTER} -lt 30 ]; do
154 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
155 #echo "Dockers running: $result"
156 if [ "${result}" == "0" ]; then
157 break
158 fi
159 let COUNTER=COUNTER+1
160 sleep 1
161 done
162 if [ "${result}" == "0" ]; then
163 echo "All dockers of the stack ${stack} were removed"
164 else
165 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
166 fi
167 sleep 5
168 fi
169 }
170
171 #removes osm deployments and services
172 function remove_k8s_namespace() {
173 kubectl delete ns $1
174 }
175
176 #removes helm only if there is nothing deployed in helm
177 function remove_helm() {
178 if [ "$(helm ls -q)" == "" ] ; then
179 sudo helm reset --force
180 kubectl delete --namespace kube-system serviceaccount tiller
181 kubectl delete clusterrolebinding tiller-cluster-rule
182 sudo rm /usr/local/bin/helm
183 rm -rf $HOME/.helm
184 fi
185 }
186
187 function remove_crontab_job() {
188 crontab -l | grep -v '${OSM_DEVOPS}/installers/update-juju-lxc-images' | crontab -
189 }
190
191 #Uninstall osmclient
192 function uninstall_osmclient() {
193 sudo apt-get remove --purge -y python-osmclient
194 sudo apt-get remove --purge -y python3-osmclient
195 }
196
197 #Uninstall lightweight OSM: remove dockers
198 function uninstall_lightweight() {
199 if [ -n "$INSTALL_ONLY" ]; then
200 if [ -n "$INSTALL_ELK" ]; then
201 echo -e "\nUninstalling OSM ELK stack"
202 remove_stack osm_elk
203 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
204 fi
205 else
206 echo -e "\nUninstalling OSM"
207 if [ -n "$KUBERNETES" ]; then
208 if [ -n "$INSTALL_K8S_MONITOR" ]; then
209 # uninstall OSM MONITORING
210 uninstall_k8s_monitoring
211 fi
212 remove_k8s_namespace $OSM_STACK_NAME
213 else
214 remove_stack $OSM_STACK_NAME
215 remove_stack osm_elk
216 fi
217 echo "Now osm docker images and volumes will be deleted"
218 newgrp docker << EONG
219 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
220 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
221 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
222 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
223 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
224 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
225 docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
226 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
227 EONG
228
229 if [ -n "$NGUI" ]; then
230 sg docker -c "docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
231 else
232 sg docker -c "docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
233 fi
234
235 if [ -n "$KUBERNETES" ]; then
236 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
237 remove_volumes $OSM_NAMESPACE_VOL
238 else
239 remove_volumes $OSM_STACK_NAME
240 remove_network $OSM_STACK_NAME
241 [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
242 fi
243 echo "Removing $OSM_DOCKER_WORK_DIR"
244 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
245 [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
246 fi
247 remove_crontab_job
248 [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
249 echo "Some docker images will be kept in case they are used by other docker stacks"
250 echo "To remove them, just run 'docker image prune' in a terminal"
251 return 0
252 }
253
254 #Safe unattended install of iptables-persistent
255 function check_install_iptables_persistent(){
256 echo -e "\nChecking required packages: iptables-persistent"
257 if ! dpkg -l iptables-persistent &>/dev/null; then
258 echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
259 echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
260 echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
261 sudo apt-get -yq install iptables-persistent
262 fi
263 }
264
265 #Configure NAT rules, based on the current IP addresses of containers
266 function nat(){
267 check_install_iptables_persistent
268
269 echo -e "\nConfiguring NAT rules"
270 echo -e " Required root privileges"
271 sudo $OSM_DEVOPS/installers/nat_osm
272 }
273
274 function FATAL(){
275 echo "FATAL error: Cannot install OSM due to \"$1\""
276 exit 1
277 }
278
279 function update_juju_images(){
280 crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
281 ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
282 }
283
284 function install_lxd() {
285 # Apply sysctl production values for optimal performance
286 sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
287 sudo sysctl --system
288
289 # Install LXD snap
290 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
291 sudo snap install lxd
292 sudo apt-get install zfsutils-linux -y
293
294 # Configure LXD
295 sudo usermod -a -G lxd `whoami`
296 cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
297 sg lxd -c "lxd waitready"
298 DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
299 [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
300 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
301 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
302 sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
303 #sudo systemctl stop lxd-bridge
304 #sudo systemctl --system daemon-reload
305 #sudo systemctl enable lxd-bridge
306 #sudo systemctl start lxd-bridge
307 }
308
309 function ask_user(){
310 # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
311 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
312 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
313 read -e -p "$1" USER_CONFIRMATION
314 while true ; do
315 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
316 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
317 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
318 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
319 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
320 done
321 }
322
323 function install_osmclient(){
324 CLIENT_RELEASE=${RELEASE#"-R "}
325 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
326 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
327 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
328 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
329 curl $key_location | sudo apt-key add -
330 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
331 sudo apt-get update
332 sudo apt-get install -y python3-pip
333 sudo -H LC_ALL=C python3 -m pip install -U pip
334 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
335 sudo apt-get install -y python3-osm-im python3-osmclient
336 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
337 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
338 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
339 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
340 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
341 echo -e "\nOSM client installed"
342 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
343 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
344 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
345 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
346 else
347 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
348 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
349 echo " export OSM_HOSTNAME=<OSM_host>"
350 fi
351 return 0
352 }
353
354 function install_prometheus_nodeexporter(){
355 if (systemctl -q is-active node_exporter)
356 then
357 echo "Node Exporter is already running."
358 else
359 echo "Node Exporter is not active, installing..."
360 if getent passwd node_exporter > /dev/null 2>&1; then
361 echo "node_exporter user exists"
362 else
363 echo "Creating user node_exporter"
364 sudo useradd --no-create-home --shell /bin/false node_exporter
365 fi
366 wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
367 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
368 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
369 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
370 sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
371 sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
372 sudo systemctl daemon-reload
373 sudo systemctl restart node_exporter
374 sudo systemctl enable node_exporter
375 echo "Node Exporter has been activated in this host."
376 fi
377 return 0
378 }
379
380 function uninstall_prometheus_nodeexporter(){
381 sudo systemctl stop node_exporter
382 sudo systemctl disable node_exporter
383 sudo rm /etc/systemd/system/node_exporter.service
384 sudo systemctl daemon-reload
385 sudo userdel node_exporter
386 sudo rm /usr/local/bin/node_exporter
387 return 0
388 }
389
390 function install_docker_ce() {
391 # installs and configures Docker CE
392 echo "Installing Docker CE ..."
393 sudo apt-get -qq update
394 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
395 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
396 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
397 sudo apt-get -qq update
398 sudo apt-get install -y docker-ce
399 echo "Adding user to group 'docker'"
400 sudo groupadd -f docker
401 sudo usermod -aG docker $USER
402 sleep 2
403 sudo service docker restart
404 echo "... restarted Docker service"
405 sg docker -c "docker version" || FATAL "Docker installation failed"
406 echo "... Docker CE installation done"
407 return 0
408 }
409
410 function install_docker_compose() {
411 # installs and configures docker-compose
412 echo "Installing Docker Compose ..."
413 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
414 sudo chmod +x /usr/local/bin/docker-compose
415 echo "... Docker Compose installation done"
416 }
417
418 function install_juju() {
419 echo "Installing juju"
420 sudo snap install juju --classic --channel=2.8/stable
421 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
422 update_juju_images
423 echo "Finished installation of juju"
424 return 0
425 }
426
427 function juju_createcontroller() {
428 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
429 # Not found created, create the controller
430 sudo usermod -a -G lxd ${USER}
431 sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
432 fi
433 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
434 juju controller-config features=[k8s-operators]
435 }
436
437 function juju_addk8s() {
438 cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
439 }
440
441 function juju_createcontroller_k8s(){
442 cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client
443 juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
444 --config controller-service-type=loadbalancer \
445 --agent-version=$JUJU_AGENT_VERSION
446 }
447
448
449 function juju_addlxd_cloud(){
450 mkdir -p /tmp/.osm
451 OSM_VCA_CLOUDNAME="lxd-cloud"
452 LXDENDPOINT=$DEFAULT_IP
453 LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
454 LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml
455
456 cat << EOF > $LXD_CLOUD
457 clouds:
458 $OSM_VCA_CLOUDNAME:
459 type: lxd
460 auth-types: [certificate]
461 endpoint: "https://$LXDENDPOINT:8443"
462 config:
463 ssl-hostname-verification: false
464 EOF
465 openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
466 local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
467 local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/ /'`
468 local client_key=`cat /tmp/.osm/client.key | sed 's/^/ /'`
469
470 cat << EOF > $LXD_CREDENTIALS
471 credentials:
472 $OSM_VCA_CLOUDNAME:
473 lxd-cloud:
474 auth-type: certificate
475 server-cert: |
476 $server_cert
477 client-cert: |
478 $client_cert
479 client-key: |
480 $client_key
481 EOF
482 lxc config trust add local: /tmp/.osm/client.crt
483 juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
484 juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
485 sg lxd -c "lxd waitready"
486 juju controller-config features=[k8s-operators]
487 }
488
489
490 function juju_createproxy() {
491 check_install_iptables_persistent
492
493 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
494 sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
495 sudo netfilter-persistent save
496 fi
497 }
498
499 function generate_docker_images() {
500 echo "Pulling and generating docker images"
501 _build_from=$COMMIT_ID
502 [ -z "$_build_from" ] && _build_from="master"
503
504 echo "OSM Docker images generated from $_build_from"
505
506 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
507 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
508 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
509 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
510
511 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
512 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
513 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
514 fi
515
516 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
517 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
518 fi
519
520 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
521 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
522 fi
523
524 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
525 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
526 fi
527
528 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
529 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
530 fi
531
532 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
533 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
534 fi
535
536 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
537 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
538 fi
539
540 if [ -n "$PULL_IMAGES" ]; then
541 sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
542 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
543 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
544 git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
545 sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
546 fi
547
548 if [ -n "$PULL_IMAGES" ]; then
549 sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
550 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
551 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
552 git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
553 sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
554 fi
555
556 if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
557 sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
558 elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
559 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
560 git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
561 sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
562 fi
563
564 if [ -n "$PULL_IMAGES" ]; then
565 sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
566 sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
567 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
568 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
569 git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
570 sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
571 sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
572 fi
573
574 if [ -n "$PULL_IMAGES" ]; then
575 sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
576 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
577 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
578 git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
579 sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
580 fi
581
582 if [ -n "$PULL_IMAGES" ]; then
583 sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
584 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
585 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
586 git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
587 sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
588 fi
589
590 if [ -n "$NGUI" ]; then
591 if [ -n "$PULL_IMAGES" ]; then
592 sg docker -c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull ng-ui docker image"
593 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NG-UI ; then
594 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NG-UI
595 git -C ${LWTEMPDIR}/NG-UI checkout ${COMMIT_ID}
596 sg docker -c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL "cannot build NG-UI docker image"
597 fi
598 else
599 if [ -n "$PULL_IMAGES" ]; then
600 sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
601 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
602 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
603 git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
604 sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
605 fi
606 fi
607
608 if [ -n "$PULL_IMAGES" ]; then
609 sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
610 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
611 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
612 fi
613
614 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
615 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
616 fi
617
618 echo "Finished generation of docker images"
619 }
620
621 function cmp_overwrite() {
622 file1="$1"
623 file2="$2"
624 if ! $(cmp "${file1}" "${file2}" >/dev/null 2>&1); then
625 if [ -f "${file2}" ]; then
626 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
627 else
628 cp -b ${file1} ${file2}
629 fi
630 fi
631 }
632
633 function generate_docker_compose_files() {
634 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
635 if [ -n "$NGUI" ]; then
636 # For NG-UI
637 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
638 else
639 # Docker-compose
640 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-lightui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
641 fi
642 if [ -n "$INSTALL_PLA" ]; then
643 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
644 fi
645 }
646
647 function generate_k8s_manifest_files() {
648 #Kubernetes resources
649 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
650 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
651 if [ -n "$NGUI" ]; then
652 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/light-ui.yaml
653 else
654 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/ng-ui.yaml
655 fi
656 }
657
658 function generate_prometheus_grafana_files() {
659 [ -n "$KUBERNETES" ] && return
660 # Prometheus files
661 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
662 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
663
664 # Grafana files
665 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
666 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
667 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
668 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
669 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
670
671 # Prometheus Exporters files
672 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
673 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
674 }
675
676 function generate_docker_env_files() {
677 echo "Doing a backup of existing env files"
678 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
679 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
680 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
681 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
682 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
683 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
684 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
685 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
686 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
687
688 echo "Generating docker env files"
689 # LCM
690 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
691 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
692 fi
693
694 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
695 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
696 else
697 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
698 fi
699
700 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
701 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
702 else
703 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
704 fi
705
706 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
707 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
708 else
709 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
710 fi
711
712 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
713 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
714 else
715 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
716 fi
717
718 if [ -n "$OSM_VCA_APIPROXY" ]; then
719 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
720 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
721 else
722 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
723 fi
724 fi
725
726 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
727 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
728 fi
729
730 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
731 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
732 fi
733
734 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
735 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
736 else
737 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
738 fi
739
740 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
741 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
742 else
743 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
744 fi
745
746 # RO
747 MYSQL_ROOT_PASSWORD=$(generate_secret)
748 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
749 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
750 fi
751 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
752 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
753 fi
754
755 # Keystone
756 KEYSTONE_DB_PASSWORD=$(generate_secret)
757 SERVICE_PASSWORD=$(generate_secret)
758 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
759 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
760 fi
761 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
762 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
763 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
764 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
765 fi
766
767 # NBI
768 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
769 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
770 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
771 fi
772
773 # MON
774 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
775 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
776 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
777 fi
778
779 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
780 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
781 else
782 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
783 fi
784
785 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
786 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
787 else
788 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
789 fi
790
791 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
792 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
793 else
794 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
795 fi
796
797 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
798 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
799 else
800 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
801 fi
802
803
804 # POL
805 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
806 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
807 fi
808
809 # LW-UI
810 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
811 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
812 fi
813
814 echo "Finished generation of docker env files"
815 }
816
817 function generate_osmclient_script () {
818 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
819 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
820 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
821 }
822
823 #installs kubernetes packages
824 function install_kube() {
825 sudo apt-get update && sudo apt-get install -y apt-transport-https
826 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
827 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
828 sudo apt-get update
829 echo "Installing Kubernetes Packages ..."
830 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
831 }
832
833 #initializes kubernetes control plane
834 function init_kubeadm() {
835 sudo swapoff -a
836 sudo kubeadm init --config $1
837 sleep 5
838 }
839
840 function kube_config_dir() {
841 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
842 mkdir -p $HOME/.kube
843 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
844 sudo chown $(id -u):$(id -g) $HOME/.kube/config
845 }
846
847 function install_k8s_storageclass() {
848 kubectl apply -f https://openebs.github.io/charts/openebs-operator-1.6.0.yaml
849 local storageclass_timeout=300
850 local counter=0
851 echo "Waiting for storageclass"
852 while (( counter < storageclass_timeout ))
853 do
854 kubectl get storageclass openebs-hostpath &> /dev/null
855
856 if [ $? -eq 0 ] ; then
857 echo "Storageclass available"
858 break
859 else
860 counter=$((counter + 15))
861 sleep 15
862 fi
863 done
864 kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
865 }
866
867 function install_k8s_metallb() {
868 METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
869 cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
870 echo "apiVersion: v1
871 kind: ConfigMap
872 metadata:
873 namespace: metallb-system
874 name: config
875 data:
876 config: |
877 address-pools:
878 - name: default
879 protocol: layer2
880 addresses:
881 - $METALLB_IP_RANGE" | kubectl apply -f -
882 }
883 #deploys flannel as daemonsets
884 function deploy_cni_provider() {
885 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
886 trap 'rm -rf "${CNI_DIR}"' EXIT
887 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
888 kubectl apply -f $CNI_DIR
889 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
890 }
891
892 #creates secrets from env files which will be used by containers
893 function kube_secrets(){
894 kubectl create ns $OSM_STACK_NAME
895 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
896 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
897 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
898 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
899 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
900 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
901 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
902 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
903 }
904
905 #taints K8s master node
906 function taint_master_node() {
907 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
908 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
909 sleep 5
910 }
911
912 #deploys osm pods and services
913 function deploy_osm_services() {
914 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
915 }
916
917 #deploy charmed services
918 function deploy_charmed_services() {
919 juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
920 # deploy mongodb charm
921 namespace=$OSM_STACK_NAME
922 juju deploy cs:~charmed-osm/mongodb-k8s \
923 --config enable-sidecar=true \
924 --config replica-set=rs0 \
925 --config namespace=$namespace \
926 -m $namespace
927 }
928
929 function deploy_osm_pla_service() {
930 # corresponding to namespace_vol
931 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
932 # corresponding to deploy_osm_services
933 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
934 }
935
936 #Install helm and tiller
937 function install_helm() {
938 helm > /dev/null 2>&1
939 if [ $? != 0 ] ; then
940 # Helm is not installed. Install helm
941 echo "Helm is not installed, installing ..."
942 curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
943 tar -zxvf helm-v2.15.2.tar.gz
944 sudo mv linux-amd64/helm /usr/local/bin/helm
945 rm -r linux-amd64
946 rm helm-v2.15.2.tar.gz
947 fi
948
949 # Checking if tiller has being configured
950 kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
951 if [ $? == 1 ] ; then
952 # tiller account for kubernetes
953 kubectl --namespace kube-system create serviceaccount tiller
954 kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
955 # HELM initialization
956 helm init --service-account tiller
957
958 # Wait for Tiller to be up and running. If timeout expires, continue installing
959 tiller_timeout=120;
960 counter=0;
961 tiller_status=""
962 while (( counter < tiller_timeout ))
963 do
964 tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2'}`
965 ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
966 counter=$((counter + 5))
967 sleep 5
968 done
969 [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
970 fi
971 }
972
973 function parse_yaml() {
974 osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
975 TAG=$1
976 for osm in $osm_services; do
977 $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/$DOCKER_USER\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
978 done
979 $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/$DOCKER_USER\/\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
980 }
981
982 function namespace_vol() {
983 osm_services="nbi lcm ro pol mon kafka mysql prometheus"
984 for osm in $osm_services; do
985 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
986 done
987 }
988
989 function init_docker_swarm() {
990 if [ "${DEFAULT_MTU}" != "1500" ]; then
991 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
992 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
993 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
994 fi
995 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
996 return 0
997 }
998
999 function create_docker_network() {
1000 echo "creating network"
1001 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
1002 echo "creating network DONE"
1003 }
1004
1005 function deploy_lightweight() {
1006
1007 echo "Deploying lightweight build"
1008 OSM_NBI_PORT=9999
1009 OSM_RO_PORT=9090
1010 OSM_KEYSTONE_PORT=5000
1011 OSM_UI_PORT=80
1012 OSM_MON_PORT=8662
1013 OSM_PROM_PORT=9090
1014 OSM_PROM_CADVISOR_PORT=8080
1015 OSM_PROM_HOSTPORT=9091
1016 OSM_GRAFANA_PORT=3000
1017 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
1018 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
1019
1020 if [ -n "$NO_HOST_PORTS" ]; then
1021 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
1022 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
1023 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
1024 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
1025 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
1026 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
1027 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
1028 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
1029 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
1030 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
1031 else
1032 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
1033 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
1034 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
1035 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
1036 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
1037 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
1038 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
1039 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
1040 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
1041 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
1042 fi
1043 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
1044 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1045 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1046 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1047 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1048 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1049 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1050 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1051 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1052
1053 pushd $OSM_DOCKER_WORK_DIR
1054 if [ -n "$INSTALL_PLA" ]; then
1055 track deploy_osm_pla
1056 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
1057 else
1058 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
1059 fi
1060 popd
1061
1062 echo "Finished deployment of lightweight build"
1063 }
1064
1065 function deploy_elk() {
1066 echo "Pulling docker images for ELK"
1067 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
1068 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
1069 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
1070 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
1071 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
1072 echo "Finished pulling elk docker images"
1073 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
1074 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
1075 remove_stack osm_elk
1076 echo "Deploying ELK stack"
1077 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
1078 echo "Waiting for ELK stack to be up and running"
1079 time=0
1080 step=5
1081 timelength=40
1082 elk_is_up=1
1083 while [ $time -le $timelength ]; do
1084 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
1085 elk_is_up=0
1086 break
1087 fi
1088 sleep $step
1089 time=$((time+step))
1090 done
1091 if [ $elk_is_up -eq 0 ]; then
1092 echo "ELK is up and running. Trying to create index pattern..."
1093 #Create index pattern
1094 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1095 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1096 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
1097 #Make it the default index
1098 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1099 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1100 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
1101 else
1102 echo "Cannot connect to Kibana to create index pattern."
1103 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
1104 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1105 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1106 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
1107 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1108 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1109 -d"{\"value\":\"filebeat-*\"}"'
1110 fi
1111 echo "Finished deployment of ELK stack"
1112 return 0
1113 }
1114
1115 function add_local_k8scluster() {
1116 /usr/bin/osm --all-projects vim-create \
1117 --name _system-osm-vim \
1118 --account_type dummy \
1119 --auth_url http://dummy \
1120 --user osm --password osm --tenant osm \
1121 --description "dummy" \
1122 --config '{management_network_name: mgmt}'
1123 /usr/bin/osm --all-projects k8scluster-add \
1124 --creds ${HOME}/.kube/config \
1125 --vim _system-osm-vim \
1126 --k8s-nets '{"net1": null}' \
1127 --version '1.15' \
1128 --description "OSM Internal Cluster" \
1129 _system-osm-k8s
1130 }
1131
1132 function install_lightweight() {
1133 track checkingroot
1134 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1135 track noroot
1136
1137 if [ -n "$KUBERNETES" ]; then
1138 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1139 1. Install and configure LXD
1140 2. Install juju
1141 3. Install docker CE
1142 4. Disable swap space
1143 5. Install and initialize Kubernetes
1144 as pre-requirements.
1145 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1146
1147 else
1148 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1149 fi
1150 track proceed
1151
1152 echo "Installing lightweight build of OSM"
1153 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1154 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1155 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1156 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1157 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1158 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1159 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1160 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1161
1162 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
1163 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1164 need_packages_lw="snapd"
1165 echo -e "Checking required packages: $need_packages_lw"
1166 dpkg -l $need_packages_lw &>/dev/null \
1167 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1168 || sudo apt-get update \
1169 || FATAL "failed to run apt-get update"
1170 dpkg -l $need_packages_lw &>/dev/null \
1171 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1172 || sudo apt-get install -y $need_packages_lw \
1173 || FATAL "failed to install $need_packages_lw"
1174 install_lxd
1175 fi
1176
1177 track prereqok
1178
1179 [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)
1180
1181 echo "Creating folders for installation"
1182 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1183 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
1184 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1185
1186 #Installs Kubernetes
1187 if [ -n "$KUBERNETES" ]; then
1188 install_kube
1189 track install_k8s
1190 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1191 kube_config_dir
1192 track init_k8s
1193 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1194 # uninstall OSM MONITORING
1195 uninstall_k8s_monitoring
1196 track uninstall_k8s_monitoring
1197 fi
1198 #remove old namespace
1199 remove_k8s_namespace $OSM_STACK_NAME
1200 deploy_cni_provider
1201 taint_master_node
1202 install_k8s_storageclass
1203 track k8s_storageclass
1204 install_k8s_metallb
1205 track k8s_metallb
1206 else
1207 #install_docker_compose
1208 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1209 track docker_swarm
1210 fi
1211
1212 [ -z "$INSTALL_NOJUJU" ] && install_juju
1213 track juju_install
1214
1215 if [ -z "$OSM_VCA_HOST" ]; then
1216 if [ -z "$CONTROLLER_NAME" ]; then
1217
1218 if [ -n "$KUBERNETES" ]; then
1219 juju_createcontroller_k8s
1220 juju_addlxd_cloud
1221 else
1222 if [ -n "$LXD_CLOUD_FILE" ]; then
1223 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1224 OSM_VCA_CLOUDNAME="lxd-cloud"
1225 juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
1226 juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
1227 fi
1228 juju_createcontroller
1229 juju_createproxy
1230 fi
1231 else
1232 OSM_VCA_CLOUDNAME="lxd-cloud"
1233 if [ -n "$LXD_CLOUD_FILE" ]; then
1234 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1235 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
1236 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
1237 else
1238 mkdir -p ~/.osm
1239 cat << EOF > ~/.osm/lxd-cloud.yaml
1240 clouds:
1241 lxd-cloud:
1242 type: lxd
1243 auth-types: [certificate]
1244 endpoint: "https://$DEFAULT_IP:8443"
1245 config:
1246 ssl-hostname-verification: false
1247 EOF
1248 openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
1249 local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
1250 local client_cert=`cat ~/.osm/client.crt | sed 's/^/ /'`
1251 local client_key=`cat ~/.osm/client.key | sed 's/^/ /'`
1252 cat << EOF > ~/.osm/lxd-credentials.yaml
1253 credentials:
1254 lxd-cloud:
1255 lxd-cloud:
1256 auth-type: certificate
1257 server-cert: |
1258 $server_cert
1259 client-cert: |
1260 $client_cert
1261 client-key: |
1262 $client_key
1263 EOF
1264 lxc config trust add local: ~/.osm/client.crt
1265 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
1266 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
1267 fi
1268 fi
1269 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1270 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1271 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
1272 fi
1273 track juju_controller
1274
1275 if [ -z "$OSM_VCA_SECRET" ]; then
1276 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
1277 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
1278 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
1279 fi
1280 if [ -z "$OSM_VCA_PUBKEY" ]; then
1281 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
1282 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
1283 fi
1284 if [ -z "$OSM_VCA_CACERT" ]; then
1285 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1286 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1287 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
1288 fi
1289
1290 # Set OSM_VCA_APIPROXY only when it is not a k8s installation
1291 if [ -z "$KUBERNETES" ]; then
1292 if [ -z "$OSM_VCA_APIPROXY" ]; then
1293 OSM_VCA_APIPROXY=$DEFAULT_IP
1294 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
1295 fi
1296 juju_createproxy
1297 fi
1298 track juju
1299
1300 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1301 OSM_DATABASE_COMMONKEY=$(generate_secret)
1302 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
1303 fi
1304
1305 # Deploy OSM services
1306 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1307 track docker_build
1308
1309 if [ -n "$KUBERNETES" ]; then
1310 generate_k8s_manifest_files
1311 else
1312 generate_docker_compose_files
1313 fi
1314 track manifest_files
1315 generate_prometheus_grafana_files
1316 generate_docker_env_files
1317 track env_files
1318
1319 if [ -n "$KUBERNETES" ]; then
1320 deploy_charmed_services
1321 kube_secrets
1322 [ ! $OSM_DOCKER_TAG == "8" ] && parse_yaml $OSM_DOCKER_TAG
1323 namespace_vol
1324 deploy_osm_services
1325 if [ -n "$INSTALL_PLA"]; then
1326 # optional PLA install
1327 deploy_osm_pla_service
1328 track deploy_osm_pla
1329 fi
1330 track deploy_osm_services_k8s
1331 install_helm
1332 track install_helm
1333 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1334 # install OSM MONITORING
1335 install_k8s_monitoring
1336 track install_k8s_monitoring
1337 fi
1338 else
1339 # remove old stack
1340 remove_stack $OSM_STACK_NAME
1341 create_docker_network
1342 deploy_lightweight
1343 generate_osmclient_script
1344 track docker_deploy
1345 install_prometheus_nodeexporter
1346 track nodeexporter
1347 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1348 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1349 fi
1350
1351 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1352 track osmclient
1353
1354 echo -e "Checking OSM health state..."
1355 if [ -n "$KUBERNETES" ]; then
1356 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
1357 echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
1358 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
1359 track osm_unhealthy
1360 else
1361 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
1362 echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
1363 echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
1364 track osm_unhealthy
1365 fi
1366 track after_healthcheck
1367
1368 [ -n "$KUBERNETES" ] && add_local_k8scluster
1369 track add_local_k8scluster
1370
1371 wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
1372 track end
1373 return 0
1374 }
1375
1376 function install_to_openstack() {
1377
1378 if [ -z "$2" ]; then
1379 FATAL "OpenStack installer requires a valid external network name"
1380 fi
1381
1382 # Install Pip for Python3
1383 $WORKDIR_SUDO apt install -y python3-pip
1384 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip
1385
1386 # Install Ansible, OpenStack client and SDK
1387 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"
1388
1389 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
1390
1391 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
1392
1393 # Execute the Ansible playbook based on openrc or clouds.yaml
1394 if [ -e "$1" ]; then
1395 . $1
1396 ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
1397 -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
1398 else
1399 ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
1400 -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
1401 fi
1402
1403 return 0
1404 }
1405
1406 function install_vimemu() {
1407 echo "\nInstalling vim-emu"
1408 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1409 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1410 # install prerequisites (OVS is a must for the emulator to work)
1411 sudo apt-get install openvswitch-switch
1412 # clone vim-emu repository (attention: branch is currently master only)
1413 echo "Cloning vim-emu repository ..."
1414 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1415 # build vim-emu docker
1416 echo "Building vim-emu Docker container..."
1417
1418 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1419 # start vim-emu container as daemon
1420 echo "Starting vim-emu Docker container 'vim-emu' ..."
1421 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1422 # in lightweight mode, the emulator needs to be attached to netOSM
1423 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1424 else
1425 # classic build mode
1426 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1427 fi
1428 echo "Waiting for 'vim-emu' container to start ..."
1429 sleep 5
1430 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1431 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1432 # print vim-emu connection info
1433 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1434 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1435 echo -e "To add the emulated VIM to OSM you should do:"
1436 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1437 }
1438
1439 function install_k8s_monitoring() {
1440 # install OSM monitoring
1441 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1442 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1443 }
1444
1445 function uninstall_k8s_monitoring() {
1446 # uninstall OSM monitoring
1447 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1448 }
1449
1450 function dump_vars(){
1451 echo "DEVELOP=$DEVELOP"
1452 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1453 echo "UNINSTALL=$UNINSTALL"
1454 echo "UPDATE=$UPDATE"
1455 echo "RECONFIGURE=$RECONFIGURE"
1456 echo "TEST_INSTALLER=$TEST_INSTALLER"
1457 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1458 echo "INSTALL_PLA=$INSTALL_PLA"
1459 echo "INSTALL_LXD=$INSTALL_LXD"
1460 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1461 echo "INSTALL_ONLY=$INSTALL_ONLY"
1462 echo "INSTALL_ELK=$INSTALL_ELK"
1463 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1464 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
1465 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
1466 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
1467 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
1468 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
1469 echo "TO_REBUILD=$TO_REBUILD"
1470 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1471 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1472 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1473 echo "RELEASE=$RELEASE"
1474 echo "REPOSITORY=$REPOSITORY"
1475 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1476 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1477 echo "OSM_DEVOPS=$OSM_DEVOPS"
1478 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1479 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1480 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1481 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1482 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1483 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1484 echo "OSM_WORK_DIR=$OSM_STACK_NAME"
1485 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1486 echo "DOCKER_USER=$DOCKER_USER"
1487 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1488 echo "PULL_IMAGES=$PULL_IMAGES"
1489 echo "KUBERNETES=$KUBERNETES"
1490 echo "NGUI=$NGUI"
1491 echo "SHOWOPTS=$SHOWOPTS"
1492 echo "Install from specific refspec (-b): $COMMIT_ID"
1493 }
1494
1495 function track(){
1496 ctime=`date +%s`
1497 duration=$((ctime - SESSION_ID))
1498 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1499 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1500 event_name="bin"
1501 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1502 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1503 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1504 event_name="${event_name}_$1"
1505 url="${url}&event=${event_name}&ce_duration=${duration}"
1506 wget -q -O /dev/null $url
1507 }
1508
1509 JUJU_AGENT_VERSION=2.8.6
1510 UNINSTALL=""
1511 DEVELOP=""
1512 UPDATE=""
1513 RECONFIGURE=""
1514 TEST_INSTALLER=""
1515 INSTALL_LXD=""
1516 SHOWOPTS=""
1517 COMMIT_ID=""
1518 ASSUME_YES=""
1519 INSTALL_FROM_SOURCE=""
1520 RELEASE="ReleaseEIGHT"
1521 REPOSITORY="stable"
1522 INSTALL_VIMEMU=""
1523 INSTALL_PLA=""
1524 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1525 LXD_REPOSITORY_PATH=""
1526 INSTALL_LIGHTWEIGHT="y"
1527 INSTALL_TO_OPENSTACK=""
1528 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
1529 OPENSTACK_PUBLIC_NET_NAME=""
1530 OPENSTACK_ATTACH_VOLUME="false"
1531 INSTALL_ONLY=""
1532 INSTALL_ELK=""
1533 TO_REBUILD=""
1534 INSTALL_NOLXD=""
1535 INSTALL_NODOCKER=""
1536 INSTALL_NOJUJU=""
1537 KUBERNETES=""
1538 NGUI=""
1539 INSTALL_K8S_MONITOR=""
1540 INSTALL_NOHOSTCLIENT=""
1541 SESSION_ID=`date +%s`
1542 OSM_DEVOPS=
1543 OSM_VCA_HOST=
1544 OSM_VCA_SECRET=
1545 OSM_VCA_PUBKEY=
1546 OSM_VCA_CLOUDNAME="localhost"
1547 OSM_VCA_K8S_CLOUDNAME="k8scloud"
1548 OSM_STACK_NAME=osm
1549 NO_HOST_PORTS=""
1550 DOCKER_NOBUILD=""
1551 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1552 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1553 WORKDIR_SUDO=sudo
1554 OSM_WORK_DIR="/etc/osm"
1555 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1556 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1557 OSM_HOST_VOL="/var/lib/osm"
1558 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1559 OSM_DOCKER_TAG=latest
1560 DOCKER_USER=opensourcemano
1561 PULL_IMAGES="y"
1562 KAFKA_TAG=2.11-1.0.2
1563 PROMETHEUS_TAG=v2.4.3
1564 GRAFANA_TAG=latest
1565 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1566 PROMETHEUS_CADVISOR_TAG=latest
1567 KEYSTONEDB_TAG=10
1568 OSM_DATABASE_COMMONKEY=
1569 ELASTIC_VERSION=6.4.2
1570 ELASTIC_CURATOR_VERSION=5.5.4
1571 POD_NETWORK_CIDR=10.244.0.0/16
1572 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1573 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1574
1575 while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-: hy" o; do
1576 case "${o}" in
1577 b)
1578 COMMIT_ID=${OPTARG}
1579 PULL_IMAGES=""
1580 ;;
1581 r)
1582 REPOSITORY="${OPTARG}"
1583 REPO_ARGS+=(-r "$REPOSITORY")
1584 ;;
1585 c)
1586 [ "${OPTARG}" == "swarm" ] && continue
1587 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1588 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1589 usage && exit 1
1590 ;;
1591 n)
1592 [ "${OPTARG}" == "lwui" ] && continue
1593 [ "${OPTARG}" == "ngui" ] && NGUI="y" && continue
1594 echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
1595 usage && exit 1
1596 ;;
1597 k)
1598 REPOSITORY_KEY="${OPTARG}"
1599 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1600 ;;
1601 u)
1602 REPOSITORY_BASE="${OPTARG}"
1603 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1604 ;;
1605 R)
1606 RELEASE="${OPTARG}"
1607 REPO_ARGS+=(-R "$RELEASE")
1608 ;;
1609 D)
1610 OSM_DEVOPS="${OPTARG}"
1611 ;;
1612 o)
1613 INSTALL_ONLY="y"
1614 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1615 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1616 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1617 ;;
1618 O)
1619 INSTALL_TO_OPENSTACK="y"
1620 if [ -n "${OPTARG}" ]; then
1621 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
1622 else
1623 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
1624 usage && exit 1
1625 fi
1626 ;;
1627 N)
1628 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
1629 ;;
1630 m)
1631 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1632 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1633 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1634 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1635 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1636 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1637 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1638 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1639 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1640 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1641 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1642 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1643 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1644 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1645 ;;
1646 H)
1647 OSM_VCA_HOST="${OPTARG}"
1648 ;;
1649 S)
1650 OSM_VCA_SECRET="${OPTARG}"
1651 ;;
1652 s)
1653 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1654 ;;
1655 w)
1656 # when specifying workdir, do not use sudo for access
1657 WORKDIR_SUDO=
1658 OSM_WORK_DIR="${OPTARG}"
1659 ;;
1660 t)
1661 OSM_DOCKER_TAG="${OPTARG}"
1662 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
1663 ;;
1664 U)
1665 DOCKER_USER="${OPTARG}"
1666 ;;
1667 P)
1668 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1669 ;;
1670 A)
1671 OSM_VCA_APIPROXY="${OPTARG}"
1672 ;;
1673 l)
1674 LXD_CLOUD_FILE="${OPTARG}"
1675 ;;
1676 L)
1677 LXD_CRED_FILE="${OPTARG}"
1678 ;;
1679 K)
1680 CONTROLLER_NAME="${OPTARG}"
1681 ;;
1682 -)
1683 [ "${OPTARG}" == "help" ] && usage && exit 0
1684 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1685 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1686 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1687 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1688 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1689 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1690 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1691 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1692 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1693 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1694 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1695 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1696 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1697 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1698 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1699 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1700 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1701 [ "${OPTARG}" == "pullimages" ] && continue
1702 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1703 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1704 [ "${OPTARG}" == "bundle" ] && continue
1705 [ "${OPTARG}" == "k8s" ] && continue
1706 [ "${OPTARG}" == "lxd" ] && continue
1707 [ "${OPTARG}" == "lxd-cred" ] && continue
1708 [ "${OPTARG}" == "microstack" ] && continue
1709 [ "${OPTARG}" == "vca" ] && continue
1710 [ "${OPTARG}" == "ha" ] && continue
1711 [ "${OPTARG}" == "tag" ] && continue
1712 [ "${OPTARG}" == "registry" ] && continue
1713 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1714 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
1715 echo -e "Invalid option: '--$OPTARG'\n" >&2
1716 usage && exit 1
1717 ;;
1718 :)
1719 echo "Option -$OPTARG requires an argument" >&2
1720 usage && exit 1
1721 ;;
1722 \?)
1723 echo -e "Invalid option: '-$OPTARG'\n" >&2
1724 usage && exit 1
1725 ;;
1726 h)
1727 usage && exit 0
1728 ;;
1729 y)
1730 ASSUME_YES="y"
1731 ;;
1732 *)
1733 usage && exit 1
1734 ;;
1735 esac
1736 done
1737
1738 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1739 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1740
1741 if [ -n "$SHOWOPTS" ]; then
1742 dump_vars
1743 exit 0
1744 fi
1745
1746 if [ -n "$CHARMED" ]; then
1747 if [ -n "$UNINSTALL" ]; then
1748 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
1749 else
1750 ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
1751 fi
1752
1753 exit 0
1754 fi
1755
1756 # if develop, we force master
1757 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1758
1759 need_packages="git wget curl tar"
1760
1761 [ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0
1762
1763 echo -e "Checking required packages: $need_packages"
1764 dpkg -l $need_packages &>/dev/null \
1765 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1766 || sudo apt-get update \
1767 || FATAL "failed to run apt-get update"
1768 dpkg -l $need_packages &>/dev/null \
1769 || ! echo -e "Installing $need_packages requires root privileges." \
1770 || sudo apt-get install -y $need_packages \
1771 || FATAL "failed to install $need_packages"
1772 sudo snap install jq
1773 if [ -z "$OSM_DEVOPS" ]; then
1774 if [ -n "$TEST_INSTALLER" ]; then
1775 echo -e "\nUsing local devops repo for OSM installation"
1776 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1777 else
1778 echo -e "\nCreating temporary dir for OSM installation"
1779 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1780 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1781
1782 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1783
1784 if [ -z "$COMMIT_ID" ]; then
1785 echo -e "\nGuessing the current stable release"
1786 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1787 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1788
1789 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1790 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1791 else
1792 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1793 fi
1794 git -C $OSM_DEVOPS checkout $COMMIT_ID
1795 fi
1796 fi
1797
1798 . $OSM_DEVOPS/common/all_funcs
1799
1800 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1801 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1802 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1803 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1804 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1805 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1806 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1807 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1808
1809 #Installation starts here
1810 wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README.txt &> /dev/null
1811 track start
1812
1813 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1814 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1815 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1816 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1817 fi
1818
1819 echo -e "Checking required packages: lxd"
1820 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1821 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1822
1823 # use local devops for containers
1824 export OSM_USE_LOCAL_DEVOPS=true
1825
1826 #Install osmclient
1827
1828 #Install vim-emu (optional)
1829 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1830
1831 wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
1832 track end
1833 echo -e "\nDONE"
1834
1835