Adding registry flag
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
28 echo -e " ..."
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -n <ui> install OSM with the specified UI. Valid values are <lwui> or <ngui>. If -n is not specified, osm will be installed with light-ui. When used with --uninstall, osm will be uninstalled along with the specified UI"
31 echo -e " -s <stack name> or <namespace> user-defined stack name when installing with swarm, or namespace when installing with k8s; default is osm"
32 echo -e " -H <VCA host> use specific juju host controller IP"
33 echo -e " -S <VCA secret> use VCA/juju secret key"
34 echo -e " -P <VCA pubkey> use VCA/juju public key file"
35 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
36 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
37 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
38 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
41 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
42 echo -e " -O <openrc file/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
43 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
44 echo -e " -D <devops path> use local devops installation path"
45 echo -e " -w <work dir> Location to store runtime installation"
46 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
47 echo -e " -l: LXD cloud yaml file"
48 echo -e " -L: LXD credentials yaml file"
49 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
50 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
51 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
52 echo -e " --nojuju: do not install juju, assumes it is already installed"
53 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
54 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
55 echo -e " --nohostclient: do not install the osmclient"
56 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
57 echo -e " --source: install OSM from source code using the latest stable tag"
58 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
59 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
60 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
61 echo -e " --volume: create a VM volume when installing to OpenStack"
62 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
63 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
64 echo -e " --showopts: print chosen options and exit (only for debugging)"
65 echo -e " -y: do not prompt for confirmation, assumes yes"
66 echo -e " -h / --help: print this help"
67 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
68 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
69 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
70 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
71 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
72 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
73 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
74 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
75 echo -e " [--tag]: Docker image tag. (--charmed option)"
76 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
77
78 }
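# Example invocations (illustrative only; tags, hostnames and credentials
# below are placeholders, not defaults):
#   ./full_install_osm.sh                       # default install from binaries
#   ./full_install_osm.sh -c k8s -t 8           # deploy on kubernetes with docker tag 8
#   ./full_install_osm.sh --charmed --registry user:pass@myregistry.local:5000
#                                               # charmed install pulling from a private registry
#   ./full_install_osm.sh --uninstall           # remove containers and delete NAT rules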
79
80 # takes a juju/accounts.yaml file and returns the password for a
81 # specific controller. Implemented with plain shell tools only, to
82 # avoid adding dependencies on other packages
83 function parse_juju_password {
84 password_file="${HOME}/.local/share/juju/accounts.yaml"
85 local controller_name=$1
86 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
87 sed -ne "s|^\($s\):|\1|" \
88 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
89 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
90 awk -F$fs -v controller=$controller_name '{
91 indent = length($1)/2;
92 vname[indent] = $2;
93 for (i in vname) {if (i > indent) {delete vname[i]}}
94 if (length($3) > 0) {
95 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
96 if (match(vn,controller) && match($2,"password")) {
97 printf("%s",$3);
98 }
99 }
100 }'
101 }
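# Usage sketch (assumes a bootstrapped controller named "osm" whose password
# is recorded in ~/.local/share/juju/accounts.yaml, as the installer relies on
# later when it sets OSM_VCA_SECRET):
#   OSM_VCA_SECRET=$(parse_juju_password osm)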
102
103 function generate_secret() {
104 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
105 }
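# Example: this helper is used below to create one-off credentials, e.g.:
#   MYSQL_ROOT_PASSWORD=$(generate_secret)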
106
107 function remove_volumes() {
108 if [ -n "$KUBERNETES" ]; then
109 k8_volume=$1
110 echo "Removing ${k8_volume}"
111 $WORKDIR_SUDO rm -rf ${k8_volume}
112 else
113 stack=$1
114 volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
115 for volume in $volumes; do
116 sg docker -c "docker volume rm ${stack}_${volume}"
117 done
118 fi
119 }
120
121 function remove_network() {
122 stack=$1
123 sg docker -c "docker network rm net${stack}"
124 }
125
126 function remove_iptables() {
127 stack=$1
128 if [ -z "$OSM_VCA_HOST" ]; then
129 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
130 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
131 fi
132
133 if [ -z "$DEFAULT_IP" ]; then
134 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
135 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
136 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
137 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
138 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
139 fi
140
141 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
142 sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
143 sudo netfilter-persistent save
144 fi
145 }
146
147 function remove_stack() {
148 stack=$1
149 if sg docker -c "docker stack ps ${stack}" ; then
150 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
151 COUNTER=0
152 result=1
153 while [ ${COUNTER} -lt 30 ]; do
154 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
155 #echo "Dockers running: $result"
156 if [ "${result}" == "0" ]; then
157 break
158 fi
159 let COUNTER=COUNTER+1
160 sleep 1
161 done
162 if [ "${result}" == "0" ]; then
163 echo "All dockers of the stack ${stack} were removed"
164 else
165 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
166 fi
167 sleep 5
168 fi
169 }
170
171 #removes osm deployments and services
172 function remove_k8s_namespace() {
173 kubectl delete ns $1
174 }
175
176 #removes helm only if there is nothing deployed in helm
177 function remove_helm() {
178 if [ "$(helm ls -q)" == "" ] ; then
179 sudo helm reset --force
180 kubectl delete --namespace kube-system serviceaccount tiller
181 kubectl delete clusterrolebinding tiller-cluster-rule
182 sudo rm /usr/local/bin/helm
183 rm -rf $HOME/.helm
184 fi
185 }
186
187 function remove_crontab_job() {
188 crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
189 }
190
191 #Uninstall osmclient
192 function uninstall_osmclient() {
193 sudo apt-get remove --purge -y python-osmclient
194 sudo apt-get remove --purge -y python3-osmclient
195 }
196
197 #Uninstall lightweight OSM: remove dockers
198 function uninstall_lightweight() {
199 if [ -n "$INSTALL_ONLY" ]; then
200 if [ -n "$INSTALL_ELK" ]; then
201 echo -e "\nUninstalling OSM ELK stack"
202 remove_stack osm_elk
203 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
204 fi
205 else
206 echo -e "\nUninstalling OSM"
207 if [ -n "$KUBERNETES" ]; then
208 if [ -n "$INSTALL_K8S_MONITOR" ]; then
209 # uninstall OSM MONITORING
210 uninstall_k8s_monitoring
211 fi
212 remove_k8s_namespace $OSM_STACK_NAME
213 else
214 remove_stack $OSM_STACK_NAME
215 remove_stack osm_elk
216 fi
217 echo "Now osm docker images and volumes will be deleted"
218 newgrp docker << EONG
219 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
220 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
221 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
222 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
223 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
224 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
225 docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
226 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
227 EONG
228
229 if [ -n "$NGUI" ]; then
230 sg docker -c "docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
231 else
232 sg docker -c "docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
233 fi
234
235 if [ -n "$KUBERNETES" ]; then
236 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
237 remove_volumes $OSM_NAMESPACE_VOL
238 else
239 remove_volumes $OSM_STACK_NAME
240 remove_network $OSM_STACK_NAME
241 [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
242 fi
243 echo "Removing $OSM_DOCKER_WORK_DIR"
244 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
245 [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
246 fi
247 remove_crontab_job
248 [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
249 echo "Some docker images will be kept in case they are used by other docker stacks"
250 echo "To remove them, just run 'docker image prune' in a terminal"
251 return 0
252 }
253
254 #Safe unattended install of iptables-persistent
255 function check_install_iptables_persistent(){
256 echo -e "\nChecking required packages: iptables-persistent"
257 if ! dpkg -l iptables-persistent &>/dev/null; then
258 echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
259 echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
260 echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
261 sudo apt-get -yq install iptables-persistent
262 fi
263 }
264
265 #Configure NAT rules, based on the current IP addresses of containers
266 function nat(){
267 check_install_iptables_persistent
268
269 echo -e "\nConfiguring NAT rules"
270 echo -e " Required root privileges"
271 sudo $OSM_DEVOPS/installers/nat_osm
272 }
273
274 function FATAL(){
275 echo "FATAL error: Cannot install OSM due to \"$1\""
276 exit 1
277 }
278
279 function update_juju_images(){
280 crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
281 ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
282 }
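# The `crontab -l | grep ... || (...) | crontab -` guard above makes the cron
# entry idempotent: it is appended only if no line mentioning
# update-juju-lxc-images exists yet. remove_crontab_job() is its inverse,
# filtering that same line out. The resulting entry looks like (illustrative,
# with $USER and $OSM_DEVOPS expanded at install time):
#   0 4 * * 6 ubuntu /home/ubuntu/devops/installers/update-juju-lxc-images --xenial --bionic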
283
284 function install_lxd() {
285 # Apply sysctl production values for optimal performance
286 sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
287 sudo sysctl --system
288
289 # Install LXD snap
290 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
291 sudo snap install lxd
292 sudo apt-get install zfsutils-linux -y
293
294 # Configure LXD
295 sudo usermod -a -G lxd `whoami`
296 cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
297 sg lxd -c "lxd waitready"
298 DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
299 [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
300 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
301 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
302 sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
303 #sudo systemctl stop lxd-bridge
304 #sudo systemctl --system daemon-reload
305 #sudo systemctl enable lxd-bridge
306 #sudo systemctl start lxd-bridge
307 }
308
309 function ask_user(){
310 # asks the user a question and parses the response: 'y', 'yes', 'n' or 'no' (case-insensitive)
311 # Params: $1 text to ask; $2 default action, 'y' for yes, 'n' for no; any other value (or empty) disallows an empty answer
312 # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
313 read -e -p "$1" USER_CONFIRMATION
314 while true ; do
315 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
316 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
317 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
318 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
319 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
320 done
321 }
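# Usage sketch, mirroring how the installer calls it later (e.g. in
# cmp_overwrite): default to "no" unless the user types yes/y:
#   ask_user "The file exists. Overwrite (y/N)? " n && echo "overwriting"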
322
323 function install_osmclient(){
324 CLIENT_RELEASE=${RELEASE#"-R "}
325 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
326 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
327 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
328 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
329 curl $key_location | sudo apt-key add -
330 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
331 sudo apt-get update
332 sudo apt-get install -y python3-pip
333 sudo -H LC_ALL=C python3 -m pip install -U pip
334 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
335 sudo apt-get install -y python3-osm-im python3-osmclient
336 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
337 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
338 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
339 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
340 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
341 echo -e "\nOSM client installed"
342 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
343 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
344 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
345 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
346 else
347 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
348 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
349 echo " export OSM_HOSTNAME=<OSM_host>"
350 fi
351 return 0
352 }
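# Quick check after installation (assumes a lightweight install on this host,
# so the client talks to OSM on 127.0.0.1 by default):
#   osm ns-list
# For a remote OSM host, export OSM_HOSTNAME=<OSM_host> first, as printed above.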
353
354 function install_prometheus_nodeexporter(){
355 if (systemctl -q is-active node_exporter)
356 then
357 echo "Node Exporter is already running."
358 else
359 echo "Node Exporter is not active, installing..."
360 if getent passwd node_exporter > /dev/null 2>&1; then
361 echo "node_exporter user exists"
362 else
363 echo "Creating user node_exporter"
364 sudo useradd --no-create-home --shell /bin/false node_exporter
365 fi
366 wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
367 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
368 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
369 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
370 sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
371 sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
372 sudo systemctl daemon-reload
373 sudo systemctl restart node_exporter
374 sudo systemctl enable node_exporter
375 echo "Node Exporter has been activated in this host."
376 fi
377 return 0
378 }
379
380 function uninstall_prometheus_nodeexporter(){
381 sudo systemctl stop node_exporter
382 sudo systemctl disable node_exporter
383 sudo rm /etc/systemd/system/node_exporter.service
384 sudo systemctl daemon-reload
385 sudo userdel node_exporter
386 sudo rm /usr/local/bin/node_exporter
387 return 0
388 }
389
390 function install_docker_ce() {
391 # installs and configures Docker CE
392 echo "Installing Docker CE ..."
393 sudo apt-get -qq update
394 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
395 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
396 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
397 sudo apt-get -qq update
398 sudo apt-get install -y docker-ce
399 echo "Adding user to group 'docker'"
400 sudo groupadd -f docker
401 sudo usermod -aG docker $USER
402 sleep 2
403 sudo service docker restart
404 echo "... restarted Docker service"
405 sg docker -c "docker version" || FATAL "Docker installation failed"
406 echo "... Docker CE installation done"
407 return 0
408 }
409
410 function install_docker_compose() {
411 # installs and configures docker-compose
412 echo "Installing Docker Compose ..."
413 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
414 sudo chmod +x /usr/local/bin/docker-compose
415 echo "... Docker Compose installation done"
416 }
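# Sanity-check sketch (docker-compose is pinned to 1.18.0 above):
#   docker-compose --version    # expected to report version 1.18.0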
417
418 function install_juju() {
419 echo "Installing juju"
420 sudo snap install juju --classic --channel=2.8/stable
421 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
422 update_juju_images
423 echo "Finished installation of juju"
424 return 0
425 }
426
427 function juju_createcontroller() {
428 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
429 # Controller not found; create it
430 sudo usermod -a -G lxd ${USER}
431 sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
432 fi
433 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
434 juju controller-config features=[k8s-operators]
435 }
436
437 function juju_addk8s() {
438 cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
439 }
440
441 function juju_createcontroller_k8s(){
442 cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client
443 juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
444 --config controller-service-type=loadbalancer \
445 --agent-version=$JUJU_AGENT_VERSION
446 }
447
448
449 function juju_addlxd_cloud(){
450 mkdir -p /tmp/.osm
451 OSM_VCA_CLOUDNAME="lxd-cloud"
452 LXDENDPOINT=$DEFAULT_IP
453 LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
454 LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml
455
456 cat << EOF > $LXD_CLOUD
457 clouds:
458 $OSM_VCA_CLOUDNAME:
459 type: lxd
460 auth-types: [certificate]
461 endpoint: "https://$LXDENDPOINT:8443"
462 config:
463 ssl-hostname-verification: false
464 EOF
465 openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
466 local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
467 local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/ /'`
468 local client_key=`cat /tmp/.osm/client.key | sed 's/^/ /'`
469
470 cat << EOF > $LXD_CREDENTIALS
471 credentials:
472 $OSM_VCA_CLOUDNAME:
473 lxd-cloud:
474 auth-type: certificate
475 server-cert: |
476 $server_cert
477 client-cert: |
478 $client_cert
479 client-key: |
480 $client_key
481 EOF
482 lxc config trust add local: /tmp/.osm/client.crt
483 juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
484 juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
485 sg lxd -c "lxd waitready"
486 juju controller-config features=[k8s-operators]
487 }
488
489
490 function juju_createproxy() {
491 check_install_iptables_persistent
492
493 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
494 sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
495 sudo netfilter-persistent save
496 fi
497 }
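# The DNAT rule added above can be verified with (illustrative):
#   sudo iptables -t nat -L PREROUTING -n | grep 17070
# Traffic to ${DEFAULT_IP}:17070 is then forwarded to the juju controller at
# ${OSM_VCA_HOST}.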
498
499 function generate_docker_images() {
500 echo "Pulling and generating docker images"
501 _build_from=$COMMIT_ID
502 [ -z "$_build_from" ] && _build_from="master"
503
504 echo "OSM Docker images generated from $_build_from"
505
506 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
507 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
508 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
509 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
510
511 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
512 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
513 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
514 fi
515
516 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
517 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
518 fi
519
520 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
521 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
522 fi
523
524 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
525 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
526 fi
527
528 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
529 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
530 fi
531
532 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
533 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
534 fi
535
536 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
537 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
538 fi
539
540 if [ -n "$PULL_IMAGES" ]; then
541 sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
542 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
543 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
544 git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
545 sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
546 fi
547
548 if [ -n "$PULL_IMAGES" ]; then
549 sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
550 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
551 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
552 git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
553 sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
554 fi
555
556 if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
557 sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
558 elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
559 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
560 git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
561 sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
562 fi
563
564 if [ -n "$PULL_IMAGES" ]; then
565 sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
566 sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
567 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
568 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
569 git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
570 sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
571 sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
572 fi
573
574 if [ -n "$PULL_IMAGES" ]; then
575 sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
576 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
577 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
578 git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
579 sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
580 fi
581
582 if [ -n "$PULL_IMAGES" ]; then
583 sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
584 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
585 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
586 git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
587 sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
588 fi
589
590 if [ -n "$NGUI" ]; then
591 if [ -n "$PULL_IMAGES" ]; then
592 sg docker -c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull ng-ui docker image"
593 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NG-UI ; then
594 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NG-UI
595 git -C ${LWTEMPDIR}/NG-UI checkout ${COMMIT_ID}
596 sg docker -c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL "cannot build NG-UI docker image"
597 fi
598 else
599 if [ -n "$PULL_IMAGES" ]; then
600 sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
601 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
602 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
603 git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
604 sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
605 fi
606 fi
607
608 if [ -n "$PULL_IMAGES" ]; then
609 sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
610 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
611 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
612 fi
613
614 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
615 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
616 fi
617
618 echo "Finished generation of docker images"
619 }
620
621 function cmp_overwrite() {
622 file1="$1"
623 file2="$2"
624 if ! cmp -s "${file1}" "${file2}"; then
625 if [ -f "${file2}" ]; then
626 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
627 else
628 cp -b ${file1} ${file2}
629 fi
630 fi
631 }
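# Usage sketch: copy a file only if it differs from the destination, asking
# before overwriting (the paths below are placeholders):
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/some.env $OSM_DOCKER_WORK_DIR/some.env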
632
633 function generate_docker_compose_files() {
634 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
635 if [ -n "$NGUI" ]; then
636 # For NG-UI
637 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
638 else
639 # Docker-compose
640 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-lightui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
641 fi
642 if [ -n "$INSTALL_PLA" ]; then
643 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
644 fi
645 }
646
647 function generate_k8s_manifest_files() {
648 #Kubernetes resources
649 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
650 if [ -n "$NGUI" ]; then
651 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/light-ui.yaml
652 else
653 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/ng-ui.yaml
654 fi
655 }
656
657 function generate_prometheus_grafana_files() {
658 [ -n "$KUBERNETES" ] && return
659 # Prometheus files
660 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
661 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
662
663 # Grafana files
664 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
665 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
666 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
667 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
668 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
669
670 # Prometheus Exporters files
671 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
672 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
673 }
674
675 function generate_docker_env_files() {
676 echo "Doing a backup of existing env files"
677 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
678 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
679 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
680 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
681 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
682 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
683 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
684 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
685 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
686
687 echo "Generating docker env files"
688 # LCM
689 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
690 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
691 fi
692
693 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
694 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
695 else
696 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
697 fi
698
699 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
700 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
701 else
702 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
703 fi
704
705 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
706 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
707 else
708 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
709 fi
710
711 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
712 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
713 else
714 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
715 fi
716
717 if [ -n "$OSM_VCA_APIPROXY" ]; then
718 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
719 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
720 else
721 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
722 fi
723 fi
724
725 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
726 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
727 fi
728
729 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
730 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
731 fi
732
733 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
734 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
735 else
736 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
737 fi
738
739 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
740 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
741 else
742 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
743 fi
744
745 # RO
746 MYSQL_ROOT_PASSWORD=$(generate_secret)
747 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
748 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
749 fi
750 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
751 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
752 fi
753
754 # Keystone
755 KEYSTONE_DB_PASSWORD=$(generate_secret)
756 SERVICE_PASSWORD=$(generate_secret)
757 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
758 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
759 fi
760 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
761 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
762 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
763 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
764 fi
765
766 # NBI
767 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
768 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
769 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
770 fi
771
772 # MON
773 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
774 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
775 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
776 fi
777
778 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
779 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
780 else
781 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
782 fi
783
784 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
785 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
786 else
787 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
788 fi
789
790 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
791 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
792 else
793 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
794 fi
795
796 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
797 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
798 else
799 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
800 fi
801
802
803 # POL
804 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
805 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
806 fi
807
808 # LW-UI
809 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
810 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
811 fi
812
813 echo "Finished generation of docker env files"
814 }
815
816 function generate_osmclient_script () {
817 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
818 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
819 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
820 }
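# The generated wrapper can then be invoked to get an interactive osmclient
# sidecar container attached to the OSM network:
#   $OSM_DOCKER_WORK_DIR/osm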
821
822 #installs kubernetes packages
823 function install_kube() {
824 sudo apt-get update && sudo apt-get install -y apt-transport-https
825 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
826 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
827 sudo apt-get update
828 echo "Installing Kubernetes Packages ..."
829 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
830 }
831
832 #initializes kubernetes control plane
833 function init_kubeadm() {
834 sudo swapoff -a
835 sudo kubeadm init --config $1
836 sleep 5
837 }
838
839 function kube_config_dir() {
840 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
841 mkdir -p $HOME/.kube
842 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
843 sudo chown $(id -u):$(id -g) $HOME/.kube/config
844 }
845
846 function install_k8s_storageclass() {
847 kubectl apply -f https://openebs.github.io/charts/openebs-operator-1.6.0.yaml
848 local storageclass_timeout=300
849 local counter=0
850 echo "Waiting for storageclass"
851 while (( counter < storageclass_timeout ))
852 do
853 kubectl get storageclass openebs-hostpath &> /dev/null
854
855 if [ $? -eq 0 ] ; then
856 echo "Storageclass available"
857 break
858 else
859 counter=$((counter + 15))
860 sleep 15
861 fi
862 done
863 kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
864 }
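# Verification sketch: after the patch above, openebs-hostpath should be
# marked as the default storageclass (output illustrative):
#   kubectl get storageclass
#   # NAME                         PROVISIONER        ...
#   # openebs-hostpath (default)   openebs.io/local   ...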
865
866 function install_k8s_metallb() {
867 METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
868 cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
869 echo "apiVersion: v1
870 kind: ConfigMap
871 metadata:
872 namespace: metallb-system
873 name: config
874 data:
875 config: |
876 address-pools:
877 - name: default
878 protocol: layer2
879 addresses:
880 - $METALLB_IP_RANGE" | kubectl apply -f -
881 }
882 #deploys flannel as a daemonset
883 function deploy_cni_provider() {
884 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
885 trap 'rm -rf "${CNI_DIR}"' EXIT
886 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
887 kubectl apply -f $CNI_DIR
888 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
889 }
890
891 #creates secrets from env files which will be used by containers
892 function kube_secrets(){
893 kubectl create ns $OSM_STACK_NAME
894 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
895 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
896 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
897 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
898 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
899 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
900 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
901 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
902 }
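# The secrets created above can be inspected with (namespace defaults to osm):
#   kubectl -n $OSM_STACK_NAME get secrets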
903
904 #removes the NoSchedule taint from the K8s master node, allowing pods to be scheduled on it
905 function taint_master_node() {
906 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
907 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
908 sleep 5
909 }
910
911 #deploys osm pods and services
912 function deploy_osm_services() {
913 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
914 }
915
916 function deploy_osm_pla_service() {
917 # corresponding to namespace_vol
918 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
919 # corresponding to deploy_osm_services
920 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
921 }
922
923 #Install helm and tiller
924 function install_helm() {
925 helm > /dev/null 2>&1
926 if [ $? != 0 ] ; then
927 # Helm is not installed. Install helm
928 echo "Helm is not installed, installing ..."
929 curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
930 tar -zxvf helm-v2.15.2.tar.gz
931 sudo mv linux-amd64/helm /usr/local/bin/helm
932 rm -r linux-amd64
933 rm helm-v2.15.2.tar.gz
934 fi
935
936 # Checking if tiller has been configured
937 kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
938 if [ $? == 1 ] ; then
939 # tiller account for kubernetes
940 kubectl --namespace kube-system create serviceaccount tiller
941 kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
942 # HELM initialization
943 helm init --service-account tiller
944
945 # Wait for Tiller to be up and running. If timeout expires, continue installing
946 tiller_timeout=120;
947 counter=0;
948 tiller_status=""
949 while (( counter < tiller_timeout ))
950 do
951 tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
952 ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
953 counter=$((counter + 5))
954 sleep 5
955 done
956 [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
957 fi
958 }
959
960 function parse_yaml() {
961 osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
962 TAG=$1
963 for osm in $osm_services; do
964 $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/$DOCKER_USER\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
965 done
966 $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/$DOCKER_USER\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
967 }
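# Effect sketch: for TAG=8 and DOCKER_USER=opensourcemano, a manifest line
#   image: opensourcemano/lcm:7
# is rewritten by the sed above to
#   image: opensourcemano/lcm:8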
968
969 function namespace_vol() {
970 osm_services="nbi lcm ro pol mon kafka mongo mysql prometheus"
971 for osm in $osm_services; do
972 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
973 done
974 }
975
976 function init_docker_swarm() {
977 if [ "${DEFAULT_MTU}" != "1500" ]; then
978 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
979 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
980 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
981 fi
982 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
983 return 0
984 }
985
986 function create_docker_network() {
987 echo "creating network"
988 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
989 echo "creating network DONE"
990 }
991
992 function deploy_lightweight() {
993
994 echo "Deploying lightweight build"
995 OSM_NBI_PORT=9999
996 OSM_RO_PORT=9090
997 OSM_KEYSTONE_PORT=5000
998 OSM_UI_PORT=80
999 OSM_MON_PORT=8662
1000 OSM_PROM_PORT=9090
1001 OSM_PROM_CADVISOR_PORT=8080
1002 OSM_PROM_HOSTPORT=9091
1003 OSM_GRAFANA_PORT=3000
1004 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
1005 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
1006
1007 if [ -n "$NO_HOST_PORTS" ]; then
1008 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
1009 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
1010 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
1011 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
1012 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
1013 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
1014 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
1015 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
1016 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
1017 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
1018 else
1019 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
1020 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
1021 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
1022 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
1023 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
1024 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
1025 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
1026 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
1027 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
1028 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
1029 fi
1030 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
1031 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1032 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1033 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1034 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1035 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1036 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1037 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1038 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1039
1040 pushd $OSM_DOCKER_WORK_DIR
1041 if [ -n "$INSTALL_PLA" ]; then
1042 track deploy_osm_pla
1043 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
1044 else
1045 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
1046 fi
1047 popd
1048
1049 echo "Finished deployment of lightweight build"
1050 }
1051
1052 function deploy_elk() {
1053 echo "Pulling docker images for ELK"
1054 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
1055 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
1056 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
1057 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
1058 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
1059 echo "Finished pulling elk docker images"
1060 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
1061 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
1062 remove_stack osm_elk
1063 echo "Deploying ELK stack"
1064 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
1065 echo "Waiting for ELK stack to be up and running"
1066 time=0
1067 step=5
1068 timelength=40
1069 elk_is_up=1
1070 while [ $time -le $timelength ]; do
1071 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
1072 elk_is_up=0
1073 break
1074 fi
1075 sleep $step
1076 time=$((time+step))
1077 done
1078 if [ $elk_is_up -eq 0 ]; then
1079 echo "ELK is up and running. Trying to create index pattern..."
1080 #Create index pattern
1081 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1082 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1083 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
1084 #Make it the default index
1085 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1086 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1087 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
1088 else
1089 echo "Cannot connect to Kibana to create index pattern."
1090 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
1091 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1092 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1093 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
1094 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1095 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1096 -d"{\"value\":\"filebeat-*\"}"'
1097 fi
1098 echo "Finished deployment of ELK stack"
1099 return 0
1100 }
1101
1102 function add_local_k8scluster() {
1103 /usr/bin/osm --all-projects vim-create \
1104 --name _system-osm-vim \
1105 --account_type dummy \
1106 --auth_url http://dummy \
1107 --user osm --password osm --tenant osm \
1108 --description "dummy" \
1109 --config '{management_network_name: mgmt}'
1110 /usr/bin/osm --all-projects k8scluster-add \
1111 --creds ${HOME}/.kube/config \
1112 --vim _system-osm-vim \
1113 --k8s-nets '{"net1": null}' \
1114 --version '1.15' \
1115 --description "OSM Internal Cluster" \
1116 _system-osm-k8s
1117 }
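# Verification sketch (uses the osmclient installed earlier):
#   osm vim-list            # should list _system-osm-vim
#   osm k8scluster-list     # should list _system-osm-k8s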
1118
1119 function install_lightweight() {
1120 track checkingroot
1121 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1122 track noroot
1123
1124 if [ -n "$KUBERNETES" ]; then
1125 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1126 1. Install and configure LXD
1127 2. Install juju
1128 3. Install docker CE
1129 4. Disable swap space
1130 5. Install and initialize Kubernetes
1131 as pre-requirements.
1132 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1133
1134 else
1135 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1136 fi
1137 track proceed
1138
1139 echo "Installing lightweight build of OSM"
1140 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1141 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1142 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1143 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1144 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1145 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1146 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1147 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1148
1149 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
1150 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1151 need_packages_lw="snapd"
1152 echo -e "Checking required packages: $need_packages_lw"
1153 dpkg -l $need_packages_lw &>/dev/null \
1154 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1155 || sudo apt-get update \
1156 || FATAL "failed to run apt-get update"
1157 dpkg -l $need_packages_lw &>/dev/null \
1158 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1159 || sudo apt-get install -y $need_packages_lw \
1160 || FATAL "failed to install $need_packages_lw"
1161 install_lxd
1162 fi
1163
1164 track prereqok
1165
1166 [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)
1167
1168 echo "Creating folders for installation"
1169 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1170 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
1171 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1172
1173 #Installs Kubernetes
1174 if [ -n "$KUBERNETES" ]; then
1175 install_kube
1176 track install_k8s
1177 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1178 kube_config_dir
1179 track init_k8s
1180 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1181 # uninstall OSM MONITORING
1182 uninstall_k8s_monitoring
1183 track uninstall_k8s_monitoring
1184 fi
1185 #remove old namespace
1186 remove_k8s_namespace $OSM_STACK_NAME
1187 deploy_cni_provider
1188 taint_master_node
1189 install_k8s_storageclass
1190 track k8s_storageclass
1191 install_k8s_metallb
1192 track k8s_metallb
1193 else
1194 #install_docker_compose
1195 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1196 track docker_swarm
1197 fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then

            if [ -n "$KUBERNETES" ]; then
                juju_createcontroller_k8s
                juju_addlxd_cloud
            else
                if [ -n "$LXD_CLOUD_FILE" ]; then
                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                    OSM_VCA_CLOUDNAME="lxd-cloud"
                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
                fi
                juju_createcontroller
                juju_createproxy
            fi
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
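                # The self-signed client certificate generated below lets juju
                # authenticate against the LXD endpoint declared above; it is then
                # registered in the LXD trust store via 'lxc config trust add'.
                # The sed calls pad each certificate line with 8 spaces (an assumed
                # width) so the bodies align under the yaml block scalars below.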
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=$(cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /')
                local client_cert=$(cat ~/.osm/client.crt | sed 's/^/        /')
                local client_key=$(cat ~/.osm/client.key | sed 's/^/        /')
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
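        # Derive the VCA host from the controller's api-endpoints. For
        # illustration, 'juju show-controller' prints a line such as:
        #   api-endpoints: ['10.0.2.15:17070']
        # from which the pipelines below extract the bare IP address.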
1256 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1257 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1258 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
1259 fi
1260 track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
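    # The CA certificate is read from 'juju controllers --format json' and
    # re-encoded below as a single-line base64 string, suitable for passing
    # through the generated environment files.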
1271 if [ -z "$OSM_VCA_CACERT" ]; then
1272 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1273 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1274 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
1275 fi

    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
    if [ -z "$KUBERNETES" ]; then
        if [ -z "$OSM_VCA_APIPROXY" ]; then
            OSM_VCA_APIPROXY=$DEFAULT_IP
            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
        fi
        juju_createproxy
    fi
    track juju

1287 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1288 OSM_DATABASE_COMMONKEY=$(generate_secret)
1289 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
1290 fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files
    track env_files

1306 if [ -n "$KUBERNETES" ]; then
1307 kube_secrets
1308 [ ! $OSM_DOCKER_TAG == "8" ] && parse_yaml $OSM_DOCKER_TAG
1309 namespace_vol
1310 deploy_osm_services
1311 if [ -n "$INSTALL_PLA"]; then
1312 # optional PLA install
1313 deploy_osm_pla_service
1314 track deploy_osm_pla
1315 fi
        track deploy_osm_services_k8s
        install_helm
        track install_helm
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

1340 echo -e "Checking OSM health state..."
1341 if [ -n "$KUBERNETES" ]; then
1342 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
1343 echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
1344 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
1345 track osm_unhealthy
1346 else
1347 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
1348 echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
1349 echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
1350 track osm_unhealthy
1351 fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
    track end
    return 0
}

function install_to_openstack() {

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Install Ansible, OpenStack client and SDK
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
            -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
            -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    return 0
}
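
# Hypothetical invocations (file and network names are illustrative only):
#   install_to_openstack ~/openrc.sh external-net false   # openrc file, no extra volume
#   install_to_openstack mycloud external-net true        # clouds.yaml entry, attach volume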

function install_vimemu() {
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}

function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}

function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}

function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "NGUI=$NGUI"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}

function track(){
    ctime=$(date +%s)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
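
# For illustration, 'track start' during a lightweight install sends a ping like:
#   http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=<SESSION_ID>&event=lw_start&ce_duration=<seconds>
# (the event prefix is lw/bin/binsrc/lxd depending on the install mode).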

JUJU_AGENT_VERSION=2.8.1
UNINSTALL=""
DEVELOP=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseEIGHT"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_PLA=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES=""
NGUI=""
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
SESSION_ID=$(date +%s)
OSM_DEVOPS=
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
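# RE_CHECK enforces k8s-style namespace names: lowercase alphanumerics with
# inner hyphens. E.g. "osm" and "osm-ns1" match, while "Osm" and "osm_ns" do not.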

while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-: hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        n)
            [ "${OPTARG}" == "lwui" ] && continue
            [ "${OPTARG}" == "ngui" ] && NGUI="y" && continue
            echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
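
# Illustrative invocations (values are examples only):
#   ./full_install_osm.sh                           # default: lightweight install on swarm with light-ui
#   ./full_install_osm.sh -c k8s -n ngui --pla      # k8s-based install with Next Gen UI and PLA
#   ./full_install_osm.sh -O ~/openrc.sh -N public  # deploy OSM to an OpenStack infrastructure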

[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

1732 if [ -n "$CHARMED" ]; then
1733 if [ -n "$UNINSTALL" ]; then
1734 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
1735 else
1736 ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
1737 fi
1738
1739 exit 0
1740 fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
sudo snap install jq
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=$(git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1)
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

. $OSM_DEVOPS/common/all_funcs

[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

# Installation starts here
wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README.txt &> /dev/null
track start

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

# Install osmclient

# Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
track end
echo -e "\nDONE"