#!/bin/bash
# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -h / --help:   print this help"
    echo -e "     -y:            do not prompt for confirmation, assumes yes"
    echo -e "     -r <repo>:     use specified repository name for osm packages"
    echo -e "     -R <release>:  use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>: use specified repository public key url"
    echo -e "     -b <refspec>:  install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "                    -b master          (main dev branch)"
    echo -e "                    -b v2.0            (v2.0 branch)"
    echo -e "                    -b tags/v1.1.0     (a specific tag)"
    echo -e "                    ..."
    echo -e "     -c <orchestrator>: deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -n <ui>:       install OSM with the specified UI. Valid values are <lwui> or <ngui>. If -n is not specified, osm will be installed with light-ui. When used with uninstall, osm along with the specified UI will be uninstalled"
    echo -e "     -s <stack name> or <namespace>: user-defined stack name when installed using swarm, or namespace when installed using k8s; default is osm"
    echo -e "     -H <VCA host>: use specific juju host controller IP"
    echo -e "     -S <VCA secret>: use VCA/juju secret key"
    echo -e "     -P <VCA pubkey>: use VCA/juju public key file"
    echo -e "     -C <VCA cacert>: use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy>: use VCA/juju API proxy"
    echo -e "     --vimemu:      additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack:   additionally deploy an ELK docker stack for event logging"
    echo -e "     --pla:         install the PLA module for placement support"
    echo -e "     -m <MODULE>:   install OSM but only rebuild or pull the specified docker images (LW-UI, NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e "     -o <ADDON>:    ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "     -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e "     -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e "     -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e "     -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e "     -D <devops path>: use local devops installation path"
    echo -e "     -w <work dir>: Location to store runtime installation"
    echo -e "     -t <docker tag>: specify osm docker tag (default is latest)"
    echo -e "     -l:            LXD cloud yaml file"
    echo -e "     -L:            LXD credentials yaml file"
    echo -e "     -K:            Specifies the name of the controller to use - the controller must already be bootstrapped"
    echo -e "     -d <docker registry URL>: use docker registry URL instead of dockerhub"
    echo -e "     -p <docker proxy URL>: set docker proxy URL as part of docker CE configuration"
    echo -e "     -T <docker tag>: specify docker tag for the modules specified with option -m"
    echo -e "     --nolxd:       do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "     --nodocker:    do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju:      do not install juju, assumes it is already installed"
    echo -e "     --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall:   uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source:      install OSM from source code using the latest stable tag"
    echo -e "     --develop:     (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages:  pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e "     --volume:      create a VM volume when installing to OpenStack"
    #echo -e "     --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    #echo -e "     --update:      update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts:    print chosen options and exit (only for debugging)"
    echo -e "     --charmed:     Deploy and operate OSM with Charms on k8s"
    echo -e "     [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "     [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "     [--vca <name>]: Specifies the name of the controller to use - the controller must already be bootstrapped (--charmed option)"
    echo -e "     [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "     [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "     [--microstack]: Installs microstack as a vim (--charmed option)"
    echo -e "     [--ha]:        Installs High Availability bundle (--charmed option)"
    echo -e "     [--tag]:       Docker image tag (--charmed option)"
    echo -e "     [--registry]:  Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"

}

# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
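
# Usage sketch (the controller name "osm" is illustrative, not a guaranteed
# default): the sed/awk pipeline flattens accounts.yaml into
# <indent>\034<key>\034<value> records and prints the value whose flattened
# path matches the controller name and whose key is "password":
#   OSM_VCA_SECRET=$(parse_juju_password osm)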

function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
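
# Illustrative use: each call produces a fresh 32-character alphanumeric
# string, used below for one-off database/service passwords:
#   MYSQL_ROOT_PASSWORD=$(generate_secret)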

function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}

function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}

function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
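
# The rule deleted above is the DNAT created by juju_createproxy: traffic
# reaching the host on TCP 17070 (the Juju API port) is forwarded to the
# controller. To check manually whether it is still present:
#   sudo iptables -t nat -nL PREROUTING | grep 17070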

function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}

#removes osm deployments and services
function remove_k8s_namespace() {
    kubectl delete ns $1
}

#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}

function remove_crontab_job() {
    crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}

#Uninstall osmclient
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}

#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # TODO: clean-up of images should take into account if other tags were used for specific modules
        newgrp docker << EONG
for module in ro lcm keystone nbi mon pol pla osmclient; do
    docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/\${module}:${OSM_DOCKER_TAG}
done
EONG

        if [ -n "$NGUI" ]; then
            sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
        else
            sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
        fi

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
            [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        fi
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
    fi
    remove_crontab_job

    # Cleanup Openstack installer venv
    if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
        rm -r $OPENSTACK_PYTHON_VENV
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}

#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}

#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}

function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}

function update_juju_images(){
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}

function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}

function ask_user(){
    # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
    # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true(0) if user types 'yes'; false (1) if user types 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
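
# Illustrative call (prompt text is an example): returns 0 for "yes"/"y",
# 1 for "no"/"n", and applies the default passed as $2 on an empty answer:
#   ask_user "Proceed with the installation (Y/n)? " y || exit 1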

function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}

function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}

function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}

function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    if [ -n "${DOCKER_PROXY_URL}" ]; then
        echo "Configuring docker proxy ..."
        if [ -f /etc/docker/daemon.json ]; then
            if grep -q registry-mirrors /etc/docker/daemon.json; then
                sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
            else
                sudo sed -i "s|{|{\n    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
            fi
        else
            sudo bash -c "cat << EOF > /etc/docker/daemon.json
{
    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
}
EOF"
        fi
        sudo systemctl daemon-reload
        sudo service docker restart
        echo "... restarted Docker service again"
    fi
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
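
# For reference, when DOCKER_PROXY_URL is set the resulting
# /etc/docker/daemon.json looks like this (the URL is an example value):
#   {
#       "registry-mirrors": ["http://my-registry-mirror:5000"]
#   }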

function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}

function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.8/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    update_juju_images
    echo "Finished installation of juju"
    return 0
}

function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # controller not found, create it
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
    juju controller-config features=[k8s-operators]
}

function juju_addk8s() {
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
}

function juju_createcontroller_k8s(){
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client
    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
        --config controller-service-type=loadbalancer \
        --agent-version=$JUJU_AGENT_VERSION
}


function juju_addlxd_cloud(){
    mkdir -p /tmp/.osm
    OSM_VCA_CLOUDNAME="lxd-cloud"
    LXDENDPOINT=$DEFAULT_IP
    LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
    LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml

    cat << EOF > $LXD_CLOUD
clouds:
  $OSM_VCA_CLOUDNAME:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
    openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
    local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
    local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/        /'`
    local client_key=`cat /tmp/.osm/client.key | sed 's/^/        /'`

    cat << EOF > $LXD_CREDENTIALS
credentials:
  $OSM_VCA_CLOUDNAME:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
    lxc config trust add local: /tmp/.osm/client.crt
    juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
    juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}


function juju_createproxy() {
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
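
# This is the counterpart of remove_iptables: it exposes the Juju API
# (TCP 17070) on the host's default IP. To verify the rule after install:
#   sudo iptables -t nat -nL PREROUTING | grep 17070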

function docker_login() {
    echo "Docker login"
    sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}"
}

function generate_docker_images() {
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA osmclient; do
            module_lower=${module,,}
            if [ $module == "LW-UI" ]; then
                if [ -n "$NGUI" ]; then
                    continue
                else
                    module_lower="light-ui"
                fi
            fi
            if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
                continue
            fi
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"

        for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "LW-UI" ]; then
                    if [ -n "$NGUI" ]; then
                        continue
                    else
                        module_lower="light-ui"
                    fi
                fi
                if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
                    continue
                fi
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
}

function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
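
# Hypothetical example: install a default compose file, asking before
# clobbering an existing one (cp -b keeps a backup of the overwritten file):
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/docker-compose.yaml \
#       $OSM_DOCKER_WORK_DIR/docker-compose.yaml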

function generate_docker_compose_files() {
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    if [ -n "$NGUI" ]; then
        # For NG-UI
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    else
        # For LW-UI
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-lightui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    fi
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}

function generate_k8s_manifest_files() {
    #Kubernetes resources
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
    if [ -n "$NGUI" ]; then
        $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/light-ui.yaml
    else
        $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/ng-ui.yaml
    fi
}

function generate_prometheus_grafana_files() {
    [ -n "$KUBERNETES" ] && return
    # Prometheus files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
}

function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}

function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
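
# The generated wrapper simply runs the osmclient container attached to the
# OSM network; with the default stack name and no private registry it is
# equivalent to (illustrative):
#   docker run -ti --network netosm opensourcemano/osmclient:latest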

#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
    sudo apt-mark hold kubelet kubeadm kubectl
}
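
# apt-mark hold pins kubelet/kubeadm/kubectl at 1.15.0-00 so a routine
# "apt-get upgrade" cannot pull in a newer, untested Kubernetes. To inspect
# or undo the pin later:
#   apt-mark showhold
#   sudo apt-mark unhold kubelet kubeadm kubectl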

#initializes kubernetes control plane
function init_kubeadm() {
    sudo swapoff -a
    sudo kubeadm init --config $1
    sleep 5
}

function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}

function install_k8s_storageclass() {
    kubectl apply -f https://openebs.github.io/charts/openebs-operator-1.6.0.yaml
    local storageclass_timeout=300
    local counter=0
    echo "Waiting for storageclass"
    while (( counter < storageclass_timeout ))
    do
        if kubectl get storageclass openebs-hostpath &> /dev/null; then
            echo "Storageclass available"
            break
        else
            counter=$((counter + 15))
            sleep 15
        fi
    done
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}
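
# Quick verification: openebs-hostpath should now be flagged "(default)":
#   kubectl get storageclass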

function install_k8s_metallb() {
    METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
    cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
    echo "apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE" | kubectl apply -f -
}
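
# With METALLB_IP_RANGE set to "$DEFAULT_IP-$DEFAULT_IP" the layer2 address
# pool contains exactly one address, so LoadBalancer services (e.g. the Juju
# controller created by juju_createcontroller_k8s) are exposed on the host's
# own IP. A quick health check (illustrative):
#   kubectl -n metallb-system get pods
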
#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}

#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}

#taints K8s master node
function taint_master_node() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}

#deploys osm pods and services
function deploy_osm_services() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}

#deploy charmed services
function deploy_charmed_services() {
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    # deploy mongodb charm
    namespace=$OSM_STACK_NAME
    juju deploy cs:~charmed-osm/mongodb-k8s \
        --config enable-sidecar=true \
        --config replica-set=rs0 \
        --config namespace=$namespace \
        -m $namespace
}

function deploy_osm_pla_service() {
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

#Install helm and tiller
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has been configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --stable-repo-url https://charts.helm.sh/stable --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120;
        counter=0;
        tiller_status=""
        while (( counter < tiller_timeout ))
        do
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
            counter=$((counter + 5))
            sleep 5
        done
        [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
    fi
}
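
# Note: this installs Helm v2, which needs the in-cluster Tiller deployment
# configured above. A quick sanity check once Tiller is ready (should print
# matching client and server versions):
#   helm version --short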

function parse_yaml() {
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
                $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
            $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
}
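
# Hypothetical invocation: retag the nbi and lcm manifests so they pull the
# 9.1.0 images from the configured registry (values are examples only):
#   parse_yaml 9.1.0 "nbi lcm"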

function update_manifest_files() {
    if [ -n "$NGUI" ]; then
        osm_services="nbi lcm ro pol mon ng-ui keystone pla"
    else
        osm_services="nbi lcm ro pol mon light-ui keystone pla"
    fi
    list_of_services=""
    for module in $osm_services; do
        module_upper="${module^^}"
        if [ "$module_upper" == "LIGHT-UI" ]; then
            module_upper="LW-UI"
        fi
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            list_of_services="$list_of_services $module"
        fi
    done
    list_of_services_to_rebuild=$(echo ${TO_REBUILD,,} |sed "s/lw-ui/light-ui/g")
    if [ ! "$OSM_DOCKER_TAG" == "9" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
}

function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}

function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}

function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}

function deploy_lightweight() {

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        track deploy_osm_pla
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}

function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}

function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
        --name _system-osm-vim \
        --account_type dummy \
        --auth_url http://dummy \
        --user osm --password osm --tenant osm \
        --description "dummy" \
        --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
        --creds ${HOME}/.kube/config \
        --vim _system-osm-vim \
        --k8s-nets '{"net1": null}' \
        --version '1.15' \
        --description "OSM Internal Cluster" \
        _system-osm-k8s
}
1175
1176 function install_lightweight() {
1177 track checkingroot
1178 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1179 track noroot
1180
1181 if [ -n "$KUBERNETES" ]; then
1182 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1183 1. Install and configure LXD
1184 2. Install juju
1185 3. Install docker CE
1186 4. Disable swap space
1187 5. Install and initialize Kubernetes
1188 as pre-requirements.
1189 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1190
1191 else
1192 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1193 fi
1194 track proceed
1195
1196 echo "Installing lightweight build of OSM"
1197 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1198 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1199 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1200 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1201 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1202 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1203 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1204 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1205
1206 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
1207 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1208 need_packages_lw="snapd"
1209 echo -e "Checking required packages: $need_packages_lw"
1210 dpkg -l $need_packages_lw &>/dev/null \
1211 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1212 || sudo apt-get update \
1213 || FATAL "failed to run apt-get update"
1214 dpkg -l $need_packages_lw &>/dev/null \
1215 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1216 || sudo apt-get install -y $need_packages_lw \
1217 || FATAL "failed to install $need_packages_lw"
1218 install_lxd
1219 fi

    track prereqok

    [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)

    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    #Installs Kubernetes
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        taint_master_node
        install_k8s_storageclass
        track k8s_storageclass
        install_k8s_metallb
        track k8s_metallb
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then

            if [ -n "$KUBERNETES" ]; then
                juju_createcontroller_k8s
                juju_addlxd_cloud
            else
                if [ -n "$LXD_CLOUD_FILE" ]; then
                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                    OSM_VCA_CLOUDNAME="lxd-cloud"
                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
                fi
                juju_createcontroller
                juju_createproxy
            fi
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
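                # Trust the freshly generated client certificate in the local LXD
                # daemon, so that juju can authenticate with the credentials above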
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi

    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
    if [ -z "$KUBERNETES" ]; then
        if [ -z "$OSM_VCA_APIPROXY" ]; then
            OSM_VCA_APIPROXY=$DEFAULT_IP
            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
        fi
        juju_createproxy
    fi
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files
    track env_files

    if [ -n "$KUBERNETES" ]; then
        deploy_charmed_services
        kube_secrets
        update_manifest_files
        namespace_vol
        deploy_osm_services
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
            track deploy_osm_pla
        fi
        track deploy_osm_services_k8s
        install_helm
        track install_helm
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || {
            echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
            echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all"
            track osm_unhealthy
        }
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || {
            echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
            echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}"
            track osm_unhealthy
        }
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
    track end
    return 0
}

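# Deploys OSM on top of an OpenStack infrastructure via Ansible.
# $1: openrc file path or cloud name, $2: external network name, $3: attach a volume (true/false)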
function install_to_openstack() {

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip python3-venv
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV

    source $OPENSTACK_PYTHON_VENV/bin/activate

    # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the OpenStack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit from venv
    deactivate

    return 0
}

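# Builds the vim-emu docker image from the upstream repository and starts it as a
# privileged container, attached to the OSM network when in lightweight mode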
function install_vimemu() {
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}

function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}

function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}

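# Prints the resolved configuration; used with --showopts for a dry run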
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
    echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
    echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "NGUI=$NGUI"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}

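# Reports an anonymous installation-progress event to OSM's telemetry endpoint.
# e.g. "track start" sends the event "lw_start" for a lightweight install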
function track(){
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}

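# Splits a "user:password@host:port" registry URL into its components.
# e.g. "myuser:mypass@myregistry.example.com:5000" yields DOCKER_REGISTRY_USER=myuser,
# DOCKER_REGISTRY_PASSWORD=mypass and DOCKER_REGISTRY_URL=myregistry.example.com:5000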
function parse_docker_registry_url() {
    DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
    DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
    DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
}

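# Default values; most can be overridden by the command-line options parsed below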
JUJU_AGENT_VERSION=2.8.6
UNINSTALL=""
DEVELOP=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseNINE"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_PLA=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES="y"
NGUI="y"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
SESSION_ID=`date +%s`
OSM_DEVOPS=
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
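# e.g. "osm" and "osm-dev2" are valid stack names/namespaces; "OSM" and "-osm" are not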
DOCKER_REGISTRY_URL=
DOCKER_PROXY_URL=
MODULE_DOCKER_TAG=

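# Parse the command-line options; see usage() at the top of this file for their meaning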
while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-:hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -c : '$OPTARG'\n" >&2
            usage && exit 1
            ;;
        n)
            [ "${OPTARG}" == "lwui" ] && NGUI="" && REPO_ARGS+=(-n "${OPTARG}") && continue
            [ "${OPTARG}" == "ngui" ] && continue
            echo -e "Invalid argument for -n : '$OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : '$OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        f)
            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
            ;;
        F)
            OPENSTACK_USERDATA_FILE="${OPTARG}"
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" >&2 && exit 1
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done

[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
    fi

    exit 0
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
sudo snap install jq
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" >&2 && exit 1

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

. $OSM_DEVOPS/common/all_funcs

[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README.txt &> /dev/null
track start

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
track end
echo -e "\nDONE"