#!/bin/bash
# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -h / --help:    print this help"
    echo -e "     -y:             do not prompt for confirmation, assumes yes"
    echo -e "     -r <repo>:      use specified repository name for osm packages"
    echo -e "     -R <release>:   use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>:  use specified repository public key url"
    echo -e "     -b <refspec>:   install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "                     -b master          (main dev branch)"
    echo -e "                     -b v2.0            (v2.0 branch)"
    echo -e "                     -b tags/v1.1.0     (a specific tag)"
    echo -e "                     ..."
    echo -e "     -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -s <stack name> or <namespace>  user-defined stack name when installed using swarm, or namespace when installed using k8s; default is osm"
    echo -e "     -H <VCA host>   use specific juju host controller IP"
    echo -e "     -S <VCA secret> use VCA/juju secret key"
    echo -e "     -P <VCA pubkey> use VCA/juju public key file"
    echo -e "     -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e "     --vimemu:       additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack:    additionally deploy an ELK docker stack for event logging"
    echo -e "     --pla:          install the PLA module for placement support"
    echo -e "     -m <MODULE>:    install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e "     -o <ADDON>:     ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "     -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e "     -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e "     -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e "     -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e "     -D <devops path> use local devops installation path"
    echo -e "     -w <work dir>   Location to store runtime installation"
    echo -e "     -t <docker tag> specify osm docker tag (default is latest)"
    echo -e "     -l:             LXD cloud yaml file"
    echo -e "     -L:             LXD credentials yaml file"
    echo -e "     -K:             Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e "     -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e "     -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e "     -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e "     --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e "     --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "     --nodocker:     do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju:       do not install juju, assumes it is already installed"
    echo -e "     --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports:  do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall:    uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source:       install OSM from source code using the latest stable tag"
    echo -e "     --develop:      (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages:   pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor:  install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e "     --volume:       create a VM volume when installing to OpenStack"
#    echo -e "     --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
#    echo -e "     --update:       update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts:     print chosen options and exit (only for debugging)"
    echo -e "     --charmed:      Deploy and operate OSM with Charms on k8s"
    echo -e "     [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "     [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "     [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e "     [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "     [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "     [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e "     [--overlay]:    Add an overlay to override some defaults of the default bundle (--charmed option)"
    echo -e "     [--ha]:         Installs High Availability bundle. (--charmed option)"
    echo -e "     [--tag]:        Docker image tag. (--charmed option)"
    echo -e "     [--registry]:   Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"

}

# takes a juju/accounts.yaml file and returns the password specific
# to a controller. I wrote this using only bash tools to minimize
# additions of other packages
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}

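#generates a random 32-character alphanumeric secret, used for passwords and common keys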
function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}

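#removes the persistent storage of a deployment: the host-path directory on k8s, or the named docker volumes of a swarm stack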
function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}

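#removes the docker overlay network created for a given stack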
function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}

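#removes the DNAT rule that exposes the juju controller (port 17070) on the host default IP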
function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}

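#removes a docker stack and waits up to 30 seconds for all its containers to be gone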
function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}

#removes osm deployments and services
function remove_k8s_namespace() {
    kubectl delete ns $1
}

#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}

function remove_crontab_job() {
    # double quotes are needed so that ${OSM_DEVOPS} is expanded before grep runs
    crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}

#Uninstall osmclient
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}

#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # TODO: clean-up of images should take into account if other tags were used for specific modules
        newgrp docker << EONG
for module in ro lcm keystone nbi mon pol pla osmclient; do
    docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${OSM_DOCKER_TAG}
done
EONG

        sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
            [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        fi
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
    fi
    remove_crontab_job

    # Cleanup Openstack installer venv
    if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
        rm -r $OPENSTACK_PYTHON_VENV
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}

#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}

#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}

function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}

function update_juju_images(){
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}

function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}

function ask_user(){
    # asks the user and parses a response among 'y', 'yes', 'n' or 'no'. Case insensitive.
    # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}

function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}

function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}

function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}

function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    if [ -n "${DOCKER_PROXY_URL}" ]; then
        echo "Configuring docker proxy ..."
        if [ -f /etc/docker/daemon.json ]; then
            if grep -q registry-mirrors /etc/docker/daemon.json; then
                sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
            else
                sudo sed -i "s|{|{\n    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
            fi
        else
            sudo bash -c "cat << EOF > /etc/docker/daemon.json
{
    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
}
EOF"
        fi
        sudo systemctl daemon-reload
        sudo service docker restart
        echo "... restarted Docker service again"
    fi
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}

function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}

function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.8/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    [ -n "$INSTALL_NOCACHELXDIMAGES" ] || update_juju_images
    echo "Finished installation of juju"
    return 0
}

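#bootstraps a juju controller on LXD, unless one with the stack name already exists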
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # controller not found, create it
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
    juju controller-config features=[k8s-operators]
}

function juju_addk8s() {
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
}

function juju_createcontroller_k8s(){
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client
    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
        --config controller-service-type=loadbalancer \
        --agent-version=$JUJU_AGENT_VERSION
}

function juju_addlxd_cloud(){
    mkdir -p /tmp/.osm
    OSM_VCA_CLOUDNAME="lxd-cloud"
    LXDENDPOINT=$DEFAULT_IP
    LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
    LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml

    cat << EOF > $LXD_CLOUD
clouds:
  $OSM_VCA_CLOUDNAME:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
    openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
    local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
    local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/        /'`
    local client_key=`cat /tmp/.osm/client.key | sed 's/^/        /'`

    cat << EOF > $LXD_CREDENTIALS
credentials:
  $OSM_VCA_CLOUDNAME:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
    lxc config trust add local: /tmp/.osm/client.crt
    juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
    juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}

function juju_createproxy() {
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}

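#logs in to the configured docker registry using DOCKER_REGISTRY_USER and DOCKER_REGISTRY_PASSWORD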
function docker_login() {
    echo "Docker login"
    sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}"
}

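#pulls third-party docker images and pulls or builds the OSM module images, honouring TO_REBUILD, PULL_IMAGES and MODULE_DOCKER_TAG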
function generate_docker_images() {
    cat << EOF
========================================================================================




========================================================================================
EOF
    set -x
    sleep 5
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
            module_lower=${module,,}
            if [ "$module" == "PLA" ] && [ -z "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"

        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ "$module" == "PLA" ] && [ -z "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
}

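#copies file1 over file2 only if they differ, asking for confirmation when file2 already exists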
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}

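#copies the docker-compose files (and the PLA one, if requested) to the work dir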
function generate_docker_compose_files() {
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}

function generate_k8s_manifest_files() {
    #Kubernetes resources
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/ng-ui.yaml
}

function generate_prometheus_grafana_files() {
    [ -n "$KUBERNETES" ] && return
    # Prometheus files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
}

function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    echo "Finished generation of docker env files"
}

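#creates a wrapper script in the work dir to run the osmclient sidecar container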
function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}

#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
    sudo apt-mark hold kubelet kubeadm kubectl
}

#initializes kubernetes control plane
function init_kubeadm() {
    sudo swapoff -a
    sudo sed -i.bak '/.*none.*swap/s/^\(.*\)$/#\1/g' /etc/fstab
    sudo kubeadm init --config $1
    sleep 5
}

function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}

function install_k8s_storageclass() {
    OPENEBS_DIR="$(mktemp -d -q --tmpdir "openebs.XXXXXX")"
    trap 'rm -rf "${OPENEBS_DIR}"' EXIT
    wget -q https://openebs.github.io/charts/openebs-operator-1.6.0.yaml -P $OPENEBS_DIR
    kubectl apply -f $OPENEBS_DIR
    local storageclass_timeout=400
    local counter=0
    local storageclass_ready=""
    echo "Waiting for storageclass"
    while (( counter < storageclass_timeout ))
    do
        kubectl get storageclass openebs-hostpath &> /dev/null

        if [ $? -eq 0 ] ; then
            echo "Storageclass available"
            storageclass_ready="y"
            break
        else
            counter=$((counter + 15))
            sleep 15
        fi
    done
    [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}

function install_k8s_metallb() {
    METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
    cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
    echo "apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE" | kubectl apply -f -
}

#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}

#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}

#removes the NoSchedule taint from the K8s master node, so that pods can be scheduled on it
function taint_master_node() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}

#deploys osm pods and services
function deploy_osm_services() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}

#deploy charmed services
function deploy_charmed_services() {
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    # deploy mongodb charm
    namespace=$OSM_STACK_NAME
    juju deploy cs:~charmed-osm/mongodb-k8s \
        --config enable-sidecar=true \
        --config replica-set=rs0 \
        --config namespace=$namespace \
        -m $namespace
}

function deploy_osm_pla_service() {
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

#Install helm and tiller
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has been configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --stable-repo-url https://charts.helm.sh/stable --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120
        counter=0
        tiller_status=""
        while (( counter < tiller_timeout ))
        do
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
            counter=$((counter + 5))
            sleep 5
        done
        [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
    fi
}

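#rewrites the image references of the given services in the K8s manifests to the configured registry/user and the given tag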
function parse_yaml() {
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
                $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
            $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
}

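#applies the right docker tag to the K8s manifests of the services that are not being rebuilt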
function update_manifest_files() {
    osm_services="nbi lcm ro pol mon ng-ui keystone pla"
    list_of_services=""
    for module in $osm_services; do
        module_upper="${module^^}"
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            list_of_services="$list_of_services $module"
        fi
    done
    if [ ! "$OSM_DOCKER_TAG" == "9" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
}

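#points the hostPath volumes of the OSM pods to the per-stack directory $OSM_NAMESPACE_VOL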
function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}

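#initializes a docker swarm; if the default interface MTU is not 1500, creates docker_gwbridge first with a matching MTU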
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}

function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}

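#exports the ports and tags consumed by the compose files and deploys the OSM docker stack (plus PLA, if requested)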
function deploy_lightweight() {

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        track deploy_osm_pla
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}

function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}

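#registers a dummy VIM account and the local K8s cluster in OSM so it can be used to deploy KNFs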
function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
        --name _system-osm-vim \
        --account_type dummy \
        --auth_url http://dummy \
        --user osm --password osm --tenant osm \
        --description "dummy" \
        --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
        --creds ${HOME}/.kube/config \
        --vim _system-osm-vim \
        --k8s-nets '{"net1": null}' \
        --version '1.15' \
        --description "OSM Internal Cluster" \
        _system-osm-k8s
}

1153 function install_lightweight() {
1154 track checkingroot
1155 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1156 track noroot
1157
1158 if [ -n "$KUBERNETES" ]; then
1159 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1160 1. Install and configure LXD
1161 2. Install juju
1162 3. Install docker CE
1163 4. Disable swap space
1164 5. Install and initialize Kubernetes
1165 as pre-requirements.
1166 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1167
1168 else
1169 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1170 fi
1171 track proceed
1172
1173 echo "Installing lightweight build of OSM"
1174 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1175 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1176 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1177 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1178 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1179 DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1180 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1181 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1182
1183 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
1184 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1185 need_packages_lw="snapd"
1186 echo -e "Checking required packages: $need_packages_lw"
1187 dpkg -l $need_packages_lw &>/dev/null \
1188 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1189 || sudo apt-get update \
1190 || FATAL "failed to run apt-get update"
1191 dpkg -l $need_packages_lw &>/dev/null \
1192 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1193 || sudo apt-get install -y $need_packages_lw \
1194 || FATAL "failed to install $need_packages_lw"
1195 install_lxd
1196 fi
1197
1198 track prereqok
1199
1200 [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)
1201
1202 echo "Creating folders for installation"
1203 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1204 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
1205 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1206
1207 #Installs Kubernetes
1208 if [ -n "$KUBERNETES" ]; then
1209 install_kube
1210 track install_k8s
1211 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1212 kube_config_dir
1213 track init_k8s
1214 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1215 # uninstall OSM MONITORING
1216 uninstall_k8s_monitoring
1217 track uninstall_k8s_monitoring
1218 fi
1219 #remove old namespace
1220 remove_k8s_namespace $OSM_STACK_NAME
1221 deploy_cni_provider
1222 taint_master_node
1223 install_k8s_storageclass
1224 track k8s_storageclass
1225 install_k8s_metallb
1226 track k8s_metallb
1227 else
1228 #install_docker_compose
1229 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1230 track docker_swarm
1231 fi
1232
    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then

            if [ -n "$KUBERNETES" ]; then
                juju_createcontroller_k8s
                juju_addlxd_cloud
            else
                if [ -n "$LXD_CLOUD_FILE" ]; then
                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credentials yaml file when the LXD server is external"
                    OSM_VCA_CLOUDNAME="lxd-cloud"
                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
                fi
                juju_createcontroller
                juju_createproxy
            fi
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credentials yaml file when the LXD server is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=$(cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /')
                local client_cert=$(cat ~/.osm/client.crt | sed 's/^/        /')
                local client_key=$(cat ~/.osm/client.key | sed 's/^/        /')
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=$(sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}')
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=$(juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
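        # Illustration (hypothetical output): "juju show-controller" prints a line such as
        #   api-endpoints: ['10.45.28.160:17070']
        # from which the grep/awk pipeline above extracts the bare IP, 10.45.28.160.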
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi

    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
    if [ -z "$KUBERNETES" ]; then
        if [ -z "$OSM_VCA_APIPROXY" ]; then
            OSM_VCA_APIPROXY=$DEFAULT_IP
            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
        fi
        juju_createproxy
    fi
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files
    track env_files

    if [ -n "$KUBERNETES" ]; then
        deploy_charmed_services
        kube_secrets
        update_manifest_files
        namespace_vol
        deploy_osm_services
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
            track deploy_osm_pla
        fi
        track deploy_osm_services_k8s
        install_helm
        track install_helm
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || {
            echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
            echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all"
            track osm_unhealthy
        }
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || {
            echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
            echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}"
            track osm_unhealthy
        }
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
    track end
    return 0
}

function install_to_openstack() {

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip python3-venv
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV

    source $OPENSTACK_PYTHON_VENV/bin/activate

    # Install Ansible, OpenStack client and SDK; the latest OpenStack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the OpenStack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit from venv
    deactivate

    return 0
}
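
# Illustration (hypothetical argument values): install_to_openstack is invoked
# from the main flow as
#   install_to_openstack <openrc file or cloud name> <external network name> <attach volume: true|false>
# e.g. install_to_openstack ~/openrc.sh ext-net false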

function install_vimemu() {
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
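
# A quick sanity check after installation (illustrative):
#   sg docker -c "docker logs vim-emu"        # emulator startup log
#   curl http://${VIMEMU_HOSTNAME}:6001/v2.0  # the emulated OpenStack auth endpoint printed above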

function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}

function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}

function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    echo "INSTALL_NOCACHELXDIMAGES=$INSTALL_NOCACHELXDIMAGES"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
    echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
    echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}

function track(){
    ctime=$(date +%s)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
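
# Illustration (hypothetical values): during a lightweight install, "track start"
# requests a URL of the form
#   http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=<SESSION_ID>&event=lw_start&ce_duration=<seconds since SESSION_ID>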

function parse_docker_registry_url() {
    DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
    DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
    DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
}
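
# Illustration (hypothetical registry): with DOCKER_REGISTRY_URL="user:pass@registry.example.com:5000",
# the awk splits above yield DOCKER_REGISTRY_USER="user", DOCKER_REGISTRY_PASSWORD="pass"
# and DOCKER_REGISTRY_URL="registry.example.com:5000".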

JUJU_AGENT_VERSION=2.8.6
UNINSTALL=""
DEVELOP=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseNINE"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_PLA=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES="y"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
INSTALL_NOCACHELXDIMAGES=""
SESSION_ID=$(date +%s)
OSM_DEVOPS=
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
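# RE_CHECK accepts lowercase alphanumeric names with inner hyphens (RFC 1123 label
# style): e.g. "osm" and "osm-test1" are valid stack/namespace names, while "OSM",
# "-osm" and "osm-" are rejected.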
DOCKER_REGISTRY_URL=
DOCKER_PROXY_URL=
MODULE_DOCKER_TAG=

while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-:hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -c: '$OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O: '$OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        f)
            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
            ;;
        F)
            OPENSTACK_USERDATA_FILE="${OPTARG}"
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 1
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "overlay" ] && continue
            [ "${OPTARG}" == "only-vca" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
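
# Illustrative invocations (the docker tag value is hypothetical):
#   ./full_install_osm.sh -y                    # unattended install with defaults (k8s)
#   ./full_install_osm.sh -y -c swarm -t 9.1.0  # docker swarm deployment with a specific docker tag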

[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
    fi

    exit 0
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
sudo snap install jq
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=$(git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1)
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the latest stable release" && exit 1

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

. $OSM_DEVOPS/common/all_funcs

[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

# Installation starts here
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README.txt &> /dev/null
track start

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

# Install osmclient

# Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
track end
echo -e "\nDONE"