Add option to OSM installer to skip caching juju lxd images
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
# Print the installer's command-line help to stdout and return.
function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -h / --help:   print this help"
    echo -e "     -y:            do not prompt for confirmation, assumes yes"
    echo -e "     -r <repo>:     use specified repository name for osm packages"
    echo -e "     -R <release>:  use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>: use specified repository public key url"
    echo -e "     -b <refspec>:  install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "     -b master          (main dev branch)"
    echo -e "     -b v2.0            (v2.0 branch)"
    echo -e "     -b tags/v1.1.0     (a specific tag)"
    echo -e "     ..."
    echo -e "     -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>.  If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
    echo -e "     -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e "     -H <VCA host>   use specific juju host controller IP"
    echo -e "     -S <VCA secret> use VCA/juju secret key"
    echo -e "     -P <VCA pubkey> use VCA/juju public key file"
    echo -e "     -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e "     --vimemu:       additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack:    additionally deploy an ELK docker stack for event logging"
    echo -e "     --pla:          install the PLA module for placement support"
    echo -e "     -m <MODULE>:    install OSM but only rebuild or pull the specified docker images (LW-UI, NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e "     -o <ADDON>:     ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "     -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e "     -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e "     -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e "     -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e "     -D <devops path> use local devops installation path"
    echo -e "     -w <work dir>   Location to store runtime installation"
    echo -e "     -t <docker tag> specify osm docker tag (default is latest)"
    echo -e "     -l:             LXD cloud yaml file"
    echo -e "     -L:             LXD credentials yaml file"
    echo -e "     -K:             Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e "     -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e "     -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e "     -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e "     --nocachelxdimages:  do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e "     --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
    echo -e "     --nodocker:     do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju:       do not juju, assumes already installed"
    echo -e "     --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports:  do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall:    uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source:       install OSM from source code using the latest stable tag"
    echo -e "     --develop:      (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages:   pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor:  install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e "     --volume:       create a VM volume when installing to OpenStack"
    # echo -e "     --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
    # echo -e "     --update:       update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts:     print chosen options and exit (only for debugging)"
    echo -e "     --charmed:                   Deploy and operate OSM with Charms on k8s"
    echo -e "     [--bundle <bundle path>]:    Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "     [--k8s <kubeconfig path>]:   Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "     [--vca <name>]:              Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e "     [--lxd <yaml path>]:         Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "     [--lxd-cred <yaml path>]:    Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "     [--microstack]:              Installs microstack as a vim. (--charmed option)"
    echo -e "     [--ha]:                      Installs High Availability bundle. (--charmed option)"
    echo -e "     [--tag]:                     Docker image tag. (--charmed option)"
    echo -e "     [--registry]:                Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"

}
85
86 # takes a juju/accounts.yaml file and returns the password specific
87 # for a controller. I wrote this using only bash tools to minimize
88 # additions of other packages
89 function parse_juju_password {
90 password_file="${HOME}/.local/share/juju/accounts.yaml"
91 local controller_name=$1
92 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
93 sed -ne "s|^\($s\):|\1|" \
94 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
95 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
96 awk -F$fs -v controller=$controller_name '{
97 indent = length($1)/2;
98 vname[indent] = $2;
99 for (i in vname) {if (i > indent) {delete vname[i]}}
100 if (length($3) > 0) {
101 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
102 if (match(vn,controller) && match($2,"password")) {
103 printf("%s",$3);
104 }
105 }
106 }'
107 }
108
function generate_secret() {
    # Emit a 32-character random alphanumeric secret (no trailing newline).
    # Read the urandom stream directly: the original "head /dev/urandom"
    # took only the first 10 newline-delimited chunks of random bytes, which
    # is not guaranteed to contain 32 alphanumeric characters. tr stops as
    # soon as head has consumed 32 characters and closes the pipe.
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
112
function remove_volumes() {
    # Delete OSM persistent data: the namespace host-path directory when
    # running on Kubernetes, otherwise the per-stack docker volumes.
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
        return
    fi
    stack=$1
    volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
    for volume in $volumes; do
        sg docker -c "docker volume rm ${stack}_${volume}"
    done
}
126
function remove_network() {
    # Delete the per-stack docker network ("net<stack>") created at deploy time.
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
131
function remove_iptables() {
    # Remove the DNAT rule that forwards juju API traffic (port 17070) from
    # the host's default IP to the VCA/juju controller, then persist the
    # ruleset with netfilter-persistent.
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        # Fallback via the kernel routing table (route -n), matching the
        # heuristic used by install_lxd. The original repeated the exact
        # same "ip route list" command here, so the fallback could never
        # produce a different result.
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # -C checks whether the rule exists before attempting to delete it
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
152
function remove_stack() {
    # Tear down a docker swarm stack and wait up to 30 seconds until all of
    # its containers are gone; FATAL if some are still running afterwards.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        result=1
        for (( COUNTER=0; COUNTER<30; COUNTER++ )); do
            # number of lines reported by "docker stack ps"; 0 == all gone
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            [ "${result}" == "0" ] && break
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        # give the swarm a moment to release networks/volumes
        sleep 5
    fi
}
176
#removes osm deployments and services
function remove_k8s_namespace() {
    # Deleting the namespace ($1) cascades to every resource inside it.
    kubectl delete ns $1
}
181
#removes helm only if there is nothing deployed in helm
function remove_helm() {
    # Leave helm untouched when any release is still deployed.
    [ "$(helm ls -q)" != "" ] && return
    sudo helm reset --force
    kubectl delete --namespace kube-system serviceaccount tiller
    kubectl delete clusterrolebinding tiller-cluster-rule
    sudo rm /usr/local/bin/helm
    rm -rf $HOME/.helm
}
192
function remove_crontab_job() {
    # Drop the weekly update-juju-lxc-images job added by update_juju_images().
    # NOTE: the original single-quoted '${OSM_DEVOPS}/installers/...', so the
    # variable was never expanded and the pattern never matched the installed
    # entry — the job survived uninstall. Match on the script name instead.
    # 2>/dev/null silences "no crontab for <user>" when none exists yet.
    crontab -l 2>/dev/null | grep -v update-juju-lxc-images | crontab -
}
196
#Uninstall osmclient
function uninstall_osmclient() {
    # Purge both the python2 and python3 client packages.
    for pkg in python-osmclient python3-osmclient; do
        sudo apt-get remove --purge -y $pkg
    done
}
202
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        # "-o <addon>" mode: only remove the requested addon, keep OSM core
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # TODO: clean-up of images should take into account if other tags were used for specific modules
        # Unquoted heredoc: ${DOCKER_REGISTRY_URL} etc. expand *here*, before
        # the "newgrp docker" shell runs the loop with docker-group rights.
        newgrp docker << EONG
for module in ro lcm keystone nbi mon pol pla osmclient; do
    docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${OSM_DOCKER_TAG}
done
EONG

        # remove whichever UI image matches the installed flavor
        if [ -n "$NGUI" ]; then
            sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
        else
            sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
        fi

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
            # NAT rules / controller only exist when the installer created the
            # juju controller itself (no -K <controller> given)
            [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        fi
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
    fi
    remove_crontab_job

    # Cleanup Openstack installer venv
    if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
        rm -r $OPENSTACK_PYTHON_VENV
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
261
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    # Preseed debconf so the package installs without interactive prompts.
    echo -e "\nChecking required packages: iptables-persistent"
    dpkg -l iptables-persistent &>/dev/null && return
    echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
    echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
    echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
    sudo apt-get -yq install iptables-persistent
}
272
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # iptables-persistent must be present so the rules survive reboots.
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
281
function FATAL(){
    # Report the fatal reason ($1) and abort the installer with status 1.
    printf '%s\n' "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}
286
function update_juju_images(){
    # Cache the juju LXD images now and keep them fresh with a weekly cron
    # job (Saturdays 04:00), added only if not already present.
    # NOTE: entries installed via "crontab -" (user crontab) have NO user
    # field — the original emitted "$USER" as a sixth field, which cron would
    # have tried to execute as the command every week.
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}
291
function install_lxd() {
    # Replace any deb-based LXD with the snap, preseed its configuration, and
    # align container MTU with the host's uplink interface.
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    # Patch the preseed so the LXD API listens on the host's default IP
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    # Fallback to the kernel routing table when "ip route" yields nothing
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    # Propagate the uplink MTU to the default profile and lxdbr0 so nested
    # containers do not fragment
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
315
function ask_user(){
    # Prompt with $1 and interpret the answer as yes/no, case-insensitive.
    # $2 is the default when the answer is empty: 'y' → yes, 'n' → no; any
    # other default forces an explicit answer.
    # Return: true(0) if user typed 'yes'; false(1) if user typed 'no'.
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        case "${USER_CONFIRMATION,,}" in
            "")
                [ "$2" == 'y' ] && return 0
                [ "$2" == 'n' ] && return 1 ;;
            y|yes) return 0 ;;
            n|no)  return 1 ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
329
function install_osmclient(){
    # Install python3-osmclient + python3-osm-im from the OSM apt repository
    # and print guidance on the OSM_HOSTNAME env vars.
    # Strip the option prefixes in case these globals still carry the
    # "-R "/"-r "/"-u " flag text they were captured with.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    # The deb packages ship their own requirements.txt; install those pip
    # dependencies as well when present
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        sudo -H LC_ALL=C python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev
        sudo -H LC_ALL=C python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Classic (non-lightweight) installs expose SO-ub / RO LXD containers
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
367
function install_prometheus_nodeexporter(){
    # Install prometheus node_exporter as a systemd service. Idempotent:
    # does nothing when the service is already active.
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        # Dedicated system account with no home and no login shell
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Fetch the pinned release tarball, install the binary, clean up /tmp
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        # Unit file shipped with devops; enable so it survives reboots
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
393
function uninstall_prometheus_nodeexporter(){
    # Stop/disable the node_exporter service, then remove its unit file,
    # system user and binary (reverse of install_prometheus_nodeexporter).
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
403
function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # Optional registry mirror (-p <proxy URL>): merge it into
    # /etc/docker/daemon.json without clobbering existing settings
    if [ -n "${DOCKER_PROXY_URL}" ]; then
        echo "Configuring docker proxy ..."
        if [ -f /etc/docker/daemon.json ]; then
            if grep -q registry-mirrors /etc/docker/daemon.json; then
                # a mirrors key exists: replace its value in place
                sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
            else
                # no mirrors key: inject one right after the opening brace
                sudo sed -i "s|{|{\n    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
            fi
        else
            # no daemon.json at all: create a minimal one
            sudo bash -c "cat << EOF > /etc/docker/daemon.json
{
    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
}
EOF"
        fi
        sudo systemctl daemon-reload
        sudo service docker restart
        echo "... restarted Docker service again"
    fi
    # sg gives this (pre-relogin) shell the new docker group membership
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
442
function install_docker_compose() {
    # Install docker-compose (pinned to 1.18.0) as a standalone binary for
    # the host's OS/architecture.
    echo "Installing Docker Compose ..."
    # $(uname ...) instead of legacy backticks; quoted so the URL is one word
    sudo curl -L "https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
450
function install_juju() {
    # Install the juju 2.8 snap and, unless --nocachelxdimages was given,
    # seed the local cache of juju LXD images (plus its refresh cron job).
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.8/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    # -z, not -n: INSTALL_NOCACHELXDIMAGES is set by --nocachelxdimages,
    # which per the usage text means "do not cache lxd images". The original
    # -n test inverted the option and cached exactly when asked not to.
    [ -z "$INSTALL_NOCACHELXDIMAGES" ] && update_juju_images
    echo "Finished installation of juju"
    return 0
}
459
function juju_createcontroller() {
    # Bootstrap a juju controller named $OSM_STACK_NAME on the LXD cloud if
    # one does not exist yet, verify it shows up, and enable k8s operators.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # \$1 must reach awk, not the shell: unescaped, $1 expanded to this
    # function's (empty) first argument, so awk printed whole lines — the
    # line count still worked, but only by accident.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
    juju controller-config features=[k8s-operators]
}
469
function juju_addk8s() {
    # Register the local kubeconfig as a k8s cloud on the OSM controller,
    # with openebs-hostpath as the workload storage class.
    juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath < $HOME/.kube/config
}
473
function juju_createcontroller_k8s(){
    # Register the kubeconfig with the local juju client, then bootstrap the
    # OSM controller on that k8s cloud behind a LoadBalancer service.
    juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client < $HOME/.kube/config
    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
        --config controller-service-type=loadbalancer \
        --agent-version=$JUJU_AGENT_VERSION
}
480
481
function juju_addlxd_cloud(){
    # Register the local LXD daemon as an "lxd-cloud" on the existing juju
    # controller, authenticating with a freshly generated client certificate.
    mkdir -p /tmp/.osm
    OSM_VCA_CLOUDNAME="lxd-cloud"
    LXDENDPOINT=$DEFAULT_IP
    LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
    LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml

    # Cloud definition; hostname verification is off because the LXD server
    # certificate is self-signed
    cat << EOF > $LXD_CLOUD
clouds:
  $OSM_VCA_CLOUDNAME:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
    # Self-signed client cert/key pair, valid for one year
    openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
    # The sed prefix indents every PEM line so it nests correctly under the
    # YAML block scalars below — NOTE(review): the indent width here must
    # match the credentials heredoc's block-scalar indentation; confirm
    # against the original file (extraction collapsed the whitespace).
    local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
    local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/        /'`
    local client_key=`cat /tmp/.osm/client.key | sed 's/^/        /'`

    cat << EOF > $LXD_CREDENTIALS
credentials:
  $OSM_VCA_CLOUDNAME:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
    # Trust the client cert in LXD, then teach juju about cloud+credential
    lxc config trust add local: /tmp/.osm/client.crt
    juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
    juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}
521
522
function juju_createproxy() {
    # Idempotently add a DNAT rule forwarding juju API traffic (port 17070)
    # from the host's default IP to the VCA host, and persist the ruleset.
    check_install_iptables_persistent

    # -C checks for the rule first so repeated runs do not duplicate it
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
531
function docker_login() {
    # Authenticate the docker CLI against the configured registry account.
    # The password is fed via stdin (--password-stdin) so it never appears
    # in `ps` output or shell history, unlike the original -p <password>.
    # NOTE(review): like the original, no registry host is passed, so this
    # logs into Docker Hub by default — confirm whether ${DOCKER_REGISTRY_URL}
    # should be appended for private registries.
    echo "Docker login"
    echo "${DOCKER_REGISTRY_PASSWORD}" | sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
}
536
function generate_docker_images() {
    # Pull the third-party base images OSM needs and then either pull the OSM
    # module images from the registry (--pullimages) or build them from git
    # checkouts. $TO_REBUILD (-m) restricts the work to specific modules.
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # keystone-db (mariadb) is needed whenever NBI or KEYSTONE-DB is rebuilt
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA osmclient; do
            module_lower=${module,,}
            # only one of LW-UI / NG-UI is pulled, depending on $NGUI
            if [ $module == "LW-UI" ]; then
                if [ -n "$NGUI" ]; then
                    continue
                else
                    module_lower="light-ui"
                fi
            fi
            if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
                continue
            fi
            # PLA is optional (--pla)
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            # -T overrides the tag, but only for modules selected with -m
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"

        for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "LW-UI" ]; then
                    if [ -n "$NGUI" ]; then
                        continue
                    else
                        module_lower="light-ui"
                    fi
                fi
                if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
                    continue
                fi
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                # clone the module at $COMMIT_ID and build its Dockerfile
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            # osmclient image is built from the devops repo with repo args
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
}
634
function cmp_overwrite() {
    # Copy $1 over $2 unless both already have identical content. When $2
    # exists and differs, ask the user first (default: no). cp -b keeps a
    # backup of any file it replaces.
    file1="$1"
    file2="$2"
    # NOTE: the original wrapped cmp in $(...), which executed cmp's (empty)
    # output as a command; call cmp directly and use -s to keep it silent.
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
646
function generate_docker_compose_files() {
    # Stage the docker-compose manifests into the runtime work dir, picking
    # the UI compose file that matches the selected UI flavor, plus the PLA
    # compose file when --pla was given.
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    local ui_compose=docker-compose-lightui.yaml
    [ -n "$NGUI" ] && ui_compose=docker-compose-ngui.yaml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/${ui_compose} $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
    return 0
}
660
function generate_k8s_manifest_files() {
    #Kubernetes resources
    # Copy the pod manifests, then drop the ones this install does not use:
    # mongo.yaml and whichever UI flavor was not selected.
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
    local unused_ui=ng-ui.yaml
    [ -n "$NGUI" ] && unused_ui=light-ui.yaml
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/${unused_ui}
}
671
function generate_prometheus_grafana_files() {
    # Only the docker-swarm deployment mounts these files from the work dir;
    # the Kubernetes flavor ships them through its own manifests.
    [ -n "$KUBERNETES" ] && return
    # Prometheus files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    local gfile
    for gfile in dashboards-osm.yml datasource-prometheus.yml osm-sample-dashboard.json osm-system-dashboard.json; do
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/${gfile} $OSM_DOCKER_WORK_DIR/grafana/${gfile}
    done

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
}
689
function generate_docker_env_files() {
    # Generate the per-component env files under $OSM_DOCKER_WORK_DIR that the
    # containers read at startup.  Files are created only if missing; the
    # VCA-related entries are added-or-updated so they always match the
    # current juju controller.  Secrets are generated fresh on first install.
    echo "Doing a backup of existing env files"
    # NOTE(review): on a fresh install these files do not exist yet, so these
    # backup cp calls print harmless "No such file or directory" errors.
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # Add-or-update pattern used for every VCA variable below: append the
    # line when absent, otherwise rewrite it in place with the current value.
    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # API proxy is only set for non-K8s installs (see install_lightweight)
    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    # Commented-out knobs are appended once so operators can enable them later
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    # NOTE(review): MYSQL_ROOT_PASSWORD is regenerated on every run but only
    # written when the env files are first created (first install).
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
834
function generate_osmclient_script () {
    # Write a tiny wrapper that runs the osmclient sidecar container attached
    # to the OSM overlay network, then make it executable.
    local wrapper="$OSM_DOCKER_WORK_DIR/osm"
    printf '%s\n' "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee "$wrapper"
    $WORKDIR_SUDO chmod +x "$wrapper"
    echo "osmclient sidecar container can be found at: $wrapper"
}
840
#installs kubernetes packages
function install_kube() {
    # Add the upstream Kubernetes apt repository (with its signing key) and
    # install a pinned kubelet/kubeadm/kubectl tool chain.
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    # Pin to 1.15.0-00 and hold the packages so unattended upgrades cannot
    # move the cluster to an untested version.
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
    sudo apt-mark hold kubelet kubeadm kubectl
}
851
#initializes kubernetes control plane
function init_kubeadm() {
    # kubeadm refuses to run with swap enabled
    sudo swapoff -a
    # $1: path to the kubeadm config file (the installer passes
    # $OSM_DOCKER_WORK_DIR/cluster-config.yaml)
    sudo kubeadm init --config $1
    sleep 5
}
858
function kube_config_dir() {
    # Make kubectl usable by the installing (non-root) user: copy the admin
    # kubeconfig into ~/.kube and hand ownership to the current user.
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
865
function install_k8s_storageclass() {
    # Deploy the OpenEBS operator, wait until its hostpath storageclass is
    # registered, then mark that storageclass as the cluster default.
    # NOTE: OPENEBS_DIR stays global so the EXIT trap can still see it.
    OPENEBS_DIR="$(mktemp -d -q --tmpdir "openebs.XXXXXX")"
    trap 'rm -rf "${OPENEBS_DIR}"' EXIT
    wget -q https://openebs.github.io/charts/openebs-operator-1.6.0.yaml -P $OPENEBS_DIR
    kubectl apply -f $OPENEBS_DIR
    local storageclass_timeout=400
    local waited=0
    local storageclass_ready=""
    echo "Waiting for storageclass"
    # Poll every 15s until the storageclass shows up or the timeout expires
    while (( waited < storageclass_timeout )); do
        if kubectl get storageclass openebs-hostpath &> /dev/null; then
            echo "Storageclass available"
            storageclass_ready="y"
            break
        fi
        waited=$((waited + 15))
        sleep 15
    done
    [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}
891
function install_k8s_metallb() {
    # A single-address pool: services are exposed on the host's default IP.
    METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
    # Deploy MetalLB itself ...
    kubectl apply -f - < ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml
    # ... then its layer-2 address-pool configuration
    kubectl apply -f - <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE
EOF
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Fetch the flannel manifest into a throw-away directory and apply it.
    # NOTE: CNI_DIR is deliberately global so the EXIT trap can still see it.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # FIX: the status used to be checked with a trailing "[ $? -ne 0 ] && FATAL"
    # whose false test, being the last command, also made this function return
    # non-zero on SUCCESS.  "|| FATAL" keeps the error path and fixes that.
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
916
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # Create the OSM namespace, then one K8s secret per component populated
    # from the env file generated earlier under $OSM_DOCKER_WORK_DIR.
    local component
    kubectl create ns $OSM_STACK_NAME
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
929
#taints K8s master node
function taint_master_node() {
    # Single-node cluster: remove the NoSchedule taint from the master so the
    # OSM pods can be scheduled on it.
    K8S_MASTER=$(kubectl get nodes | awk '$3 ~ /master/ { print $1 }')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}
936
#deploys osm pods and services
function deploy_osm_services() {
    # Apply every generated manifest in $OSM_K8S_WORK_DIR into the OSM
    # namespace ($OSM_STACK_NAME).
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
941
#deploy charmed services
function deploy_charmed_services() {
    # Create a juju model (named after the stack) on the K8s cloud registered
    # earlier and deploy the charmed mongodb used by the OSM services.
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    # deploy mongodb charm
    namespace=$OSM_STACK_NAME
    juju deploy cs:~charmed-osm/mongodb-k8s \
        --config enable-sidecar=true \
        --config replica-set=rs0 \
        --config namespace=$namespace \
        -m $namespace
}
953
function deploy_osm_pla_service() {
    # Optional PLA module: its manifest lives outside $OSM_K8S_WORK_DIR, so
    # repeat here the two steps applied to the core services.
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
960
#Install helm and tiller
function install_helm() {
    # Install the helm v2 client if not already present, then make sure
    # tiller (helm v2's in-cluster component) is deployed and reachable.
    # FIX: probe for helm with "command -v" instead of executing it;
    # FIX: "[ $? == 1 ]" (non-portable "==" inside "[") replaced with -eq;
    # FIX: the awk program had a misplaced quote ('{print $2'});
    # FIX: return 0 explicitly — previously a false test as last command made
    # the function return non-zero on the already-healthy paths.
    if ! command -v helm > /dev/null 2>&1; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has being configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? -eq 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --stable-repo-url https://charts.helm.sh/stable --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        local tiller_timeout=120
        local counter=0
        local tiller_status=""
        while (( counter < tiller_timeout )); do
            # Column 2 of "get deployment --no-headers" is READY, e.g. "1/1"
            tiller_status=$(kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}')
            if [ -n "$tiller_status" ] && [ "$tiller_status" == "1/1" ]; then
                echo "Tiller ready"
                break
            fi
            counter=$((counter + 5))
            sleep 5
        done
        [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
    fi
    return 0
}
997
function parse_yaml() {
    # Rewrite the image reference of each given module inside its K8s
    # manifest so it points at ${DOCKER_REGISTRY_URL}${DOCKER_USER}/<module>:$TAG.
    # $1: docker tag; remaining args: module names.
    # TAG and services stay global, as callers may rely on them.
    TAG=$1
    shift
    services=$@
    local module
    local target
    for module in $services; do
        if [ "$module" == "pla" ]; then
            # PLA is optional: leave its manifest alone unless it is installed
            [ -z "$INSTALL_PLA" ] && continue
            target=${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
        else
            target=${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
        echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
        $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${target}
    done
}
1014
function update_manifest_files() {
    # Retag module images in the K8s manifests: modules named in $TO_REBUILD
    # are tagged with $MODULE_DOCKER_TAG (locally built images), every other
    # module with $OSM_DOCKER_TAG.
    local ui_module="light-ui"
    [ -n "$NGUI" ] && ui_module="ng-ui"
    osm_services="nbi lcm ro pol mon ${ui_module} keystone pla"
    list_of_services=""
    local svc
    local svc_upper
    for svc in $osm_services; do
        svc_upper="${svc^^}"
        # TO_REBUILD names the light UI "LW-UI", not "LIGHT-UI"
        [ "$svc_upper" == "LIGHT-UI" ] && svc_upper="LW-UI"
        echo $TO_REBUILD | grep -q $svc_upper || list_of_services="$list_of_services $svc"
    done
    # Map the rebuild list back to the manifest file naming ("light-ui")
    list_of_services_to_rebuild=$(echo ${TO_REBUILD,,} | sed "s/lw-ui/light-ui/g")
    # Tag "9" appears to be what the stock manifests already reference, so no
    # rewrite is needed in that case
    if [ ! "$OSM_DOCKER_TAG" == "9" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
}
1039
function namespace_vol() {
    # Re-point every hostPath volume in the stateful services' manifests from
    # the default /var/lib/osm to $OSM_NAMESPACE_VOL.
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    local svc
    for svc in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
1046
function init_docker_swarm() {
    # When the host MTU is non-standard, pre-create docker_gwbridge with a
    # matching MTU before "swarm init" would create it with the default 1500.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        # Pick the next free 172.x.0.0 subnet after the highest one already
        # used by an existing docker network
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    # "sg docker" runs the command with the docker group without re-login
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
1056
function create_docker_network() {
    # Attachable overlay network shared by all OSM containers; the MTU is
    # taken from the host's default interface.
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
1062
function deploy_lightweight() {

    echo "Deploying lightweight build"
    # Default service ports, exported below through osm_ports.sh so the
    # docker-compose files can reference them
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    # With NO_HOST_PORTS only the container port is published; otherwise a
    # host:container mapping is used (prometheus lands on host port 9091,
    # since 9090 is already taken by RO).
    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # osm_ports.sh is sourced right before "docker stack deploy" below
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    # Deploy the swarm stack; the PLA compose file is added only on request
    if [ -n "$INSTALL_PLA" ]; then
        track deploy_osm_pla
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
1122
function deploy_elk() {
    # Pull the ELK images, deploy the osm_elk swarm stack and, once Kibana
    # answers on port 5601, pre-create the default filebeat index pattern.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # Poll Kibana's status endpoint every $step seconds, up to $timelength
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never came up: print the commands for the operator to run
        # manually once it is reachable
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
1172
function add_local_k8scluster() {
    # Register a dummy VIM and the installer's own K8s cluster in OSM so
    # that K8s-based workloads can be deployed on this cluster out of the box.
    /usr/bin/osm --all-projects vim-create \
      --name _system-osm-vim \
      --account_type dummy \
      --auth_url http://dummy \
      --user osm --password osm --tenant osm \
      --description "dummy" \
      --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
      --creds ${HOME}/.kube/config \
      --vim _system-osm-vim \
      --k8s-nets '{"net1": null}' \
      --version '1.15' \
      --description "OSM Internal Cluster" \
      _system-osm-k8s
}
1189
function install_lightweight() {
    # Main flow of the lightweight OSM installation: prerequisite checks,
    # container platform (K8s or docker swarm), juju VCA bootstrap, env and
    # manifest file generation, and finally service deployment.  Progress is
    # reported through "track" for installation statistics.
    #
    # FIXES in this revision (both one-character test bugs):
    #  * "[ -n "$INSTALL_PLA"]" was missing the space before "]", so the test
    #    always failed and PLA was never deployed on K8s even when requested.
    #  * "[ -z "OSM_DATABASE_COMMONKEY" ]" tested the literal string (missing
    #    "$"), so a failed generate_secret was never detected.
    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine interface/IP/MTU of the default route; used for juju, docker
    # networking and service exposure
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)

    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    #Installs Kubernetes
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        taint_master_node
        install_k8s_storageclass
        track k8s_storageclass
        install_k8s_metallb
        track k8s_metallb
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Bootstrap (or reuse) the juju controller acting as VCA
    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then

            if [ -n "$KUBERNETES" ]; then
                juju_createcontroller_k8s
                juju_addlxd_cloud
            else
                if [ -n "$LXD_CLOUD_FILE" ]; then
                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                    OSM_VCA_CLOUDNAME="lxd-cloud"
                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
                fi
                juju_createcontroller
                juju_createproxy
            fi
        else
            # Reuse an existing controller: register the local (or provided)
            # LXD as a cloud on it
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                # Indent the PEM blocks so they nest under the YAML block scalars
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi

    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
    if [ -z "$KUBERNETES" ]; then
        if [ -z "$OSM_VCA_APIPROXY" ]; then
            OSM_VCA_APIPROXY=$DEFAULT_IP
            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
        fi
        juju_createproxy
    fi
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # FIX: was '[ -z "OSM_DATABASE_COMMONKEY" ]' (missing "$"), which
        # compared a literal string and never detected a failed generation
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files
    track env_files

    if [ -n "$KUBERNETES" ]; then
        deploy_charmed_services
        kube_secrets
        update_manifest_files
        namespace_vol
        deploy_osm_services
        # FIX: was '[ -n "$INSTALL_PLA"]' — the missing space before "]" made
        # the test always fail, so PLA was never deployed even when requested
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
            track deploy_osm_pla
        fi
        track deploy_osm_services_k8s
        install_helm
        track install_helm
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
        track osm_unhealthy
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
        track osm_unhealthy
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    # Ping the download server so this installation is counted (statistics)
    wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
    track end
    return 0
}
1433
function install_to_openstack() {

    # Deploy OSM onto a VM in an OpenStack cloud via the Ansible playbook in
    # $OSM_DEVOPS/installers/openstack.
    # Arguments:
    #   $1 - path to an openrc file, OR a cloud name from clouds.yaml
    #   $2 - external network name (mandatory)
    #   $3 - whether to attach a volume ("true"/"false")
    # Returns 0 on success; the playbook's own failures surface via ansible.

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    # NOTE(review): assumes WORKDIR_SUDO=sudo; when -w empties WORKDIR_SUDO,
    # the bare '-H' below would be executed as a command -- confirm.
    $WORKDIR_SUDO apt install -y python3-pip python3-venv
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv "$OPENSTACK_PYTHON_VENV"

    source "$OPENSTACK_PYTHON_VENV/bin/activate"

    # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the Openstack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    # Join all repo arguments into one space-separated string that is passed
    # through to the installer running inside the VM ([*] joins; [@] would
    # rely on the same implicit joining inside a scalar assignment)
    OSM_INSTALLER_ARGS="${REPO_ARGS[*]}"

    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        # $1 is an existing openrc file: source it to export the OS_* credentials
        . "$1"
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        $OSM_DEVOPS/installers/openstack/site.yml
    else
        # $1 is a cloud name to be resolved from clouds.yaml
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        -e cloud_name="$1" $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit from venv
    deactivate

    return 0
}
1485
function install_vimemu() {
    # Build and launch the vim-emu (emulated VIM) docker container so NS
    # instances can be tried out without a real OpenStack VIM.
    # fix: plain 'echo' would print the literal '\n'; -e interprets it
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    # NOTE(review): this EXIT trap replaces any previously installed EXIT
    # trap (e.g. the temporary devops dir cleanup) -- confirm acceptable.
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # -y keeps the step non-interactive, consistent with the rest of the installer
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR || FATAL "cannot clone vim-emu repository"
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1518
function install_k8s_monitoring() {
    # Deploy the optional OSM monitoring stack on the k8s cluster by running
    # the bundled helper script.
    local k8s_installers_dir="$OSM_DEVOPS/installers/k8s"
    # ensure the helper scripts are executable before launching the installer
    $WORKDIR_SUDO chmod +x "$k8s_installers_dir"/*.sh
    $WORKDIR_SUDO "$k8s_installers_dir/install_osm_k8s_monitoring.sh"
}
1524
function uninstall_k8s_monitoring() {
    # Remove the optional OSM monitoring stack by running the bundled
    # uninstall helper script.
    local uninstall_script="$OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh"
    $WORKDIR_SUDO "$uninstall_script"
}
1529
function dump_vars(){
    # Print the effective installer configuration, one NAME=value line per
    # option, in a fixed order (used by --showopts).
    local opt_name
    for opt_name in \
        DEVELOP INSTALL_FROM_SOURCE UNINSTALL UPDATE RECONFIGURE \
        TEST_INSTALLER INSTALL_VIMEMU INSTALL_PLA INSTALL_LXD \
        INSTALL_LIGHTWEIGHT INSTALL_ONLY INSTALL_ELK \
        INSTALL_NOCACHELXDIMAGES INSTALL_TO_OPENSTACK \
        OPENSTACK_PUBLIC_NET_NAME OPENSTACK_OPENRC_FILE_OR_CLOUD \
        OPENSTACK_ATTACH_VOLUME OPENSTACK_SSH_KEY_FILE \
        OPENSTACK_USERDATA_FILE OPENSTACK_VM_NAME INSTALL_K8S_MONITOR \
        TO_REBUILD INSTALL_NOLXD INSTALL_NODOCKER INSTALL_NOJUJU \
        RELEASE REPOSITORY REPOSITORY_BASE REPOSITORY_KEY OSM_DEVOPS \
        OSM_VCA_HOST OSM_VCA_SECRET OSM_VCA_PUBKEY NO_HOST_PORTS \
        DOCKER_NOBUILD WORKDIR_SUDO OSM_WORK_DIR OSM_DOCKER_TAG \
        DOCKER_USER OSM_STACK_NAME PULL_IMAGES KUBERNETES NGUI \
        DOCKER_REGISTRY_URL DOCKER_PROXY_URL SHOWOPTS; do
        # ${!opt_name} is bash indirect expansion: the value of the variable
        # whose name is stored in opt_name
        printf '%s=%s\n' "$opt_name" "${!opt_name}"
    done
    # INSTALL_PERFMON is intentionally not printed (commented out upstream)
    printf '%s\n' "Install from specific refspec (-b): $COMMIT_ID"
}
1580
function track(){
    # Report an anonymous installation-progress event to OSM's woopra
    # analytics endpoint.
    # $1 - stage name appended to the event (e.g. "start", "juju", "end")
    # ctime/duration/url/event_name are deliberately left global to preserve
    # the original function's side effects.
    ctime=$(date +%s)
    # SESSION_ID is the epoch timestamp taken at script start, so this is
    # the number of seconds elapsed since the installation began
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    # NOTE(review): INSTALL_FROM_LXDIMAGES is never set in this script -- confirm
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    # quote the URL: it contains '&'-separated parameters and unquoted
    # expansion would subject it to globbing/word-splitting
    wget -q -O /dev/null "$url"
}
1594
function parse_docker_registry_url() {
    # Split DOCKER_REGISTRY_URL, expected as "user:password@host", into
    # DOCKER_REGISTRY_USER and DOCKER_REGISTRY_PASSWORD, and strip the
    # credentials from DOCKER_REGISTRY_URL itself (leaving only the host).
    local full_url="$DOCKER_REGISTRY_URL"
    DOCKER_REGISTRY_USER=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}' <<<"$full_url")
    DOCKER_REGISTRY_PASSWORD=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}' <<<"$full_url")
    DOCKER_REGISTRY_URL=$(awk '{split($1,a,"@"); print a[2]}' <<<"$full_url")
}
1600
# --- Installer defaults ------------------------------------------------------
# Every option below may be overridden from the command line (see usage()).
JUJU_AGENT_VERSION=2.8.6
UNINSTALL=""
DEVELOP=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseNINE"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_PLA=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
# OpenStack deployment options (-O/-N/-f/-F/--volume)
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
# Kubernetes and the Next-Gen UI are the default deployment choices
KUBERNETES="y"
NGUI="y"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
INSTALL_NOCACHELXDIMAGES=""
# Epoch timestamp used both as tracking cookie and as the start time for
# the duration reported by track()  ($() replaces legacy backticks)
SESSION_ID=$(date +%s)
OSM_DEVOPS=
# Juju/VCA connection parameters (normally discovered during installation)
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
# Pinned tags/versions for third-party images and components
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Valid k8s namespace names (RFC 1123 label) -- used to validate -s
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
DOCKER_REGISTRY_URL=
DOCKER_PROXY_URL=
MODULE_DOCKER_TAG=
1674
# ---------------------------------------------------------------------------
# Command-line parsing.
# The leading ':' in the optstring selects getopts' silent error mode, so
# bad or incomplete options are reported via the ':' and '\?' arms below.
# The '-:' entry lets bash accept GNU-style '--word' options: for '--foo',
# ${o} becomes '-' and ${OPTARG} becomes 'foo', handled in the '-)' arm.
# 'continue' inside an arm jumps straight to the next getopts iteration.
# NOTE(review): the space before 'hy' in the optstring makes ' ' a valid
# option character; it looks accidental -- confirm before removing.
# ---------------------------------------------------------------------------
while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
    case "${o}" in
        # -b <refspec>: install from this branch/tag; implies no image pull
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        # -c <orchestrator>: swarm disables k8s; k8s is already the default
        c)
            [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # NOTE(review): message says '-i' but this is the '-c' option -- confirm
            echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        # -n <ui>: lwui selects the legacy light UI; ngui is the default
        n)
            [ "${OPTARG}" == "lwui" ] && NGUI="" && REPO_ARGS+=(-n "${OPTARG}") && continue
            [ "${OPTARG}" == "ngui" ] && continue
            echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        # -o <component>: install just one optional component
        # NOTE(review): an unrecognized value still sets INSTALL_ONLY and is
        # otherwise silently ignored -- confirm intended
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        # -O <openrc-or-cloud>: deploy OSM to an OpenStack cloud
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        f)
            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
            ;;
        F)
            OPENSTACK_USERDATA_FILE="${OPTARG}"
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        # -m <module>: accumulate modules to rebuild from source
        m)
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        # -s <name>: stack/namespace name; on k8s it must match RE_CHECK
        # NOTE(review): exits with status 0 even when the name is invalid -- confirm
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        # -P <file>: read the VCA public key from the given file
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        # Long options ('--word'); unrecognized names fall through to the
        # error at the bottom. Several charmed-installer options are accepted
        # here only so they can be passed through via "$@" later.
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        # ':' fires when an option that requires an argument got none
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1855
# --- Post-parsing validation and derived settings ---------------------------
[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
# -m NONE is exclusive: reject it if combined with any other -m value
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

# Charmed (Juju-bundle based) installation is fully delegated to dedicated
# scripts; remaining CLI args are forwarded via "$@".
# NOTE(review): $DOCKER_TAG is not set anywhere in this script (the -t
# option sets OSM_DOCKER_TAG) -- confirm intended.
if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi

    exit 0
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

# If dpkg -l fails (some package missing), '! echo' prints the notice and
# forces a non-zero status so the chain falls through to apt-get; FATAL
# fires only if apt-get itself fails.
echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
  || sudo apt-get update \
  || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "Installing $need_packages requires root privileges." \
  || sudo apt-get install -y $need_packages \
  || FATAL "failed to install $need_packages"
sudo snap install jq
# Resolve the devops working copy: -D wins, --test uses the local checkout,
# otherwise clone the devops repo into a self-cleaning temporary directory.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        # Without an explicit -b refspec, pick the highest v* tag as the
        # current stable release
        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

# Pull in shared helper functions (FATAL, ask_user, deployment helpers, ...)
. $OSM_DEVOPS/common/all_funcs

# Derive per-stack paths, then honor the uninstall / install-only shortcuts,
# each of which exits before the full installation below
[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
# The wget is a fire-and-forget download-statistics ping; errors are ignored
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README.txt &> /dev/null
track start

# Default (lightweight/container) installation path; exits when done
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
# Everything below runs only for the legacy from-source installation path
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Fire-and-forget download-statistics ping marking the end of installation
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
track end
echo -e "\nDONE"