Install pip requirements as user
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
function usage(){
    # Print the installer's command-line help to stdout.
    # Single printf invocations instead of one echo per line; the output is
    # identical (the original strings contain no backslash escapes, so the
    # original `echo -e` added nothing).
    printf '%s\n' \
        "usage: $0 [OPTIONS]" \
        "Install OSM from binaries or source code (by default, from binaries)" \
        " OPTIONS" \
        " -h / --help: print this help" \
        " -y: do not prompt for confirmation, assumes yes" \
        " -r <repo>: use specified repository name for osm packages" \
        " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)" \
        " -u <repo base>: use specified repository url for osm packages" \
        " -k <repo key>: use specified repository public key url" \
        " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag" \
        " -b master (main dev branch)" \
        " -b v2.0 (v2.0 branch)" \
        " -b tags/v1.1.0 (a specific tag)" \
        " ..." \
        " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled" \
        " -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled" \
        " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm" \
        " -H <VCA host> use specific juju host controller IP" \
        " -S <VCA secret> use VCA/juju secret key" \
        " -P <VCA pubkey> use VCA/juju public key file" \
        " -C <VCA cacert> use VCA/juju CA certificate file" \
        " -A <VCA apiproxy> use VCA/juju API proxy" \
        " --vimemu: additionally deploy the VIM emulator as a docker container" \
        " --elk_stack: additionally deploy an ELK docker stack for event logging" \
        " --pla: install the PLA module for placement support" \
        " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (LW-UI, NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)" \
        " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)" \
        " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/" \
        " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack" \
        " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack" \
        " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack" \
        " -D <devops path> use local devops installation path" \
        " -w <work dir> Location to store runtime installation" \
        " -t <docker tag> specify osm docker tag (default is latest)" \
        " -l: LXD cloud yaml file" \
        " -L: LXD credentials yaml file" \
        " -K: Specifies the name of the controller to use - The controller must be already bootstrapped" \
        " -d <docker registry URL> use docker registry URL instead of dockerhub" \
        " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration" \
        " -T <docker tag> specify docker tag for the modules specified with option -m" \
        " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)" \
        " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)" \
        " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)" \
        " --nojuju: do not juju, assumes already installed" \
        " --nodockerbuild:do not build docker images (use existing locally cached images)" \
        " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)" \
        " --nohostclient: do not install the osmclient" \
        " --uninstall: uninstall OSM: remove the containers and delete NAT rules" \
        " --source: install OSM from source code using the latest stable tag" \
        " --develop: (deprecated, use '-b master') install OSM from source code using the master branch" \
        " --pullimages: pull/run osm images from docker.io/opensourcemano" \
        " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana" \
        " --volume: create a VM volume when installing to OpenStack"
    # Options kept documented here but currently disabled in the help text:
    # --reconfigure: reconfigure the modules (DO NOT change NAT rules)
    # --update: update to the latest stable release or to the latest commit if using a specific branch
    printf '%s\n' \
        " --showopts: print chosen options and exit (only for debugging)" \
        " --charmed: Deploy and operate OSM with Charms on k8s" \
        " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)" \
        " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)" \
        " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)" \
        " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)" \
        " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)" \
        " [--microstack]: Installs microstack as a vim. (--charmed option)" \
        " [--ha]: Installs High Availability bundle. (--charmed option)" \
        " [--tag]: Docker image tag. (--charmed option)" \
        " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"

}
85
86 # takes a juju/accounts.yaml file and returns the password specific
87 # for a controller. I wrote this using only bash tools to minimize
88 # additions of other packages
89 function parse_juju_password {
90 password_file="${HOME}/.local/share/juju/accounts.yaml"
91 local controller_name=$1
92 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
93 sed -ne "s|^\($s\):|\1|" \
94 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
95 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
96 awk -F$fs -v controller=$controller_name '{
97 indent = length($1)/2;
98 vname[indent] = $2;
99 for (i in vname) {if (i > indent) {delete vname[i]}}
100 if (length($3) > 0) {
101 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
102 if (match(vn,controller) && match($2,"password")) {
103 printf("%s",$3);
104 }
105 }
106 }'
107 }
108
function generate_secret() {
    # Print a 32-character alphanumeric random secret on stdout.
    # Stream /dev/urandom straight through tr until head has collected 32
    # characters. The original `head /dev/urandom | tr ...` took only the
    # first 10 newline-delimited "lines" of random data, which is a random
    # amount of input and could yield fewer than 32 alphanumeric bytes.
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
112
function remove_volumes() {
    # Delete OSM persistent storage: under Kubernetes, $1 is a host
    # directory to wipe; under swarm, $1 is the stack name whose docker
    # volumes are removed one by one.
    if [ -z "$KUBERNETES" ]; then
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    else
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    fi
}
126
function remove_network() {
    # Remove the per-stack docker network, named "net<stack>".
    stack=$1
    local network="net${stack}"
    sg docker -c "docker network rm ${network}"
}
131
function remove_iptables() {
    # Delete the DNAT rule that exposed the juju API (port 17070) on the
    # host's default IP, then persist the iptables state.
    # $1 - stack name, used as the juju controller name when the VCA host
    #      has to be discovered.
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        # Fallback via route(8), consistent with install_lxd(). The
        # original retried the exact same `ip route` command here, which
        # could never produce a different result.
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # Only delete the rule if it is actually present (-C checks existence).
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
152
function remove_stack() {
    # Remove a docker swarm stack and poll (up to 30s) until all of its
    # tasks are gone; abort the installer if some tasks remain.
    stack=$1
    # Nothing to do when docker does not know the stack.
    sg docker -c "docker stack ps ${stack}" || return 0
    echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
    result=1
    for (( COUNTER=0; COUNTER<30; COUNTER++ )); do
        result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
        #echo "Dockers running: $result"
        [ "${result}" == "0" ] && break
        sleep 1
    done
    if [ "${result}" == "0" ]; then
        echo "All dockers of the stack ${stack} were removed"
    else
        FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
    fi
    # Give docker a moment to release the stack's resources (networks, ...).
    sleep 5
}
176
#removes osm deployments and services
function remove_k8s_namespace() {
    # Deleting the namespace removes every OSM deployment/service in it.
    local namespace=$1
    kubectl delete ns "${namespace}"
}
181
#removes helm only if there is nothing deployed in helm
function remove_helm() {
    # Uninstall helm and its tiller resources, but only when no helm
    # releases remain deployed (guard clause below).
    [ -n "$(helm ls -q)" ] && return
    sudo helm reset --force
    kubectl delete --namespace kube-system serviceaccount tiller
    kubectl delete clusterrolebinding tiller-cluster-rule
    sudo rm /usr/local/bin/helm
    rm -rf $HOME/.helm
}
192
function remove_crontab_job() {
    # Drop the update-juju-lxc-images cache job from the user's crontab.
    # The path must be expanded (double quotes) and matched literally (-F):
    # the original single-quoted '${OSM_DEVOPS}/...' pattern was taken
    # verbatim, never matched the installed entry, and left the job behind.
    crontab -l | grep -vF "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}
196
#Uninstall osmclient
function uninstall_osmclient() {
    # Purge both the python2 and python3 osmclient debian packages.
    local pkg
    for pkg in python-osmclient python3-osmclient; do
        sudo apt-get remove --purge -y $pkg
    done
}
202
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # With INSTALL_ONLY set, only the requested addon (ELK) is removed;
    # otherwise the full OSM deployment (k8s namespace or swarm stacks),
    # its images, volumes, NAT rules and juju controller are torn down.
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # TODO: clean-up of images should take into account if other tags were used for specific modules
        # The registry/user/tag variables are expanded by THIS shell before
        # newgrp runs the heredoc, but \${module} must be escaped so it is
        # expanded by the inner shell's loop. (The original unescaped
        # ${module} was expanded here — normally to an empty string — so
        # the per-module image names were never formed.)
        newgrp docker << EONG
for module in ro lcm keystone nbi mon pol pla osmclient; do
    docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/\${module}:${OSM_DOCKER_TAG}
done
EONG

        if [ -n "$NGUI" ]; then
            sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
        else
            sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
        fi

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
            [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        fi
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        # Only kill the juju controller when the installer created it
        # (an externally provided controller is identified by CONTROLLER_NAME).
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
    fi
    remove_crontab_job

    # Cleanup Openstack installer venv
    if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
        rm -r $OPENSTACK_PYTHON_VENV
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
261
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    # Install iptables-persistent non-interactively when it is missing,
    # pre-answering its debconf "save current rules" prompts.
    echo -e "\nChecking required packages: iptables-persistent"
    dpkg -l iptables-persistent &>/dev/null && return
    echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
    local question
    for question in autosave_v4 autosave_v6; do
        echo iptables-persistent iptables-persistent/${question} boolean true | sudo debconf-set-selections
    done
    sudo apt-get -yq install iptables-persistent
}
272
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # Make sure the persistence package is present, then delegate the
    # actual rule setup to the devops nat_osm helper (needs root).
    check_install_iptables_persistent

    printf '\nConfiguring NAT rules\n   Required root privileges\n'
    sudo $OSM_DEVOPS/installers/nat_osm
}
281
function FATAL(){
    # Print the fatal error reason ($1) and abort the installer.
    printf 'FATAL error: Cannot install OSM due to "%s"\n' "$1"
    exit 1
}
286
function update_juju_images(){
    # Ensure a weekly crontab entry (Saturday 04:00) exists that refreshes
    # the cached xenial/bionic LXD images used by juju, then run one
    # refresh immediately. The leading `crontab -l | grep ... ||` makes
    # the append idempotent: the entry is only added when no line of the
    # current crontab mentions update-juju-lxc-images.
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}
291
function install_lxd() {
    # Install and configure LXD from snap (replacing any deb-packaged
    # LXC/LXD), expose its HTTPS API on DEFAULT_IP:8443, and align the
    # bridge/profile MTU with the host's default interface.
    # Globals read: OSM_DEVOPS, DEFAULT_IP.
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    # Inject core.https_address into the preseed (replacing the empty
    # "config: {}" mapping) before feeding it to `lxd init`.
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    # Find the default-route interface; fall back to route(8) output.
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    # Propagate the host MTU to the default profile NIC and the lxdbr0
    # bridge so container traffic is not fragmented.
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
315
function ask_user(){
    # Prompt the user ($1) and parse a yes/no answer, case-insensitive.
    # $2 is the default for an empty answer: 'y' -> yes, 'n' -> no; any
    # other value keeps asking until an explicit answer is typed.
    # Returns 0 for yes, 1 for no.
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        case "${USER_CONFIRMATION,,}" in
            "")
                [ "$2" == 'y' ] && return 0
                [ "$2" == 'n' ] && return 1
                ;;
            yes|y) return 0 ;;
            no|n)  return 1 ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
329
function install_osmclient(){
    # Install the OSM client and information model from the configured
    # debian repository, plus their pip requirements, and print how to
    # point the client at the OSM host.
    # Strip the option prefixes so the function works whether the globals
    # hold "-R <x>"-style strings or plain values.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    # The deb packages' pip requirements are installed WITHOUT sudo, i.e.
    # at user level (unlike the pip upgrades above).
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        # build dependencies for pycurl in the osmclient requirements
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Non-lightweight installs: resolve the SO-ub / RO LXD containers' IPs
    # so the client can reach them.
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
367
function install_prometheus_nodeexporter(){
    # Install the Prometheus node_exporter (release tag
    # $PROMETHEUS_NODE_EXPORTER_TAG) as a systemd service, unless it is
    # already active.
    if systemctl -q is-active node_exporter; then
        echo "Node Exporter is already running."
        return 0
    fi
    echo "Node Exporter is not active, installing..."
    # Dedicated non-login system user for the service.
    if getent passwd node_exporter > /dev/null 2>&1; then
        echo "node_exporter user exists"
    else
        echo "Creating user node_exporter"
        sudo useradd --no-create-home --shell /bin/false node_exporter
    fi
    local tarball="node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64"
    # Fetch the release tarball, install the binary, clean up /tmp.
    wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/${tarball}.tar.gz -P /tmp/
    sudo tar -C /tmp -xf /tmp/${tarball}.tar.gz
    sudo cp /tmp/${tarball}/node_exporter /usr/local/bin
    sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
    sudo rm -rf /tmp/${tarball}*
    # Register and start the systemd unit shipped with devops.
    sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo systemctl restart node_exporter
    sudo systemctl enable node_exporter
    echo "Node Exporter has been activated in this host."
    return 0
}
393
function uninstall_prometheus_nodeexporter(){
    # Stop and fully remove the node_exporter service, binary and user.
    local step
    for step in "systemctl stop node_exporter" \
                "systemctl disable node_exporter" \
                "rm /etc/systemd/system/node_exporter.service" \
                "systemctl daemon-reload" \
                "userdel node_exporter" \
                "rm /usr/local/bin/node_exporter"; do
        sudo $step
    done
    return 0
}
403
function install_docker_ce() {
    # installs and configures Docker CE
    # Adds Docker's apt repository, installs docker-ce, grants the current
    # user access via the docker group, and — when DOCKER_PROXY_URL is set —
    # configures a registry mirror in /etc/docker/daemon.json.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    if [ -n "${DOCKER_PROXY_URL}" ]; then
        echo "Configuring docker proxy ..."
        if [ -f /etc/docker/daemon.json ]; then
            # daemon.json exists: replace an existing registry-mirrors
            # entry, or insert one right after the opening brace.
            if grep -q registry-mirrors /etc/docker/daemon.json; then
                sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
            else
                sudo sed -i "s|{|{\n \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
            fi
        else
            # no daemon.json yet: create one holding only the mirror entry
            sudo bash -c "cat << EOF > /etc/docker/daemon.json
{
 \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
}
EOF"
        fi
        sudo systemctl daemon-reload
        sudo service docker restart
        echo "... restarted Docker service again"
    fi
    # Sanity check: the docker CLI must be usable through the docker group.
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
442
function install_docker_compose() {
    # Install the pinned docker-compose 1.18.0 release binary matching
    # this host's kernel and architecture.
    echo "Installing Docker Compose ..."
    local release_url="https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m)"
    sudo curl -L $release_url -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
450
function install_juju() {
    # Install juju 2.8 from snap and pre-cache juju LXD images unless the
    # cache was disabled with --nocachelxdimages.
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.8/stable
    # Make the snap binary visible in this shell session.
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    if [ -z "$INSTALL_NOCACHELXDIMAGES" ]; then
        update_juju_images
    fi
    echo "Finished installation of juju"
    return 0
}
459
function juju_createcontroller() {
    # Bootstrap a juju controller named $OSM_STACK_NAME on the LXD cloud
    # if it does not exist yet, then verify it and enable k8s-operators.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # \$1 must reach awk: the original unescaped $1 was expanded by the
    # shell to this function's (empty) first argument, making awk print
    # whole lines — the wc -l count only worked by accident.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
    # Quote the value so [k8s-operators] is not treated as a glob pattern
    # that could match a file in the current directory.
    juju controller-config "features=[k8s-operators]"
}
469
function juju_addk8s() {
    # Register the local kubernetes (from ~/.kube/config on stdin) in the
    # juju controller, backed by the openebs-hostpath storage class.
    juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath < $HOME/.kube/config
}
473
function juju_createcontroller_k8s(){
    # Add the k8s cloud to the local juju client (kubeconfig on stdin) and
    # bootstrap a controller on it, exposed through a LoadBalancer service.
    juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client < $HOME/.kube/config
    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
        --config controller-service-type=loadbalancer \
        --agent-version=$JUJU_AGENT_VERSION
}
480
481
function juju_addlxd_cloud(){
    # Register the local LXD daemon as juju cloud "lxd-cloud" using
    # certificate authentication: generate a client certificate, trust it
    # in LXD, and add cloud + credential definitions to the controller.
    # Globals read: DEFAULT_IP, OSM_STACK_NAME. Globals written:
    # OSM_VCA_CLOUDNAME, LXDENDPOINT, LXD_CLOUD, LXD_CREDENTIALS.
    mkdir -p /tmp/.osm
    OSM_VCA_CLOUDNAME="lxd-cloud"
    LXDENDPOINT=$DEFAULT_IP
    LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
    LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml

    # Cloud definition pointing at the LXD HTTPS endpoint; the server
    # certificate is self-signed, so hostname verification is disabled.
    cat << EOF > $LXD_CLOUD
clouds:
  $OSM_VCA_CLOUDNAME:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
    # Self-signed client certificate for juju to authenticate against LXD.
    openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
    # sed indents each certificate line so it fits the YAML block scalars
    # in the credentials file below.
    local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
    local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/        /'`
    local client_key=`cat /tmp/.osm/client.key | sed 's/^/        /'`

    cat << EOF > $LXD_CREDENTIALS
credentials:
  $OSM_VCA_CLOUDNAME:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
    # Trust the generated client certificate in the local LXD daemon.
    lxc config trust add local: /tmp/.osm/client.crt
    juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
    juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}
521
522
function juju_createproxy() {
    # Add (if not already present) a DNAT rule forwarding port 17070 on
    # the host's default IP to the juju controller, then persist it.
    check_install_iptables_persistent

    # -C succeeds when the rule already exists; nothing more to do then.
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        return 0
    fi
    sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
    sudo netfilter-persistent save
}
531
function docker_login() {
    # Log the docker CLI in to the private registry.
    # NOTE(review): the password is passed on the command line and is
    # visible in `ps` output; --password-stdin would be safer.
    echo "Docker login"
    local login_cmd="docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}"
    sg docker -c "${login_cmd}"
}
536
function generate_docker_images() {
    # Obtain every docker image needed for the deployment. Third-party
    # images are always pulled; OSM module images are either pulled from
    # the registry (PULL_IMAGES set) or built locally from source at
    # COMMIT_ID. An empty TO_REBUILD means "all modules"; otherwise only
    # the listed modules are processed.
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # keystone-db (mariadb) is also needed whenever NBI is rebuilt.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA osmclient; do
            module_lower=${module,,}
            # Only one of the two UIs is pulled, depending on $NGUI;
            # PLA only with $INSTALL_PLA.
            if [ $module == "LW-UI" ]; then
                if [ -n "$NGUI" ]; then
                    continue
                else
                    module_lower="light-ui"
                fi
            fi
            if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
                continue
            fi
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            # The per-module tag override (-T) only applies to modules
            # listed in TO_REBUILD (-m).
            module_tag="${OSM_DOCKER_TAG}"
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"

        # Build path: clone each module repository at COMMIT_ID into
        # LWTEMPDIR and build its image with its own Dockerfile.
        for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "LW-UI" ]; then
                    if [ -n "$NGUI" ]; then
                        continue
                    else
                        module_lower="light-ui"
                    fi
                fi
                if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
                    continue
                fi
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        # The osmclient image is built from the devops Dockerfile with the
        # repository settings passed as build args.
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
}
634
function cmp_overwrite() {
    # Copy $1 over $2 unless both files already have identical content.
    # If $2 exists and differs, ask the user before overwriting (cp -b
    # keeps a backup of the overwritten file).
    # `cmp -s` replaces the original `if ! $(cmp ... >/dev/null 2>&1)`,
    # which executed cmp's (empty) output as a command and only worked via
    # the empty-command-substitution exit-status corner case.
    file1="$1"
    file2="$2"
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
646
function generate_docker_compose_files() {
    # Stage the docker-compose manifests into the work dir: the base
    # stack, the UI flavour selected by $NGUI, and optionally PLA.
    local compose_src=${OSM_DEVOPS}/installers/docker
    $WORKDIR_SUDO cp -b ${compose_src}/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    local ui_compose=docker-compose-lightui.yaml
    [ -n "$NGUI" ] && ui_compose=docker-compose-ngui.yaml
    $WORKDIR_SUDO cp -b ${compose_src}/${ui_compose} $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${compose_src}/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}
660
function generate_k8s_manifest_files() {
    #Kubernetes resources
    # Copy the pod manifests into the work dir, then delete the ones that
    # do not apply: mongo.yaml is always removed (presumably deployed by
    # other means — verify against the installer flow), and only the UI
    # flavour selected by $NGUI is kept.
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
    local unused_ui=ng-ui.yaml
    [ -n "$NGUI" ] && unused_ui=light-ui.yaml
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/${unused_ui}
}
671
function generate_prometheus_grafana_files() {
    # Stage prometheus, grafana and exporter configuration into the work
    # dir. Only used for the swarm deployment; kubernetes ships its own
    # manifests, so bail out early in that case.
    [ -n "$KUBERNETES" ] && return
    local src=${OSM_DEVOPS}/installers/docker
    local dst=$OSM_DOCKER_WORK_DIR

    # Prometheus files
    $WORKDIR_SUDO mkdir -p $dst/prometheus
    $WORKDIR_SUDO cp -b $src/prometheus/prometheus.yml $dst/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $dst/grafana
    local f
    for f in dashboards-osm.yml datasource-prometheus.yml osm-sample-dashboard.json osm-system-dashboard.json; do
        $WORKDIR_SUDO cp -b $src/grafana/$f $dst/grafana/$f
    done

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $dst/prometheus_exporters
    $WORKDIR_SUDO cp -b $src/prometheus_exporters/node_exporter.service $dst/prometheus_exporters/node_exporter.service
}
689
function generate_docker_env_files() {
    # Create or update the per-service env files under $OSM_DOCKER_WORK_DIR.
    # Idempotent across re-installs: existing files are backed up to "<file>~",
    # missing keys are appended, and VCA-related keys are refreshed in place
    # with sed so a new juju controller is picked up on re-run.
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM: common DB key only on first creation; VCA settings always refreshed.
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # APIPROXY is only set for non-K8s installs (see install_lightweight),
    # hence the guard on the variable being non-empty.
    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    # The next two entries are written commented out: documented defaults the
    # operator can opt into by uncommenting.
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO: note a fresh MYSQL_ROOT_PASSWORD is generated on every run, but it
    # is only written when the env files do not exist yet, so an existing
    # deployment keeps its original password.
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone: DB and service passwords, again only written on first creation.
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI: authenticates against keystone with the service password above.
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON: keystone/DB credentials on creation; notifier URI and VCA settings
    # refreshed on every run.
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
834
function generate_osmclient_script () {
    # Generate a one-line wrapper that runs the osmclient sidecar container
    # attached to the OSM overlay network, and make it executable.
    local run_cmd="docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}"
    echo "$run_cmd" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
840
#installs kubernetes packages
function install_kube() {
    # Pin all three tools to the same tested release.
    local k8s_pkg_version=1.15.0-00
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    # Add the upstream Kubernetes apt repository and its signing key.
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=${k8s_pkg_version} kubeadm=${k8s_pkg_version} kubectl=${k8s_pkg_version}
    # Hold the packages so unattended upgrades cannot move the cluster
    # to an untested version.
    sudo apt-mark hold kubelet kubeadm kubectl
}
851
#initializes kubernetes control plane
function init_kubeadm() {
    # $1: path to the kubeadm cluster configuration yaml.
    # kubelet refuses to start with swap enabled, so turn it off now...
    sudo swapoff -a
    # ...and comment out swap entries in fstab so it stays off after reboot
    # (a backup of the original file is kept as /etc/fstab.bak).
    sudo sed -i.bak '/.*none.*swap/s/^\(.*\)$/#\1/g' /etc/fstab
    sudo kubeadm init --config $1
    sleep 5
}
859
function kube_config_dir() {
    # Make kubectl usable by the regular user: copy the admin kubeconfig
    # generated by kubeadm into ~/.kube and hand ownership to the user.
    # Quote path expansions so the function also works when the manifest
    # dir or $HOME contain whitespace.
    [ ! -d "$K8S_MANIFEST_DIR" ] && FATAL "Cannot Install Kubernetes"
    mkdir -p "$HOME/.kube"
    sudo cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
    sudo chown $(id -u):$(id -g) "$HOME/.kube/config"
}
866
function install_k8s_storageclass() {
    # Install the OpenEBS operator, wait (up to storageclass_timeout seconds)
    # until its hostpath storageclass exists, then mark it as the cluster
    # default storageclass.
    OPENEBS_DIR="$(mktemp -d -q --tmpdir "openebs.XXXXXX")"
    trap 'rm -rf "${OPENEBS_DIR}"' EXIT
    wget -q https://openebs.github.io/charts/openebs-operator-1.6.0.yaml -P $OPENEBS_DIR
    kubectl apply -f $OPENEBS_DIR
    local storageclass_timeout=400
    local counter=0
    local storageclass_ready=""
    echo "Waiting for storageclass"
    while (( counter < storageclass_timeout ))
    do
        # Test the command's exit status directly rather than inspecting $?
        # after a separate statement (fragile if lines are added in between).
        if kubectl get storageclass openebs-hostpath &> /dev/null; then
            echo "Storageclass available"
            storageclass_ready="y"
            break
        else
            counter=$((counter + 15))
            sleep 15
        fi
    done
    [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}
892
function install_k8s_metallb() {
    # Deploy MetalLB and configure a layer2 address pool consisting of just
    # the host's default IP, so LoadBalancer services resolve to this host.
    METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
    # Apply the manifest directly instead of the useless-use-of-cat pipeline.
    kubectl apply -f ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml
    echo "apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE" | kubectl apply -f -
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Download the flannel manifest into a throw-away dir and apply it;
    # abort the installation if it cannot be applied.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # Check the command directly instead of the brittle "[ $? -ne 0 ]" idiom.
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
917
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # Each service's <name>.env file becomes a K8s secret <name>-secret in
    # the OSM namespace; the manifests mount these as container env vars.
    local svc
    kubectl create ns $OSM_STACK_NAME
    for svc in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${svc}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${svc}.env
    done
}
930
#taints K8s master node
function taint_master_node() {
    # Single-node cluster: remove the NoSchedule taint from the master so
    # regular workloads can be scheduled on it.
    # One awk does both the filter and the projection (was two awk processes).
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/ {print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}
937
#deploys osm pods and services
function deploy_osm_services() {
    # Apply every manifest in the K8s work dir into the OSM namespace.
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
942
#deploy charmed services
function deploy_charmed_services() {
    # Create a juju model (named after the OSM stack) on the K8s cloud and
    # deploy the charmed mongodb used as OSM's common database.
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    # deploy mongodb charm
    namespace=$OSM_STACK_NAME
    juju deploy cs:~charmed-osm/mongodb-k8s \
        --config enable-sidecar=true \
        --config replica-set=rs0 \
        --config namespace=$namespace \
        -m $namespace
}
954
function deploy_osm_pla_service() {
    # Deploy the optional Placement (PLA) module: patch its manifest with the
    # configured namespace volume path, then apply it.
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
961
#Install helm and tiller
function install_helm() {
    # Install the helm v2 client if missing, then create the tiller service
    # account, initialize helm in-cluster and wait for tiller to be ready.
    # Use command -v to probe for helm instead of executing it and reading $?.
    if ! command -v helm > /dev/null 2>&1; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has being configured
    if ! kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --stable-repo-url https://charts.helm.sh/stable --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        local tiller_timeout=120
        local counter=0
        local tiller_status=""
        while (( counter < tiller_timeout ))
        do
            # Second column of the no-headers output is READY ("1/1" when up);
            # the awk program quoting is fixed (was '{print $2'}).
            tiller_status=$(kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}')
            [ -n "$tiller_status" ] && [ "$tiller_status" == "1/1" ] && echo "Tiller ready" && break
            counter=$((counter + 5))
            sleep 5
        done
        [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
    fi
}
998
function parse_yaml() {
    # Rewrite image references in the K8s manifests so each listed module
    # uses ${DOCKER_REGISTRY_URL}${DOCKER_USER}/<module>:<TAG>.
    # $1: docker tag; remaining args: service module names.
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            # PLA is optional and lives in its own manifest directory.
            if [ -n "$INSTALL_PLA" ]; then
                # Fixed log message: "\/" inside double quotes is not an
                # escape, so the old message printed a literal backslash.
                echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
                $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
            $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
}
1015
function update_manifest_files() {
    # Point every service manifest at the requested docker tag, and the
    # rebuilt modules (TO_REBUILD) at MODULE_DOCKER_TAG when one was given.
    # Service set depends on which UI flavour was selected.
    if [ -n "$NGUI" ]; then
        osm_services="nbi lcm ro pol mon ng-ui keystone pla"
    else
        osm_services="nbi lcm ro pol mon light-ui keystone pla"
    fi
    # Collect every module NOT listed in TO_REBUILD.
    list_of_services=""
    for module in $osm_services; do
        module_upper="${module^^}"
        # TO_REBUILD refers to the light UI as LW-UI
        [ "$module_upper" == "LIGHT-UI" ] && module_upper="LW-UI"
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            list_of_services="$list_of_services $module"
        fi
    done
    # Map TO_REBUILD back to manifest names: lower case, lw-ui -> light-ui.
    list_of_services_to_rebuild=$(echo ${TO_REBUILD,,} |sed "s/lw-ui/light-ui/g")
    # Tag "9" is presumably what the manifests already carry — skip the
    # rewrite in that case (TODO confirm against manifest defaults).
    [ "$OSM_DOCKER_TAG" == "9" ] || parse_yaml $OSM_DOCKER_TAG $list_of_services
    [ -z "$MODULE_DOCKER_TAG" ] || parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
}
1040
function namespace_vol() {
    # Replace the default hostPath (/var/lib/osm) with the configured
    # namespace volume path in every manifest that declares storage.
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    local svc
    for svc in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
1047
function init_docker_swarm() {
    # Initialize docker swarm on the default IP. If the host MTU is not the
    # docker default of 1500, pre-create the swarm gateway bridge network
    # (docker_gwbridge) with a matching MTU first.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
      # Pick a free 172.x.0.0 subnet: inspect all existing docker networks,
      # take the highest 172.* subnet in use and bump its second octet.
      DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
      DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
      sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
1057
function create_docker_network() {
    echo "creating network"
    # Attachable overlay network shared by all OSM stack services, with the
    # MTU of the host's default interface.
    local overlay_net="net${OSM_STACK_NAME}"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} ${overlay_net}"
    echo "creating network DONE"
}
1063
function deploy_lightweight() {

    # Deploy the OSM docker swarm stack: compute the service port mappings,
    # persist them (plus image tags) into osm_ports.sh, and run
    # "docker stack deploy" with the generated compose files.
    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    # Prometheus is published on host port 9091 (9090 clashes with RO).
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    # With NO_HOST_PORTS only the container port is listed (no host binding);
    # otherwise each entry is host_port:container_port.
    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # osm_ports.sh is sourced below before "docker stack deploy" so the
    # compose files can reference these variables.
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    # Include the PLA compose file only when PLA was requested.
    if [ -n "$INSTALL_PLA" ]; then
        track deploy_osm_pla
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
1123
function deploy_elk() {
    # Pull the ELK images, deploy the osm_elk docker stack and, once Kibana
    # answers on port 5601, create and select the default filebeat index
    # pattern. Best effort: if Kibana is not up in time, print manual steps.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # Poll Kibana's status endpoint every $step seconds, up to $timelength.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
1173
function add_local_k8scluster() {
    # Register the freshly installed K8s cluster in OSM itself: create a
    # dummy VIM account to attach it to, then add the cluster using the
    # local kubeconfig.
    /usr/bin/osm --all-projects vim-create \
      --name _system-osm-vim \
      --account_type dummy \
      --auth_url http://dummy \
      --user osm --password osm --tenant osm \
      --description "dummy" \
      --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
      --creds ${HOME}/.kube/config \
      --vim _system-osm-vim \
      --k8s-nets '{"net1": null}' \
      --version '1.15' \
      --description "OSM Internal Cluster" \
      _system-osm-k8s
}
1190
function install_lightweight() {
    # Main entry point for the standard OSM installation: installs the
    # prerequisites (LXD, juju, docker, optionally Kubernetes), bootstraps or
    # connects the VCA (juju controller), generates all configuration files
    # and finally deploys the OSM services on K8s or on a docker swarm.
    #
    # Fixes in this revision:
    #  - '[ -z "OSM_DATABASE_COMMONKEY" ]' tested a literal string (missing
    #    '$'), so the generated-secret check could never fire.
    #  - '[ -n "$INSTALL_PLA"]' was missing the space before ']', which made
    #    the test error out and silently skipped the optional PLA deployment.
    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine the default-route interface, its IP and MTU; they drive the
    # swarm advertise address, the metallb range and docker network MTUs.
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)

    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    #Installs Kubernetes
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        taint_master_node
        install_k8s_storageclass
        track k8s_storageclass
        install_k8s_metallb
        track k8s_metallb
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Bootstrap or connect to the juju (VCA) controller. Four cases:
    # K8s vs LXD cloud, and fresh bootstrap vs existing CONTROLLER_NAME.
    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then

            if [ -n "$KUBERNETES" ]; then
                juju_createcontroller_k8s
                juju_addlxd_cloud
            else
                if [ -n "$LXD_CLOUD_FILE" ]; then
                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                    OSM_VCA_CLOUDNAME="lxd-cloud"
                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
                fi
                juju_createcontroller
                juju_createproxy
            fi
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                # No cloud/credential files given: generate a self-signed
                # client certificate, register it with the local LXD and
                # build the cloud/credential yamls ourselves.
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                # Indent the PEM blocks so they nest correctly in the yaml.
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi

    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
    if [ -z "$KUBERNETES" ]; then
        if [ -z "$OSM_VCA_APIPROXY" ]; then
            OSM_VCA_APIPROXY=$DEFAULT_IP
            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
        fi
        juju_createproxy
    fi
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # BUGFIX: the '$' was missing here, so the check compared a literal
        # string and could never detect a failed secret generation.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files
    track env_files

    if [ -n "$KUBERNETES" ]; then
        deploy_charmed_services
        kube_secrets
        update_manifest_files
        namespace_vol
        deploy_osm_services
        # BUGFIX: the missing space before ']' made this test always fail
        # with "[: missing ']'", so PLA was never deployed on K8s.
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
            track deploy_osm_pla
        fi
        track deploy_osm_services_k8s
        install_helm
        track install_helm
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    # Health check is advisory: on failure we print how to inspect the
    # deployment and keep going (services often converge after a while).
    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
        track osm_unhealthy
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
        track osm_unhealthy
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
    track end
    return 0
}
1434
function install_to_openstack() {
    # Deploy OSM on an OpenStack VM using the Ansible playbook in devops.
    #   $1 - openrc file path (sourced) or clouds.yaml cloud name
    #   $2 - external network name (mandatory)
    #   $3 - "true" to create and attach a volume to the VM
    # Returns 0 on success; aborts via FATAL on fatal errors.

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip python3-venv
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation.
    # Abort if it cannot be created: otherwise the 'source' below would fail
    # and the pip installs would silently land in the host Python.
    python3 -m venv "$OPENSTACK_PYTHON_VENV" || FATAL "Cannot create venv in $OPENSTACK_PYTHON_VENV"

    source "$OPENSTACK_PYTHON_VENV/bin/activate"

    # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the Openstack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    # Join all repository-related flags into one string for the playbook
    OSM_INSTALLER_ARGS="${REPO_ARGS[*]}"

    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        # $1 is an openrc file: source it so the OS_* variables reach ansible
        . "$1"
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            "$OSM_DEVOPS/installers/openstack/site.yml"
    else
        # $1 is a cloud name defined in clouds.yaml
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            -e cloud_name="$1" "$OSM_DEVOPS/installers/openstack/site.yml"
    fi

    # Exit from venv
    deactivate

    return 0
}
1486
function install_vimemu() {
    # Build and run the vim-emu (emulated VIM) docker container.
    # Requires docker; attaches to the OSM network in lightweight mode.
    echo -e "\nInstalling vim-emu"   # -e so "\n" prints as a newline
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    # NOTE(review): this EXIT trap replaces any previously installed EXIT trap
    # (e.g. the temporary devops-clone cleanup) -- confirm that is acceptable
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work);
    # -y keeps the installer non-interactive, like every other apt call here
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git "$EMUTEMPDIR"
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    # grab the container IP so it can be registered as a VIM endpoint
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "     export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "     osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1519
function install_k8s_monitoring() {
    # Deploy the OSM monitoring stack on the k8s installation: mark the
    # helper scripts under installers/k8s executable, then run the installer.
    local k8s_installers_dir="$OSM_DEVOPS/installers/k8s"
    $WORKDIR_SUDO chmod +x $k8s_installers_dir/*.sh
    $WORKDIR_SUDO $k8s_installers_dir/install_osm_k8s_monitoring.sh
}
1525
function uninstall_k8s_monitoring() {
    # Tear down the OSM monitoring stack via its dedicated uninstall script.
    local uninstall_script="$OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh"
    $WORKDIR_SUDO $uninstall_script
}
1530
function dump_vars(){
    # Print every installer option as "NAME=VALUE", one per line, for the
    # --showopts mode.  The names are listed explicitly (not introspected)
    # so the output keeps its historical order.
    local option_names=(
        DEVELOP
        INSTALL_FROM_SOURCE
        UNINSTALL
        UPDATE
        RECONFIGURE
        TEST_INSTALLER
        INSTALL_VIMEMU
        INSTALL_PLA
        INSTALL_LXD
        INSTALL_LIGHTWEIGHT
        INSTALL_ONLY
        INSTALL_ELK
        INSTALL_NOCACHELXDIMAGES
        # INSTALL_PERFMON intentionally not printed (was commented out)
        INSTALL_TO_OPENSTACK
        OPENSTACK_PUBLIC_NET_NAME
        OPENSTACK_OPENRC_FILE_OR_CLOUD
        OPENSTACK_ATTACH_VOLUME
        OPENSTACK_SSH_KEY_FILE
        OPENSTACK_USERDATA_FILE
        OPENSTACK_VM_NAME
        INSTALL_K8S_MONITOR
        TO_REBUILD
        INSTALL_NOLXD
        INSTALL_NODOCKER
        INSTALL_NOJUJU
        RELEASE
        REPOSITORY
        REPOSITORY_BASE
        REPOSITORY_KEY
        OSM_DEVOPS
        OSM_VCA_HOST
        OSM_VCA_SECRET
        OSM_VCA_PUBKEY
        NO_HOST_PORTS
        DOCKER_NOBUILD
        WORKDIR_SUDO
        OSM_WORK_DIR
        OSM_DOCKER_TAG
        DOCKER_USER
        OSM_STACK_NAME
        PULL_IMAGES
        KUBERNETES
        NGUI
        DOCKER_REGISTRY_URL
        DOCKER_PROXY_URL
        SHOWOPTS
    )
    local name
    for name in "${option_names[@]}"; do
        # ${!name} is bash indirect expansion: the value of the variable
        # whose name is stored in $name
        echo "${name}=${!name}"
    done
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1581
function track(){
    # Report an installation milestone to the OSM telemetry endpoint.
    #   $1 - event suffix (e.g. "start", "juju", "end", ...)
    # SESSION_ID holds the installer's start epoch, so 'duration' is the
    # number of seconds elapsed since the installation began.
    local ctime duration url event_name
    ctime=$(date +%s)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    # Event prefix encodes the kind of installation being performed
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    # Best effort: a telemetry failure must never affect the install flow
    wget -q -O /dev/null "$url" || true
}
1595
function parse_docker_registry_url() {
    # Split DOCKER_REGISTRY_URL, expected as "user:password@registry", into
    # DOCKER_REGISTRY_USER and DOCKER_REGISTRY_PASSWORD; the bare registry
    # host is written back into DOCKER_REGISTRY_URL itself.
    local registry_spec="$DOCKER_REGISTRY_URL"
    DOCKER_REGISTRY_USER=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}' <<<"$registry_spec")
    DOCKER_REGISTRY_PASSWORD=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}' <<<"$registry_spec")
    DOCKER_REGISTRY_URL=$(awk '{split($1,a,"@"); print a[2]}' <<<"$registry_spec")
}
1601
1602 JUJU_AGENT_VERSION=2.8.6
1603 UNINSTALL=""
1604 DEVELOP=""
1605 UPDATE=""
1606 RECONFIGURE=""
1607 TEST_INSTALLER=""
1608 INSTALL_LXD=""
1609 SHOWOPTS=""
1610 COMMIT_ID=""
1611 ASSUME_YES=""
1612 INSTALL_FROM_SOURCE=""
1613 RELEASE="ReleaseNINE"
1614 REPOSITORY="stable"
1615 INSTALL_VIMEMU=""
1616 INSTALL_PLA=""
1617 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1618 LXD_REPOSITORY_PATH=""
1619 INSTALL_LIGHTWEIGHT="y"
1620 INSTALL_TO_OPENSTACK=""
1621 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
1622 OPENSTACK_PUBLIC_NET_NAME=""
1623 OPENSTACK_ATTACH_VOLUME="false"
1624 OPENSTACK_SSH_KEY_FILE=""
1625 OPENSTACK_USERDATA_FILE=""
1626 OPENSTACK_VM_NAME="server-osm"
1627 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
1628 INSTALL_ONLY=""
1629 INSTALL_ELK=""
1630 TO_REBUILD=""
1631 INSTALL_NOLXD=""
1632 INSTALL_NODOCKER=""
1633 INSTALL_NOJUJU=""
1634 KUBERNETES="y"
1635 NGUI="y"
1636 INSTALL_K8S_MONITOR=""
1637 INSTALL_NOHOSTCLIENT=""
1638 INSTALL_NOCACHELXDIMAGES=""
1639 SESSION_ID=`date +%s`
1640 OSM_DEVOPS=
1641 OSM_VCA_HOST=
1642 OSM_VCA_SECRET=
1643 OSM_VCA_PUBKEY=
1644 OSM_VCA_CLOUDNAME="localhost"
1645 OSM_VCA_K8S_CLOUDNAME="k8scloud"
1646 OSM_STACK_NAME=osm
1647 NO_HOST_PORTS=""
1648 DOCKER_NOBUILD=""
1649 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1650 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1651 WORKDIR_SUDO=sudo
1652 OSM_WORK_DIR="/etc/osm"
1653 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1654 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1655 OSM_HOST_VOL="/var/lib/osm"
1656 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1657 OSM_DOCKER_TAG=latest
1658 DOCKER_USER=opensourcemano
1659 PULL_IMAGES="y"
1660 KAFKA_TAG=2.11-1.0.2
1661 PROMETHEUS_TAG=v2.4.3
1662 GRAFANA_TAG=latest
1663 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1664 PROMETHEUS_CADVISOR_TAG=latest
1665 KEYSTONEDB_TAG=10
1666 OSM_DATABASE_COMMONKEY=
1667 ELASTIC_VERSION=6.4.2
1668 ELASTIC_CURATOR_VERSION=5.5.4
1669 POD_NETWORK_CIDR=10.244.0.0/16
1670 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1671 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1672 DOCKER_REGISTRY_URL=
1673 DOCKER_PROXY_URL=
1674 MODULE_DOCKER_TAG=
1675
# ---------------------------------------------------------------------------
# Command-line parsing.  The optstring starts with ':' so that a missing
# argument is routed to the ':' case below instead of getopts printing its
# own error.
# NOTE(review): the space before "hy" in the optstring is kept verbatim from
# the original -- confirm it is not accidental.
# ---------------------------------------------------------------------------
while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
    case "${o}" in
        b)
            # Install from a specific refspec: build locally, do not pull
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            # Container orchestrator: "swarm" or "k8s" (default)
            [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # Fix: the message used to say "-i" although the option is -c
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        n)
            # UI flavour: "lwui" (light UI) or "ngui" (Next Gen UI, default)
            [ "${OPTARG}" == "lwui" ] && NGUI="" && REPO_ARGS+=(-n "${OPTARG}") && continue
            [ "${OPTARG}" == "ngui" ] && continue
            echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            # Install only one optional component, then exit
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                # NOTE(review): this branch is unreachable -- getopts ("O:")
                # never enters the O case with an empty OPTARG
                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        f)
            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
            ;;
        F)
            OPENSTACK_USERDATA_FILE="${OPTARG}"
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            # Accumulate the list of modules to rebuild from source
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            # NOTE(review): an invalid namespace exits with status 0 --
            # confirm callers do not rely on a non-zero code here
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            # -P takes a path to the VCA public key file; store its contents
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        -)
            # Long options arrive as "-" with the name in OPTARG; options
            # only relevant to the charmed installer are accepted and ignored
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1856
# --- Post-parsing fixups and sanity checks ---------------------------------
[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
# -m NONE is exclusive: reject it when combined with any other -m module
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

# --showopts: just print the effective configuration and exit
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

# --charmed: delegate everything to the charmed (juju bundle) installer
if [ -n "$CHARMED" ]; then
    # NOTE(review): $DOCKER_TAG is never assigned in this script (the -t
    # option sets OSM_DOCKER_TAG) -- confirm it is meant to come from the
    # environment, otherwise -t passes an empty value here
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi

    exit 0
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

# -O: install OSM on an OpenStack VM and stop here
[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

# The '|| ! echo ... || sudo ...' chains below mean: if dpkg says a package
# is missing, print the notice (negated so the chain keeps going) and then
# try to update/install; FATAL only if the apt command itself fails.
echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
sudo snap install jq
# Locate (or clone) the devops repo that carries the rest of the installer
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        # Clean the temporary clone on any exit path
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            # No refspec given: pick the highest version tag in the repo
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

# Bring in shared helpers (FATAL, ask_user, install_lightweight, ...)
. $OSM_DEVOPS/common/all_funcs

# Per-stack work dirs when a non-default stack name (-s) is used
[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
# Dispatch short-circuit modes: uninstall, or -o single-component installs
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
# Telemetry ping (README fetch) marking the start of a real installation
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README.txt &> /dev/null
track start

# Default path: lightweight (container-based) install, then exit
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Telemetry ping marking the end of the installation
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
track end
echo -e "\nDONE"