2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
31 echo -e " -H <VCA host> use specific juju host controller IP"
32 echo -e " -S <VCA secret> use VCA/juju secret key"
33 echo -e " -P <VCA pubkey> use VCA/juju public key file"
34 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
35 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
36 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
37 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
38 echo -e " --pla: install the PLA module for placement support"
39 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
40 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
41 echo -e " -D <devops path> use local devops installation path"
42 echo -e " -w <work dir> Location to store runtime installation"
43 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
44 echo -e " -l: LXD cloud yaml file"
45 echo -e " -L: LXD credentials yaml file"
46 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
47 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
48 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
49 echo -e " --nojuju: do not juju, assumes already installed"
50 echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
51 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
52 echo -e " --nohostclient: do not install the osmclient"
53 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
54 echo -e " --source: install OSM from source code using the latest stable tag"
55 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
56 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
57 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
58 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
59 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
60 echo -e " --showopts: print chosen options and exit (only for debugging)"
61 echo -e " -y: do not prompt for confirmation, assumes yes"
62 echo -e " -h / --help: print this help"
63 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
64 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
65 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
66 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
67 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
68 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
69 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
70 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
71 echo -e " [--tag]: Docker image tag"
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
# Args: $1 - controller name to look up.
# Output: the controller's password on stdout (no trailing newline).
function parse_juju_password {
    local password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: a YAML key, fs: unlikely field separator (0x1c)
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # sed flattens "key: value" lines into indent<FS>key<FS>value records;
    # awk tracks the nesting path and prints the value of the "password"
    # key found under the requested controller.
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$password_file" |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
# Emit 32 random alphanumeric characters (no trailing newline), suitable
# as a one-shot password/secret.
function generate_secret() {
    # head bounds the read from /dev/urandom; tr keeps only [A-Za-z0-9]
    head /dev/urandom | tr -dc 'A-Za-z0-9' | head -c 32
}
# Remove OSM storage.
# Kubernetes mode: $1 is a host path (namespace volume dir) to delete.
# Swarm mode:      $1 is the stack name; remove its named docker volumes.
function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
# Remove the docker overlay network of the given stack ($1).
function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
# Delete the DNAT rule that forwards port 17070 (juju API) on the host's
# default-route IP to the VCA/juju controller, then persist the tables.
# Args: $1 - stack (controller) name, used to discover the VCA host.
function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Extract the controller IP from "juju show-controller" output
        OSM_VCA_HOST=$(sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=$(ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}')
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi
    # -C checks whether the rule exists; only then delete (-D) and save
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Remove a docker swarm stack ($1) and wait (up to ~30s) until all of its
# containers are gone; FATAL if they do not disappear.
function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
    fi
}
#removes osm deployments and services
# NOTE(review): the body line was lost in this copy; deleting the whole
# namespace ($1) is the upstream behavior — verify against upstream.
function remove_k8s_namespace() {
    kubectl delete ns $1
}
#removes helm only if there is nothing deployed in helm
function remove_helm() {
    # "helm ls -q" empty means no releases are deployed
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}
# Purge both the python2 and python3 osmclient debian packages.
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}
#Uninstall lightweight OSM: remove dockers
# Removes either just an addon (when INSTALL_ONLY is set) or the whole OSM
# deployment: stacks/namespaces, images, volumes, networks, NAT rules and
# the juju controller.
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            # NOTE(review): stack removal restored from upstream — verify
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
        fi
        echo "Now osm docker images and volumes will be deleted"
        # newgrp so the docker group membership is effective in this shell
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG
        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        # Only destroy the juju controller if we bootstrapped it ourselves
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    # BUG FIX: the condition must be negated — install only when dpkg
    # reports the package as NOT installed.
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        # Preseed debconf so apt does not prompt about saving current rules
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
#Configure NAT rules, based on the current IP addresses of containers
# NOTE(review): the function header line was lost in this copy;
# reconstructed as "nat" per the installer's usual naming — verify upstream.
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
# Print a fatal error message and abort the installer with status 1.
# Args: $1 - reason to report.
# NOTE(review): the surrounding function lines were lost in this copy;
# wrapper and exit restored — verify against upstream.
function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}
# Install and configure the LXD snap: remove deb-packaged LXC/LXD, install
# the snap, preseed "lxd init" with the host's default IP, and set the
# default profile MTU to match the default interface.
function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap (replacing any deb-packaged LXC/LXD)
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd $(whoami)
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"

    # Match the LXD default profile MTU to the host's default interface
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask;   $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
function ask_user(){
    read -e -p "$1" USER_CONFIRMATION
    # Keep asking until a recognizable answer (or the default) is given
    while true; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
# Install the OSM client and IM debian packages from the configured
# repository, plus the python dependencies they need.
# Reads: RELEASE, REPOSITORY, REPOSITORY_BASE (may carry their CLI-flag
# prefixes, stripped here), INSTALL_LIGHTWEIGHT.
function install_osmclient(){
    # Strip the option prefixes ("-R ", "-r ", "-u ") carried by the vars
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=$(lxc list | awk '($2=="SO-ub"){print $6}')
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=$(lxc list | awk '($2=="RO"){print $6}')
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
# Install and start the Prometheus node_exporter as a systemd service,
# creating its dedicated system user if needed. No-op when already active.
# Reads: PROMETHEUS_NODE_EXPORTER_TAG (release version), OSM_DEVOPS.
function install_prometheus_nodeexporter(){
    if systemctl -q is-active node_exporter; then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Download the release tarball, install the binary, clean up
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        # Install and activate the systemd unit
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
# Stop, disable and remove the node_exporter service, its user and binary.
function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # sg makes the new group membership effective for the check below
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
function install_docker_compose() {
    # installs and configures docker-compose (pinned to 1.18.0)
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
# Install juju 2.7 from snap and make sure /snap/bin is on PATH.
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.7/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
    return 0
}
# Bootstrap the juju controller $OSM_STACK_NAME on $OSM_VCA_CLOUDNAME if it
# does not exist yet, then verify exactly one matching controller is listed.
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # BUG FIX: \$1 must be escaped so awk (not the shell) expands the field;
    # unescaped it expanded to the empty shell positional parameter.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}" | wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
# Add (if absent) a DNAT rule forwarding port 17070 on the host's default
# IP to the VCA/juju controller, and persist the rule set.
function juju_createproxy() {
    check_install_iptables_persistent

    # -C checks for the rule; only append (-A) and save when it is missing
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Pull third-party images and pull or build the OSM module images.
# Reads: COMMIT_ID, TO_REBUILD (module filter), PULL_IMAGES, INSTALL_PLA,
#        DOCKER_USER, OSM_DOCKER_TAG, LWTEMPDIR, *_TAG version pins.
function generate_docker_images() {
    echo "Pulling and generating docker images"
    # Ref (branch/tag/commit) the module images are built from
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"
    echo "OSM Docker images generated from $_build_from"

    # Build arguments forwarded to every local "docker build"
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # Third-party infrastructure images (pulled, never built)
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # OSM module images: pull from docker hub, or clone+build from gerrit
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # PLA is optional: only handled when INSTALL_PLA or explicitly requested
    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient ; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # NOTE(review): this repeats the cadvisor pull done above under the
    # PROMETHEUS-CADVISOR check — looks redundant; confirm before removing.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
# Copy $1 to $2 unless both files already have identical content.
# If $2 exists with different content, ask the user before overwriting
# (default: no); cp -b keeps a backup of the overwritten file.
function cmp_overwrite() {
    local file1="$1"
    local file2="$2"
    # cmp -s: silent compare; non-zero when files differ or $2 is missing.
    # (Replaces the fragile "if ! $(cmp ...)" command-substitution idiom.)
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
# Generate (or refresh in place) the per-module env files and deployment
# descriptors under $OSM_DOCKER_WORK_DIR. Existing env files are backed up
# with a '~' suffix; keys already present are updated via sed, missing keys
# are appended. Secrets are freshly generated only for files that do not
# exist yet.
function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose descriptor
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        if [ -n "$INSTALL_PLA" ]; then
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
        fi

        # Prometheus files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

        # Grafana files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

        # Prometheus Exporters files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
#generates the sidecar script used to run the osmclient container
function generate_osmclient_script () {
    # The wrapper simply runs the osmclient container attached to the OSM network
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    # Add the Google Cloud apt signing key and the Kubernetes repository
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    echo "Installing Kubernetes Packages ..."
    # kubelet/kubeadm/kubectl are pinned to the version validated for this OSM release
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
function init_kubeadm() {
    # $1: path to the kubeadm cluster configuration yaml (cluster-config.yaml)
    sudo kubeadm init --config $1
}
#copies the cluster admin kubeconfig so kubectl works for the installing user
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    # NOTE(review): this mkdir line was dropped by the extraction and is
    # restored — the following cp fails without it; confirm against upstream
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    # hand ownership of the kubeconfig to the invoking (non-root) user
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    # fetch the flannel manifest and apply it to the cluster
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    # one secret per OSM module, fed from the env files generated earlier
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}
#deploys osm pods and services
function deploy_osm_services() {
    # find the master node name (3rd column of `kubectl get nodes` contains the role)
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/ {print $1}')
    # remove the NoSchedule taint so workloads can run on a single-node cluster
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
#deploys the optional PLA (placement) service
function deploy_osm_pla_service() {
    # corresponding to parse_yaml: point the pla manifest at the requested tag
    [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol: relocate the hostPath volume
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
#Install helm and tiller
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        # NOTE(review): cleanup of the extracted linux-amd64 dir was dropped by
        # the extraction; restored here — confirm against upstream
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has being configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120
        counter=0
        while (( counter < tiller_timeout ))
        do
            # fixed awk quoting typo from the original: '{print $2'} -> '{print $2}'
            tiller_status=$(kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}')
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && break
            # advance the counter so the loop cannot spin forever
            counter=$((counter + 2))
            sleep 2
        done
    fi
}
#rewrites the image tag of every OSM service manifest
function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui keystone"
    # NOTE(review): TAG=$1 was dropped by the extraction and is restored —
    # callers invoke `parse_yaml $OSM_DOCKER_TAG`; confirm against upstream
    TAG=$1
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
#relocates the hostPath volume of every stateful OSM manifest to the namespace dir
function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
#initializes the docker swarm; recreates docker_gwbridge when a non-default MTU is needed
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # list existing docker networks and derive a free 172.x subnet for the gwbridge
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
}
#creates the attachable overlay network used by the OSM stack
function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
#deploys the OSM lightweight build as a docker swarm stack
function deploy_lightweight() {
    echo "Deploying lightweight build"
    # Service ports.
    # NOTE(review): OSM_NBI_PORT/OSM_RO_PORT/OSM_UI_PORT/OSM_MON_PORT/OSM_PROM_PORT
    # defaults were dropped by the extraction and are restored from the upstream
    # installer — confirm against upstream.
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # expose the services only inside the overlay network (no host binding)
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container mappings; prometheus publishes on 9091 to avoid
        # clashing with RO on host port 9090
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    # Persist the deployment environment consumed by docker-compose
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
#pulls the ELK images and deploys the ELK stack on docker swarm
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # NOTE(review): loop initialisation and closure lines were dropped by the
    # extraction; reconstructed (poll Kibana for up to 40s) — confirm upstream
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
  "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
  -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
  "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
  -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
}
#######################################
# Installs the OSM "lightweight" (containerized) build end to end: prepares the
# work directories, bootstraps LXD/juju (VCA) or registers an external LXD
# cloud/controller, derives the VCA host/secret/pubkey/cacert/apiproxy values,
# installs docker (plus kubernetes when -c k8s was given), generates the docker
# env files and finally deploys the OSM pods (k8s) or swarm stack.
# NOTE(review): this block is a mangled text extraction — statements are split
# across lines and several structural lines (else/fi, heredoc EOF terminators,
# the closing brace) were dropped, so the code is left byte-identical here
# rather than reformatted.
# NOTE(review): visible upstream bugs to address when restoring the file:
#   - near marker "1118": `[ -n "$INSTALL_PLA"]` is missing a space before `]`
#   - near marker "1081": `[ -z "OSM_DATABASE_COMMONKEY" ]` tests a literal
#     string (missing `$`), so its FATAL branch can never trigger
#######################################
949 function install_lightweight
() {
950 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR
="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
951 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR
="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
952 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir
-p $OSM_DOCKER_WORK_DIR
953 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir
-p $OSM_DOCKER_WORK_DIR/osm_pla
954 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers
/docker
/cluster-config.yaml
$OSM_DOCKER_WORK_DIR/cluster-config.yaml
957 [ "$USER" == "root" ] && FATAL
"You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
960 if [ -n "$KUBERNETES" ]; then
961 [ -z "$ASSUME_YES" ] && ! ask_user
"The installation will do the following
962 1. Install and configure LXD
965 4. Disable swap space
966 5. Install and initialize Kubernetes
968 Do you want to proceed (Y/n)? " y
&& echo "Cancelled!" && exit 1
971 [ -z "$ASSUME_YES" ] && ! ask_user
"The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y
&& echo "Cancelled!" && exit 1
975 echo "Installing lightweight build of OSM"
976 LWTEMPDIR
="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX
")"
977 trap 'rm -rf "${LWTEMPDIR}"' EXIT
978 DEFAULT_IF
=$
(ip route list|
awk '$1=="default" {print $5; exit}')
979 [ -z "$DEFAULT_IF" ] && DEFAULT_IF
=$
(route
-n |
awk '$1~/^0.0.0.0/ {print $8; exit}')
980 [ -z "$DEFAULT_IF" ] && FATAL
"Not possible to determine the interface with the default route 0.0.0.0"
981 DEFAULT_IP
=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
982 [ -z "$DEFAULT_IP" ] && FATAL
"Not possible to determine the IP address of the interface with the default route"
983 DEFAULT_MTU
=$
(ip addr show
${DEFAULT_IF} | perl
-ne 'if (/mtu\s(\d+)/) {print $1;}')
985 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
986 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
987 need_packages_lw
="snapd"
988 echo -e "Checking required packages: $need_packages_lw"
989 dpkg
-l $need_packages_lw &>/dev
/null \
990 ||
! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
991 || sudo apt-get update \
992 || FATAL
"failed to run apt-get update"
993 dpkg
-l $need_packages_lw &>/dev
/null \
994 ||
! echo -e "Installing $need_packages_lw requires root privileges." \
995 || sudo apt-get
install -y $need_packages_lw \
996 || FATAL
"failed to install $need_packages_lw"
1002 [ -z "$INSTALL_NOJUJU" ] && install_juju
1005 if [ -z "$OSM_VCA_HOST" ]; then
1006 if [ -z "$CONTROLLER_NAME" ]; then
1007 if [ -n "$LXD_CLOUD_FILE" ]; then
1008 [ -z "$LXD_CRED_FILE" ] && FATAL
"The installer needs the LXD credential yaml if the LXD is external"
1009 OSM_VCA_CLOUDNAME
="lxd-cloud"
1010 juju add-cloud
$OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud
$OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
1011 juju add-credential
$OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential
$OSM_VCA_CLOUDNAME lxd-cloud-creds
-f $LXD_CRED_FILE
1013 juju_createcontroller
1015 OSM_VCA_CLOUDNAME
="lxd-cloud"
1016 if [ -n "$LXD_CLOUD_FILE" ]; then
1017 [ -z "$LXD_CRED_FILE" ] && FATAL
"The installer needs the LXD credential yaml if the LXD is external"
1018 juju add-cloud
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud
-c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
1019 juju add-credential
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud
-c $CONTROLLER_NAME -f $LXD_CRED_FILE
1022 cat << EOF > ~/.osm/lxd-cloud.yaml
1026 auth-types: [certificate]
1027 endpoint: "https://$DEFAULT_IP:8443"
1029 ssl-hostname-verification: false
1031 openssl req
-nodes -new -x509 -keyout ~
/.osm
/client.key
-out ~
/.osm
/client.crt
-days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
1032 local server_cert
=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
1033 local client_cert
=`cat ~/.osm/client.crt | sed 's/^/ /'`
1034 local client_key
=`cat ~/.osm/client.key | sed 's/^/ /'`
1035 cat << EOF > ~/.osm/lxd-credentials.yaml
1039 auth-type: certificate
1047 lxc config trust add
local: ~
/.osm
/client.crt
1048 juju add-cloud
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~
/.osm
/lxd-cloud.yaml
--force || juju update-cloud lxd-cloud
-c $CONTROLLER_NAME -f ~
/.osm
/lxd-cloud.yaml
1049 juju add-credential
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~
/.osm
/lxd-credentials.yaml || juju update-credential lxd-cloud
-c $CONTROLLER_NAME -f ~
/.osm
/lxd-credentials.yaml
1052 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1053 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1054 [ -z "$OSM_VCA_HOST" ] && FATAL
"Cannot obtain juju controller IP address"
1056 track juju_controller
1058 if [ -z "$OSM_VCA_SECRET" ]; then
1059 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$OSM_STACK_NAME)
1060 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$CONTROLLER_NAME)
1061 [ -z "$OSM_VCA_SECRET" ] && FATAL
"Cannot obtain juju secret"
1063 if [ -z "$OSM_VCA_PUBKEY" ]; then
1064 OSM_VCA_PUBKEY
=$
(cat $HOME/.local
/share
/juju
/ssh
/juju_id_rsa.pub
)
1065 [ -z "$OSM_VCA_PUBKEY" ] && FATAL
"Cannot obtain juju public key"
1067 if [ -z "$OSM_VCA_CACERT" ]; then
1068 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
1069 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
1070 [ -z "$OSM_VCA_CACERT" ] && FATAL
"Cannot obtain juju CA certificate"
1072 if [ -z "$OSM_VCA_APIPROXY" ]; then
1073 OSM_VCA_APIPROXY
=$DEFAULT_IP
1074 [ -z "$OSM_VCA_APIPROXY" ] && FATAL
"Cannot obtain juju api proxy"
1079 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1080 OSM_DATABASE_COMMONKEY
=$
(generate_secret
)
1081 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL
"Cannot generate common db secret"
1084 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
1087 #Installs Kubernetes and deploys osm services
1088 if [ -n "$KUBERNETES" ]; then
1091 init_kubeadm
$OSM_DOCKER_WORK_DIR/cluster-config.yaml
1095 #install_docker_compose
1096 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1100 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1103 generate_docker_env_files
1105 if [ -n "$KUBERNETES" ]; then
1106 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1107 # uninstall OSM MONITORING
1108 uninstall_k8s_monitoring
1109 track uninstall_k8s_monitoring
1111 #remove old namespace
1112 remove_k8s_namespace
$OSM_STACK_NAME
1115 [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml
$OSM_DOCKER_TAG
1118 if [ -n "$INSTALL_PLA"]; then
1119 # optional PLA install
1120 deploy_osm_pla_service
1122 track deploy_osm_services_k8s
1123 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1124 # install OSM MONITORING
1125 install_k8s_monitoring
1126 track install_k8s_monitoring
1130 remove_stack
$OSM_STACK_NAME
1131 create_docker_network
1133 generate_osmclient_script
1135 install_prometheus_nodeexporter
1137 [ -n "$INSTALL_VIMEMU" ] && install_vimemu
&& track vimemu
1138 [ -n "$INSTALL_ELK" ] && deploy_elk
&& track elk
1141 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1144 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-7.0
-seven/README2.txt
&> /dev
/null
#installs the vim-emu emulated VIM (docker container) for testing
function install_vimemu() {
    # fixed: original used plain echo with a literal "\n"; -e is required
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    # NOTE(review): a short sleep before inspecting the container was dropped
    # by the extraction and is restored — confirm against upstream
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
#installs the OSM monitoring add-on for kubernetes deployments
function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
#removes the OSM monitoring add-on from a kubernetes deployment
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
#prints the effective installer settings (used by -s/--showopts)
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # fixed: the original printed $OSM_STACK_NAME under the OSM_WORK_DIR label
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
#reports an installation-progress event to OSM's woopra analytics endpoint
# $1: event suffix (e.g. "juju_controller", "deploy_osm_services_k8s")
# NOTE(review): the function header and the ctime/event_name initialisation
# were dropped by the extraction and are reconstructed — confirm upstream.
function track(){
    ctime=`date +%s`
    # elapsed install time in seconds (SESSION_ID is the start timestamp)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
# Default installer settings.
# NOTE(review): only the assignments visible in the extraction are restored;
# some defaults (e.g. OSM_STACK_NAME) live on lines the extraction dropped —
# confirm against upstream before relying on variables referenced below.
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseSEVEN"
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
# session start timestamp, used both as analytics cookie and duration origin
SESSION_ID=`date +%s`
OSM_VCA_CLOUDNAME="localhost"
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# regex used to validate a user-supplied k8s namespace / stack name (-s)
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1306 while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:l:L:K:-: hy" o
; do
1313 REPOSITORY
="${OPTARG}"
1314 REPO_ARGS
+=(-r "$REPOSITORY")
1317 [ "${OPTARG}" == "swarm" ] && continue
1318 [ "${OPTARG}" == "k8s" ] && KUBERNETES
="y" && continue
1319 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1323 REPOSITORY_KEY
="${OPTARG}"
1324 REPO_ARGS
+=(-k "$REPOSITORY_KEY")
1327 REPOSITORY_BASE
="${OPTARG}"
1328 REPO_ARGS
+=(-u "$REPOSITORY_BASE")
1332 REPO_ARGS
+=(-R "$RELEASE")
1335 OSM_DEVOPS
="${OPTARG}"
1339 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU
="y" && continue
1340 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK
="y" && continue
1341 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
1344 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD
="$TO_REBUILD LW-UI" && continue
1345 [ "${OPTARG}" == "NBI" ] && TO_REBUILD
="$TO_REBUILD NBI" && continue
1346 [ "${OPTARG}" == "LCM" ] && TO_REBUILD
="$TO_REBUILD LCM" && continue
1347 [ "${OPTARG}" == "RO" ] && TO_REBUILD
="$TO_REBUILD RO" && continue
1348 [ "${OPTARG}" == "MON" ] && TO_REBUILD
="$TO_REBUILD MON" && continue
1349 [ "${OPTARG}" == "POL" ] && TO_REBUILD
="$TO_REBUILD POL" && continue
1350 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD
="$TO_REBUILD KAFKA" && continue
1351 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD
="$TO_REBUILD MONGO" && continue
1352 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS" && continue
1353 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1354 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD
="$TO_REBUILD KEYSTONE-DB" && continue
1355 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD
="$TO_REBUILD GRAFANA" && continue
1356 [ "${OPTARG}" == "NONE" ] && TO_REBUILD
="$TO_REBUILD NONE" && continue
1357 [ "${OPTARG}" == "PLA" ] && TO_REBUILD
="$TO_REBUILD PLA" && continue
1360 OSM_VCA_HOST
="${OPTARG}"
1363 OSM_VCA_SECRET
="${OPTARG}"
1366 OSM_STACK_NAME
="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~
$RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1369 # when specifying workdir, do not use sudo for access
1371 OSM_WORK_DIR
="${OPTARG}"
1374 OSM_DOCKER_TAG
="${OPTARG}"
1377 DOCKER_USER
="${OPTARG}"
1380 OSM_VCA_PUBKEY
=$
(cat ${OPTARG})
1383 OSM_VCA_APIPROXY
="${OPTARG}"
1386 LXD_CLOUD_FILE
="${OPTARG}"
1389 LXD_CRED_FILE
="${OPTARG}"
1392 CONTROLLER_NAME
="${OPTARG}"
1395 [ "${OPTARG}" == "help" ] && usage
&& exit 0
1396 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE
="y" && PULL_IMAGES
="" && continue
1397 [ "${OPTARG}" == "develop" ] && DEVELOP
="y" && continue
1398 [ "${OPTARG}" == "uninstall" ] && UNINSTALL
="y" && continue
1399 [ "${OPTARG}" == "update" ] && UPDATE
="y" && continue
1400 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE
="y" && continue
1401 [ "${OPTARG}" == "test" ] && TEST_INSTALLER
="y" && continue
1402 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD
="y" && continue
1403 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD
="y" && continue
1404 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER
="y" && continue
1405 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT
="y" && continue
1406 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU
="y" && continue
1407 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK
="y" && continue
1408 [ "${OPTARG}" == "showopts" ] && SHOWOPTS
="y" && continue
1409 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS
="y" && continue
1410 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU
="y" && continue
1411 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD
="y" && continue
1412 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT
="y" && continue
1413 [ "${OPTARG}" == "pullimages" ] && continue
1414 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
1415 [ "${OPTARG}" == "charmed" ] && CHARMED
="y" && continue
1416 [ "${OPTARG}" == "bundle" ] && continue
1417 [ "${OPTARG}" == "k8s" ] && continue
1418 [ "${OPTARG}" == "lxd" ] && continue
1419 [ "${OPTARG}" == "lxd-cred" ] && continue
1420 [ "${OPTARG}" == "microstack" ] && continue
1421 [ "${OPTARG}" == "ha" ] && continue
1422 [ "${OPTARG}" == "tag" ] && continue
1423 [ "${OPTARG}" == "pla" ] && INSTALL_PLA
="y" && continue
1424 echo -e "Invalid option: '--$OPTARG'\n" >&2
1428 echo "Option -$OPTARG requires an argument" >&2
1432 echo -e "Invalid option: '-$OPTARG'\n" >&2
1447 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD |
grep -q NONE
&& FATAL
"Incompatible option: -m NONE cannot be used with other -m options"
1448 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL
"Incompatible option: -m PLA cannot be used without --pla option"
1450 if [ -n "$SHOWOPTS" ]; then
1455 if [ -n "$CHARMED" ]; then
1456 if [ -n "$UNINSTALL" ]; then
1457 /usr
/share
/osm-devops
/installers
/charmed_uninstall.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr
/share
/osm-devops
-t $DOCKER_TAG "$@"
1459 /usr
/share
/osm-devops
/installers
/charmed_install.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr
/share
/osm-devops
-t $DOCKER_TAG "$@"
1461 echo "Your installation is now complete, follow these steps for configuring the osmclient:"
1463 echo "1. Get the NBI IP with the following command:"
1465 echo NBI_IP
='`juju status --format json | jq -rc '"'"'.applications."nbi-k8s".address'"'"'`'
1467 echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
1469 echo "export OSM_HOSTNAME=\$NBI_IP"
1471 echo "3. Add the previous command to your .bashrc for other Shell sessions"
1473 echo "echo \"export OSM_HOSTNAME=\$NBI_IP\" >> ~/.bashrc"
1481 # if develop, we force master
1482 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID
="master"
1484 need_packages
="git wget curl tar"
1485 echo -e "Checking required packages: $need_packages"
1486 dpkg
-l $need_packages &>/dev
/null \
1487 ||
! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1488 || sudo apt-get update \
1489 || FATAL
"failed to run apt-get update"
1490 dpkg
-l $need_packages &>/dev
/null \
1491 ||
! echo -e "Installing $need_packages requires root privileges." \
1492 || sudo apt-get
install -y $need_packages \
1493 || FATAL
"failed to install $need_packages"
1494 sudo snap
install jq
1495 if [ -z "$OSM_DEVOPS" ]; then
1496 if [ -n "$TEST_INSTALLER" ]; then
1497 echo -e "\nUsing local devops repo for OSM installation"
1498 OSM_DEVOPS
="$(dirname $(realpath $(dirname $0)))"
1500 echo -e "\nCreating temporary dir for OSM installation"
1501 OSM_DEVOPS
="$(mktemp -d -q --tmpdir "installosm.XXXXXX
")"
1502 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1504 git clone https
://osm.etsi.org
/gerrit
/osm
/devops.git
$OSM_DEVOPS
1506 if [ -z "$COMMIT_ID" ]; then
1507 echo -e "\nGuessing the current stable release"
1508 LATEST_STABLE_DEVOPS
=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1509 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1511 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1512 COMMIT_ID
="tags/$LATEST_STABLE_DEVOPS"
1514 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1516 git
-C $OSM_DEVOPS checkout
$COMMIT_ID
1520 .
$OSM_DEVOPS/common
/all_funcs
1522 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight
&& echo -e "\nDONE" && exit 0
1523 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1524 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1525 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1526 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1527 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1529 #Installation starts here
1530 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-7.0
-seven/README.txt
&> /dev
/null
1533 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight
&& echo -e "\nDONE" && exit 0
1534 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1535 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1536 ! ask_user
"The installation will take about 75-90 minutes. Continue (Y/n)? " y
&& echo "Cancelled!" && exit 1
1539 echo -e "Checking required packages: lxd"
1540 lxd
--version &>/dev
/null || FATAL
"lxd not present, exiting."
1541 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1543 # use local devops for containers
1544 export OSM_USE_LOCAL_DEVOPS
=true
1548 #Install vim-emu (optional)
1549 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce
&& install_vimemu
1551 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-7.0
-seven/README2.txt
&> /dev
/null