2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
31 echo -e " -H <VCA host> use specific juju host controller IP"
32 echo -e " -S <VCA secret> use VCA/juju secret key"
33 echo -e " -P <VCA pubkey> use VCA/juju public key file"
34 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
35 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
36 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
37 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
38 echo -e " --pla: install the PLA module for placement support"
39 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
40 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack)"
41 echo -e " -D <devops path> use local devops installation path"
42 echo -e " -w <work dir> Location to store runtime installation"
43 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
44 echo -e " -l: LXD cloud yaml file"
45 echo -e " -L: LXD credentials yaml file"
46 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
47 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
48 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
49 echo -e " --nojuju: do not juju, assumes already installed"
50 echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
51 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
52 echo -e " --nohostclient: do not install the osmclient"
53 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
54 echo -e " --source: install OSM from source code using the latest stable tag"
55 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
56 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
57 echo -e " --k8s_monitor: install the OSM kubernetes moitoring with prometheus and grafana"
58 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
59 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
60 echo -e " --showopts: print chosen options and exit (only for debugging)"
61 echo -e " -y: do not prompt for confirmation, assumes yes"
62 echo -e " -h / --help: print this help"
63 echo -e " --charmed: install OSM with charms"
64 echo -e " --bundle <bundle path>: Specify with which bundle to deploy OSM with charms (--charmed option)"
65 echo -e " --kubeconfig <kubeconfig path>: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
66 echo -e " --controller <name>: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
67 echo -e " --lxd-cloud <yaml path>: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
68 echo -e " --lxd-credentials <yaml path>: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
69 echo -e " --microstack: Installs microstack as a vim. (--charmed option)"
70 echo -e " --tag: Docker image tag"
74 # takes a juju/accounts.yaml file and returns the password specific
75 # for a controller. I wrote this using only bash tools to minimize
76 # additions of other packages
77 function parse_juju_password
{
78 password_file
="${HOME}/.local/share/juju/accounts.yaml"
79 local controller_name
=$1
80 local s
='[[:space:]]*' w
='[a-zA-Z0-9_-]*' fs
=$
(echo @|
tr @
'\034')
81 sed -ne "s|^\($s\):|\1|" \
82 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
83 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
84 awk -F$fs -v controller
=$controller_name '{
85 indent = length($1)/2;
87 for (i in vname) {if (i > indent) {delete vname[i]}}
89 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
90 if (match(vn,controller) && match($2,"password")) {
# Print a 32-character alphanumeric random secret on stdout.
# The original piped `head /dev/urandom` (i.e. the first 10 "lines" of the
# random stream) into tr, which can in principle produce fewer than 32
# characters; filtering the stream directly guarantees exactly 32.
function generate_secret() {
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
# Remove the persistent storage of an OSM deployment.
# $1 - host path of the namespace volume (Kubernetes mode) or the docker
#      stack name (swarm mode).
# Globals: KUBERNETES (read), WORKDIR_SUDO (read).
# NOTE(review): the k8_volume/stack assignments were reconstructed from the
# uses of ${k8_volume}/${stack} below — confirm against upstream.
function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
# Delete the docker overlay network of a deployment.
# $1 - stack name; the network is named "net<stack>".
# NOTE(review): stack=$1 reconstructed from the ${stack} use below — confirm.
function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
# Delete the DNAT rule that forwards host port 17070 (juju API) to the
# VCA/juju controller, and persist the iptables state.
# $1 - juju controller / stack name (used to look up the VCA host).
# Globals: OSM_VCA_HOST, DEFAULT_IP, DEFAULT_IF (read; derived lazily when
# empty).
# NOTE(review): stack=$1 and the fi/closing-brace lines were reconstructed
# from the uses below — confirm against upstream.
function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Extract the controller IP from "juju show-controller" output
        OSM_VCA_HOST=$(sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$DEFAULT_IP" ]; then
        # 'route -n' is legacy net-tools; field 8 is the interface name
        DEFAULT_IF=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=$(ip -o -4 a | grep ${DEFAULT_IF} | awk '{split($4,a,"/"); print a[1]}')
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi
    # -C only checks whether the rule exists; delete (-D) and save only then
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
140 function remove_stack
() {
142 if sg docker
-c "docker stack ps ${stack}" ; then
143 echo -e "\nRemoving stack ${stack}" && sg docker
-c "docker stack rm ${stack}"
146 while [ ${COUNTER} -lt 30 ]; do
147 result
=$
(sg docker
-c "docker stack ps ${stack}" |
wc -l)
148 #echo "Dockers running: $result"
149 if [ "${result}" == "0" ]; then
152 let COUNTER
=COUNTER
+1
155 if [ "${result}" == "0" ]; then
156 echo "All dockers of the stack ${stack} were removed"
158 FATAL
"Some dockers of the stack ${stack} could not be removed. Could not clean it."
164 #removes osm deployments and services
165 function remove_k8s_namespace
() {
169 #Uninstall lightweight OSM: remove dockers
170 function uninstall_lightweight
() {
171 if [ -n "$INSTALL_ONLY" ]; then
172 if [ -n "$INSTALL_ELK" ]; then
173 echo -e "\nUninstalling OSM ELK stack"
175 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
178 echo -e "\nUninstalling OSM"
179 if [ -n "$KUBERNETES" ]; then
180 if [ -n "$INSTALL_K8S_MONITOR" ]; then
181 # uninstall OSM MONITORING
182 uninstall_k8s_monitoring
184 remove_k8s_namespace
$OSM_STACK_NAME
187 remove_stack
$OSM_STACK_NAME
190 echo "Now osm docker images and volumes will be deleted"
191 newgrp docker
<< EONG
192 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
193 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
194 docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
195 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
196 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
197 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
198 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
199 docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
200 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
203 if [ -n "$KUBERNETES" ]; then
204 OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
205 remove_volumes
$OSM_NAMESPACE_VOL
207 remove_volumes
$OSM_STACK_NAME
208 remove_network
$OSM_STACK_NAME
210 [ -z "$CONTROLLER_NAME" ] && remove_iptables
$OSM_STACK_NAME
211 echo "Removing $OSM_DOCKER_WORK_DIR"
212 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
213 [ -z "$CONTROLLER_NAME" ] && sg lxd
-c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
215 echo "Some docker images will be kept in case they are used by other docker stacks"
216 echo "To remove them, just run 'docker image prune' in a terminal"
#Safe unattended install of iptables-persistent
# Preseeds debconf so the package installs without interactive prompts.
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    # Install only when the package is NOT already present; the negation was
    # missing in the checked-in text but the "Not installed" message below
    # makes the intent unambiguous.
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
231 #Configure NAT rules, based on the current IP addresses of containers
233 check_install_iptables_persistent
235 echo -e "\nConfiguring NAT rules"
236 echo -e " Required root privileges"
237 sudo
$OSM_DEVOPS/installers
/nat_osm
241 echo "FATAL error: Cannot install OSM due to \"$1\""
245 function install_lxd
() {
246 # Apply sysctl production values for optimal performance
247 sudo
cp /usr
/share
/osm-devops
/installers
/60-lxd-production.conf
/etc
/sysctl.d
/60-lxd-production.conf
251 sudo apt-get remove
--purge -y liblxc1 lxc-common lxcfs lxd lxd-client
252 sudo snap
install lxd
253 sudo apt-get
install zfsutils-linux
-y
256 sudo usermod
-a -G lxd
`whoami`
257 cat /usr
/share
/osm-devops
/installers
/lxd-preseed.conf |
sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd
-c "lxd init --preseed"
258 sg lxd
-c "lxd waitready"
259 DEFAULT_INTERFACE
=$
(route
-n |
awk '$1~/^0.0.0.0/ {print $8}')
260 DEFAULT_MTU
=$
(ip addr show
$DEFAULT_INTERFACE | perl
-ne 'if (/mtu\s(\d+)/) {print $1;}')
261 sg lxd
-c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
262 #sudo systemctl stop lxd-bridge
263 #sudo systemctl --system daemon-reload
264 #sudo systemctl enable lxd-bridge
265 #sudo systemctl start lxd-bridge
269 # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
270 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
271 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
272 read -e -p "$1" USER_CONFIRMATION
274 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
275 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
276 [ "${USER_CONFIRMATION,,}" == "yes" ] ||
[ "${USER_CONFIRMATION,,}" == "y" ] && return 0
277 [ "${USER_CONFIRMATION,,}" == "no" ] ||
[ "${USER_CONFIRMATION,,}" == "n" ] && return 1
278 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
# Install the OSM client and information model from the configured APT
# repository, plus their pip prerequisites, and print usage hints.
# Globals: RELEASE, REPOSITORY, REPOSITORY_BASE (read; any leading option
# prefix such as "-R " is stripped), INSTALL_LIGHTWEIGHT (read),
# OSM_HOSTNAME / OSM_RO_HOSTNAME (exported in non-lightweight mode).
function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    # NOTE(review): index refresh after adding the repo — reconstructed line,
    # confirm against upstream.
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=$(lxc list | awk '($2=="SO-ub"){print $6}')
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=$(lxc list | awk '($2=="RO"){print $6}')
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
}
# Install and activate the Prometheus node_exporter as a systemd service,
# creating its dedicated system user when needed. Idempotent: does nothing
# if the service is already active.
# Globals: PROMETHEUS_NODE_EXPORTER_TAG, OSM_DEVOPS (read).
# NOTE(review): then/else/fi skeleton reconstructed from the paired status
# messages — confirm against upstream.
function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter); then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Fetch the release tarball, install the binary, then clean up /tmp
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        # Install the unit file shipped with devops and start the service
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
}
# Stop, disable and completely remove the Prometheus node_exporter:
# systemd unit, dedicated user and installed binary.
function uninstall_prometheus_nodeexporter(){
    # Stop and disable the service before deleting its unit file
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    # Remove the service account and the binary itself
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
}
# Install and configure Docker CE from Docker's official APT repository,
# add the invoking user to the 'docker' group, and sanity-check the daemon.
function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    # Trust Docker's signing key and register the stable channel repo
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sudo service docker restart
    echo "... restarted Docker service"
    # Verify the daemon answers; abort the installation otherwise
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
}
# Install docker-compose 1.18.0 from the official GitHub release for the
# current OS/architecture.
function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    # -f added so an HTTP error response is not saved as the binary
    sudo curl -fL https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
# Install juju from snap (classic confinement) and make sure /snap/bin is
# on PATH for the remainder of the installer run.
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic
    # Prepend /snap/bin only when it is not already present in PATH
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
}
# Bootstrap a juju controller named after the OSM stack on the configured
# cloud, unless a controller with that name already exists, then verify it.
# Globals: OSM_STACK_NAME, OSM_VCA_CLOUDNAME, USER (read).
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Controller not found: create it (lxd group needed for bootstrap)
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # NOTE(review): inside double quotes awk's $1 is expanded by the shell
    # (empty here), so awk prints the whole matching line; counting lines
    # still detects whether the controller is listed.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}" | wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
# Add (if not already present) a persistent DNAT rule that forwards host
# port 17070 on the default IP to the juju/VCA controller.
# Globals: DEFAULT_IP, OSM_VCA_HOST (read).
function juju_createproxy() {
    check_install_iptables_persistent

    # -C probes for the rule; append (-A) and persist only when absent
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Pull third-party images (kafka, mongo, prometheus, grafana, mariadb,
# mysql...) and pull or build the OSM module images (MON, POL, PLA, NBI,
# keystone, RO, LCM, LW-UI, osmclient).
# Globals: COMMIT_ID, TO_REBUILD, PULL_IMAGES, INSTALL_PLA, BUILD_ARGS,
# DOCKER_USER, OSM_DOCKER_TAG, LWTEMPDIR and the *_TAG variables (read).
# NOTE(review): the per-section "fi" closers were missing from the checked-in
# text and have been restored; each gap aligned exactly with an if/elif group.
function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    # NOTE(review): "grep -q PROMETHEUS" also matches "PROMETHEUS-CADVISOR"
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # Each OSM module: pull the released image, or clone & build from source
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # PLA is optional: only handled when INSTALL_PLA is set (or explicitly listed)
    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient ; then
        # NOTE(review): -f points at a directory here, as in the checked-in text
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # NOTE(review): cadvisor is pulled a second time here, same as in the
    # checked-in text — looks redundant with the pull above; confirm intent.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
# Copy $1 over $2 unless the two files already have identical content.
# If $2 exists (and differs) the user is asked before overwriting; a backup
# is kept via cp -b in either case.
# $1 - source file, $2 - destination file.
# NOTE(review): file1/file2 assignments reconstructed from their uses below.
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    # Run cmp directly; the checked-in text command-substituted it
    # ("if ! $(cmp ...)"), which tests the empty expansion instead of
    # cmp's exit status.
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
# Generate (or update in place) the env files consumed by the OSM
# containers, back up any pre-existing ones, and stage the deployment
# manifests (k8s pods or docker-compose) into $OSM_DOCKER_WORK_DIR.
# Globals: WORKDIR_SUDO, OSM_DOCKER_WORK_DIR, OSM_DEVOPS, KUBERNETES,
# INSTALL_PLA, OSM_DATABASE_COMMONKEY, OSM_VCA_* and DEFAULT_IP (read).
# NOTE(review): the if/else/fi skeleton of the "create or update" sections
# was missing from the checked-in text and has been restored from the
# visible tee/sed pairs — confirm against upstream.
function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose files
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        if [ -n "$INSTALL_PLA" ]; then
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
        fi

        # Prometheus files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

        # Grafana files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

        # Prometheus Exporters files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    fi

    # LCM: create the env file, then add-or-replace each VCA setting
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # These two are only seeded as commented-out defaults
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
# Create an executable wrapper at $OSM_DOCKER_WORK_DIR/osm that launches the
# osmclient sidecar container attached to the deployment's docker network.
function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
#installs kubernetes packages
# Installs kubelet/kubeadm/kubectl pinned to 1.15.0-00 from the official
# Kubernetes APT repository.
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    # Trust the Kubernetes signing key and register the repo
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    # NOTE(review): index refresh after adding the repo — reconstructed line,
    # confirm against upstream.
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane from the given kubeadm config file ($1)
function init_kubeadm() {
    # NOTE(review): a line between the header and kubeadm init was lost in the
    # patch region; kubelet requires swap off (installer prompt advertises
    # "Disable swap space") — confirm against upstream.
    sudo swapoff -a
    sudo kubeadm init --config $1
}
#copies the kubeadm admin credentials into the current user's kubeconfig
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    # make sure the target directory exists before copying into it
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    # admin.conf is root-owned; hand the copy to the invoking user
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
#deploys flannel as daemonsets (the cluster CNI provider)
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # fail hard if the manifest cannot be applied
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    # every module <name> gets a <name>-secret built from <name>.env
    local module
    for module in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${module}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${module}.env
    done
}
#deploys osm pods and services
function deploy_osm_services() {
    # single-node install: find the master node and remove its NoSchedule taint
    # so OSM pods can be scheduled on it (one awk instead of two chained awks)
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
#deploys the optional PLA (placement) service manifests
function deploy_osm_pla_service() {
    # corresponding to parse_yaml (quote the tag so an empty value cannot break the test)
    [ ! "$OSM_DOCKER_TAG" == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
#rewrites every module manifest to pull the opensourcemano images at tag $1
function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui keystone"
    # $1 is the docker tag (callers invoke: parse_yaml $OSM_DOCKER_TAG);
    # the TAG assignment was lost in the patch region — restored here.
    TAG=$1
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
#points every module manifest's hostPath volume at the per-namespace directory
function namespace_vol() {
    local svc
    for svc in nbi lcm ro pol mon kafka mongo mysql; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
#initializes a single-node docker swarm, fixing the gateway bridge MTU first if needed
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # Non-standard MTU: pre-create docker_gwbridge with a matching MTU and a
        # free 172.x subnet, otherwise swarm overlay traffic gets fragmented.
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
}
#creates the attachable overlay network used by the OSM stack
function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
#deploys the OSM docker stack (swarm mode): exports the port/tag environment
#consumed by docker-compose.yaml and runs "docker stack deploy"
function deploy_lightweight() {
    echo "Deploying lightweight build"

    # Per-service ports.
    # NOTE(review): the NBI/RO/UI/MON/PROM defaults were lost in the patch
    # region; the values below follow the services' canonical ports — confirm
    # against upstream before relying on them.
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # expose container ports only (no host binding)
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container port mappings (prometheus remapped to avoid RO clash)
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    # environment file sourced by the compose deployment below
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    # NOTE(review): restore cwd; the popd line was lost in the patch region — confirm
    popd

    echo "Finished deployment of lightweight build"
}
#pulls the ELK images, deploys the osm_elk stack and bootstraps the Kibana
#filebeat-* index pattern once Kibana answers
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"

    # Poll Kibana until it answers or the timeout expires.
    # NOTE(review): the counter initialisation lines were lost in the patch
    # region; values below give ~40s of polling in 5s steps — confirm upstream.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done

    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
}
#main lightweight-install driver: prepares work dirs, juju/VCA credentials,
#docker (or kubernetes) and then deploys the OSM services
function install_lightweight() {
    # per-stack work dir; kubernetes deployments also get a per-namespace volume
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."

    if [ -n "$KUBERNETES" ]; then
        # NOTE(review): items 2/3 and the "as pre-requirements." line of this
        # prompt were lost in the patch region — confirm wording upstream.
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1. Install and configure LXD
2. Install juju
3. Install docker CE
4. Disable swap space
5. Install and initialize Kubernetes
as pre-requirements.
Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
            || sudo apt-get update \
            || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "Installing $need_packages_lw requires root privileges." \
            || sudo apt-get install -y $need_packages_lw \
            || FATAL "failed to install $need_packages_lw"
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju

    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            # bootstrap our own controller (optionally against an external LXD)
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                OSM_VCA_CLOUDNAME="lxd-cloud"
                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
            fi
            juju_createcontroller
        else
            # reuse an existing controller, registering the local/external LXD as a cloud
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                # generate cloud and certificate credentials for the local LXD
                # NOTE(review): the YAML skeleton lines of both here-docs were
                # lost in the patch region and are reconstructed — confirm upstream.
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # fix: original tested the literal string "OSM_DATABASE_COMMONKEY"
        # (missing $), so the failure check could never fire
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ ! "$OSM_DOCKER_TAG" == "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        # fix: original read [ -n "$INSTALL_PLA"] with no space before ], which
        # bash parses as a non-empty literal -> always true
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        install_prometheus_nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient

    # telemetry beacon; output discarded on purpose
    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
}
#builds and starts the vim-emu (emulated VIM) docker container
function install_vimemu() {
    # fix: original used plain echo, printing a literal "\n"
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    # NOTE(review): a short sleep before inspecting the container was lost in
    # the patch region — confirm the delay upstream
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
#installs the OSM monitoring add-on into the kubernetes cluster
function install_k8s_monitoring() {
    # make the helper scripts executable, then run the installer
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
#removes the OSM monitoring add-on from the kubernetes cluster
function uninstall_k8s_monitoring() {
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
#prints the effective installer options (used by -showopts)
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # fix: original printed $OSM_STACK_NAME under the OSM_WORK_DIR label
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1180 duration
=$
((ctime
- SESSION_ID
))
1181 url
="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1182 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1184 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name
="binsrc"
1185 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name
="lxd"
1186 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name
="lw"
1187 event_name
="${event_name}_$1"
1188 url
="${url}&event=${event_name}&ce_duration=${duration}"
1189 wget
-q -O /dev
/null
$url
# ---------------------------------------------------------------------------
# Default values for installer options (overridable via command-line flags)
# ---------------------------------------------------------------------------
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseSEVEN"
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
SESSION_ID=`date +%s`
# NOTE(review): the OSM_STACK_NAME default line was lost in the patch region;
# the usage text documents "default is osm" — confirm upstream.
OSM_STACK_NAME=osm
OSM_VCA_CLOUDNAME="localhost"
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
# NOTE(review): KEYSTONEDB_TAG / GRAFANA_TAG defaults were lost in the patch
# region but are read by deploy_lightweight — confirm values upstream.
KEYSTONEDB_TAG=10
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# valid kubernetes namespace names (RFC 1123 label)
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# ---------------------------------------------------------------------------
# Command-line parsing.
# NOTE(review): the case labels / esac / done skeleton was lost in the patch
# region; labels below are reconstructed from the getopts string and the
# surviving arm bodies — confirm against upstream.
# ---------------------------------------------------------------------------
while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:l:L:K:-: hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        m)
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        -)
            # long options, given as --name (OPTARG carries the name)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "kubeconfig" ] && continue
            [ "${OPTARG}" == "lxdendpoint" ] && continue
            [ "${OPTARG}" == "lxdcert" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
# Option compatibility: -m NONE is exclusive; -m PLA requires --pla
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE \
    && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] \
    && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
# --showopts: print the effective options and stop
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

# --charmed: delegate to the charmed (juju bundle) installer scripts and stop
if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        # NOTE(review): upstream passes $DOCKER_TAG here (not $OSM_DOCKER_TAG) — confirm
        /usr/share/osm-devops/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        /usr/share/osm-devops/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi
    echo "Your installation is now complete, follow these steps for configuring the osmclient:"
    echo
    echo "1. Get the NBI IP with the following command:"
    echo
    echo "juju status --format yaml | yq r - applications.nbi-k8s.address"
    echo
    echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
    echo
    echo "export OSM_HOSTNAME=<NBI-IP>"
    echo
    echo "3. Add the previous command to your .bashrc for other Shell sessions"
    echo
    echo "export OSM_HOSTNAME=<previous-IP> >> ~/.bashrc"
    echo
    echo "DONE"
    exit 0
fi
1425 # if develop, we force master
1426 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID
="master"
1428 need_packages
="git wget curl tar"
1429 echo -e "Checking required packages: $need_packages"
1430 dpkg
-l $need_packages &>/dev
/null \
1431 ||
! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1432 || sudo apt-get update \
1433 || FATAL
"failed to run apt-get update"
1434 dpkg
-l $need_packages &>/dev
/null \
1435 ||
! echo -e "Installing $need_packages requires root privileges." \
1436 || sudo apt-get
install -y $need_packages \
1437 || FATAL
"failed to install $need_packages"
1438 sudo snap
install jq
1439 if [ -z "$OSM_DEVOPS" ]; then
1440 if [ -n "$TEST_INSTALLER" ]; then
1441 echo -e "\nUsing local devops repo for OSM installation"
1442 OSM_DEVOPS
="$(dirname $(realpath $(dirname $0)))"
1444 echo -e "\nCreating temporary dir for OSM installation"
1445 OSM_DEVOPS
="$(mktemp -d -q --tmpdir "installosm.XXXXXX
")"
1446 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1448 git clone https
://osm.etsi.org
/gerrit
/osm
/devops.git
$OSM_DEVOPS
1450 if [ -z "$COMMIT_ID" ]; then
1451 echo -e "\nGuessing the current stable release"
1452 LATEST_STABLE_DEVOPS
=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1453 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1455 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1456 COMMIT_ID
="tags/$LATEST_STABLE_DEVOPS"
1458 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1460 git
-C $OSM_DEVOPS checkout
$COMMIT_ID
1464 .
$OSM_DEVOPS/common
/all_funcs
1466 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight
&& echo -e "\nDONE" && exit 0
1467 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1468 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1469 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1470 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1471 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1473 #Installation starts here
1474 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-7.0
-seven/README.txt
&> /dev
/null
1477 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight
&& echo -e "\nDONE" && exit 0
1478 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1479 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1480 ! ask_user
"The installation will take about 75-90 minutes. Continue (Y/n)? " y
&& echo "Cancelled!" && exit 1
1483 echo -e "Checking required packages: lxd"
1484 lxd
--version &>/dev
/null || FATAL
"lxd not present, exiting."
1485 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1487 # use local devops for containers
1488 export OSM_USE_LOCAL_DEVOPS
=true
1492 #Install vim-emu (optional)
1493 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce
&& install_vimemu
1495 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-7.0
-seven/README2.txt
&> /dev
/null