2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
# Print the installer's command-line help. The enclosing "function usage(){"
# line was lost in extraction and is restored here; the help text itself is
# reproduced verbatim from the visible lines.
function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  -r <repo>:      use specified repository name for osm packages"
    echo -e "  -R <release>:   use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "  -u <repo base>: use specified repository url for osm packages"
    echo -e "  -k <repo key>:  use specified repository public key url"
    echo -e "  -b <refspec>:   install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "          -b master          (main dev branch)"
    echo -e "          -b v2.0            (v2.0 branch)"
    echo -e "          -b tags/v1.1.0     (a specific tag)"
    echo -e "  -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>.  If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "  -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
    echo -e "  -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e "  -H <VCA host>   use specific juju host controller IP"
    echo -e "  -S <VCA secret> use VCA/juju secret key"
    echo -e "  -P <VCA pubkey> use VCA/juju public key file"
    echo -e "  -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e "  -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e "  --vimemu:       additionally deploy the VIM emulator as a docker container"
    echo -e "  --elk_stack:    additionally deploy an ELK docker stack for event logging"
    echo -e "  --pla:          install the PLA module for placement support"
    echo -e "  -m <MODULE>:    install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
    echo -e "  -o <ADDON>:     ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "  -O <openrc file/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e "  -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e "  -D <devops path> use local devops installation path"
    echo -e "  -w <work dir>   Location to store runtime installation"
    echo -e "  -t <docker tag> specify osm docker tag (default is latest)"
    echo -e "  -l:             LXD cloud yaml file"
    echo -e "  -L:             LXD credentials yaml file"
    echo -e "  -K:             Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e "  --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
    echo -e "  --nodocker:     do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "  --nojuju:       do not juju, assumes already installed"
    echo -e "  --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e "  --nohostports:  do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "  --nohostclient: do not install the osmclient"
    echo -e "  --uninstall:    uninstall OSM: remove the containers and delete NAT rules"
    echo -e "  --source:       install OSM from source code using the latest stable tag"
    echo -e "  --develop:      (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "  --pullimages:   pull/run osm images from docker.io/opensourcemano"
    echo -e "  --k8s_monitor:  install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e "  --volume:       create a VM volume when installing to OpenStack"
    # echo -e "  --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
    # echo -e "  --update:       update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "  --showopts:     print chosen options and exit (only for debugging)"
    echo -e "  -y:             do not prompt for confirmation, assumes yes"
    echo -e "  -h / --help:    print this help"
    echo -e "  --charmed:                   Deploy and operate OSM with Charms on k8s"
    echo -e "     [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "     [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "     [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e "     [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "     [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "     [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e "     [--ha]: Installs High Availability bundle. (--charmed option)"
    echo -e "     [--tag]: Docker image tag"
}
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. Implemented with only sed/awk to avoid adding
# package dependencies (no yq/python-yaml).
# Arguments: $1 - controller name to look up
# Outputs:   the password value to stdout (no trailing newline)
function parse_juju_password {
    # accounts.yaml lives in juju's per-user data directory
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: YAML key chars, fs: an unlikely field separator (FS char \034)
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # The sed pass flattens "key: value" lines into indent<FS>key<FS>value records;
    # the awk pass tracks the indentation path and prints the password that
    # belongs to the requested controller.
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
# Generate a random 32-character alphanumeric secret on stdout.
function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
# Remove OSM storage.
# Kubernetes mode: $1 is a host-path volume directory to delete.
# Swarm mode:      $1 is the stack name; remove its named docker volumes.
function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
# Remove the docker overlay network of stack $1 ("net<stack>").
function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
# Delete the PREROUTING DNAT rule that proxies port 17070 on the host's
# default-route IP to the juju (VCA) controller, then persist the ruleset.
# Arguments: $1 - stack/controller name (used to discover the VCA host)
function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Discover the controller IP from juju's api-endpoints output
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$DEFAULT_IP" ]; then
        # Determine the interface/IP holding the default route
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi
    # Only delete the rule if it is actually present (-C checks existence)
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Remove docker stack $1 and wait (up to ~30 polls) until all of its
# containers are gone; abort via FATAL if they do not disappear.
function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
    fi
}
#removes osm deployments and services
# Arguments: $1 - kubernetes namespace to delete (removes everything in it)
function remove_k8s_namespace() {
    kubectl delete ns $1
}
#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        # NOTE(review): upstream also clears the local helm cache — confirm
        rm -rf $HOME/.helm
    fi
}
# Remove the weekly update-juju-lxc-images job from the user's crontab.
# FIX: the grep pattern must be double-quoted so ${OSM_DEVOPS} expands;
# with single quotes the literal string '${OSM_DEVOPS}/...' never matches
# the installed crontab line, so the job was never removed.
function remove_crontab_job() {
    crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}
# Purge both the python2 and python3 osmclient packages from the host.
function uninstall_osmclient() {
    local pkg
    for pkg in python-osmclient python3-osmclient; do
        sudo apt-get remove --purge -y "$pkg"
    done
}
#Uninstall lightweight OSM: remove dockers
# NOTE(review): several branch/closing lines of this function were lost in
# extraction; the control flow below is reconstructed — verify against the
# upstream installer before relying on the exact branch structure.
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # newgrp runs the heredoc with the docker group active
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG
        if [ -n "$NGUI" ]; then
            sg docker -c "docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
        else
            sg docker -c "docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
        fi
        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    remove_crontab_job
    [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        # Pre-seed debconf so the install never prompts interactively
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
#Configure NAT rules, based on the current IP addresses of containers
# NOTE(review): the function name line was lost in extraction; "nat" matches
# the upstream installer — confirm.
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
# Print a fatal-error message and abort the installer.
# NOTE(review): the signature and any extra bookkeeping lines were lost in
# extraction; reconstructed as message + exit 1 — confirm against upstream.
function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}
# Ensure a weekly crontab job that refreshes the juju LXC images exists,
# then run the refresh once immediately.
function update_juju_images(){
    # Add the job only if it is not already present in the crontab
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}
# Install and configure LXD (snap-based), replacing any deb-based LXD,
# and tune the default profile/bridge MTU to the host's default interface.
function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    # NOTE(review): upstream reloads sysctl here (sudo sysctl --system) — confirm
    sudo sysctl --system

    # Remove deb-packaged LXD and install the snap instead
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD: group membership, preseeded init bound to DEFAULT_IP
    sudo usermod -a -G lxd `whoami`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"

    # Match container MTU to the default-route interface MTU
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask;   $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
# NOTE(review): the "function ask_user(){" line was lost in extraction; the
# name is grounded by the call site in cmp_overwrite.
function ask_user(){
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
# Install the OSM command-line client from the configured apt repository,
# plus its python dependencies; print follow-up env-var hints.
function install_osmclient(){
    # Strip the option prefixes the caller may have left in these variables
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    # NOTE(review): an apt index refresh line was lost in extraction here — confirm
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Non-lightweight installs discover host IPs from the lxc containers
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
# Install and activate the Prometheus node_exporter as a systemd service,
# skipping the install when the service is already active.
function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter); then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        # Create the dedicated service account if needed
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Download, unpack and install the binary
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        # Install and start the systemd unit
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
}
# Stop, disable and fully remove the node_exporter service, its binary
# and its dedicated user.
function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
}
function install_docker_ce() {
    # installs and configures Docker CE from Docker's apt repository
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # Sanity-check that docker answers before declaring success
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
function install_docker_compose() {
    # installs and configures docker-compose (pinned to 1.18.0)
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
# Install juju 2.8 from snap and make sure /snap/bin is on PATH.
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.8/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    # NOTE(review): a line was lost here in extraction; upstream refreshes the
    # juju LXC images (update_juju_images) — confirm
    update_juju_images
    echo "Finished installation of juju"
    return 0
}
# Bootstrap the juju controller for OSM if it does not already exist,
# then verify it is listed and enable the k8s-operators feature flag.
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=2.8.1 $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
    juju controller-config features=[k8s-operators]
}
# Register the local kubernetes cluster (from ~/.kube/config) in the juju
# controller, using openebs-hostpath as the storage class.
# (Redirects the kubeconfig directly instead of the original useless cat|pipe.)
function juju_addk8s() {
    juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath < $HOME/.kube/config
}
# Add (if absent) a PREROUTING DNAT rule proxying port 17070 on the host's
# default IP to the juju (VCA) controller, and persist the ruleset.
function juju_createproxy() {
    check_install_iptables_persistent

    # -C only checks for the rule; append it with -A when it is missing
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Pull third-party images and pull or build every OSM module image.
# For each module: pull from ${DOCKER_USER} when PULL_IMAGES is set,
# otherwise clone the module from gerrit and build it locally when the
# module is selected by TO_REBUILD (empty TO_REBUILD selects everything).
# NOTE(review): the closing "fi" lines were lost in extraction; structure
# reconstructed from the visible condition/body pairs.
function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"
    echo "OSM Docker images generated from $_build_from"

    # Arguments forwarded to every local "docker build"
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # PLA is optional: only handled when INSTALL_PLA is set or explicitly selected
    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    # Exactly one UI image is handled: NG-UI when NGUI is set, light-ui otherwise
    if [ -n "$NGUI" ]; then
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull ng-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NG-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NG-UI
            git -C ${LWTEMPDIR}/NG-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL "cannot build NG-UI docker image"
        fi
    else
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
            git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
        fi
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
# Copy $1 over $2 unless both files already have identical content; when $2
# exists and differs, ask the user before overwriting (default: no).
# FIX: the original used "if ! $(cmp ...)", which executes cmp's (empty)
# output instead of testing cmp's exit status; the command substitution is
# removed so the comparison result is actually used.
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
# Stage the docker-compose files into the OSM work dir: the base stack,
# the UI variant selected by NGUI, and optionally the PLA compose file.
function generate_docker_compose_files() {
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    if [ -n "$NGUI" ]; then
        # For NG-UI
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    else
        # Docker-compose (light-ui)
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-lightui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    fi
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}
# Stage the kubernetes manifests and drop the manifest of the UI that was
# NOT selected (light-ui removed when NGUI is set, ng-ui otherwise).
function generate_k8s_manifest_files() {
    #Kubernetes resources
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    if [ -n "$NGUI" ]; then
        $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/light-ui.yaml
    else
        $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/ng-ui.yaml
    fi
}
# Stage prometheus, grafana and prometheus-exporter config files into the
# work dir. No-op on kubernetes installs (manifests handle this instead).
function generate_prometheus_grafana_files() {
    [ -n "$KUBERNETES" ] && return

    # Prometheus files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
}
625 function generate_docker_env_files() {
626 echo "Doing a backup of existing env files
"
627 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
628 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
629 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
630 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
631 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
632 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
633 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
634 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
635 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
637 echo "Generating docker env files
"
639 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
640 echo "OSMLCM_DATABASE_COMMONKEY
=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
643 if ! grep -Fq "OSMLCM_VCA_HOST
" $OSM_DOCKER_WORK_DIR/lcm.env; then
644 echo "OSMLCM_VCA_HOST
=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
646 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.
*|OSMLCM_VCA_HOST
=$OSM_VCA_HOST|g
" $OSM_DOCKER_WORK_DIR/lcm.env
649 if ! grep -Fq "OSMLCM_VCA_SECRET
" $OSM_DOCKER_WORK_DIR/lcm.env; then
650 echo "OSMLCM_VCA_SECRET
=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
652 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.
*|OSMLCM_VCA_SECRET
=$OSM_VCA_SECRET|g
" $OSM_DOCKER_WORK_DIR/lcm.env
655 if ! grep -Fq "OSMLCM_VCA_PUBKEY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
656 echo "OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
658 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.
*|OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
661 if ! grep -Fq "OSMLCM_VCA_CACERT
" $OSM_DOCKER_WORK_DIR/lcm.env; then
662 echo "OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
664 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.
*|OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
667 if ! grep -Fq "OSMLCM_VCA_APIPROXY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
668 echo "OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
670 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.
*|OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
673 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE
" $OSM_DOCKER_WORK_DIR/lcm.env; then
674 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
677 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env
; then
678 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
681 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env
; then
682 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
684 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
687 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env
; then
688 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
690 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
694 MYSQL_ROOT_PASSWORD
=$
(generate_secret
)
695 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env
]; then
696 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
698 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env
]; then
699 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
703 KEYSTONE_DB_PASSWORD
=$
(generate_secret
)
704 SERVICE_PASSWORD
=$
(generate_secret
)
705 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env
]; then
706 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
708 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env
]; then
709 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
710 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
711 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
715 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env
]; then
716 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
717 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
721 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env
]; then
722 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
723 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
726 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env
; then
727 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
729 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
732 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env
; then
733 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
735 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
738 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env
; then
739 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
741 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
744 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env
; then
745 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
747 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
752 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env
]; then
753 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
757 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env
]; then
758 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
761 echo "Finished generation of docker env files"
# Writes a small wrapper script ($OSM_DOCKER_WORK_DIR/osm) that runs the
# osmclient sidecar container attached to the OSM overlay network.
# Globals read: OSM_STACK_NAME, DOCKER_USER, OSM_DOCKER_TAG, WORKDIR_SUDO,
#               OSM_DOCKER_WORK_DIR
function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
#installs kubernetes packages
# Adds the upstream Kubernetes apt repository and installs a pinned
# kubelet/kubeadm/kubectl version (1.15.0-00). Requires sudo.
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    # NOTE(review): refresh the package index after adding the new repo —
    # this line was dropped in the damaged source; without it the pinned
    # packages below cannot be resolved.
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
# $1 - path to the kubeadm cluster configuration YAML. Requires sudo.
function init_kubeadm() {
    sudo kubeadm init --config "$1"
}
# Copies the kubeadm admin kubeconfig into the invoking user's ~/.kube/config
# and gives the user ownership of it.
# Globals read: K8S_MANIFEST_DIR. Calls FATAL (defined elsewhere) on error.
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    # NOTE(review): ensure the target directory exists — this line was
    # dropped in the damaged source; cp below fails without it.
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
# Installs the OpenEBS operator and waits (up to storageclass_timeout seconds)
# for the openebs-hostpath storageclass to appear, then marks it as the
# cluster's default storageclass.
function install_k8s_storageclass() {
    kubectl apply -f https://openebs.github.io/charts/openebs-operator-1.6.0.yaml
    local storageclass_timeout=300
    local counter=0
    echo "Waiting for storageclass"
    # NOTE(review): loop body terminators (do/break/else/sleep/fi/done) were
    # dropped in the damaged source and are restored here: poll every 15s
    # until the storageclass exists or the timeout expires.
    while (( counter < storageclass_timeout ))
    do
        kubectl get storageclass openebs-hostpath &> /dev/null
        if [ $? -eq 0 ] ; then
            echo "Storageclass available"
            break
        else
            counter=$((counter + 15))
            sleep 15
        fi
    done
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}
#deploys flannel as daemonsets
# Downloads the flannel manifest into a throwaway temp dir (cleaned up via
# trap) and applies it to the cluster. Calls FATAL on apply failure.
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P "$CNI_DIR"
    kubectl apply -f "$CNI_DIR"
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
# Creates the OSM namespace and one k8s secret per component, each populated
# from the matching *.env file generated earlier in $OSM_DOCKER_WORK_DIR.
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}
#taints K8s master node
# Removes the NoSchedule taint from the master node so OSM pods can be
# scheduled on a single-node cluster (trailing '-' removes the taint).
function taint_master_node() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    # NOTE(review): short settle delay restored — the tail of this function
    # was dropped in the damaged source.
    sleep 5
}
#deploys osm pods and services
# Applies every manifest in $OSM_K8S_WORK_DIR into the OSM namespace.
function deploy_osm_services() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
# Deploys the optional PLA (Placement) service: rewrites the host-path volume
# in pla.yaml to the per-stack namespace volume (mirrors namespace_vol) and
# applies the manifest (mirrors deploy_osm_services).
function deploy_osm_pla_service() {
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
#Install helm and tiller
# Installs the helm v2.15.2 client if absent, creates the tiller service
# account + cluster role binding if absent, runs 'helm init', and waits up to
# tiller_timeout seconds for the tiller deployment to become ready.
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        # NOTE(review): cleanup of the extracted dir and tarball — the first
        # line was dropped in the damaged source and is restored here.
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has being configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        # NOTE(review): loop counters were dropped in the damaged source;
        # restored as a 120s poll at 5s intervals — verify against upstream.
        tiller_timeout=120
        counter=0
        tiller_status=""
        while (( counter < tiller_timeout ))
        do
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
            counter=$((counter + 5))
            sleep 5
        done
        [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
    fi
}
# Rewrites the container image references in the k8s manifests so they point
# at $DOCKER_USER with the requested tag instead of the default
# opensourcemano images.
# $1 - image tag to substitute for every OSM service image.
function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
    # NOTE(review): TAG assignment restored — the line was dropped in the
    # damaged source but $TAG is referenced by the sed below.
    TAG=$1
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/$DOCKER_USER\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
    # Bug fix: the original pattern "$DOCKER_USER\/\/pla" emitted a doubled
    # slash ("user//pla:tag"), producing an invalid image reference.
    $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/$DOCKER_USER\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
}
# Rewrites the host-path volumes in the stateful service manifests from the
# shared /var/lib/osm path to the per-stack namespace volume
# ($OSM_NAMESPACE_VOL), so multiple stacks do not share state on disk.
function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql prometheus"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
# Initializes a single-node docker swarm. If the host MTU is non-standard,
# first recreates docker_gwbridge with a matching MTU (picking a free 172.x
# subnet adjacent to the existing docker networks).
# Globals read: DEFAULT_MTU, DEFAULT_IP.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
}
# Creates the attachable overlay network net${OSM_STACK_NAME} that all OSM
# containers join, propagating the host MTU.
function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
# Deploys the lightweight (docker swarm) build: computes the port mapping
# list (host-published unless NO_HOST_PORTS is set), writes osm_ports.sh with
# all exported deployment variables, and runs `docker stack deploy` (with the
# PLA compose file when INSTALL_PLA is set).
function deploy_lightweight() {

    echo "Deploying lightweight build"
    # NOTE(review): the first port defaults were dropped in the damaged
    # source; values restored from the standard OSM ReleaseEIGHT installer —
    # verify against upstream.
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # container-only ports: nothing is published on the host
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container published ports (prometheus uses a distinct host port)
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
# Deploys the optional ELK monitoring stack: pulls the images, copies the
# compose files, deploys the osm_elk stack, waits for Kibana to answer on
# 127.0.0.1:5601, and creates/sets the default filebeat-* index pattern.
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # NOTE(review): wait-loop counters were dropped in the damaged source;
    # restored as a 40s poll at 5s intervals — verify against upstream.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
}
# Registers the local Kubernetes cluster in OSM: creates a dummy VIM account
# (_system-osm-vim) and attaches the cluster using the user's kubeconfig.
function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
          --name _system-osm-vim \
          --account_type dummy \
          --auth_url http://dummy \
          --user osm --password osm --tenant osm \
          --description "dummy" \
          --config '{management_network_name: mgmt}'
    # NOTE(review): the --version line and the positional cluster name were
    # dropped in the damaged source; restored from the standard ReleaseEIGHT
    # installer — verify against upstream.
    /usr/bin/osm --all-projects k8scluster-add \
          --creds ${HOME}/.kube/config \
          --vim _system-osm-vim \
          --k8s-nets '{"net1": null}' \
          --version '1.15' \
          --description "OSM Internal Cluster" \
          _system-osm-k8s
}
# Main lightweight-install driver: confirms with the user, detects the
# default network interface/IP/MTU, prepares LXD/juju (VCA), derives the VCA
# credentials, installs docker, generates all deployment files, and deploys
# OSM either on Kubernetes or on a docker swarm, finishing with a health
# check and registration of the local k8s cluster.
#
# NOTE(review): this function was heavily damaged in extraction (roughly 40%
# of its lines, including most closing fi/else tokens and several telemetry
# `track` calls, were dropped). The structure below is reconstructed from the
# visible statements; gaps restored from the standard OSM ReleaseEIGHT
# installer are marked — verify the whole function against upstream.
function install_lightweight() {
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        # NOTE(review): LXD installation call restored (dropped line)
        install_lxd
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju

    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                OSM_VCA_CLOUDNAME="lxd-cloud"
                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
            fi
            juju_createcontroller
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                # Build a local LXD cloud + certificate credential for juju.
                # NOTE(review): both heredoc bodies were mostly dropped in the
                # damaged source; restored from upstream — verify.
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce

    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        # NOTE(review): install_kube / kube_config_dir calls restored
        # around the visible init_kubeadm line (dropped in extraction)
        install_kube
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        # NOTE(review): CNI/secret deployment calls restored (dropped lines)
        deploy_cni_provider
        kube_secrets
        [ ! $OSM_DOCKER_TAG == "8" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        # Bug fix: original read `[ -n "$INSTALL_PLA"]` — missing space
        # before `]` makes bash abort with "missing ]".
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
            track deploy_osm_pla
        fi
        track deploy_osm_services_k8s
        install_k8s_storageclass
        track k8s_storageclass
        install_helm
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack, then deploy on docker swarm
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        install_prometheus_nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
        track osm_unhealthy
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
        track osm_unhealthy
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
    track end
    return 0
}
# Installs OSM onto an OpenStack cloud via the Ansible playbook.
# $1 - path to an openrc file, or the name of a clouds.yaml cloud
# $2 - external network name (required)
# $3 - whether to attach a volume (passed as setup_volume)
function install_to_openstack() {
    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Install Ansible, OpenStack client and SDK
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        # NOTE(review): sourcing of the openrc file restored here (dropped
        # line) — the playbook relies on the OS_* environment it exports.
        . $1
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
        -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
        -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    return $?
}
# Builds and starts the vim-emu (emulated VIM) docker container, attaching it
# to the OSM network when running in lightweight mode, then prints how to
# register it as a VIM in OSM.
function install_vimemu() {
    # Bug fix: plain `echo "\n..."` prints a literal backslash-n; use -e so
    # the escape is interpreted (matches the other echo -e calls below).
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    # NOTE(review): settle delay restored (dropped line) — verify upstream.
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "  export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "  osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
# Installs the OSM Kubernetes monitoring stack via the devops helper scripts.
function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
# Uninstalls the OSM Kubernetes monitoring stack via the devops helper script.
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
# Prints every installer configuration variable (used by -D/--showopts style
# debugging of the option parsing).
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # Bug fix: the original printed $OSM_STACK_NAME under the OSM_WORK_DIR
    # label; report the actual work dir variable.
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
# Sends an anonymous installation-progress event to the OSM telemetry
# endpoint (woopra), tagging it with the install mode and elapsed time.
# $1 - event label (e.g. "juju_controller", "end")
# NOTE(review): the function header and the ctime assignment were dropped in
# the damaged source and are reconstructed here — verify against upstream.
function track() {
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
# ---------------------------------------------------------------------------
# Default values for the installer configuration (overridable via options).
# NOTE(review): several defaults in this region were dropped in the damaged
# source; the visible assignments are kept and a few values the script
# demonstrably depends on (OSM_STACK_NAME, KEYSTONEDB_TAG, GRAFANA_TAG) are
# restored from the standard ReleaseEIGHT installer — verify against upstream.
# ---------------------------------------------------------------------------
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseEIGHT"
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
# session start timestamp; also used as the telemetry cookie in track()
SESSION_ID=`date +%s`
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
# default stack name / k8s namespace (restored — referenced by the paths below)
OSM_STACK_NAME=osm
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
# restored (dropped lines) — verify against upstream
KEYSTONEDB_TAG=10
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# NOTE(review): this option-parsing loop was damaged in extraction: the
# "case \"${o}\" in" dispatch line, the option patterns ("r)", "c)", ...),
# the ";;" terminators and the closing "esac"/"done" are missing, and stray
# original line numbers are fused into the text.  Only comments are added
# below; every other token is left untouched pending recovery of the
# pristine file.  The option letter attributed to each surviving arm body
# is inferred from the usage text in the file header — confirm each one.
# Optstring: leading ':' selects silent error reporting (handled by the
# ':' and '?' arms at the bottom); a letter followed by ':' takes an argument.
1481 while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-: hy" o
; do
# -r <repo>: repository name, also forwarded downstream via the REPO_ARGS array
1488 REPOSITORY
="${OPTARG}"
1489 REPO_ARGS
+=(-r "$REPOSITORY")
# -c <orchestrator>: "swarm" is the default (no-op), "k8s" enables Kubernetes
1492 [ "${OPTARG}" == "swarm" ] && continue
1493 [ "${OPTARG}" == "k8s" ] && KUBERNETES
="y" && continue
# NOTE(review): message says "-i" but this appears to be the -c arm — confirm
1494 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
# -n <ui>: "lwui" keeps the default light UI (no-op), "ngui" selects Next Gen UI
1498 [ "${OPTARG}" == "lwui" ] && continue
1499 [ "${OPTARG}" == "ngui" ] && NGUI
="y" && continue
1500 echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
# -k <repo key url>
1504 REPOSITORY_KEY
="${OPTARG}"
1505 REPO_ARGS
+=(-k "$REPOSITORY_KEY")
# -u <repo base url>
1508 REPOSITORY_BASE
="${OPTARG}"
1509 REPO_ARGS
+=(-u "$REPOSITORY_BASE")
# -R <release>: only the REPO_ARGS forwarding survived; the
# RELEASE="${OPTARG}" assignment itself is not visible here — confirm
1513 REPO_ARGS
+=(-R "$RELEASE")
# -D <devops path>: use an existing devops checkout instead of cloning
1516 OSM_DEVOPS
="${OPTARG}"
# -o <component>: install only the selected optional component
1520 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU
="y" && continue
1521 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK
="y" && continue
1522 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
# -O [<openrc file or cloud>]: deploy OSM to OpenStack
1525 INSTALL_TO_OPENSTACK
="y"
1526 if [ -n "${OPTARG}" ]; then
1527 OPENSTACK_OPENRC_FILE_OR_CLOUD
="${OPTARG}"
1529 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
# -N <network>: OpenStack public network name for the OSM VM
1534 OPENSTACK_PUBLIC_NET_NAME
="${OPTARG}"
# -m <module>: accumulate modules to rebuild; note every entry is appended
# with a leading space — the " NONE"/" PLA" comparisons after the loop
# depend on that exact format
1537 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD
="$TO_REBUILD LW-UI" && continue
1538 [ "${OPTARG}" == "NBI" ] && TO_REBUILD
="$TO_REBUILD NBI" && continue
1539 [ "${OPTARG}" == "LCM" ] && TO_REBUILD
="$TO_REBUILD LCM" && continue
1540 [ "${OPTARG}" == "RO" ] && TO_REBUILD
="$TO_REBUILD RO" && continue
1541 [ "${OPTARG}" == "MON" ] && TO_REBUILD
="$TO_REBUILD MON" && continue
1542 [ "${OPTARG}" == "POL" ] && TO_REBUILD
="$TO_REBUILD POL" && continue
1543 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD
="$TO_REBUILD KAFKA" && continue
1544 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD
="$TO_REBUILD MONGO" && continue
1545 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS" && continue
1546 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1547 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD
="$TO_REBUILD KEYSTONE-DB" && continue
1548 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD
="$TO_REBUILD GRAFANA" && continue
1549 [ "${OPTARG}" == "NONE" ] && TO_REBUILD
="$TO_REBUILD NONE" && continue
1550 [ "${OPTARG}" == "PLA" ] && TO_REBUILD
="$TO_REBUILD PLA" && continue
# -H <host>: juju VCA controller IP
1553 OSM_VCA_HOST
="${OPTARG}"
# -S <secret>: juju VCA secret
1556 OSM_VCA_SECRET
="${OPTARG}"
# -s <stack/namespace>: with Kubernetes the name must match RE_CHECK;
# an invalid namespace aborts (exit 0 — arguably should be non-zero)
1559 OSM_STACK_NAME
="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~
$RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1562 # when specifying workdir, do not use sudo for access
1564 OSM_WORK_DIR
="${OPTARG}"
# -t <docker tag>, forwarded via REPO_ARGS
1567 OSM_DOCKER_TAG
="${OPTARG}"
1568 REPO_ARGS
+=(-t "$OSM_DOCKER_TAG")
# -U <docker user>
1571 DOCKER_USER
="${OPTARG}"
# -P <file>: the VCA public key is read from the given file
1574 OSM_VCA_PUBKEY
=$
(cat ${OPTARG})
# -A <apiproxy>
1577 OSM_VCA_APIPROXY
="${OPTARG}"
# -l <file> / -L <file>: LXD cloud and credential YAMLs; -K <name>: use an
# existing external juju controller
1580 LXD_CLOUD_FILE
="${OPTARG}"
1583 LXD_CRED_FILE
="${OPTARG}"
1586 CONTROLLER_NAME
="${OPTARG}"
# "-" arm: long options (--help, --source, --uninstall, ...); OPTARG holds
# the long-option name
1589 [ "${OPTARG}" == "help" ] && usage
&& exit 0
1590 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE
="y" && PULL_IMAGES
="" && continue
1591 [ "${OPTARG}" == "develop" ] && DEVELOP
="y" && continue
1592 [ "${OPTARG}" == "uninstall" ] && UNINSTALL
="y" && continue
1593 [ "${OPTARG}" == "update" ] && UPDATE
="y" && continue
1594 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE
="y" && continue
1595 [ "${OPTARG}" == "test" ] && TEST_INSTALLER
="y" && continue
1596 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD
="y" && continue
1597 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD
="y" && continue
1598 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER
="y" && continue
1599 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT
="y" && continue
1600 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU
="y" && continue
1601 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK
="y" && continue
1602 [ "${OPTARG}" == "showopts" ] && SHOWOPTS
="y" && continue
1603 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS
="y" && continue
1604 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU
="y" && continue
1605 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD
="y" && continue
1606 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT
="y" && continue
1607 [ "${OPTARG}" == "pullimages" ] && continue
1608 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
1609 [ "${OPTARG}" == "charmed" ] && CHARMED
="y" && continue
# flags accepted here but presumably consumed by the charmed installer via
# "$@" — confirm
1610 [ "${OPTARG}" == "bundle" ] && continue
1611 [ "${OPTARG}" == "k8s" ] && continue
1612 [ "${OPTARG}" == "lxd" ] && continue
1613 [ "${OPTARG}" == "lxd-cred" ] && continue
1614 [ "${OPTARG}" == "microstack" ] && continue
1615 [ "${OPTARG}" == "ha" ] && continue
1616 [ "${OPTARG}" == "tag" ] && continue
1617 [ "${OPTARG}" == "pla" ] && INSTALL_PLA
="y" && continue
1618 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME
="true" && continue
1619 echo -e "Invalid option: '--$OPTARG'\n" >&2
# ":" arm: an option that requires an argument was given none
1623 echo "Option -$OPTARG requires an argument" >&2
# "?" arm: unknown short option
1627 echo -e "Invalid option: '-$OPTARG'\n" >&2
# Sanity-check the accumulated -m selections before doing any work.
# TO_REBUILD entries always carry a leading space, so " NONE" means NONE was
# the only module given; NONE mixed with anything else is contradictory.
if [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && grep -q NONE <<< "$TO_REBUILD"; then
    FATAL "Incompatible option: -m NONE cannot be used with other -m options"
fi
# Rebuilding only PLA makes sense only when the PLA module was requested.
if [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ]; then
    FATAL "Incompatible option: -m PLA cannot be used without --pla option"
fi
# NOTE(review): extraction dropped structural lines in this region (the
# SHOWOPTS branch body, the else between uninstall/install, and the closing
# fi lines); comments only, code left untouched.
# --showopts: presumably dumps the effective variables and exits — confirm.
1645 if [ -n "$SHOWOPTS" ]; then
# Charmed (Juju-bundle) path: delegate the whole job to
# charmed_{un}install.sh, forwarding repo/release/tag settings plus the raw
# command line ("$@") so the helper can parse its own long options.
1650 if [ -n "$CHARMED" ]; then
1651 if [ -n "$UNINSTALL" ]; then
1652 ${OSM_DEVOPS}/installers
/charmed_uninstall.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr
/share
/osm-devops
-t $DOCKER_TAG "$@"
# presumably the else branch: fresh charmed install — confirm against
# the pristine file
1654 ${OSM_DEVOPS}/installers
/charmed_install.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr
/share
/osm-devops
-t $DOCKER_TAG "$@"
# If develop mode was requested and no explicit refspec was given, follow
# the master branch.
if [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ]; then
    COMMIT_ID="master"
fi

# Tools the installer itself needs before it can fetch anything else.
need_packages="git wget curl tar"

# When targeting OpenStack, hand the whole installation over to
# install_to_openstack and finish immediately on success.
if [ -n "$INSTALL_TO_OPENSTACK" ]; then
    install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME \
        && echo -e "\nDONE" && exit 0
fi
# Verify the prerequisite host packages, escalating with sudo only when
# something is actually missing; the user is warned before each escalation.
echo -e "Checking required packages: $need_packages"
if ! dpkg -l $need_packages &>/dev/null; then
    echo -e "One or several required packages are not installed. Updating apt cache requires root privileges."
    sudo apt-get update || FATAL "failed to run apt-get update"
fi
# Re-check after the cache refresh and install whatever is still missing.
if ! dpkg -l $need_packages &>/dev/null; then
    echo -e "Installing $need_packages requires root privileges."
    sudo apt-get install -y $need_packages || FATAL "failed to install $need_packages"
fi
# jq is delivered as a snap rather than a deb.
sudo snap install jq
# NOTE(review): extraction dropped several structural lines in this region
# (else/fi terminators and, judging by the fused line-number gaps, a few
# statements such as tracking calls); comments only, code left untouched.
# Locate or fetch the devops repository that carries the real installers.
1677 if [ -z "$OSM_DEVOPS" ]; then
1678 if [ -n "$TEST_INSTALLER" ]; then
1679 echo -e "\nUsing local devops repo for OSM installation"
# test mode: reuse the devops checkout this script lives in
1680 OSM_DEVOPS
="$(dirname $(realpath $(dirname $0)))"
# otherwise clone devops into a temp dir that is removed on exit
1682 echo -e "\nCreating temporary dir for OSM installation"
1683 OSM_DEVOPS
="$(mktemp -d -q --tmpdir "installosm.XXXXXX
")"
1684 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1686 git clone https
://osm.etsi.org
/gerrit
/osm
/devops.git
$OSM_DEVOPS
# no refspec given: pick the highest v* tag as the current stable release
1688 if [ -z "$COMMIT_ID" ]; then
1689 echo -e "\nGuessing the current stable release"
1690 LATEST_STABLE_DEVOPS
=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1691 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1693 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1694 COMMIT_ID
="tags/$LATEST_STABLE_DEVOPS"
1696 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1698 git
-C $OSM_DEVOPS checkout
$COMMIT_ID
# source the shared helper functions (FATAL, ask_user, install_* helpers, ...)
1702 .
$OSM_DEVOPS/common
/all_funcs
# non-default stack names get their own docker work dir; Kubernetes installs
# get a pods dir and namespace volume derived from the stack name
1704 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR
="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1705 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR
="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
# uninstall and install-only shortcuts exit before the full installation
1706 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight
&& echo -e "\nDONE" && exit 0
1707 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1708 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1709 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1710 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1711 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1713 #Installation starts here
# presumably a download-counter/telemetry ping; output is discarded — confirm
1714 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-8.0
-eight/README.txt
&> /dev
/null
1717 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight
&& echo -e "\nDONE" && exit 0
1718 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
# source installs are long; ask for confirmation unless -y was given
1719 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1720 ! ask_user
"The installation will take about 75-90 minutes. Continue (Y/n)? " y
&& echo "Cancelled!" && exit 1
# the classic (non-lightweight) path requires lxd on the host
1723 echo -e "Checking required packages: lxd"
1724 lxd
--version &>/dev
/null || FATAL
"lxd not present, exiting."
1725 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1727 # use local devops for containers
1728 export OSM_USE_LOCAL_DEVOPS
=true
1732 #Install vim-emu (optional)
1733 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce
&& install_vimemu
# second README ping, output discarded — presumably marks install completion
1735 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-8.0
-eight/README2.txt
&> /dev
/null