2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
# NOTE(review): this is the help text of the installer's usage() function.
# The enclosing "function usage(){" header (and a few option lines) are
# missing from this chunk, and every line carries a stray leading number
# left over from the original file's line numbering, so this text is not
# executable as-is — restore the function wrapper from the upstream source.
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
31 echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
32 echo -e " -H <VCA host> use specific juju host controller IP"
33 echo -e " -S <VCA secret> use VCA/juju secret key"
34 echo -e " -P <VCA pubkey> use VCA/juju public key file"
35 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
36 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
37 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
38 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
41 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
42 echo -e " -O <openrc file/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
43 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
44 echo -e " -D <devops path> use local devops installation path"
45 echo -e " -w <work dir> Location to store runtime installation"
46 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
47 echo -e " -l: LXD cloud yaml file"
48 echo -e " -L: LXD credentials yaml file"
49 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
50 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
51 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
52 echo -e " --nojuju: do not juju, assumes already installed"
53 echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
54 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
55 echo -e " --nohostclient: do not install the osmclient"
56 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
57 echo -e " --source: install OSM from source code using the latest stable tag"
58 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
59 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
60 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
61 echo -e " --volume: create a VM volume when installing to OpenStack"
62 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
63 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
64 echo -e " --showopts: print chosen options and exit (only for debugging)"
65 echo -e " -y: do not prompt for confirmation, assumes yes"
66 echo -e " -h / --help: print this help"
67 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
68 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
69 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
70 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
71 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
72 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
73 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
74 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
75 echo -e " [--tag]: Docker image tag"
# NOTE(review): parse_juju_password extracts the password recorded for the
# controller named in $1 from ~/.local/share/juju/accounts.yaml, using a
# sed pipeline that rewrites the YAML into FS-delimited records (FS is the
# \034 field-separator character) and an awk program that tracks nesting by
# indentation. The body below is line-mangled (statements split mid-token,
# stray original line numbers) and the TAIL OF THE AWK PROGRAM (orig lines
# 96-101, including the print of the password and the closing braces) is
# missing from this chunk — restore it from the upstream OSM devops source.
79 # takes a juju/accounts.yaml file and returns the password specific
80 # for a controller. I wrote this using only bash tools to minimize
81 # additions of other packages
82 function parse_juju_password
{
83 password_file
="${HOME}/.local/share/juju/accounts.yaml"
84 local controller_name
=$1
85 local s
='[[:space:]]*' w
='[a-zA-Z0-9_-]*' fs
=$
(echo @|
tr @
'\034')
86 sed -ne "s|^\($s\):|\1|" \
87 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
88 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
89 awk -F$fs -v controller
=$controller_name '{
90 indent = length($1)/2;
92 for (i in vname) {if (i > indent) {delete vname[i]}}
94 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
95 if (match(vn,controller) && match($2,"password")) {
#######################################
# Generate a 32-character random alphanumeric secret.
# Outputs: the secret on stdout (no trailing newline).
#######################################
# Reconstructed: the source was line-mangled (statements split across lines,
# stray line-number tokens) and the closing brace was missing, which made
# the function non-executable.
function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
# NOTE(review): remove_volumes deletes OSM storage: under Kubernetes it
# removes a host-path volume directory (presumably $1 — TODO confirm; the
# assignment line, orig 108, is missing), otherwise it removes the per-stack
# docker volumes listed in $volumes. The else/done/fi/closing-brace lines
# (orig 111-112, 116-118) are missing from this chunk — restore upstream.
106 function remove_volumes
() {
107 if [ -n "$KUBERNETES" ]; then
109 echo "Removing ${k8_volume}"
110 $WORKDIR_SUDO rm -rf ${k8_volume}
113 volumes
="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
114 for volume
in $volumes; do
115 sg docker
-c "docker volume rm ${stack}_${volume}"
# NOTE(review): remove_network removes the docker network "net${stack}".
# The line assigning ${stack} (orig 121, presumably stack="$1" — TODO
# confirm) and the closing brace are missing from this chunk.
120 function remove_network
() {
122 sg docker
-c "docker network rm net${stack}"
# NOTE(review): remove_iptables deletes the PREROUTING DNAT rule that
# forwarded $DEFAULT_IP:17070 to the juju controller ($OSM_VCA_HOST) and
# persists the change with netfilter-persistent. When OSM_VCA_HOST/DEFAULT_IP
# are unset it derives them from `juju show-controller` and the default
# route. The stack=$1 assignment (orig 126) and several fi/closing-brace
# lines (orig 130-131, 138-139, 143-145) are missing from this chunk.
125 function remove_iptables
() {
127 if [ -z "$OSM_VCA_HOST" ]; then
128 OSM_VCA_HOST
=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
129 [ -z "$OSM_VCA_HOST" ] && FATAL
"Cannot obtain juju controller IP address"
132 if [ -z "$DEFAULT_IP" ]; then
133 DEFAULT_IF
=$
(ip route list|
awk '$1=="default" {print $5; exit}')
134 [ -z "$DEFAULT_IF" ] && DEFAULT_IF
=$
(route
-n |
awk '$1~/^0.0.0.0/ {print $8; exit}')
135 [ -z "$DEFAULT_IF" ] && FATAL
"Not possible to determine the interface with the default route 0.0.0.0"
136 DEFAULT_IP
=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
137 [ -z "$DEFAULT_IP" ] && FATAL
"Not possible to determine the IP address of the interface with the default route"
140 if sudo iptables
-t nat
-C PREROUTING
-p tcp
-m tcp
-d $DEFAULT_IP --dport 17070 -j DNAT
--to-destination $OSM_VCA_HOST; then
141 sudo iptables
-t nat
-D PREROUTING
-p tcp
-m tcp
-d $DEFAULT_IP --dport 17070 -j DNAT
--to-destination $OSM_VCA_HOST
142 sudo netfilter-persistent save
# NOTE(review): remove_stack removes the docker swarm stack named ${stack}
# and polls (up to ~30 iterations) until `docker stack ps` reports no
# remaining tasks, calling FATAL if containers persist. The stack=$1
# assignment, the sleep inside the loop, break/fi/done/else/fi and closing
# brace (orig 147, 150-151, 156-157, 159-160, 163, 165-167) are missing
# from this chunk — restore from the upstream OSM devops source.
146 function remove_stack
() {
148 if sg docker
-c "docker stack ps ${stack}" ; then
149 echo -e "\nRemoving stack ${stack}" && sg docker
-c "docker stack rm ${stack}"
152 while [ ${COUNTER} -lt 30 ]; do
153 result
=$
(sg docker
-c "docker stack ps ${stack}" |
wc -l)
154 #echo "Dockers running: $result"
155 if [ "${result}" == "0" ]; then
158 let COUNTER
=COUNTER
+1
161 if [ "${result}" == "0" ]; then
162 echo "All dockers of the stack ${stack} were removed"
164 FATAL
"Some dockers of the stack ${stack} could not be removed. Could not clean it."
# NOTE(review): only the header of remove_k8s_namespace survives here; the
# entire body (orig lines 172-174, presumably a kubectl delete of the
# namespace passed as $1 — TODO confirm) is missing from this chunk.
170 #removes osm deployments and services
171 function remove_k8s_namespace
() {
#removes helm only if there is nothing deployed in helm
# Tears down helm/tiller (reset, delete the tiller service account and
# cluster role binding, remove the binary) — but only when `helm ls -q`
# reports no releases, so deployments shared with other stacks survive.
# Reconstructed: the source was line-mangled and the closing fi/brace
# (orig lines 182-183) were missing.
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
    fi
}
# Remove the weekly update-juju-lxc-images entry from the user's crontab.
# Reconstructed: the source was line-mangled and the closing brace was
# missing.
# NOTE(review): the single-quoted pattern means grep matches the LITERAL
# text '${OSM_DEVOPS}/installers/update-juju-lxc-images' (no expansion);
# this reproduces the visible behavior — confirm against upstream before
# changing the quoting.
function remove_crontab_job() {
    crontab -l | grep -v '${OSM_DEVOPS}/installers/update-juju-lxc-images' | crontab -
}
# Remove both the python2 and python3 osmclient packages (purging config).
# Reconstructed: the source was line-mangled and the closing brace was
# missing.
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}
# NOTE(review): uninstall_lightweight removes a lightweight OSM install:
# for INSTALL_ONLY it only removes the requested addon (ELK shown here);
# otherwise it removes the k8s namespace or swarm stack, deletes the OSM
# docker images via `newgrp docker` here-documents, removes volumes,
# network, iptables rules, the work dir, and destroys the juju controller.
# This chunk is line-mangled and many lines are missing (EONG here-doc
# terminators, elif/else/fi lines, the removal of the ELK stack itself,
# and the closing brace) — do not edit in place; restore from upstream.
# No interior comments are added below because the here-doc extents are
# unknowable and a comment line could land inside a here-document.
196 #Uninstall lightweight OSM: remove dockers
197 function uninstall_lightweight
() {
198 if [ -n "$INSTALL_ONLY" ]; then
199 if [ -n "$INSTALL_ELK" ]; then
200 echo -e "\nUninstalling OSM ELK stack"
202 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
205 echo -e "\nUninstalling OSM"
206 if [ -n "$KUBERNETES" ]; then
207 if [ -n "$INSTALL_K8S_MONITOR" ]; then
208 # uninstall OSM MONITORING
209 uninstall_k8s_monitoring
211 remove_k8s_namespace
$OSM_STACK_NAME
214 remove_stack
$OSM_STACK_NAME
217 echo "Now osm docker images and volumes will be deleted"
218 newgrp docker
<< EONG
219 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
220 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
221 docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
222 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
223 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
224 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
225 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
226 docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
227 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
230 if [ -n "$NGUI" ]; then
231 newgrp docker
<< EONG
232 docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}
235 newgrp docker
<< EONG
236 docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
240 if [ -n "$KUBERNETES" ]; then
241 OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
242 remove_volumes
$OSM_NAMESPACE_VOL
244 remove_volumes
$OSM_STACK_NAME
245 remove_network
$OSM_STACK_NAME
247 [ -z "$CONTROLLER_NAME" ] && remove_iptables
$OSM_STACK_NAME
248 echo "Removing $OSM_DOCKER_WORK_DIR"
249 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
250 [ -z "$CONTROLLER_NAME" ] && sg lxd
-c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
254 echo "Some docker images will be kept in case they are used by other docker stacks"
255 echo "To remove them, just run 'docker image prune' in a terminal"
#Safe unattended install of iptables-persistent
# Pre-seeds the debconf autosave answers so apt-get can install the
# package without any interactive prompt.
# Reconstructed from the mangled source. Fix: the visible code ran the
# "Not installed ... installing" branch when `dpkg -l` SUCCEEDED (package
# present) — the condition must be negated to install only when the
# package is missing.
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
# NOTE(review): two function fragments follow. Lines orig 271-276 are the
# body of the NAT-configuration helper (its "function ...(){" header, orig
# 271, is missing); it ensures iptables-persistent is present and then runs
# the devops nat_osm script as root. Line orig 280 is the body of FATAL()
# (header and the terminating exit, orig 279/281, are missing), which
# prints the fatal-error message built from $1.
270 #Configure NAT rules, based on the current IP addresses of containers
272 check_install_iptables_persistent
274 echo -e "\nConfiguring NAT rules"
275 echo -e " Required root privileges"
276 sudo
$OSM_DEVOPS/installers
/nat_osm
280 echo "FATAL error: Cannot install OSM due to \"$1\""
# Ensure a weekly (Saturday 04:00) crontab entry exists that refreshes the
# juju LXC images, then run one refresh immediately.
# Reconstructed: the source was line-mangled and the closing brace was
# missing.
function update_juju_images(){
    # Add the cron entry only if no update-juju-lxc-images line is present.
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}
# NOTE(review): install_lxd applies LXD sysctl tuning, replaces the apt
# LXD packages with the snap, installs zfs, adds the user to the lxd
# group, initializes LXD from a preseed (injecting core.https_address from
# $DEFAULT_IP), and aligns profile/bridge MTU with the default interface.
# Several lines are missing from this mangled chunk (orig 292-294, 298-299,
# and the closing lines after orig 311) — restore from upstream.
289 function install_lxd
() {
290 # Apply sysctl production values for optimal performance
291 sudo
cp ${OSM_DEVOPS}/installers
/60-lxd-production.conf
/etc
/sysctl.d
/60-lxd-production.conf
295 sudo apt-get remove
--purge -y liblxc1 lxc-common lxcfs lxd lxd-client
296 sudo snap
install lxd
297 sudo apt-get
install zfsutils-linux
-y
300 sudo usermod
-a -G lxd
`whoami`
301 cat ${OSM_DEVOPS}/installers
/lxd-preseed.conf |
sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd
-c "lxd init --preseed"
302 sg lxd
-c "lxd waitready"
303 DEFAULT_INTERFACE
=$
(ip route list|
awk '$1=="default" {print $5; exit}')
304 [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE
=$
(route
-n |
awk '$1~/^0.0.0.0/ {print $8; exit}')
305 DEFAULT_MTU
=$
(ip addr show
$DEFAULT_INTERFACE | perl
-ne 'if (/mtu\s(\d+)/) {print $1;}')
306 sg lxd
-c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
307 sg lxd
-c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
308 #sudo systemctl stop lxd-bridge
309 #sudo systemctl --system daemon-reload
310 #sudo systemctl enable lxd-bridge
311 #sudo systemctl start lxd-bridge
# NOTE(review): body of ask_user — prompts with $1, returns 0 for yes and
# 1 for no, with $2 supplying the default when the user just presses
# Enter, re-prompting on anything else. The function header and the loop
# wrapper around the checks (orig ~314 and 319, plus done/closing brace)
# are missing from this mangled chunk — restore from upstream.
315 # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
316 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
317 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
318 read -e -p "$1" USER_CONFIRMATION
320 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
321 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
322 [ "${USER_CONFIRMATION,,}" == "yes" ] ||
[ "${USER_CONFIRMATION,,}" == "y" ] && return 0
323 [ "${USER_CONFIRMATION,,}" == "no" ] ||
[ "${USER_CONFIRMATION,,}" == "n" ] && return 1
324 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
# NOTE(review): install_osmclient adds the OSM apt repository (stripping
# the "-R "/"-r "/"-u " option prefixes from RELEASE/REPOSITORY/
# REPOSITORY_BASE to build the key URL), installs the apt key, pip and
# the python3 osm-im/osmclient packages, and prints guidance on the
# OSM_HOSTNAME env vars. Some lines are missing from this mangled chunk
# (orig 336, 351, 355-356 including fi and the closing brace).
328 function install_osmclient
(){
329 CLIENT_RELEASE
=${RELEASE#"-R "}
330 CLIENT_REPOSITORY_KEY
="OSM%20ETSI%20Release%20Key.gpg"
331 CLIENT_REPOSITORY
=${REPOSITORY#"-r "}
332 CLIENT_REPOSITORY_BASE
=${REPOSITORY_BASE#"-u "}
333 key_location
=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
334 curl
$key_location | sudo apt-key add
-
335 sudo add-apt-repository
-y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
337 sudo apt-get
install -y python3-pip
338 sudo
-H LC_ALL
=C python3
-m pip
install -U pip
339 sudo
-H LC_ALL
=C python3
-m pip
install -U python-magic pyangbind verboselogs
340 sudo apt-get
install -y python3-osm-im python3-osmclient
341 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
342 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
343 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
344 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME
=`lxc list | awk '($2=="SO-ub"){print $6}'`
345 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME
=`lxc list | awk '($2=="RO"){print $6}'`
346 echo -e "\nOSM client installed"
347 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
348 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
349 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
350 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
352 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
353 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
354 echo " export OSM_HOSTNAME=<OSM_host>"
# NOTE(review): install_prometheus_nodeexporter installs the Prometheus
# node_exporter: skips when the systemd unit is already active; otherwise
# creates a node_exporter system user if absent, downloads/unpacks the
# release tarball versioned by $PROMETHEUS_NODE_EXPORTER_TAG into
# /usr/local/bin, installs the bundled systemd unit, and enables/starts it.
# then/else/fi lines (orig 361, 363, 367, 370, 381-384 incl. closing brace)
# are missing from this mangled chunk — restore from upstream.
359 function install_prometheus_nodeexporter
(){
360 if (systemctl
-q is-active node_exporter
)
362 echo "Node Exporter is already running."
364 echo "Node Exporter is not active, installing..."
365 if getent passwd node_exporter
> /dev
/null
2>&1; then
366 echo "node_exporter user exists"
368 echo "Creating user node_exporter"
369 sudo useradd
--no-create-home --shell /bin
/false node_exporter
371 wget
-q https
://github.com
/prometheus
/node_exporter
/releases
/download
/v
$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-
$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.
tar.gz
-P /tmp
/
372 sudo
tar -C /tmp
-xf /tmp
/node_exporter-
$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.
tar.gz
373 sudo
cp /tmp
/node_exporter-
$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64
/node_exporter
/usr
/local
/bin
374 sudo chown node_exporter
:node_exporter
/usr
/local
/bin
/node_exporter
375 sudo
rm -rf /tmp
/node_exporter-
$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64
*
376 sudo
cp ${OSM_DEVOPS}/installers
/docker
/prometheus_exporters
/node_exporter.service
/etc
/systemd
/system
/node_exporter.service
377 sudo systemctl daemon-reload
378 sudo systemctl restart node_exporter
379 sudo systemctl
enable node_exporter
380 echo "Node Exporter has been activated in this host."
# Fully remove the Prometheus node_exporter: stop and disable the systemd
# unit, delete the unit file, reload systemd, and remove the dedicated
# user and the installed binary.
# Reconstructed: the source was line-mangled (paths split across lines)
# and the closing brace was missing.
function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
}
# NOTE(review): install_docker_ce installs Docker CE from the official
# Docker apt repository, adds $USER to the docker group, restarts the
# service and verifies with `docker version` (FATAL on failure). Orig
# line 407 (between usermod and the service restart) and the closing
# brace (orig 412) are missing from this mangled chunk.
395 function install_docker_ce
() {
396 # installs and configures Docker CE
397 echo "Installing Docker CE ..."
398 sudo apt-get
-qq update
399 sudo apt-get
install -y apt-transport-https ca-certificates software-properties-common
400 curl
-fsSL https
://download.docker.com
/linux
/ubuntu
/gpg | sudo apt-key add
-
401 sudo add-apt-repository
"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
402 sudo apt-get
-qq update
403 sudo apt-get
install -y docker-ce
404 echo "Adding user to group 'docker'"
405 sudo groupadd
-f docker
406 sudo usermod
-aG docker
$USER
408 sudo service docker restart
409 echo "... restarted Docker service"
410 sg docker
-c "docker version" || FATAL
"Docker installation failed"
411 echo "... Docker CE installation done"
# Install docker-compose 1.18.0 from the GitHub release matching this
# host's kernel and architecture, into /usr/local/bin.
# Reconstructed: the source was line-mangled (URL/paths split across
# lines) and the closing brace was missing. Backtick command substitution
# replaced with the modern $(...) form; behavior is unchanged.
function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
# NOTE(review): install_juju installs the juju 2.7 snap and prepends
# /snap/bin to PATH when missing. Orig line 427 (between the PATH fixup
# and the final echo) and the closing brace (orig 429) are missing from
# this mangled chunk — restore from upstream before editing.
423 function install_juju
() {
424 echo "Installing juju"
425 sudo snap
install juju
--classic --channel=2.7/stable
426 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH
="/snap/bin:${PATH}"
428 echo "Finished installation of juju"
# Bootstrap the juju controller $OSM_STACK_NAME on the $OSM_VCA_CLOUDNAME
# cloud if it does not exist yet, then verify exactly one matching
# controller is listed (FATAL otherwise).
# Reconstructed: the source was line-mangled and the fi/closing brace
# (orig 437, 439-440) were missing.
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # NOTE(review): inside the double-quoted awk program, $1 is expanded by
    # the shell (normally empty), not by awk; harmless here since only the
    # matched-line count feeds wc -l — confirm against upstream before
    # changing the quoting.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"| wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
# Install a PREROUTING DNAT rule forwarding $DEFAULT_IP:17070 to the juju
# controller at $OSM_VCA_HOST, unless an identical rule already exists
# (iptables -C), and persist it with netfilter-persistent.
# Reconstructed: the source was line-mangled (each iptables option on its
# own line) and the fi/closing brace (orig 447-448) were missing.
function juju_createproxy() {
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# NOTE(review): generate_docker_images either pulls the OSM module images
# from ${DOCKER_USER} (when $PULL_IMAGES is set) or clones each module
# from osm.etsi.org gerrit at ${COMMIT_ID} and builds it locally; base
# images (zookeeper, kafka, mongo, prometheus, cadvisor, grafana, mariadb,
# mysql) are pulled when $TO_REBUILD is empty or names them. This chunk is
# line-mangled and the many closing `fi` lines (orig 465-466, 469-470,
# 473-474, 477-478, 481-482, 485-486, 489-490, 497-498, 505-506, 513-514,
# 523-524, 531-532, 539-540, 548-549, 556-558, 563-564, 567-568, 570) are
# missing — restore from the upstream OSM devops source before editing.
450 function generate_docker_images
() {
451 echo "Pulling and generating docker images"
452 _build_from
=$COMMIT_ID
453 [ -z "$_build_from" ] && _build_from
="master"
455 echo "OSM Docker images generated from $_build_from"
457 BUILD_ARGS
+=(--build-arg REPOSITORY
="$REPOSITORY")
458 BUILD_ARGS
+=(--build-arg RELEASE
="$RELEASE")
459 BUILD_ARGS
+=(--build-arg REPOSITORY_KEY
="$REPOSITORY_KEY")
460 BUILD_ARGS
+=(--build-arg REPOSITORY_BASE
="$REPOSITORY_BASE")
462 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q KAFKA
; then
463 sg docker
-c "docker pull wurstmeister/zookeeper" || FATAL
"cannot get zookeeper docker image"
464 sg docker
-c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL
"cannot get kafka docker image"
467 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q MONGO
; then
468 sg docker
-c "docker pull mongo" || FATAL
"cannot get mongo docker image"
471 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q PROMETHEUS
; then
472 sg docker
-c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL
"cannot get prometheus docker image"
475 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q PROMETHEUS-CADVISOR
; then
476 sg docker
-c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL
"cannot get prometheus cadvisor docker image"
479 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q GRAFANA
; then
480 sg docker
-c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL
"cannot get grafana docker image"
483 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q NBI ||
echo $TO_REBUILD |
grep -q KEYSTONE-DB
; then
484 sg docker
-c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL
"cannot get keystone-db docker image"
487 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q RO
; then
488 sg docker
-c "docker pull mysql:5" || FATAL
"cannot get mysql docker image"
491 if [ -n "$PULL_IMAGES" ]; then
492 sg docker
-c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL
"cannot pull MON docker image"
493 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q MON
; then
494 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/MON
495 git
-C ${LWTEMPDIR}/MON checkout
${COMMIT_ID}
496 sg docker
-c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL
"cannot build MON docker image"
499 if [ -n "$PULL_IMAGES" ]; then
500 sg docker
-c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL
"cannot pull POL docker image"
501 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q POL
; then
502 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/POL
503 git
-C ${LWTEMPDIR}/POL checkout
${COMMIT_ID}
504 sg docker
-c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL
"cannot build POL docker image"
507 if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
508 sg docker
-c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL
"cannot pull PLA docker image"
509 elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] ||
echo $TO_REBUILD |
grep -q PLA
; then
510 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/PLA
511 git
-C ${LWTEMPDIR}/PLA checkout
${COMMIT_ID}
512 sg docker
-c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL
"cannot build PLA docker image"
515 if [ -n "$PULL_IMAGES" ]; then
516 sg docker
-c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL
"cannot pull NBI docker image"
517 sg docker
-c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL
"cannot pull KEYSTONE docker image"
518 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q NBI
; then
519 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/NBI
520 git
-C ${LWTEMPDIR}/NBI checkout
${COMMIT_ID}
521 sg docker
-c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL
"cannot build NBI docker image"
522 sg docker
-c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL
"cannot build KEYSTONE docker image"
525 if [ -n "$PULL_IMAGES" ]; then
526 sg docker
-c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL
"cannot pull RO docker image"
527 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q RO
; then
528 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/RO
529 git
-C ${LWTEMPDIR}/RO checkout
${COMMIT_ID}
530 sg docker
-c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL
"cannot build RO docker image"
533 if [ -n "$PULL_IMAGES" ]; then
534 sg docker
-c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL
"cannot pull LCM RO docker image"
535 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q LCM
; then
536 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/LCM
537 git
-C ${LWTEMPDIR}/LCM checkout
${COMMIT_ID}
538 sg docker
-c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL
"cannot build LCM docker image"
541 if [ -n "$NGUI" ]; then
542 if [ -n "$PULL_IMAGES" ]; then
543 sg docker
-c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL
"cannot pull ng-ui docker image"
544 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q NG-UI
; then
545 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/NG-UI
546 git
-C ${LWTEMPDIR}/NG-UI checkout
${COMMIT_ID}
547 sg docker
-c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL
"cannot build NG-UI docker image"
550 if [ -n "$PULL_IMAGES" ]; then
551 sg docker
-c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL
"cannot pull light-ui docker image"
552 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q LW-UI
; then
553 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/LW-UI
554 git
-C ${LWTEMPDIR}/LW-UI checkout
${COMMIT_ID}
555 sg docker
-c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL
"cannot build LW-UI docker image"
559 if [ -n "$PULL_IMAGES" ]; then
560 sg docker
-c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL
"cannot pull osmclient docker image"
561 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q LW-osmclient
; then
562 sg docker
-c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
565 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q PROMETHEUS
; then
566 sg docker
-c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL
"cannot get prometheus cadvisor docker image"
569 echo "Finished generation of docker images"
# NOTE(review): cmp_overwrite copies file1 over file2 only when they
# differ, asking the user first (default No) if file2 already exists. The
# file1/file2 assignments (orig 573-574), the else and closing fi/brace
# lines (orig 578, 580-582) are missing from this mangled chunk. Also
# note the `if ! $(cmp ...)` form runs cmp inside a command substitution
# and then executes its (empty) output — presumably `if ! cmp ...` was
# intended; confirm against upstream before fixing.
572 function cmp_overwrite
() {
575 if ! $
(cmp "${file1}" "${file2}" >/dev
/null
2>&1); then
576 if [ -f "${file2}" ]; then
577 ask_user
"The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
579 cp -b ${file1} ${file2}
584 function generate_docker_env_files() {
585 echo "Doing a backup of existing env files
"
586 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
587 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
588 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
589 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
590 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
591 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
592 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
593 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
594 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
596 echo "Generating docker env files
"
597 if [ -n "$KUBERNETES" ]; then
598 #Kubernetes resources
599 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
600 [ -n "$NGUI" ] && $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pods/ng-ui.yaml $OSM_K8S_WORK_DIR/ng-ui.yaml && $WORKDIR_SUDO rm $OSM_K8S_WORK_DIR/light-ui.yaml
602 if [ -n "$NGUI" ]; then
604 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
607 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
609 if [ -n "$INSTALL_PLA" ]; then
610 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
614 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
615 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
618 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
619 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
620 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
621 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
622 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
624 # Prometheus Exporters files
625 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
626 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
630 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
631 echo "OSMLCM_DATABASE_COMMONKEY
=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
634 if ! grep -Fq "OSMLCM_VCA_HOST
" $OSM_DOCKER_WORK_DIR/lcm.env; then
635 echo "OSMLCM_VCA_HOST
=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
637 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.
*|OSMLCM_VCA_HOST
=$OSM_VCA_HOST|g
" $OSM_DOCKER_WORK_DIR/lcm.env
640 if ! grep -Fq "OSMLCM_VCA_SECRET
" $OSM_DOCKER_WORK_DIR/lcm.env; then
641 echo "OSMLCM_VCA_SECRET
=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
643 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.
*|OSMLCM_VCA_SECRET
=$OSM_VCA_SECRET|g
" $OSM_DOCKER_WORK_DIR/lcm.env
646 if ! grep -Fq "OSMLCM_VCA_PUBKEY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
647 echo "OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
649 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.
*|OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
652 if ! grep -Fq "OSMLCM_VCA_CACERT
" $OSM_DOCKER_WORK_DIR/lcm.env; then
653 echo "OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
655 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.
*|OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
658 if ! grep -Fq "OSMLCM_VCA_APIPROXY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
659 echo "OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
661 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.
*|OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
664 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE
" $OSM_DOCKER_WORK_DIR/lcm.env; then
665 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
668 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env
; then
669 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
672 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env
; then
673 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
675 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
679 MYSQL_ROOT_PASSWORD
=$
(generate_secret
)
680 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env
]; then
681 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
683 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env
]; then
684 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
688 KEYSTONE_DB_PASSWORD
=$
(generate_secret
)
689 SERVICE_PASSWORD
=$
(generate_secret
)
690 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env
]; then
691 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
693 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env
]; then
694 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
695 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
696 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
700 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env
]; then
701 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
702 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
706 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env
]; then
707 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
708 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
711 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env
; then
712 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
714 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
717 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env
; then
718 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
720 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
723 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env
; then
724 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
726 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
729 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env
; then
730 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
732 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
737 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env
]; then
738 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
742 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env
]; then
743 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
746 echo "Finished generation of docker env files"
# Writes a tiny wrapper script ($OSM_DOCKER_WORK_DIR/osm) that runs the osmclient
# sidecar container attached to the stack network, and makes it executable.
# Reads: OSM_STACK_NAME, DOCKER_USER, OSM_DOCKER_TAG, OSM_DOCKER_WORK_DIR, WORKDIR_SUDO.
function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
#installs kubernetes packages
# Adds the Google apt repository for Kubernetes and installs a pinned
# kubelet/kubeadm/kubectl (1.15.0-00). Requires sudo; network access needed.
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    # NOTE(review): a line elided by the extraction likely refreshed the apt cache
    # here after adding the repo; restored -- confirm against the original file.
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
# $1 - path to the kubeadm cluster configuration YAML.
function init_kubeadm() {
    # NOTE(review): the extraction elided a line before 'kubeadm init'; upstream
    # disables swap first (kubeadm refuses to run with swap on) -- confirm.
    sudo swapoff -a
    sudo kubeadm init --config $1
    # NOTE(review): trailing elided line presumably paused briefly after init -- confirm.
    sleep 5
}
# Copies the kubeadm admin kubeconfig into $HOME/.kube/config and gives the
# invoking user ownership, so kubectl works without sudo.
# Fails fatally if the kubeadm manifest dir is absent (Kubernetes not installed).
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    # NOTE(review): an elided line here presumably created $HOME/.kube; restored -- confirm.
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
#deploys flannel as daemonsets
# Downloads the flannel CNI manifest into a scratch dir and applies it.
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    # Remove the scratch dir when the script exits.
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # Direct '||' instead of the original '[ $? -ne 0 ] &&' status re-test.
    kubectl apply -f $CNI_DIR \
        || FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
# Creates the stack namespace and one generic secret per component env file
# found under $OSM_DOCKER_WORK_DIR.
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}
#deploys osm pods and services
# Untaints the master node so pods can be scheduled on it, then applies all
# manifests in $OSM_K8S_WORK_DIR into the stack namespace.
function deploy_osm_services() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/' | awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
# Deploys the optional PLA (placement) service: patches its manifest with the
# requested image tag and host-volume path, then applies it.
function deploy_osm_pla_service() {
    # corresponding to parse_yaml
    [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
#Install helm and tiller
# Installs helm v2.15.2 if absent, creates the tiller service account and
# cluster role binding if missing, runs 'helm init', and waits (up to
# tiller_timeout seconds) for the tiller deployment to become ready.
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has being configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120; counter=0
        while (( counter < tiller_timeout ))
        do
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && break
            # NOTE(review): the loop tail (sleep + counter increment + 'done'/'fi')
            # was elided by the extraction; restored -- confirm step size upstream.
            counter=$((counter + 5))
            sleep 5
        done
    fi
}
# Rewrites the image references in each component manifest under
# $OSM_K8S_WORK_DIR from opensourcemano/<svc>:* to $DOCKER_USER/<svc>:<tag>.
# $1 - docker tag to pin (callers pass $OSM_DOCKER_TAG).
function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
    # NOTE(review): this assignment was elided by the extraction; $TAG is used
    # below and callers pass the tag as $1 -- restored, confirm upstream.
    TAG=$1
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/$DOCKER_USER\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
# Rewrites the hostPath volume in each stateful component manifest from the
# default /var/lib/osm to the per-stack $OSM_NAMESPACE_VOL.
function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql prometheus"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
# Initializes a docker swarm on this host. When the default interface MTU is
# non-standard, pre-creates docker_gwbridge with a free 172.x subnet and the
# matching MTU before 'swarm init'.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # Collect existing docker network names, then derive an unused 172.x/…
        # subnet one octet above the highest one currently in use.
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    # NOTE(review): a trailing elided line presumably paused after swarm init -- confirm.
    sleep 5
}
# Creates the attachable overlay network net${OSM_STACK_NAME} used by all
# OSM stack services, honoring the detected interface MTU.
function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
# Deploys the lightweight (docker swarm) build: computes the host/container
# port mapping list, writes osm_ports.sh with the exported deployment
# variables, and runs 'docker stack deploy' (optionally including PLA).
function deploy_lightweight() {
    echo "Deploying lightweight build"
    # NOTE(review): the extraction elided the assignments for OSM_NBI_PORT,
    # OSM_RO_PORT, OSM_UI_PORT, OSM_MON_PORT and OSM_PROM_PORT here; restored
    # from the upstream installer -- verify against the original file.
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # Container-side ports only (no host exposure).
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container mappings.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    # Persist the deployment environment consumed by docker-compose.yaml.
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    # NOTE(review): matching popd was elided by the extraction; restored -- confirm.
    popd

    echo "Finished deployment of lightweight build"
}
# Pulls the ELK docker images, deploys the osm_elk stack, waits for Kibana to
# answer on 127.0.0.1:5601, then creates the default filebeat index pattern.
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # NOTE(review): the wait-loop counters were elided by the extraction;
    # restored from the upstream installer -- verify step/timeout values.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
}
# Registers the local Kubernetes cluster in OSM: creates a dummy VIM account
# (_system-osm-vim), then adds the cluster using the local kubeconfig.
function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
          --name _system-osm-vim \
          --account_type dummy \
          --auth_url http://dummy \
          --user osm --password osm --tenant osm \
          --description "dummy" \
          --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
          --creds ${HOME}/.kube/config \
          --vim _system-osm-vim \
          --k8s-nets '{"net1": null}' \
          --version '1.15' \
          --description "OSM Internal Cluster" \
          _system-osm-k8s
    # NOTE(review): the '--version' argument and the trailing cluster name were
    # elided by the extraction; restored from upstream -- confirm.
}
# NOTE(review): this whole function was mangled by line-wrapping during
# extraction; the leading digits are original line numbers fused into the
# text, and gaps in that numbering show elided lines (mostly fi/else/done).
# The code below is kept byte-identical; comments flag issues found on review.
1008 function install_lightweight
() {
# Per-stack working dir and (for k8s) per-namespace manifest/volume paths.
1009 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR
="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1010 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR
="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1011 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir
-p $OSM_DOCKER_WORK_DIR
1012 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir
-p $OSM_DOCKER_WORK_DIR/osm_pla
1013 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers
/docker
/cluster-config.yaml
$OSM_DOCKER_WORK_DIR/cluster-config.yaml
# Refuse to run as root; the installer expects a sudo-capable normal user.
1016 [ "$USER" == "root" ] && FATAL
"You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
# Confirmation prompts (skipped with ASSUME_YES).
1019 if [ -n "$KUBERNETES" ]; then
1020 [ -z "$ASSUME_YES" ] && ! ask_user
"The installation will do the following
1021 1. Install and configure LXD
1023 3. Install docker CE
1024 4. Disable swap space
1025 5. Install and initialize Kubernetes
1026 as pre-requirements.
1027 Do you want to proceed (Y/n)? " y
&& echo "Cancelled!" && exit 1
1030 [ -z "$ASSUME_YES" ] && ! ask_user
"The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y
&& echo "Cancelled!" && exit 1
1034 echo "Installing lightweight build of OSM"
1035 LWTEMPDIR
="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX
")"
1036 trap 'rm -rf "${LWTEMPDIR}"' EXIT
# Determine default interface, its IP and MTU (used for docker networks and VCA proxy).
1037 DEFAULT_IF
=$
(ip route list|
awk '$1=="default" {print $5; exit}')
1038 [ -z "$DEFAULT_IF" ] && DEFAULT_IF
=$
(route
-n |
awk '$1~/^0.0.0.0/ {print $8; exit}')
1039 [ -z "$DEFAULT_IF" ] && FATAL
"Not possible to determine the interface with the default route 0.0.0.0"
1040 DEFAULT_IP
=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1041 [ -z "$DEFAULT_IP" ] && FATAL
"Not possible to determine the IP address of the interface with the default route"
1042 DEFAULT_MTU
=$
(ip addr show
${DEFAULT_IF} | perl
-ne 'if (/mtu\s(\d+)/) {print $1;}')
1044 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
1045 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1046 need_packages_lw
="snapd"
1047 echo -e "Checking required packages: $need_packages_lw"
1048 dpkg
-l $need_packages_lw &>/dev
/null \
1049 ||
! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1050 || sudo apt-get update \
1051 || FATAL
"failed to run apt-get update"
1052 dpkg
-l $need_packages_lw &>/dev
/null \
1053 ||
! echo -e "Installing $need_packages_lw requires root privileges." \
1054 || sudo apt-get
install -y $need_packages_lw \
1055 || FATAL
"failed to install $need_packages_lw"
1061 [ -z "$INSTALL_NOJUJU" ] && install_juju
# Bootstrap or reuse a juju controller; register external LXD cloud if given.
1064 if [ -z "$OSM_VCA_HOST" ]; then
1065 if [ -z "$CONTROLLER_NAME" ]; then
1066 if [ -n "$LXD_CLOUD_FILE" ]; then
1067 [ -z "$LXD_CRED_FILE" ] && FATAL
"The installer needs the LXD credential yaml if the LXD is external"
1068 OSM_VCA_CLOUDNAME
="lxd-cloud"
1069 juju add-cloud
$OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud
$OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
1070 juju add-credential
$OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential
$OSM_VCA_CLOUDNAME lxd-cloud-creds
-f $LXD_CRED_FILE
1072 juju_createcontroller
1074 OSM_VCA_CLOUDNAME
="lxd-cloud"
1075 if [ -n "$LXD_CLOUD_FILE" ]; then
1076 [ -z "$LXD_CRED_FILE" ] && FATAL
"The installer needs the LXD credential yaml if the LXD is external"
1077 juju add-cloud
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud
-c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
1078 juju add-credential
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud
-c $CONTROLLER_NAME -f $LXD_CRED_FILE
# Generate a local lxd-cloud definition plus client certificate credentials.
1081 cat << EOF > ~/.osm/lxd-cloud.yaml
1085 auth-types: [certificate]
1086 endpoint: "https://$DEFAULT_IP:8443"
1088 ssl-hostname-verification: false
1090 openssl req
-nodes -new -x509 -keyout ~
/.osm
/client.key
-out ~
/.osm
/client.crt
-days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
# NOTE(review): 'local' is only valid inside a function body in bash; it is
# legal here, but these vars leak nothing outside install_lightweight.
1091 local server_cert
=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
1092 local client_cert
=`cat ~/.osm/client.crt | sed 's/^/ /'`
1093 local client_key
=`cat ~/.osm/client.key | sed 's/^/ /'`
1094 cat << EOF > ~/.osm/lxd-credentials.yaml
1098 auth-type: certificate
1106 lxc config trust add
local: ~
/.osm
/client.crt
1107 juju add-cloud
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~
/.osm
/lxd-cloud.yaml
--force || juju update-cloud lxd-cloud
-c $CONTROLLER_NAME -f ~
/.osm
/lxd-cloud.yaml
1108 juju add-credential
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~
/.osm
/lxd-credentials.yaml || juju update-credential lxd-cloud
-c $CONTROLLER_NAME -f ~
/.osm
/lxd-credentials.yaml
# Resolve the VCA (juju controller) connection parameters.
1111 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1112 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1113 [ -z "$OSM_VCA_HOST" ] && FATAL
"Cannot obtain juju controller IP address"
1115 track juju_controller
1117 if [ -z "$OSM_VCA_SECRET" ]; then
1118 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$OSM_STACK_NAME)
1119 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$CONTROLLER_NAME)
1120 [ -z "$OSM_VCA_SECRET" ] && FATAL
"Cannot obtain juju secret"
1122 if [ -z "$OSM_VCA_PUBKEY" ]; then
1123 OSM_VCA_PUBKEY
=$
(cat $HOME/.local
/share
/juju
/ssh
/juju_id_rsa.pub
)
1124 [ -z "$OSM_VCA_PUBKEY" ] && FATAL
"Cannot obtain juju public key"
1126 if [ -z "$OSM_VCA_CACERT" ]; then
1127 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
1128 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
1129 [ -z "$OSM_VCA_CACERT" ] && FATAL
"Cannot obtain juju CA certificate"
1131 if [ -z "$OSM_VCA_APIPROXY" ]; then
1132 OSM_VCA_APIPROXY
=$DEFAULT_IP
1133 [ -z "$OSM_VCA_APIPROXY" ] && FATAL
"Cannot obtain juju api proxy"
1138 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1139 OSM_DATABASE_COMMONKEY
=$
(generate_secret
)
# BUG(review): the test below checks the literal string "OSM_DATABASE_COMMONKEY"
# (missing '$'), which is never empty, so this FATAL guard can never fire.
1140 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL
"Cannot generate common db secret"
1143 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
1146 #Installs Kubernetes and deploys osm services
1147 if [ -n "$KUBERNETES" ]; then
1150 init_kubeadm
$OSM_DOCKER_WORK_DIR/cluster-config.yaml
1154 #install_docker_compose
1155 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1159 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1162 generate_docker_env_files
1164 if [ -n "$KUBERNETES" ]; then
1165 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1166 # uninstall OSM MONITORING
1167 uninstall_k8s_monitoring
1168 track uninstall_k8s_monitoring
1170 #remove old namespace
1171 remove_k8s_namespace
$OSM_STACK_NAME
1174 [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml
$OSM_DOCKER_TAG
# BUG(review): missing space before ']' below; the test builtin gets the
# malformed argument '"$INSTALL_PLA"]' and errors at runtime, so the PLA
# deploy branch is effectively never taken. Should be [ -n "$INSTALL_PLA" ].
1177 if [ -n "$INSTALL_PLA"]; then
1178 # optional PLA install
1179 deploy_osm_pla_service
1181 track deploy_osm_services_k8s
1182 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1183 # install OSM MONITORING
1184 install_k8s_monitoring
1185 track install_k8s_monitoring
1189 remove_stack
$OSM_STACK_NAME
1190 create_docker_network
1192 generate_osmclient_script
1194 install_prometheus_nodeexporter
1196 [ -n "$INSTALL_VIMEMU" ] && install_vimemu
&& track vimemu
1197 [ -n "$INSTALL_ELK" ] && deploy_elk
&& track elk
1200 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
# Health check; on failure only prints guidance and continues.
1203 echo -e "Checking OSM health state..."
1204 if [ -n "$KUBERNETES" ]; then
1205 $OSM_DEVOPS/installers
/osm_health.sh
-s ${OSM_STACK_NAME} -k || \
1206 echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
1207 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
1210 $OSM_DEVOPS/installers
/osm_health.sh
-s ${OSM_STACK_NAME} || \
1211 echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
1212 echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
1215 track after_healthcheck
1217 [ -n "$KUBERNETES" ] && add_local_k8scluster
1218 track add_local_k8scluster
# Best-effort download beacon; output discarded.
1221 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-8.0
-eight/README2.txt
&> /dev
/null
# Installs OSM on an OpenStack cloud via the Ansible playbook.
# $1 - path to an openrc file OR a clouds.yaml cloud name
# $2 - external network name (required)
# $3 - whether to attach a volume ("true"/"false")
function install_to_openstack() {
    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Install Ansible, OpenStack client and SDK
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        # NOTE(review): the elided line here presumably sourced the openrc
        # file so the openstack auth env vars reach ansible -- confirm.
        . $1
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
        -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
        -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi
}
# Clones, builds and starts the vim-emu (emulated VIM) docker container,
# then prints how to register it as a VIM in OSM.
function install_vimemu() {
    # Fix: '-e' was missing, so "\n" printed literally instead of a newline.
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    # NOTE(review): an elided line here presumably slept while the container
    # started -- confirm the delay value upstream.
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
# Installs the OSM Kubernetes monitoring stack via the devops helper script.
function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
# Removes the OSM Kubernetes monitoring stack via the devops helper script.
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
# Prints every installer option/variable for --showopts debugging.
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # Fix: the original printed $OSM_STACK_NAME under the OSM_WORK_DIR label.
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    # NOTE(review): one elided line here likely printed NGUI -- confirm upstream.
    echo "NGUI=$NGUI"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
# Anonymous install-progress beacon: builds a Woopra tracking URL from the
# install mode and the event name in $1, and fires it with a silent wget.
# NOTE(review): the function header and the 'ctime' timestamp line were elided
# by the extraction; restored from the upstream installer -- confirm.
function track() {
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    # Event name prefix reflects the chosen install flavor.
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
# Default values for the installer options (overridden by getopts below).
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseEIGHT"
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
# Timestamp used both as tracking cookie and install-duration baseline.
SESSION_ID=`date +%s`
OSM_VCA_CLOUDNAME="localhost"
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
# Pinned third-party image tags.
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Valid k8s namespace / swarm stack name pattern (RFC1123-label style).
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# Parse command-line options.
# NOTE(review): the case labels, `esac` and the error-exit lines were lost in
# text extraction; the arm structure below is reconstructed from the getopts
# optstring and the surviving arm bodies — verify against the original script.
while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-: hy" o; do
    case "${o}" in
        b)
            # install from source at the given refspec; implies no image pulling
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            # container orchestrator: swarm (default) or k8s
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        n)
            # UI flavour: lwui (default) or ngui
            [ "${OPTARG}" == "lwui" ] && continue
            [ "${OPTARG}" == "ngui" ] && NGUI="y" && continue
            echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            # install only the given add-on on top of an existing deployment
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            # deploy OSM to an OpenStack VM, using an openrc file or cloud name
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            # accumulate module names to rebuild from source
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            # stack name (swarm) or namespace (k8s); k8s namespaces must match RE_CHECK
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            # read the juju public key from the given file
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        -)
            # long options (--help, --source, --uninstall, ...)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
# Validate the accumulated -m selections. TO_REBUILD entries are appended with a
# leading space, so a lone NONE is exactly " NONE"; NONE mixed with anything else
# (detected via grep on the full list) is an error.
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
# -m PLA only makes sense when the PLA module is being installed (--pla).
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
# --showopts: dump the effective configuration and stop.
# NOTE(review): the body of this branch was lost in extraction; reconstructed
# as a dump-and-exit — verify against the original script.
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi
# --charmed: delegate the whole (un)installation to the juju-based installers
# and print osmclient configuration steps. The installer never returns to the
# classic flow below.
# NOTE(review): the else/fi/exit lines were lost in extraction; reconstructed.
if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi
    echo "Your installation is now complete, follow these steps for configuring the osmclient:"
    echo
    echo "1. Get the NBI IP with the following command:"
    echo
    # print the juju/jq one-liner literally (single quotes kept unexpanded for the user)
    echo NBI_IP='`juju status --format json | jq -rc '"'"'.applications."nbi-k8s".address'"'"'`'
    echo
    echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
    echo
    echo "export OSM_HOSTNAME=\$NBI_IP"
    echo
    echo "3. Add the previous command to your .bashrc for other Shell sessions"
    echo
    echo "echo \"export OSM_HOSTNAME=\$NBI_IP\" >> ~/.bashrc"
    exit 0
fi
# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

# host packages required by the classic install path
need_packages="git wget curl tar"

# -O shortcut: deploy OSM inside an OpenStack VM and stop here
[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0
# Install missing prerequisites. The `|| ! echo ... || cmd || FATAL` idiom:
# if dpkg reports all packages installed, the chain stops; otherwise the
# negated echo prints a notice and forces the next command (apt) to run,
# and FATAL fires only if apt itself fails.
echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
# jq is delivered as a snap on the supported Ubuntu hosts
sudo snap install jq
# Locate or fetch the OSM devops repository unless -D already provided it.
# NOTE(review): the else/fi lines were lost in extraction; reconstructed.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        # --test: this script is being run from a devops checkout; reuse it
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            # no -b given: pick the highest v<digit>* tag as the stable release
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        fi

        echo -e "\nDEVOPS Using commit $COMMIT_ID"
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi
# Pull in the shared helper functions (FATAL, install_*, deploy_*, ...).
. $OSM_DEVOPS/common/all_funcs

# Uninstall shortcut for the lightweight (docker/k8s) deployment.
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
# -o add-on-only installs on top of an existing deployment.
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
#Installation starts here
# NOTE(review): this wget is presumably an installation-tracking beacon
# (download counter) — output is discarded; confirm before removing.
wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README.txt &> /dev/null

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

# lxd is a hard prerequisite of the classic (from-source) install path
echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true
#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# NOTE(review): second tracking beacon, presumably marking this stage of the
# install — output discarded; confirm before removing.
wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null