2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
# Print the installer's usage/help text.
# NOTE(review): the original `function usage(){` header and closing brace were
# lost in the paste; they are reconstructed here. The stray line numbers fused
# into each line have been removed.
function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " -h / --help: print this help"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e " -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (LW-UI, NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " --volume: create a VM volume when installing to OpenStack"
    # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
    echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
    echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
    echo -e " [--tag]: Docker image tag. (--charmed option)"
    echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
}
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
# NOTE(review): several interior lines of the awk program were lost in the
# paste (vname bookkeeping and the final printf/closing braces); they were
# reconstructed here — verify against the upstream installer.
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: YAML key chars, fs: FS char (0x1c) unlikely to appear in data
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # Flatten the YAML into "indent FS key FS value" records, then walk the
    # indentation to rebuild each key's full path and print the password that
    # belongs to the requested controller.
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$password_file" |
    awk -F"$fs" -v controller="$controller_name" '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
# Produce a 32-character alphanumeric secret.
# Reading the urandom byte stream directly guarantees 32 characters; the old
# `head /dev/urandom` read a fixed number of random "lines" and could, in
# theory, yield fewer alphanumeric bytes than needed.
function generate_secret() {
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
# Remove OSM storage: in k8s mode $1 is the host-path volume directory to
# delete; in swarm mode $1 is the stack name whose named docker volumes are
# removed one by one.
# NOTE(review): the else/done/fi lines were lost in the paste and are
# reconstructed here — verify against the upstream installer.
function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
# Remove the docker network created for the given stack ("net<stack>").
# NOTE(review): the `stack=$1` line and closing brace were lost in the paste
# and are reconstructed here.
function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
# Remove the DNAT rule that proxies port 17070 on the host's default IP to the
# juju (VCA) controller, then persist the iptables state.
# NOTE(review): missing fi/} lines reconstructed from the paste — verify
# against the upstream installer.
function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Derive the controller IP from `juju show-controller` output.
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        # NOTE(review): this fallback repeats the exact same command as the
        # primary lookup (effectively only a retry) — confirm intent upstream.
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # Only delete the rule if it is currently present (-C checks existence).
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Remove a docker swarm stack and wait (up to ~30 iterations) until all of its
# containers are gone; abort via FATAL if they never disappear.
# NOTE(review): the counter initialization, break/fi/done and else/fi lines
# were lost in the paste and are reconstructed here — verify upstream.
function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
    fi
}
#removes osm deployments and services
# NOTE(review): the function body was lost in the paste; deleting the
# namespace (which removes everything inside it) is the reconstructed
# behavior — verify against the upstream installer.
function remove_k8s_namespace() {
    kubectl delete ns $1
}
#removes helm only if there is nothing deployed in helm
# NOTE(review): the trailing cleanup of ~/.helm and the closing fi/} were lost
# in the paste and are reconstructed — verify against the upstream installer.
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}
# Remove the weekly juju-lxc image refresh job from the user's crontab.
# Fix: the old single-quoted pattern '${OSM_DEVOPS}/installers/...' was taken
# literally by grep (never expanded), so it never matched the installed cron
# line and the job was never removed. Match on the script name itself, which
# is also how update_juju_images detects the entry.
function remove_crontab_job() {
    crontab -l 2>/dev/null | grep -v update-juju-lxc-images | crontab -
}
# Purge both the legacy Python 2 and the Python 3 osmclient packages.
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}
#Uninstall lightweight OSM: remove dockers
# NOTE(review): many lines of this function (else/fi branches, the heredoc
# terminator, the ELK stack removal) were lost in the paste and are
# reconstructed here — verify the whole function against the upstream
# installer before relying on it.
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # TODO: clean-up of images should take into account if other tags were used for specific modules
        # NOTE(review): \${module} must be escaped so the loop variable expands
        # inside the newgrp shell, not in this one — confirm upstream form.
        newgrp docker << EONG
for module in ro lcm keystone nbi mon pol pla osmclient; do
    docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/\${module}:${OSM_DOCKER_TAG}
done
EONG
        if [ -n "$NGUI" ]; then
            sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
        else
            sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
        fi

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
            [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        fi
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
    fi

    # Cleanup Openstack installer venv
    if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
        rm -r $OPENSTACK_PYTHON_VENV
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
#Safe unattended install of iptables-persistent
# Preseeds the debconf answers so the package installs without prompting.
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
#Configure NAT rules, based on the current IP addresses of containers
# NOTE(review): the function header line was lost in the paste; the name
# `nat` follows the upstream installer's convention — verify.
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
# Print the fatal reason and abort the installer.
# NOTE(review): the function header and the `exit 1` were lost in the paste;
# both are reconstructed here — verify against the upstream installer.
function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}
# Ensure a weekly cron job exists that refreshes the cached juju lxc images,
# then run one refresh immediately.
function update_juju_images(){
    # Add the cron entry only if it is not already present.
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}
# Install and configure LXD from snap: apply sysctl tuning, remove the old
# apt packages, preseed `lxd init`, and align the default profile/bridge MTU
# with the default interface.
# NOTE(review): a few lines (sysctl reload, section comments, fi/}) were lost
# in the paste and reconstructed — verify against the upstream installer.
function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    # Inject the https listen address into the preseed before feeding lxd init.
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
# NOTE(review): the `while true` wrapper and closing done/} were lost in the
# paste and are reconstructed here.
function ask_user(){
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # Empty answer falls back to the declared default, if any.
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
# Install the OSM client from the ETSI apt repository: add the repo key and
# source, install python3-osm-im/python3-osmclient plus their pip
# requirements, and print follow-up environment hints.
# NOTE(review): a few lines (apt-get update, else/fi) were lost in the paste
# and reconstructed; message spacing may differ from upstream — verify.
function install_osmclient(){
    # Strip the option prefixes the caller may have left in these globals.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
# Install and activate the Prometheus node_exporter as a systemd service,
# creating its dedicated system user if needed. Skips everything when the
# service is already active.
# NOTE(review): then/else/fi lines were lost in the paste and reconstructed —
# verify against the upstream installer.
function install_prometheus_nodeexporter(){
    if systemctl -q is-active node_exporter; then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Fetch the release tarball, install the binary, then clean up /tmp.
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
}
# Stop, disable and fully remove the node_exporter service, its system user
# and its binary.
function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
}
# Install Docker CE from the official apt repository, add the current user to
# the docker group, and (optionally) configure a registry mirror proxy in
# /etc/docker/daemon.json.
# NOTE(review): else/fi lines and the heredoc body for a fresh daemon.json
# were lost in the paste and reconstructed — verify against upstream.
function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sudo service docker restart
    echo "... restarted Docker service"
    if [ -n "${DOCKER_PROXY_URL}" ]; then
        echo "Configuring docker proxy ..."
        if [ -f /etc/docker/daemon.json ]; then
            if grep -q registry-mirrors /etc/docker/daemon.json; then
                # Replace the existing mirrors entry in place.
                sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
            else
                # Insert a mirrors entry right after the opening brace.
                sudo sed -i "s|{|{\n \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
            fi
        else
            sudo bash -c "cat << EOF > /etc/docker/daemon.json
{
 \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
}
EOF"
        fi
        sudo systemctl daemon-reload
        sudo service docker restart
        echo "... restarted Docker service again"
    fi
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
}
# Download the docker-compose 1.18.0 binary for this platform into
# /usr/local/bin and make it executable.
function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
# Install juju from snap (2.8 channel), make sure /snap/bin is on PATH, and
# seed the cached lxd images unless caching was disabled.
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.8/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    [ -n "$INSTALL_NOCACHELXDIMAGES" ] || update_juju_images
    echo "Finished installation of juju"
    return 0
}
# Bootstrap the juju controller on the lxd cloud if it does not exist yet,
# verify it is listed, and enable the k8s-operators feature flag.
# NOTE(review): the `$1` inside the double-quoted awk program is expanded by
# the SHELL (usually empty), so awk prints the whole matching line — this
# mirrors the pasted source; confirm intent against upstream.
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"| wc -l) -eq 1 ] || FATAL "Juju installation failed"
    juju controller-config features=[k8s-operators]
}
# Register the local kubeconfig as a k8s cloud on the existing controller,
# using openebs-hostpath as the storage class.
function juju_addk8s() {
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
}
# Register the local kubeconfig as a client-side k8s cloud and bootstrap the
# OSM controller on it with a LoadBalancer controller service.
function juju_createcontroller_k8s(){
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client
    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
            --config controller-service-type=loadbalancer \
            --agent-version=$JUJU_AGENT_VERSION
}
# Declare the local LXD daemon as a juju cloud: generate a client certificate,
# write cloud/credential YAML files, trust the cert in LXD and register both
# with the controller.
# NOTE(review): the YAML heredoc bodies and the sed indent amounts were
# partially lost in the paste and reconstructed — verify against upstream.
function juju_addlxd_cloud(){
    mkdir -p /tmp/.osm
    OSM_VCA_CLOUDNAME="lxd-cloud"
    LXDENDPOINT=$DEFAULT_IP
    LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
    LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml

    cat << EOF > $LXD_CLOUD
clouds:
  $OSM_VCA_CLOUDNAME:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
    openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
    # Indent the PEM blocks so they nest correctly under the YAML keys below.
    local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
    local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/        /'`
    local client_key=`cat /tmp/.osm/client.key | sed 's/^/        /'`

    cat << EOF > $LXD_CREDENTIALS
credentials:
  $OSM_VCA_CLOUDNAME:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
    lxc config trust add local: /tmp/.osm/client.crt
    juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
    juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}
# Add (if absent) a DNAT rule forwarding port 17070 on the host's default IP
# to the juju (VCA) controller, then persist the iptables state.
function juju_createproxy() {
    check_install_iptables_persistent

    # -C checks for the rule; only append (-A) when it does not exist yet.
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Log in to the configured docker registry with the configured credentials.
# NOTE(review): one original line of this function was lost in the paste
# (likely an informational echo) — verify against the upstream installer.
function docker_login() {
    sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}"
}
# Pull third-party images (kafka, mongo, prometheus, grafana, mariadb, mysql)
# and then either pull the OSM module images (PULL_IMAGES set) or build them
# from the gerrit sources. TO_REBUILD limits work to the named modules.
# NOTE(review): the continue/else/fi/done lines were lost in the paste and
# reconstructed — verify against the upstream installer.
function generate_docker_images() {
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA osmclient; do
            module_lower=${module,,}
            # Exactly one UI image is pulled: LW-UI unless NGUI is selected.
            if [ $module == "LW-UI" ]; then
                if [ -n "$NGUI" ]; then
                    continue
                else
                    module_lower="light-ui"
                fi
            fi
            if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
                continue
            fi
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            # A per-module tag override applies only to modules listed in TO_REBUILD.
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"

        for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "LW-UI" ]; then
                    if [ -n "$NGUI" ]; then
                        continue
                    else
                        module_lower="light-ui"
                    fi
                fi
                if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
                    continue
                fi
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done

        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
}
# Copy $1 over $2 when they differ; if $2 already exists, ask the user first
# (default: no). -b keeps a backup of the overwritten file.
# Fix: the original `if ! $(cmp ...)` executed cmp's (empty) output instead of
# testing cmp itself; use cmp's exit status directly with -s (silent).
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
# Copy the docker-compose manifests into the work dir: the base stack file,
# the UI variant (NG-UI or light-ui), and optionally the PLA add-on.
# NOTE(review): the else/fi lines were lost in the paste and reconstructed.
function generate_docker_compose_files() {
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    if [ -n "$NGUI" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    else
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-lightui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    fi
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}
# Copy the Kubernetes pod manifests into the work dir and prune the ones that
# do not apply (mongo, and whichever UI variant is not selected).
# NOTE(review): the else/fi lines were lost in the paste and reconstructed.
function generate_k8s_manifest_files() {
    #Kubernetes resources
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
    if [ -n "$NGUI" ]; then
        $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/light-ui.yaml
    else
        $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/ng-ui.yaml
    fi
}
# Copy Prometheus, Grafana and exporter config files into the docker work
# dir. Skipped entirely in Kubernetes mode (the k8s manifests handle it).
function generate_prometheus_grafana_files() {
    [ -n "$KUBERNETES" ] && return

    # Prometheus files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
}
691 function generate_docker_env_files() {
692 echo "Doing a backup of existing env files
"
693 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
694 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
695 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
696 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
697 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
698 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
699 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
700 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
701 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
703 echo "Generating docker env files
"
705 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
706 echo "OSMLCM_DATABASE_COMMONKEY
=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
709 if ! grep -Fq "OSMLCM_VCA_HOST
" $OSM_DOCKER_WORK_DIR/lcm.env; then
710 echo "OSMLCM_VCA_HOST
=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
712 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.
*|OSMLCM_VCA_HOST
=$OSM_VCA_HOST|g
" $OSM_DOCKER_WORK_DIR/lcm.env
715 if ! grep -Fq "OSMLCM_VCA_SECRET
" $OSM_DOCKER_WORK_DIR/lcm.env; then
716 echo "OSMLCM_VCA_SECRET
=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
718 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.
*|OSMLCM_VCA_SECRET
=$OSM_VCA_SECRET|g
" $OSM_DOCKER_WORK_DIR/lcm.env
721 if ! grep -Fq "OSMLCM_VCA_PUBKEY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
722 echo "OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
724 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.
*|OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
727 if ! grep -Fq "OSMLCM_VCA_CACERT
" $OSM_DOCKER_WORK_DIR/lcm.env; then
728 echo "OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
730 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.
*|OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
733 if [ -n "$OSM_VCA_APIPROXY" ]; then
734 if ! grep -Fq "OSMLCM_VCA_APIPROXY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
735 echo "OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
737 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.
*|OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
741 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE
" $OSM_DOCKER_WORK_DIR/lcm.env; then
742 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
745 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env
; then
746 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
749 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env
; then
750 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
752 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
755 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env
; then
756 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
758 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
762 MYSQL_ROOT_PASSWORD
=$
(generate_secret
)
763 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env
]; then
764 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
766 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env
]; then
767 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
769 if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env
; then
770 echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
774 KEYSTONE_DB_PASSWORD
=$
(generate_secret
)
775 SERVICE_PASSWORD
=$
(generate_secret
)
776 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env
]; then
777 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
779 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env
]; then
780 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
781 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
782 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
786 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env
]; then
787 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
788 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
792 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env
]; then
793 echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
794 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
795 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
798 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env
; then
799 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
801 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
804 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env
; then
805 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
807 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
810 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env
; then
811 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
813 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
816 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env
; then
817 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
819 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
824 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env
]; then
825 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
829 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env
]; then
830 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
833 echo "Finished generation of docker env files"
# Writes a one-line wrapper script that runs the osmclient sidecar container
# attached to the OSM overlay network, and makes it executable.
function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
#installs kubernetes packages
function install_kube() {
    # Prerequisite for fetching packages over HTTPS
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    # Trust and register the upstream Kubernetes apt repository
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    # NOTE(review): an 'apt-get update' line was elided here in the mangled
    # source (original line 847) -- restored; confirm against upstream.
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    # Pin kubelet/kubeadm/kubectl to a known-good version and hold them so
    # unattended upgrades cannot break the cluster.
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
    sudo apt-mark hold kubelet kubeadm kubectl
}
#initializes kubernetes control plane
# $1 - path to the kubeadm cluster configuration yaml
function init_kubeadm() {
    # kubeadm refuses to run with swap enabled: disable the active swap
    # devices, not only the fstab entries (fix: the fstab edit alone leaves
    # swap on until the next reboot).
    sudo swapoff -a
    # Comment out swap entries so swap stays off across reboots.
    sudo sed -i.bak '/.*none.*swap/s/^\(.*\)$/#\1/g' /etc/fstab
    sudo kubeadm init --config $1
    sleep 5
}
# Copies the kubeadm admin kubeconfig into the invoking user's ~/.kube/config
# so kubectl works without sudo. Aborts if the k8s manifests dir is missing.
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    # fix: ensure the destination directory exists -- 'cp' into
    # $HOME/.kube/config fails on a fresh user account otherwise.
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    # Hand ownership back to the current user (admin.conf is root-owned).
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
# Installs the OpenEBS operator and waits (up to storageclass_timeout seconds)
# for the openebs-hostpath storageclass, then marks it as the cluster default.
function install_k8s_storageclass() {
    OPENEBS_DIR="$(mktemp -d -q --tmpdir "openebs.XXXXXX")"
    trap 'rm -rf "${OPENEBS_DIR}"' EXIT
    wget -q https://openebs.github.io/charts/openebs-operator-1.6.0.yaml -P $OPENEBS_DIR
    kubectl apply -f $OPENEBS_DIR
    local storageclass_timeout=400
    # NOTE(review): counter init and sleep interval were elided in the mangled
    # source; restored from the visible "+ 15" increment -- confirm upstream.
    local counter=0
    local storageclass_ready=""
    echo "Waiting for storageclass"
    while (( counter < storageclass_timeout ))
    do
        kubectl get storageclass openebs-hostpath &> /dev/null
        if [ $? -eq 0 ] ; then
            echo "Storageclass available"
            storageclass_ready="y"
            break
        else
            counter=$((counter + 15))
            sleep 15
        fi
    done
    [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
    # Make openebs-hostpath the default storageclass for the cluster.
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}
# Deploys MetalLB and configures a layer2 address pool consisting of the
# host's default IP only (range DEFAULT_IP-DEFAULT_IP).
function install_k8s_metallb() {
    METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
    cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
    # NOTE(review): the ConfigMap body below was mostly elided in the mangled
    # source (only the namespace and address line survive); reconstructed from
    # the standard MetalLB layer2 ConfigMap -- confirm against upstream.
    echo "apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE" | kubectl apply -f -
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    # One <name>-secret per env file, same order as the original eight
    # explicit commands.
    local svc
    for svc in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${svc}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${svc}.env
    done
}
#taints K8s master node
function taint_master_node() {
    # Find the node whose ROLES column matches "master", then remove the
    # NoSchedule taint so workloads can run on a single-node cluster.
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}
#deploys osm pods and services
function deploy_osm_services() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
#deploy charmed services
function deploy_charmed_services() {
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    # deploy mongodb charm
    namespace=$OSM_STACK_NAME
    # NOTE(review): the final argument line of this juju deploy was elided in
    # the mangled source; '-m $namespace' restored -- confirm upstream.
    juju deploy cs:~charmed-osm/mongodb-k8s \
        --config enable-sidecar=true \
        --config replica-set=rs0 \
        --config namespace=$namespace \
        -m $namespace
}
# Deploys the optional PLA (placement) service into the OSM namespace.
function deploy_osm_pla_service() {
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
#Install helm and tiller
# Installs helm v2.15.2 if absent, creates the tiller service account with
# cluster-admin, runs 'helm init', and waits for the tiller deployment.
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has being configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --stable-repo-url https://charts.helm.sh/stable --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        local tiller_timeout=120
        local counter=0
        local tiller_status=""
        while (( counter < tiller_timeout ))
        do
            # fix: misplaced quote -- the original read awk '{print $2'}
            # which only worked because the bare } concatenated with the
            # quoted string; use a correctly quoted awk program.
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
            counter=$((counter + 5))
            sleep 5
        done
        [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
    fi
}
# Rewrites the docker image references in the K8s manifests:
# $1 - tag to use; remaining args - module names to update.
function parse_yaml() {
    # NOTE(review): the preamble (original lines 1001-1003) was elided in the
    # mangled source; restored as TAG=$1 / shift / services=$@ based on the
    # visible uses of $TAG and $services -- confirm upstream.
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
                $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
            $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
}
# Selects which OSM service manifests to retag: services named in TO_REBUILD
# keep MODULE_DOCKER_TAG, everything else gets OSM_DOCKER_TAG.
function update_manifest_files() {
    if [ -n "$NGUI" ]; then
        osm_services="nbi lcm ro pol mon ng-ui keystone pla"
    else
        osm_services="nbi lcm ro pol mon light-ui keystone pla"
    fi
    list_of_services=""
    for module in $osm_services; do
        # TO_REBUILD uses the historical LW-UI spelling for the light UI.
        module_upper="${module^^}"
        if [ "$module_upper" == "LIGHT-UI" ]; then
            module_upper="LW-UI"
        fi
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            list_of_services="$list_of_services $module"
        fi
    done
    list_of_services_to_rebuild=$(echo ${TO_REBUILD,,} | sed "s/lw-ui/light-ui/g")
    if [ ! "$OSM_DOCKER_TAG" == "9" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
}
# Points each service manifest's hostPath volume at the per-namespace
# directory OSM_NAMESPACE_VOL instead of the default /var/lib/osm.
function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
# Initializes a docker swarm on the default interface. With a non-standard
# MTU, first recreates docker_gwbridge on a free 172.x subnet so overlay
# traffic is not fragmented.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 |  awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    sleep 5
}
# Creates the attachable overlay network used by all OSM stack services.
function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
# Deploys the OSM docker swarm stack: writes osm_ports.sh with the exported
# port mappings and image tags, then runs 'docker stack deploy' (with the
# optional PLA compose file when INSTALL_PLA is set).
function deploy_lightweight() {
    echo "Deploying lightweight build"
    # NOTE(review): several port-default assignments were elided in the
    # mangled source (original lines 1068-1073); restored from the OSM
    # Release NINE installer -- confirm against upstream.
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # Container ports only: nothing published on the host.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container mappings; prometheus publishes on a distinct host port.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        track deploy_osm_pla
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
# Pulls the ELK images, deploys the osm_elk stack, and -- if Kibana comes up
# within the timeout -- creates the filebeat-* index pattern and sets it as
# the default index.
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # NOTE(review): the poll-loop initializers (original lines 1139-1142)
    # were elided in the mangled source; restored -- confirm upstream.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
  "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
  -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
  "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
  -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
# Registers a dummy VIM and the local Kubernetes cluster (~/.kube/config)
# in OSM so charms/KNFs can be deployed on the installer's own cluster.
function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
      --name _system-osm-vim \
      --account_type dummy \
      --auth_url http://dummy \
      --user osm --password osm --tenant osm \
      --description "dummy" \
      --config '{management_network_name: mgmt}'
    # NOTE(review): the --version line and the trailing cluster-name argument
    # were elided in the mangled source; restored -- confirm upstream.
    /usr/bin/osm --all-projects k8scluster-add \
      --creds ${HOME}/.kube/config \
      --vim _system-osm-vim \
      --k8s-nets '{"net1": null}' \
      --version '1.15' \
      --description "OSM Internal Cluster" \
      _system-osm-k8s
}
# Top-level driver for the lightweight OSM install: checks prerequisites,
# sets up docker/k8s/swarm, creates or reuses the juju (VCA) controller,
# collects VCA credentials, generates config/env files and deploys all OSM
# services, finishing with a health check and local k8s cluster registration.
#
# NOTE(review): several control-flow closers and helper-call lines were
# elided in the mangled source; they are restored below from the visible
# structure and the OSM Release NINE installer -- confirm against upstream.
function install_lightweight() {
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
    1. Install and configure LXD
    2. Install juju
    3. Install docker CE
    4. Disable swap space
    5. Install and initialize Kubernetes
    as pre-requirements.
    Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine the default interface, its IP and MTU; everything downstream
    # (swarm advertise address, VCA api proxy, metallb range) depends on it.
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)

    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    #Installs Kubernetes
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        taint_master_node
        install_k8s_storageclass
        track k8s_storageclass
        install_k8s_metallb
        track k8s_metallb
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$KUBERNETES" ]; then
                juju_createcontroller_k8s
            else
                if [ -n "$LXD_CLOUD_FILE" ]; then
                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                    OSM_VCA_CLOUDNAME="lxd-cloud"
                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
                fi
                juju_createcontroller
            fi
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                # Generate a client certificate, register the local LXD as a
                # cloud on the external controller and trust the client cert.
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi

    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
    if [ -z "$KUBERNETES" ]; then
        if [ -z "$OSM_VCA_APIPROXY" ]; then
            OSM_VCA_APIPROXY=$DEFAULT_IP
            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
        fi
        juju_createproxy
    fi
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # fix: the original tested the literal string "OSM_DATABASE_COMMONKEY"
        # (missing $), so this FATAL guard could never trigger.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files
    track env_files

    if [ -n "$KUBERNETES" ]; then
        deploy_charmed_services
        kube_secrets
        update_manifest_files
        namespace_vol
        deploy_osm_services
        # fix: the original read '[ -n "$INSTALL_PLA"]' -- the missing space
        # before ']' makes test fail with "missing ]" at runtime.
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
            track deploy_osm_pla
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
        track osm_unhealthy
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
        track osm_unhealthy
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    # Telemetry ping marking a completed installation.
    wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
    track end
    return 0
}
# Deploys OSM onto an OpenStack VM via Ansible.
# $1 - openrc file path or clouds.yaml cloud name
# $2 - external network name (required)
# $3 - whether to attach a volume (setup_volume)
function install_to_openstack() {
    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip python3-venv
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV

    source $OPENSTACK_PYTHON_VENV/bin/activate

    # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the Openstack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        # NOTE(review): the line sourcing the openrc file ($1) was elided in
        # the mangled source; restored -- confirm upstream.
        . $1
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit from venv
    deactivate

    return 0
}
# Clones, builds and starts the vim-emu (emulated VIM) docker container,
# attaching it to the OSM network in lightweight mode.
function install_vimemu() {
    # fix: plain echo does not interpret \n -- use -e like the other
    # formatted echoes in this function.
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
#######################################
# Print the effective value of every installer option variable, one per
# line as NAME=value, so the user can review the configuration (--showopts).
# Globals: reads all installer configuration variables listed below.
# Outputs: the configuration dump, to stdout.
#######################################
function dump_vars(){
    local cfg_var
    for cfg_var in \
        DEVELOP \
        INSTALL_FROM_SOURCE \
        UNINSTALL \
        UPDATE \
        RECONFIGURE \
        TEST_INSTALLER \
        INSTALL_VIMEMU \
        INSTALL_PLA \
        INSTALL_LXD \
        INSTALL_LIGHTWEIGHT \
        INSTALL_ONLY \
        INSTALL_ELK \
        INSTALL_NOCACHELXDIMAGES \
        INSTALL_TO_OPENSTACK \
        OPENSTACK_PUBLIC_NET_NAME \
        OPENSTACK_OPENRC_FILE_OR_CLOUD \
        OPENSTACK_ATTACH_VOLUME \
        OPENSTACK_SSH_KEY_FILE \
        OPENSTACK_USERDATA_FILE \
        OPENSTACK_VM_NAME \
        INSTALL_K8S_MONITOR \
        TO_REBUILD \
        INSTALL_NOLXD \
        INSTALL_NODOCKER \
        INSTALL_NOJUJU \
        RELEASE \
        REPOSITORY \
        REPOSITORY_BASE \
        REPOSITORY_KEY \
        OSM_DEVOPS \
        OSM_VCA_HOST \
        OSM_VCA_SECRET \
        OSM_VCA_PUBKEY \
        NO_HOST_PORTS \
        DOCKER_NOBUILD \
        WORKDIR_SUDO \
        OSM_WORK_DIR \
        OSM_DOCKER_TAG \
        DOCKER_USER \
        OSM_STACK_NAME \
        PULL_IMAGES \
        KUBERNETES \
        DOCKER_REGISTRY_URL \
        DOCKER_PROXY_URL \
        SHOWOPTS \
        ; do
        # ${!cfg_var} is indirect expansion: the value of the variable whose
        # name is in $cfg_var (empty when unset, like the original echo lines)
        echo "${cfg_var}=${!cfg_var}"
    done
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
#######################################
# Send an anonymous usage-tracking event to the OSM telemetry endpoint
# (Woopra), tagged with the install mode and elapsed install time.
# Arguments: $1 - event suffix (e.g. "start", "end")
# Globals:   SESSION_ID (read) - install start epoch, used as cookie
#            INSTALL_LIGHTWEIGHT / INSTALL_FROM_SOURCE /
#            INSTALL_FROM_LXDIMAGES (read) - select the event name prefix
# NOTE(review): the function header and the event_name="bin" default were
# elided in the mangled source and restored to match the upstream OSM
# installer -- confirm against upstream devops repo.
#######################################
function track(){
    ctime=$(date +%s)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    # fire-and-forget tracking ping; output discarded.
    # FIX: quote the URL so the expansion can never be split or globbed
    wget -q -O /dev/null "$url"
}
#######################################
# Split a DOCKER_REGISTRY_URL of the form [user[:password]@]host[:port]
# into its credential and host parts.
# Globals:
#   DOCKER_REGISTRY_URL      - read; rewritten to the bare registry host
#                              (empty when no '@' is present)
#   DOCKER_REGISTRY_USER     - written (empty when no credentials given)
#   DOCKER_REGISTRY_PASSWORD - written (empty when no password given)
#######################################
function parse_docker_registry_url() {
    local full_url="$DOCKER_REGISTRY_URL"
    # text before '@', then before ':'  -> user
    DOCKER_REGISTRY_USER=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}' <<<"$full_url")
    # text before '@', then after ':'   -> password
    DOCKER_REGISTRY_PASSWORD=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}' <<<"$full_url")
    # text after '@'                    -> registry host
    DOCKER_REGISTRY_URL=$(awk '{split($1,a,"@"); print a[2]}' <<<"$full_url")
}
# ---------------------------------------------------------------------------
# Installer defaults (may be overridden by the command line options below).
# NOTE(review): reassembled from extraction-mangled text; default lines that
# were elided from this view are not reproduced here -- confirm upstream.
# ---------------------------------------------------------------------------
JUJU_AGENT_VERSION=2.8.6
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseNINE"
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
# OpenStack deployment target (-O and related options)
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
INSTALL_NOCACHELXDIMAGES=""
# FIX: $(...) instead of deprecated backtick command substitution
SESSION_ID=$(date +%s)
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
# work dirs and host volumes
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
# container image coordinates (consistently quoted)
OSM_DOCKER_TAG="latest"
DOCKER_USER="opensourcemano"
KAFKA_TAG="2.11-1.0.2"
PROMETHEUS_TAG="v2.4.3"
PROMETHEUS_NODE_EXPORTER_TAG="0.18.1"
PROMETHEUS_CADVISOR_TAG="latest"
OSM_DATABASE_COMMONKEY=""
ELASTIC_VERSION="6.4.2"
ELASTIC_CURATOR_VERSION="5.5.4"
POD_NETWORK_CIDR="10.244.0.0/16"
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# RFC-1123-style label regex used to validate -s namespace names
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
DOCKER_REGISTRY_URL=""
# --- command line parsing ---------------------------------------------------
# NOTE(review): reassembled from extraction-mangled text; the case labels,
# ";;" terminators and error-path "usage && exit 1" lines were elided in the
# mangled source and restored to match the upstream OSM installer -- confirm.
while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
    case "${o}" in
        b)
            # install from source at the given branch/tag; no image pull
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            # container orchestrator: swarm (default stack) or k8s
            [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # FIX: message referenced "-i"; the option being parsed is -c
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        n)
            # UI flavour: legacy light-ui (lwui) or next-gen ui (ngui)
            [ "${OPTARG}" == "lwui" ] && NGUI="" && REPO_ARGS+=(-n "${OPTARG}") && continue
            [ "${OPTARG}" == "ngui" ] && continue
            echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            # install only the selected add-on component
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            # install OSM to an OpenStack infrastructure (openrc file or cloud name)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        f)
            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
            ;;
        F)
            OPENSTACK_USERDATA_FILE="${OPTARG}"
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            # accumulate modules to rebuild from source
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            # stack name (swarm) / namespace (k8s); k8s names must match RE_CHECK
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        -)
            # long options (value of --xxx arrives in OPTARG)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            # options below are consumed by the charmed installer; accepted here
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "overlay" ] && continue
            [ "${OPTARG}" == "only-vca" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
# --- main installation flow -------------------------------------------------
# NOTE(review): reassembled from extraction-mangled text; elided structural
# lines (else/fi and the "track start" call) were restored to match the
# upstream OSM ReleaseNINE installer -- confirm against upstream.

# split "user:password@host" credentials out of -d before using the registry
[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url

# -m sanity checks: NONE is exclusive; PLA requires --pla
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE \
    && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] \
    && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

# --showopts: dump the effective configuration and stop
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

# --charmed: delegate to the juju/charm based (un)installer and stop
if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi
    exit 0
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

# -O: install to an OpenStack VM instead of the local host, then stop
[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

echo -e "Checking required packages: $need_packages"
# the "|| ! echo ... || sudo ..." chain only escalates to sudo when the
# dpkg check fails; the negated echo lets the chain fall through to apt-get
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
sudo snap install jq
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        fi

        echo -e "\nDEVOPS Using commit $COMMIT_ID"
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

# pull in the shared helper functions (FATAL, ask_user, install_* ...)
. $OSM_DEVOPS/common/all_funcs

# per-stack work dir / k8s namespace volume overrides
[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
# --uninstall / -o shortcuts: run the requested action(s) and stop
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
# anonymous usage beacon + tracking event (best effort, output discarded)
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README.txt &> /dev/null
track start

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# closing usage beacon (best effort, output discarded)
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null