# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################
# Prints the installer help text to stdout.
# NOTE(review): the "function usage(){" header line and two echo lines
# (original lines 20 and 31) were lost in the mangled source; the header and
# closing brace are restored here — confirm the missing echos against upstream.
# Globals:   DEBUG_INSTALL (read)
# Outputs:   help text to stdout
#######################################
function usage(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    # NOTE(review): one echo line (original line 20) appears to be missing here
    echo -e " -h / --help: print this help"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    # NOTE(review): one echo line (original line 31) appears to be missing here
    echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor)"
    echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e " --debug: debug mode"
    echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
    # typo fix: "confifured" -> "configured"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " --volume: create a VM volume when installing to OpenStack"
    # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
    echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
    echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
    echo -e " [--tag]: Docker image tag. (--charmed option)"
    echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
#######################################
# NOTE(review): the awk body lost several lines in the mangled source
# (vname bookkeeping, the printf of the matched value, closing braces);
# they are reconstructed here from the visible pattern — confirm upstream.
# Arguments: $1 - controller name to look up
# Outputs:   the controller's password to stdout (no trailing newline)
#######################################
function parse_juju_password {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: a YAML key, fs: an unlikely field separator (0x1C)
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # Flatten the YAML into "indent<FS>key<FS>value" records, then walk the
    # nesting in awk and print the value of the "password" key that lives
    # under the requested controller.
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Resolves the VCA (juju) connection variables, querying the local juju
# controller for any that were not supplied on the command line.
# NOTE(review): reconstructed from a mangled source; the "fi" closers and the
# final "}" were missing and are restored following the visible pattern.
# Globals (read):  OSM_VCA_HOST/SECRET/PUBKEY/CACERT, CONTROLLER_NAME, OSM_STACK_NAME
# Globals (written): OSM_VCA_CLOUDNAME, OSM_VCA_HOST, OSM_VCA_SECRET,
#                    OSM_VCA_PUBKEY, OSM_VCA_CACERT
#######################################
function set_vca_variables() {
    OSM_VCA_CLOUDNAME="lxd-cloud"
    [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
    if [ -z "$OSM_VCA_HOST" ]; then
        # without an external controller, ask the local (sg lxd) juju; else ask the named one
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        # base64 of the controller CA cert, with newlines stripped
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
}
#######################################
# Generates a random 32-character alphanumeric secret on stdout.
# Reads from /dev/urandom, keeps only [A-Za-z0-9], truncates to 32 chars.
# Globals:   DEBUG_INSTALL (read)
# Outputs:   the secret to stdout (no trailing newline)
#######################################
function generate_secret() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Ensures every package listed in NEEDED_PACKAGES is installed,
# apt-installing any that are missing.
# NOTE(review): reconstructed from a mangled source. The package-presence
# check (original line 146) and "sudo apt-get update" (line 150) were lost
# and are inferred from the surrounding echos — TODO confirm upstream.
# NOTE(review): NEEDED_PACKAGES is read as a global here; the dropped line 143
# may instead have been NEEDED_PACKAGES="$1" — verify against callers.
# Globals:   NEEDED_PACKAGES (read), DEBUG_INSTALL (read)
#######################################
function check_packages() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo -e "Checking required packages: ${NEEDED_PACKAGES}"
    for PACKAGE in ${NEEDED_PACKAGES} ; do
        dpkg -L ${PACKAGE} &>/dev/null
        if [ $? -ne 0 ]; then
            echo -e "Package ${PACKAGE} is not installed."
            echo -e "Updating apt-cache ..."
            sudo apt-get update
            echo -e "Installing ${PACKAGE} ..."
            sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
        fi
    done
    echo -e "Required packages are present: ${NEEDED_PACKAGES}"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Installs and configures LXD from snap: removes any deb-based LXD, installs
# the snap at $LXD_VERSION/stable, preseeds it, and aligns bridge/profile MTU
# with the default-route interface.
# NOTE(review): original lines 162-164 were lost in the mangled source;
# "sudo sysctl --system" is the natural companion of the conf copy and is
# restored here — TODO confirm against upstream.
# Globals: OSM_DEVOPS, LXD_VERSION, DEFAULT_IP (read); DEFAULT_IF, DEFAULT_MTU (written)
#######################################
function install_lxd() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system
    # Remove deb-packaged LXD and install the snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd --channel $LXD_VERSION/stable
    # Configure LXD: group membership, preseed with the host address, wait until ready
    sudo usermod -a -G lxd `whoami`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    # Find the interface carrying the default route, falling back to 'route -n'
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    # Propagate the host MTU to the LXD default profile and bridge
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
# NOTE(review): the "function ask_user(){" header, the "while true" line and
# its "done" were lost in the mangled source and are restored here — confirm
# against upstream.
function ask_user(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # empty answer -> take the default action given in $2
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        # anything else: ask again
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
    # unreachable: the loop above always returns
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Installs the OSM client (and IM) from the configured apt repository, plus
# its python requirements, and prints guidance on OSM_HOSTNAME env variables.
# NOTE(review): reconstructed from a mangled source; "sudo apt-get update"
# (original line 210) and several fi/else closers were lost and are restored
# from the visible pattern — confirm against upstream.
# Globals: RELEASE, REPOSITORY, REPOSITORY_BASE, INSTALL_LIGHTWEIGHT,
#          DEBUG_INSTALL (read); OSM_HOSTNAME, OSM_RO_HOSTNAME (exported)
#######################################
function install_osmclient(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Strip the option prefixes the arg parser left on these values
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    # Non-lightweight installs resolve host IPs from the SO-ub/RO lxc containers
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Logs the docker client into the configured registry.
# NOTE(review): one original line (240) was lost in the mangled source — TODO
# confirm against upstream.
# NOTE(review): "docker login" is given both -p and --password-stdin, which
# docker treats as mutually exclusive, and the password appears in argv
# (visible in ps / shell history). Consider instead:
#   echo "${DOCKER_REGISTRY_PASSWORD}" | sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
# Kept as found pending confirmation of intended behavior.
# Globals: DOCKER_REGISTRY_USER, DOCKER_REGISTRY_PASSWORD, DEBUG_INSTALL (read)
#######################################
function docker_login() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
    sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD} --password-stdin"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Pulls third-party docker images and either pulls prebuilt OSM module images
# (PULL_IMAGES set) or builds them from gerrit source at COMMIT_ID.
# NOTE(review): reconstructed from a mangled source; fi/done/continue/else
# closers were missing and are restored following the visible pattern —
# confirm against upstream full_install_osm.sh.
# Globals: TO_REBUILD, PULL_IMAGES, INSTALL_PLA, COMMIT_ID, OSM_DOCKER_TAG,
#   MODULE_DOCKER_TAG, DOCKER_REGISTRY_URL, DOCKER_USER, KAFKA_TAG,
#   PROMETHEUS_TAG, PROMETHEUS_CADVISOR_TAG, GRAFANA_TAG, KEYSTONEDB_TAG,
#   BUILD_ARGS, REPOSITORY*, RELEASE, OSM_DEVOPS, DEBUG_INSTALL (read)
#######################################
function generate_docker_images() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # keystone-db is needed by NBI as well as by an explicit KEYSTONE-DB rebuild
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        # Pull prebuilt OSM module images from the registry
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
            module_lower=${module,,}
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        # Build the module images locally from gerrit source
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"
        LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
        trap 'rm -rf "${LWTEMPDIR}"' EXIT
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient ; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Copies $1 over $2 when they differ; if $2 already exists, asks the user
# before overwriting (defaulting to "no"). cp -b keeps a backup of $2.
# Fixed: original used "if ! $(cmp ...)", which only works via an obscure
# bash rule (the exit status of a command consisting solely of an empty
# expansion); cmp is now invoked directly.
# NOTE(review): the file1/file2 assignments (original lines 329-330) were
# lost in the mangled source and are restored — confirm upstream.
# Arguments: $1 - source file; $2 - destination file
#######################################
function cmp_overwrite() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    file1="$1"
    file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Copies the Kubernetes pod manifests into the work dir and removes the
# mongo manifest (mongo is deployed separately, via charms).
# Globals: OSM_DEVOPS, OSM_DOCKER_WORK_DIR, OSM_K8S_WORK_DIR, DEBUG_INSTALL (read)
#######################################
function generate_k8s_manifest_files() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Kubernetes resources
    sudo cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    sudo rm -f $OSM_K8S_WORK_DIR/mongo.yaml
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Stages the Prometheus, Grafana and Prometheus-exporter config files into
# the docker work dir.
# Globals: OSM_DEVOPS, OSM_DOCKER_WORK_DIR, DEBUG_INSTALL (read)
#######################################
function generate_prometheus_grafana_files() {
    # this only works with docker swarm
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Prometheus files
    sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
    # Grafana files
    sudo mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
    # Prometheus Exporters files
    sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Generates (or updates in place) the per-module docker env files under
# $OSM_DOCKER_WORK_DIR, creating fresh DB/service secrets where needed.
# Existing env files are first backed up with a '~' suffix.
# NOTE(review): reconstructed from a mangled source; the fi/else closers
# were missing and are restored following the visible pattern — confirm
# against upstream full_install_osm.sh.
# Globals (read): OSM_DOCKER_WORK_DIR, OSM_DATABASE_COMMONKEY, OSM_VCA_HOST,
#   OSM_VCA_SECRET, OSM_VCA_PUBKEY, OSM_VCA_CACERT, OSM_VCA_APIPROXY,
#   OSM_VCA_CLOUDNAME, OSM_VCA_K8S_CLOUDNAME, DEFAULT_IP, DEBUG_INSTALL
# Globals (written): MYSQL_ROOT_PASSWORD, KEYSTONE_DB_PASSWORD, SERVICE_PASSWORD
#######################################
function generate_docker_env_files() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Doing a backup of existing env files"
    sudo cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"

    # LCM: create file if missing, then upsert each OSMLCM_* key
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            sudo sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    # These two are only seeded (commented out) when absent; never rewritten
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | sudo tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    echo "Finished generation of docker env files"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#creates secrets from env files which will be used by containers
#######################################
# Creates the OSM namespace and one k8s secret per module env file.
# Globals: OSM_STACK_NAME, OSM_DOCKER_WORK_DIR, DEBUG_INSTALL (read)
#######################################
function kube_secrets(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#deploys osm pods and services
#######################################
# Applies every manifest in $OSM_K8S_WORK_DIR into the OSM namespace.
# Globals: OSM_STACK_NAME, OSM_K8S_WORK_DIR, DEBUG_INSTALL (read)
#######################################
function deploy_osm_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#deploy charmed services
#######################################
# Creates the juju model for the stack on the k8s cloud and deploys the
# mongodb-k8s charm into it.
# Globals: OSM_STACK_NAME, OSM_VCA_K8S_CLOUDNAME, DEBUG_INSTALL (read)
#######################################
function deploy_charmed_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Deploys the optional PLA module: rewrites its volume path to the
# namespaced location, then applies its manifests.
# Globals: OSM_NAMESPACE_VOL, OSM_DOCKER_WORK_DIR, OSM_STACK_NAME,
#          DEBUG_INSTALL (read)
#######################################
function deploy_osm_pla_service() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # corresponding to namespace_vol
    sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#######################################
# Rewrites the docker image references in the K8s manifests from
# opensourcemano/<module>:* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/<module>:<TAG>.
# PLA is only touched when INSTALL_PLA is set (its manifest lives elsewhere).
# NOTE(review): reconstructed from a mangled source; the TAG/shift/services
# prologue (original lines 550-552) and the else/fi/done closers were lost
# and are inferred from the visible pattern — confirm upstream.
# Arguments: $1 - docker tag; $2.. - list of module/service names
#######################################
function parse_yaml() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
                sudo sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
            sudo sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function update_manifest_files() {
    # Build the list of services that keep the default tag (everything not
    # selected for rebuild via -m) and retag their K8s manifests.
    osm_services="nbi lcm ro pol mon ng-ui keystone pla prometheus"
    list_of_services=""
    for module in $osm_services; do
        module_upper="${module^^}"
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            list_of_services="$list_of_services $module"
        fi
    done
    if [ ! "$OSM_DOCKER_TAG" == "11" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        # NOTE(review): $list_of_services_to_rebuild is expected to be set by
        # lines not visible in this chunk — TODO confirm against the full file.
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function namespace_vol() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Rewrite each service manifest so its hostPath volume points at the
    # per-namespace directory instead of the default /var/lib/osm.
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    for osm in $osm_services; do
        sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function add_local_k8scluster() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Register a dummy VIM account so the local cluster can be attached to it.
    /usr/bin/osm --all-projects vim-create \
        --name _system-osm-vim \
        --account_type dummy \
        --auth_url http://dummy \
        --user osm --password osm --tenant osm \
        --description "dummy" \
        --config '{management_network_name: mgmt}'
    # Attach the local kubeadm cluster to that VIM.
    # NOTE(review): some k8scluster-add arguments (e.g. --version and the
    # cluster name) were not visible in this chunk — TODO confirm against the
    # full file.
    /usr/bin/osm --all-projects k8scluster-add \
        --creds ${HOME}/.kube/config \
        --vim _system-osm-vim \
        --k8s-nets '{"net1": null}' \
        --version 1.15 \
        --description "OSM Internal Cluster" \
        _system-osm-k8s
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function configure_apt_proxy() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # $1: proxy URL apt should use (call site: configure_apt_proxy $APT_PROXY_URL)
    OSM_APT_PROXY=$1
    OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
    echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
    if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
        # BUGFIX: the heredoc was previously redirected to ${OSM_APT_PROXY}
        # (the proxy URL itself) instead of the apt configuration file.
        sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
EOF"
    else
        # File already exists: just replace the configured proxy URL in place.
        sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
    fi
    sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
    track prereq apt_proxy_configured_ok
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function install_osm() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Main community-installer flow: prerequisites, docker, kubeadm cluster,
    # juju, then the OSM services themselves.
    # NOTE(review): reconstructed from a garbled chunk; statements marked
    # TODO(review) were not visible and must be confirmed against the full file.

    track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none

    track checks checkingroot_ok
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track checks noroot_ok

    [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
    1. Install and configure LXD
    2. Install juju
    3. Install docker CE
    4. Disable swap space
    5. Install and initialize Kubernetes
    as pre-requirements.
    Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    track checks proceed_ok

    echo "Installing OSM"

    echo "Determining IP address of the interface with the default route"
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"

    # configure apt proxy
    [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        # TODO(review): the LXD installation steps were not visible in this
        # chunk — confirm the exact invocation against the full file.
        $OSM_DEVOPS/installers/install_lxd.sh -D ${OSM_DEVOPS} -i ${DEFAULT_IF} ${DEBUG_INSTALL} || FATAL_TRACK lxd "install_lxd.sh failed"
    fi
    track prereq prereqok_ok

    if [ ! -n "$INSTALL_NODOCKER" ]; then
        DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
        [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
        $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed"
    fi
    track docker_ce docker_ce_ok

    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    sudo cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    $OSM_DEVOPS/installers/install_kubeadm_cluster.sh -i ${DEFAULT_IP} -d ${OSM_DOCKER_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
    FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed"
    track k8scluster k8scluster_ok

    JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_STACK_NAME} -i ${DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}"
    [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
    [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
    [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
    [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
    $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
    track juju juju_ok   # TODO(review): not visible in this chunk — confirm

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # BUGFIX: the original test was [ -z "OSM_DATABASE_COMMONKEY" ] (missing
        # "$"), which is always false, so a failed generation was never caught.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_images docker_images_ok

    generate_k8s_manifest_files
    track osm_files manifest_files_ok
    generate_docker_env_files
    track osm_files env_files_ok

    deploy_charmed_services
    track deploy_osm deploy_charmed_services_ok
    kube_secrets   # TODO(review): call not visible in this chunk — confirm
    track deploy_osm kube_secrets_ok
    update_manifest_files
    track deploy_osm update_manifest_files_ok
    namespace_vol   # TODO(review): call not visible in this chunk — confirm
    track deploy_osm namespace_vol_ok
    deploy_osm_services   # TODO(review): call not visible in this chunk — confirm
    track deploy_osm deploy_osm_services_k8s_ok
    # BUGFIX: the original test was [ -n "$INSTALL_PLA"] (missing space before
    # the closing bracket), a syntax error at runtime.
    if [ -n "$INSTALL_PLA" ]; then
        # optional PLA install
        deploy_osm_pla_service
        track deploy_osm deploy_osm_pla_ok
    fi

    if [ -n "$INSTALL_K8S_MONITOR" ]; then
        # install OSM MONITORING
        install_k8s_monitoring
        track deploy_osm install_k8s_monitoring_ok
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient osmclient_ok

    echo -e "Checking OSM health state..."
    $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
    (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
    echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
    track healthchecks osm_unhealthy didnotconverge)
    track healthchecks after_healthcheck_ok

    add_local_k8scluster   # TODO(review): call not visible in this chunk — confirm
    track final_ops add_local_k8scluster_ok

    # Ping the download server so the installation is accounted for.
    wget -q -O- https://osm-download.etsi.org/ftp/osm-11.0-eleven/README2.txt &> /dev/null
    track end   # TODO(review): not visible in this chunk — confirm
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function install_to_openstack() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # $1: openrc file path or clouds.yaml cloud name
    # $2: external network name
    # $3: whether to attach a volume ("true"/"false")
    # NOTE(review): the validation and if/else scaffolding of this function was
    # only partially visible in this chunk — TODO confirm against the full file.
    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    sudo apt install -y python3-pip python3-venv
    sudo -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV

    source $OPENSTACK_PYTHON_VENV/bin/activate

    # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the Openstack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit from venv
    deactivate

    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function install_k8s_monitoring() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # install OSM monitoring: make the helper scripts executable, then run the
    # monitoring installer as root.
    sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function dump_vars() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Print every installer option as NAME=value, one per line, in the same
    # order as before; ${!v} is bash indirect expansion.
    local v
    for v in APT_PROXY_URL DEVELOP DEBUG_INSTALL DOCKER_NOBUILD \
             DOCKER_PROXY_URL DOCKER_REGISTRY_URL DOCKER_USER \
             INSTALL_CACHELXDIMAGES INSTALL_FROM_SOURCE INSTALL_K8S_MONITOR \
             INSTALL_LIGHTWEIGHT INSTALL_LXD INSTALL_NODOCKER INSTALL_NOJUJU \
             INSTALL_NOLXD INSTALL_ONLY INSTALL_PLA INSTALL_TO_OPENSTACK \
             INSTALL_VIMEMU NO_HOST_PORTS OPENSTACK_PUBLIC_NET_NAME \
             OPENSTACK_OPENRC_FILE_OR_CLOUD OPENSTACK_ATTACH_VOLUME \
             OPENSTACK_SSH_KEY_FILE OPENSTACK_USERDATA_FILE OPENSTACK_VM_NAME \
             OSM_DEVOPS OSM_DOCKER_TAG OSM_STACK_NAME OSM_VCA_HOST \
             OSM_VCA_PUBKEY OSM_VCA_SECRET OSM_WORK_DIR PULL_IMAGES \
             RECONFIGURE RELEASE REPOSITORY REPOSITORY_BASE REPOSITORY_KEY \
             SHOWOPTS TEST_INSTALLER TO_REBUILD UNINSTALL UPDATE; do
        echo "${v}=${!v}"
    done
    echo "Install from specific refspec (-b): $COMMIT_ID"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function parse_docker_registry_url() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Split a "user:password@host" style DOCKER_REGISTRY_URL into its three
    # parts; the same awk programs as before are kept so edge cases (missing
    # "@" or ":") behave identically.
    local full_url
    full_url="$DOCKER_REGISTRY_URL"
    DOCKER_REGISTRY_USER=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}' <<< "$full_url")
    DOCKER_REGISTRY_PASSWORD=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}' <<< "$full_url")
    DOCKER_REGISTRY_URL=$(awk '{split($1,a,"@"); print a[2]}' <<< "$full_url")
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# NOTE(review): the function header was dropped from this chunk; reconstructed
# as the CTRL-C trap handler — TODO confirm the exact name against the full file.
function ctrl_c() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "** Trapped CTRL-C"
    FATAL "User stopped the installation"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Default installation values; command-line options below may override them.
JUJU_AGENT_VERSION=2.9.17
INSTALL_FROM_SOURCE=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
INSTALL_CACHELXDIMAGES=""
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
# NOTE(review): OSM_STACK_NAME was not assigned anywhere in the visible text
# although OSM_NAMESPACE_VOL below depends on it; "osm" is the documented
# default (see usage text) — TODO confirm against the full file.
OSM_STACK_NAME=osm
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PROMETHEUS_TAG=v2.4.3
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Valid K8s namespace / swarm stack name: lowercase alphanumerics and dashes.
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
OSM_INSTALLATION_TYPE="Default"
# Command-line parsing. Long options arrive through getopts' "-" pseudo-option
# with the long name in ${OPTARG}.
# NOTE(review): the case/esac scaffolding (and a few arms, e.g. -b, -n, -y, -h)
# was not visible in this chunk; reconstructed from the getopts string, the
# usage text, and the visible consumers ($COMMIT_ID, $ASSUME_YES) — TODO
# confirm against the full file.
while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
    case "${o}" in
        a)
            APT_PROXY_URL=${OPTARG}
            ;;
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        f)
            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
            ;;
        F)
            OPENSTACK_USERDATA_FILE="${OPTARG}"
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        n)
            # TODO(review): the original -n handling was not visible in this
            # chunk — restore from the full file.
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="--nojuju" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "overlay" ] && continue
            [ "${OPTARG}" == "only-vca" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            [ "${OPTARG}" == "nocachelxdimages" ] && continue
            [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
# Load the shared helper functions (FATAL, track, ask_user, ...).
source $OSM_DEVOPS/common/all_funcs

[ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
[ -n "$SHOWOPTS" ] && dump_vars && exit 0

# Uninstall if "--uninstall"
if [ -n "$UNINSTALL" ]; then
    if [ -n "$CHARMED" ]; then
        track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@" || \
        FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
    else
        # NOTE(review): the else/exit scaffolding of this branch was not
        # visible in this chunk — TODO confirm against the full file.
        ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
        FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
    fi
    echo -e "\nDONE"
    exit 0
fi

# Charmed installation
if [ -n "$CHARMED" ]; then
    ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@" || \
    FATAL_TRACK charmed_install "charmed_install.sh failed"
    # NOTE(review): exit not visible in this chunk — TODO confirm.
    echo -e "\nDONE"
    exit 0
fi

# Installation to Openstack
if [ -n "$INSTALL_TO_OPENSTACK" ]; then
    install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
    # NOTE(review): exit not visible in this chunk — TODO confirm.
    echo -e "\nDONE"
    exit 0
fi

# Community_installer
[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

check_packages "git wget curl tar snapd"

sudo snap install jq
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

# Per-stack work dirs: only the default "osm" stack uses /etc/osm/docker directly.
[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
# Ping the download server so the installation attempt is accounted for.
wget -q -O- https://osm-download.etsi.org/ftp/osm-11.0-eleven/README.txt &> /dev/null
export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"