# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Print the installer's command-line help to stdout.
# NOTE(review): the function header and closing brace were lost in extraction;
# restored here (the name 'usage' matches the standard installer convention).
function usage(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " -h / --help: print this help"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor)"
    echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e " --debug: debug mode"
    echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " --volume: create a VM volume when installing to OpenStack"
    # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
    echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
    echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
    echo -e " [--tag]: Docker image tag. (--charmed option)"
    echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
# NOTE(review): interior awk lines and the closing brace were lost in
# extraction; restored from the standard yaml-flattening idiom this is based on.
function parse_juju_password {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s = optional whitespace, w = yaml key chars, fs = an unlikely field separator (\034)
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # sed flattens "indent key: value" into "indent<fs>key<fs>value";
    # awk rebuilds the nesting path and prints the password of the matching controller
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Resolve VCA (juju) connection variables (host, secret, pubkey, CA cert,
# cloud name) from the bootstrapped controller, unless already provided
# via command-line options. FATALs when a required value cannot be obtained.
function set_vca_variables() {
    OSM_VCA_CLOUDNAME="lxd-cloud"
    # An externally supplied VCA host implies the legacy "localhost" cloud name
    [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
    if [ -z "$OSM_VCA_HOST" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        # base64 without newlines so the cert can travel in an env var
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
}
# Emit a 32-character random alphanumeric secret on stdout.
function generate_secret() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Install and configure LXD from snap: purge the deb-based LXC/LXD packages,
# install the snap at $LXD_VERSION, preseed it to listen on $DEFAULT_IP:8443,
# and align the default profile/bridge MTU with the default-route interface.
function install_lxd() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd --channel $LXD_VERSION/stable

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
function ask_user(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    read -e -p "$1" USER_CONFIRMATION
    # Re-prompt until an acceptable answer (or the default) is given
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Install the OSM client (and the OSM information model) from the configured
# apt repository, plus the python requirements of both packages, and print
# the env variables the user may want to export.
function install_osmclient(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Strip the option prefixes that may still be attached to these values
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update   # NOTE(review): restored from context — refresh indexes after adding the repo; confirm against upstream
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    # Non-lightweight (LXD container) installs derive hostnames from lxc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Log in to the configured docker registry.
# Fix: the original passed both '-p <password>' and '--password-stdin', which
# docker login rejects as mutually exclusive; feed the password on stdin only,
# which also keeps the secret out of ps/argv.
function docker_login() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
    printf '%s' "${DOCKER_REGISTRY_PASSWORD}" | sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Pull third-party images, then either pull the OSM module images from the
# registry ($PULL_IMAGES set) or build them from source at $COMMIT_ID.
# $TO_REBUILD restricts the work to the listed modules.
function generate_docker_images() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # Keystone needs its mariadb backend whenever NBI or KEYSTONE-DB is selected
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
            module_lower=${module,,}
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            # A per-module tag (-T) overrides the global tag for rebuilt modules
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"
        LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
        trap 'rm -rf "${LWTEMPDIR}"' EXIT
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient ; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Copy $1 over $2 when their contents differ; if $2 already exists, ask
# the user before overwriting (default: no). 'cp -b' keeps a backup.
# Fix: replaced the `! $(cmp ...)` command-substitution anti-pattern with
# the direct `cmp -s` exit-status test.
function cmp_overwrite() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    file1="$1"
    file2="$2"
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Copy the Kubernetes pod manifests into the work dir; mongo is deployed
# by charms, so its manifest is removed.
function generate_k8s_manifest_files() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    #Kubernetes resources
    sudo cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    sudo rm -f $OSM_K8S_WORK_DIR/mongo.yaml
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Copy Prometheus, Grafana and exporter config files into the work dir.
function generate_prometheus_grafana_files() {
    #this only works with docker swarm
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Prometheus files
    sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    sudo mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

    # Prometheus Exporters files
    sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Generate (or update in place) the per-module env files under
# $OSM_DOCKER_WORK_DIR. Existing files are backed up with a '~' suffix.
# Pattern: create the file with defaults if missing, then for each VCA/DB
# variable either append it (grep miss) or rewrite it (sed) so re-runs
# refresh values without duplicating lines.
function generate_docker_env_files() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Doing a backup of existing env files"
    sudo cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            sudo sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | sudo tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    echo "Finished generation of docker env files"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#creates secrets from env files which will be used by containers
function kube_secrets(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#deploys osm pods and services
function deploy_osm_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#deploy charmed services
function deploy_charmed_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Deploy the PLA (placement) module manifests into the OSM namespace.
function deploy_osm_pla_service() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # corresponding to namespace_vol
    sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Rewrite the image references in the K8s manifests of the given services
# from opensourcemano/<module> to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/<module>:$TAG.
# Usage: parse_yaml <tag> <service> [<service> ...]
function parse_yaml() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    TAG=$1
    shift
    services=$@
    for module in $services; do
        # PLA lives in its own manifest dir and is only touched when installed
        if [ "$module" == "pla" ]; then
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
                sudo sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
            sudo sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Retags module images in the K8s manifests: modules NOT listed in TO_REBUILD
# are set to $OSM_DOCKER_TAG; when MODULE_DOCKER_TAG is set, rebuilt modules
# are retagged via a second parse_yaml pass.
# NOTE(review): list initialisation and the closing fi/done were missing from
# the mangled source and were restored; $list_of_services_to_rebuild is not
# built anywhere visible in this chunk — confirm against upstream.
function update_manifest_files() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    osm_services="nbi lcm ro pol mon ng-ui keystone pla"
    list_of_services=""
    for module in $osm_services; do
        module_upper="${module^^}"
        # keep the module out of the retag list when it is being rebuilt locally
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            list_of_services="$list_of_services $module"
        fi
    done
    if [ ! "$OSM_DOCKER_TAG" == "11" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Rewrites each stateful service manifest so its hostPath volume points at the
# per-stack directory $OSM_NAMESPACE_VOL instead of the default /var/lib/osm.
# Globals (read): DEBUG_INSTALL, OSM_NAMESPACE_VOL, OSM_K8S_WORK_DIR
function namespace_vol() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    for osm in $osm_services; do
        sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" "$OSM_K8S_WORK_DIR/$osm.yaml"
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Registers the local Kubernetes cluster in OSM: creates a dummy VIM account
# (_system-osm-vim) and attaches the cluster using ~/.kube/config credentials.
# Globals (read): DEBUG_INSTALL, HOME
# NOTE(review): two lines of the k8scluster-add invocation were missing from
# the mangled source (the --version argument and the trailing positional
# cluster name); restored from the upstream installer — confirm before use.
function add_local_k8scluster() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    /usr/bin/osm --all-projects vim-create \
      --name _system-osm-vim \
      --account_type dummy \
      --auth_url http://dummy \
      --user osm --password osm --tenant osm \
      --description "dummy" \
      --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
      --creds ${HOME}/.kube/config \
      --vim _system-osm-vim \
      --k8s-nets '{"net1": null}' \
      --version '1.15' \
      --description "OSM Internal Cluster" \
      _system-osm-k8s
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Configures a system-wide apt proxy in /etc/apt/apt.conf.d/osm-apt and
# refreshes the apt cache.
#   $1 - proxy URL (e.g. http://proxy:3128)
# Creates the file when absent, otherwise rewrites its Proxy line in place.
function configure_apt_proxy() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    OSM_APT_PROXY=$1
    OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
    echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
    if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
        # FIX: the original redirected the heredoc to ${OSM_APT_PROXY} (the
        # proxy URL) instead of ${OSM_APT_PROXY_FILE}, so the apt config file
        # was never created on first run.
        sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
EOF"
    else
        sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
    fi
    sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
    track apt_proxy_configured
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Main entry point of the community installer: preflight checks, docker/
# kubeadm/juju installation, manifest generation, service deployment, health
# check and installation tracking.
# NOTE(review): this function arrived mangled with several interior source
# lines missing; every gap is marked with a NOTE(review) below — reconcile
# against the upstream devops repository before relying on it.
function install_osm() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function

    # the installer drives sudo itself; running as root breaks its assumptions
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."

    # interactive confirmation unless -y was given
    # NOTE(review): items 2-3 and 6+ of this prompt were missing in the source
    [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
    1. Install and configure LXD
    4. Disable swap space
    5. Install and initialize Kubernetes
    Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    echo "Installing OSM"

    echo "Determining IP address of the interface with the default route"
    DEFAULT_IF=$(ip route list | awk '$1=="default" {print $5; exit}')
    # fall back to the legacy net-tools route command when iproute2 finds nothing
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n | awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=$(ip -o -4 a s ${DEFAULT_IF} | awk '{split($4,a,"/"); print a[1]}')
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"

    # configure apt proxy
    [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL

    # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
            || sudo apt-get update \
            || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "Installing $need_packages_lw requires root privileges." \
            || sudo apt-get install -y $need_packages_lw \
            || FATAL "failed to install $need_packages_lw"
    fi
    # NOTE(review): source lines 655-659 missing here

    if [ ! -n "$INSTALL_NODOCKER" ]; then
        DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
        [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
        $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS}
    fi

    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    sudo cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    $OSM_DEVOPS/installers/install_kubeadm_cluster.sh -i ${DEFAULT_IP} -d ${OSM_DOCKER_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL}

    JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_STACK_NAME} -i ${DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}"
    [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
    [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
    [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
    [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
    $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS}
    # NOTE(review): source lines 682-684 missing here

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # FIX: the original tested the literal string "OSM_DATABASE_COMMONKEY"
        # (missing $), which is never empty, so a failed secret generation was
        # silently accepted.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    generate_k8s_manifest_files
    generate_docker_env_files
    deploy_charmed_services
    update_manifest_files
    # NOTE(review): source lines 702-703 missing here; upstream calls
    # namespace_vol and deploy_osm_services at this point — restored
    namespace_vol
    deploy_osm_services
    # FIX: the original read `[ -n "$INSTALL_PLA"]` — the missing space before
    # `]` is a runtime error in test(1), so PLA was never deployed.
    if [ -n "$INSTALL_PLA" ]; then
        # optional PLA install
        deploy_osm_pla_service
    fi
    track deploy_osm_services_k8s
    if [ -n "$INSTALL_K8S_MONITOR" ]; then
        # install OSM MONITORING
        install_k8s_monitoring
        track install_k8s_monitoring
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient

    # NOTE(review): source line 723 was missing; 'track osm_unhealthy'
    # restored per upstream
    echo -e "Checking OSM health state..."
    $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
        track osm_unhealthy
    track after_healthcheck

    # NOTE(review): source line 726 was missing; upstream invokes
    # add_local_k8scluster here — restored
    add_local_k8scluster
    track add_local_k8scluster

    # download beacon used by ETSI to count completed installations
    wget -q -O- https://osm-download.etsi.org/ftp/osm-11.0-eleven/README2.txt &> /dev/null

    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Deploys OSM into an OpenStack VM via Ansible.
#   $1 - openrc file path OR clouds.yaml cloud name
#   $2 - external network name (required)
#   $3 - whether to attach a volume ("true"/"false")
# NOTE(review): interior lines were missing from the mangled source; the
# argument validation and the openrc-vs-cloud branch were restored — confirm
# against upstream before relying on them.
function install_to_openstack() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    sudo apt install -y python3-pip python3-venv
    sudo -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV

    source $OPENSTACK_PYTHON_VENV/bin/activate

    # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the Openstack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        . "$1"
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit from venv
    deactivate
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Installs the optional OSM monitoring stack using the helper scripts shipped
# in the devops repo.
# Globals (read): DEBUG_INSTALL, OSM_DEVOPS
function install_k8s_monitoring() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # install OSM monitoring
    sudo chmod +x "$OSM_DEVOPS"/installers/k8s/*.sh
    sudo "$OSM_DEVOPS"/installers/k8s/install_osm_k8s_monitoring.sh
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Prints every installer configuration variable (used by --showopts).
# Reads globals only; writes nothing.
function dump_vars(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "APT_PROXY_URL=$APT_PROXY_URL"
    echo "DEVELOP=$DEVELOP"
    echo "DEBUG_INSTALL=$DEBUG_INSTALL"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    # FIX(consistency): the next three originally used the form
    # echo "VAR"="$VAR" (identical output); normalised to match the rest.
    echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
    echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
    echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "Install from specific refspec (-b): $COMMIT_ID"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Splits DOCKER_REGISTRY_URL of the form user:password@host[:port] into
# DOCKER_REGISTRY_USER and DOCKER_REGISTRY_PASSWORD, and overwrites
# DOCKER_REGISTRY_URL with the bare registry host (the part after '@').
function parse_docker_registry_url() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
    DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
    DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# --- Installer default values (reassembled from mangled source) ---
# NOTE(review): several assignments fell in gaps of the mangled source
# (e.g. the OSM_STACK_NAME / OSM_DEVOPS defaults) and are NOT reproduced
# here — confirm against the upstream installer.
JUJU_AGENT_VERSION=2.9.17
INSTALL_FROM_SOURCE=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
INSTALL_CACHELXDIMAGES=""
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
# working directories; the K8s manifests live under the docker work dir
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PROMETHEUS_TAG=v2.4.3
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# regex used to validate user-supplied stack/namespace names (-s)
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# NOTE(review): the getopts/case option-parsing loop below arrived mangled:
# statements are wrapped mid-token, the original file's line numbers are fused
# into the text, and the case scaffolding (case/;;/esac/done) plus several
# option branches (-b, -n, part of -o/-R) are missing entirely. Preserved
# byte-for-byte; reconstruct against the upstream installer before use.
929 while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o
; do
932 APT_PROXY_URL
=${OPTARG}
939 REPOSITORY
="${OPTARG}"
940 REPO_ARGS
+=(-r "$REPOSITORY")
943 REPOSITORY_KEY
="${OPTARG}"
944 REPO_ARGS
+=(-k "$REPOSITORY_KEY")
947 REPOSITORY_BASE
="${OPTARG}"
948 REPO_ARGS
+=(-u "$REPOSITORY_BASE")
952 REPO_ARGS
+=(-R "$RELEASE")
955 OSM_DEVOPS
="${OPTARG}"
959 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
962 INSTALL_TO_OPENSTACK
="y"
963 if [ -n "${OPTARG}" ]; then
964 OPENSTACK_OPENRC_FILE_OR_CLOUD
="${OPTARG}"
966 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
971 OPENSTACK_SSH_KEY_FILE
="${OPTARG}"
974 OPENSTACK_USERDATA_FILE
="${OPTARG}"
977 OPENSTACK_PUBLIC_NET_NAME
="${OPTARG}"
980 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD
="$TO_REBUILD NG-UI" && continue
981 [ "${OPTARG}" == "NBI" ] && TO_REBUILD
="$TO_REBUILD NBI" && continue
982 [ "${OPTARG}" == "LCM" ] && TO_REBUILD
="$TO_REBUILD LCM" && continue
983 [ "${OPTARG}" == "RO" ] && TO_REBUILD
="$TO_REBUILD RO" && continue
984 [ "${OPTARG}" == "MON" ] && TO_REBUILD
="$TO_REBUILD MON" && continue
985 [ "${OPTARG}" == "POL" ] && TO_REBUILD
="$TO_REBUILD POL" && continue
986 [ "${OPTARG}" == "PLA" ] && TO_REBUILD
="$TO_REBUILD PLA" && continue
987 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD
="$TO_REBUILD osmclient" && continue
988 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD
="$TO_REBUILD KAFKA" && continue
989 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD
="$TO_REBUILD MONGO" && continue
990 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS" && continue
991 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
992 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD
="$TO_REBUILD KEYSTONE-DB" && continue
993 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD
="$TO_REBUILD GRAFANA" && continue
994 [ "${OPTARG}" == "NONE" ] && TO_REBUILD
="$TO_REBUILD NONE" && continue
997 OSM_VCA_HOST
="${OPTARG}"
1000 OSM_VCA_SECRET
="${OPTARG}"
1003 OSM_STACK_NAME
="${OPTARG}" && [[ ! "${OPTARG}" =~
$RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1006 OSM_DOCKER_TAG
="${OPTARG}"
1007 REPO_ARGS
+=(-t "$OSM_DOCKER_TAG")
1010 DOCKER_USER
="${OPTARG}"
1013 OSM_VCA_PUBKEY
=$
(cat ${OPTARG})
1016 OSM_VCA_APIPROXY
="${OPTARG}"
1019 LXD_CLOUD_FILE
="${OPTARG}"
1022 LXD_CRED_FILE
="${OPTARG}"
1025 CONTROLLER_NAME
="${OPTARG}"
1028 DOCKER_REGISTRY_URL
="${OPTARG}"
1031 DOCKER_PROXY_URL
="${OPTARG}"
1034 MODULE_DOCKER_TAG
="${OPTARG}"
# NOTE(review): the fragments below belong to the long-option (`-`) arm of
# the case statement; each line handles one `--<word>` flag.
1037 [ "${OPTARG}" == "help" ] && usage
&& exit 0
1038 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE
="y" && PULL_IMAGES
="" && continue
1039 [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL
="--debug" && continue
1040 [ "${OPTARG}" == "develop" ] && DEVELOP
="y" && continue
1041 [ "${OPTARG}" == "uninstall" ] && UNINSTALL
="y" && continue
1042 [ "${OPTARG}" == "update" ] && UPDATE
="y" && continue
1043 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE
="y" && continue
1044 [ "${OPTARG}" == "test" ] && TEST_INSTALLER
="y" && continue
1045 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD
="y" && continue
1046 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD
="y" && continue
1047 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER
="y" && continue
1048 [ "${OPTARG}" == "showopts" ] && SHOWOPTS
="y" && continue
1049 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS
="y" && continue
1050 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU
="--nojuju" && continue
1051 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD
="y" && continue
1052 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT
="y" && continue
1053 [ "${OPTARG}" == "pullimages" ] && continue
1054 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
1055 [ "${OPTARG}" == "charmed" ] && CHARMED
="y" && continue
1056 [ "${OPTARG}" == "bundle" ] && continue
1057 [ "${OPTARG}" == "k8s" ] && continue
1058 [ "${OPTARG}" == "lxd" ] && continue
1059 [ "${OPTARG}" == "lxd-cred" ] && continue
1060 [ "${OPTARG}" == "microstack" ] && continue
1061 [ "${OPTARG}" == "overlay" ] && continue
1062 [ "${OPTARG}" == "only-vca" ] && continue
1063 [ "${OPTARG}" == "vca" ] && continue
1064 [ "${OPTARG}" == "ha" ] && continue
1065 [ "${OPTARG}" == "tag" ] && continue
1066 [ "${OPTARG}" == "registry" ] && continue
1067 [ "${OPTARG}" == "pla" ] && INSTALL_PLA
="y" && continue
1068 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME
="true" && continue
1069 [ "${OPTARG}" == "nocachelxdimages" ] && continue
1070 [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES
="--cachelxdimages" && continue
1071 echo -e "Invalid option: '--$OPTARG'\n" >&2
1075 echo "Option -$OPTARG requires an argument" >&2
1079 echo -e "Invalid option: '-$OPTARG'\n" >&2
# NOTE(review): the top-level main flow below arrived mangled: statements are
# wrapped mid-token, the original file's line numbers are fused into the
# text, and many structural lines (if/else/fi closers, track/exit calls) are
# missing. Preserved byte-for-byte; reconstruct against the upstream devops
# installer before use.
1094 source $OSM_DEVOPS/common
/all_funcs
1096 [ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
1097 [ -n "$SHOWOPTS" ] && dump_vars
&& exit 0
1099 # Uninstall if "--uninstall"
1100 if [ -n "$UNINSTALL" ]; then
1101 if [ -n "$CHARMED" ]; then
1102 ${OSM_DEVOPS}/installers
/charmed_uninstall.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@"
1104 ${OSM_DEVOPS}/installers
/uninstall_osm.sh
"$@"
1110 # Charmed installation
1111 if [ -n "$CHARMED" ]; then
1112 ${OSM_DEVOPS}/installers
/charmed_install.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@"
1117 # Installation to Openstack
1118 if [ -n "$INSTALL_TO_OPENSTACK" ]; then
1119 install_to_openstack
$OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
1124 # Community_installer
1125 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
1126 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD |
grep -q NONE
&& FATAL
"Incompatible option: -m NONE cannot be used with other -m options"
1127 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL
"Incompatible option: -m PLA cannot be used without --pla option"
1129 # if develop, we force master
1130 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID
="master"
1132 need_packages
="git wget curl tar"
1134 echo -e "Checking required packages: $need_packages"
1135 dpkg
-l $need_packages &>/dev
/null \
1136 ||
! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1137 || sudo apt-get update \
1138 || FATAL
"failed to run apt-get update"
1139 dpkg
-l $need_packages &>/dev
/null \
1140 ||
! echo -e "Installing $need_packages requires root privileges." \
1141 || sudo apt-get
install -y $need_packages \
1142 || FATAL
"failed to install $need_packages"
1143 sudo snap
install jq
1144 if [ -z "$OSM_DEVOPS" ]; then
1145 if [ -n "$TEST_INSTALLER" ]; then
1146 echo -e "\nUsing local devops repo for OSM installation"
1147 OSM_DEVOPS
="$(dirname $(realpath $(dirname $0)))"
1149 echo -e "\nCreating temporary dir for OSM installation"
1150 OSM_DEVOPS
="$(mktemp -d -q --tmpdir "installosm.XXXXXX
")"
1151 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1153 git clone https
://osm.etsi.org
/gerrit
/osm
/devops.git
$OSM_DEVOPS
1155 if [ -z "$COMMIT_ID" ]; then
1156 echo -e "\nGuessing the current stable release"
1157 LATEST_STABLE_DEVOPS
=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1158 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1160 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1161 COMMIT_ID
="tags/$LATEST_STABLE_DEVOPS"
1163 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1165 git
-C $OSM_DEVOPS checkout
$COMMIT_ID
1169 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR
="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1170 OSM_K8S_WORK_DIR
="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1171 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1172 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1174 #Installation starts here
1175 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-11.0
-eleven/README.txt
&> /dev
/null
1176 OSM_INSTALLATION_ID
="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
1178 track start release
$RELEASE none none docker_tag
$OSM_DOCKER_TAG none none
1181 echo -e "\nDONE" && exit 0
1182 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1183 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1184 ! ask_user
"The installation will take about 75-90 minutes. Continue (Y/n)? " y
&& echo "Cancelled!" && exit 1
1187 echo -e "Checking required packages: lxd"
1188 lxd
--version &>/dev
/null || FATAL
"lxd not present, exiting."
1189 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1191 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-11.0
-eleven/README2.txt
&> /dev
/null