3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
# Print the installer's command-line help to stdout.
# Globals read: DEBUG_INSTALL (enables DEBUG tracing). Uses helper DEBUG (defined elsewhere).
# NOTE(review): the "function usage() {" header and closing "}" were lost in the
# corrupted source and are reconstructed here; the option text is kept verbatim.
function usage() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e " -h / --help: print this help"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor)"
    echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e " --debug: debug mode"
    echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " --volume: create a VM volume when installing to OpenStack"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
    echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
    echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
    echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
    echo -e " [--tag]: Docker image tag. (--charmed option)"
    echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
#
# Arguments: $1 - controller name to look up
# Outputs:   the controller's password on stdout (no trailing newline)
# Reads:     ${HOME}/.local/share/juju/accounts.yaml
# How: sed flattens the YAML into <indent>\034<key>\034<value> records,
# then awk tracks the key path per indent level and prints the value whose
# path matches the controller and whose key matches "password".
function parse_juju_password {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: YAML key chars, fs: \034 field separator
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Resolve the VCA (juju controller) connection variables if not provided by
# the user: OSM_VCA_CLOUDNAME, OSM_VCA_HOST, OSM_VCA_SECRET, OSM_VCA_PUBKEY,
# OSM_VCA_CACERT. Aborts via FATAL when any value cannot be obtained.
# Globals read: OSM_VCA_HOST, OSM_VCA_SECRET, OSM_VCA_PUBKEY, OSM_VCA_CACERT,
#               CONTROLLER_NAME, OSM_STACK_NAME
function set_vca_variables() {
    OSM_VCA_CLOUDNAME="lxd-cloud"
    [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
    if [ -z "$OSM_VCA_HOST" ]; then
        # The controller IP is the host part of the first api-endpoint
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=$(sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}')
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=$(juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        # CA cert is exported base64-encoded on a single line
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
}
# Emit a random 32-character alphanumeric secret on stdout (no newline).
function generate_secret() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Ensure the given apt packages are installed, installing any that are missing.
# Arguments: $1 - space-separated list of package names
# Aborts via FATAL if an installation fails.
function check_packages() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    NEEDED_PACKAGES="$1"
    echo -e "Checking required packages: ${NEEDED_PACKAGES}"
    for PACKAGE in ${NEEDED_PACKAGES} ; do
        # dpkg -l exits non-zero when the package is not installed
        dpkg -l ${PACKAGE} &>/dev/null
        if [ $? -ne 0 ]; then
            echo -e "Package ${PACKAGE} is not installed."
            echo -e "Updating apt-cache ..."
            sudo apt-get update
            echo -e "Installing ${PACKAGE} ..."
            sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
        fi
    done
    echo -e "Required packages are present: ${NEEDED_PACKAGES}"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
function ask_user(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
    # NOTE: unreachable (the loop only exits via return); kept for tracing symmetry
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Install the OSM client (python3-osm-im, python3-osmclient) from the OSM apt
# repository, plus their pip requirements, and print post-install guidance.
# Globals read: RELEASE, REPOSITORY, REPOSITORY_BASE, INSTALL_LIGHTWEIGHT
# Globals written/exported: OSM_HOSTNAME, OSM_RO_HOSTNAME (non-lightweight only)
function install_osmclient(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Strip the option prefixes so the raw values can be composed into URLs
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev libmagic1
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    # In classic (non-lightweight) installs, point the client at the LXD containers
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=$(lxc list | awk '($2=="SO-ub"){print $6}')
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=$(lxc list | awk '($2=="RO"){print $6}')
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Log in to the configured docker registry.
# Globals read: DOCKER_REGISTRY_USER, DOCKER_REGISTRY_PASSWORD
function docker_login() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
    # Feed the password through stdin: the docker CLI rejects "-p" combined
    # with --password-stdin, and passing the secret on the command line would
    # leak it via `ps` and shell history.
    echo "${DOCKER_REGISTRY_PASSWORD}" | sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Pull third-party docker images and either pull OSM module images
# (PULL_IMAGES set) or build them from source checked out from gerrit.
# Globals read: TO_REBUILD, PULL_IMAGES, DOCKER_REGISTRY_URL, DOCKER_USER,
#   OSM_DOCKER_TAG, MODULE_DOCKER_TAG, INSTALL_PLA, COMMIT_ID, OSM_DEVOPS,
#   KAFKA_TAG, PROMETHEUS_TAG, PROMETHEUS_CADVISOR_TAG, GRAFANA_TAG,
#   KIWIGRID_K8S_SIDECAR_TAG, KEYSTONEDB_TAG, REPOSITORY*, RELEASE
function generate_docker_images() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
        sg docker -c "docker pull kiwigrid/k8s-sidecar:${KIWIGRID_K8S_SIDECAR_TAG}" || FATAL "cannot get kiwigrid k8s-sidecar docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
            module_lower=${module,,}
            # PLA is optional; skip unless explicitly requested
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"
        LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
        trap 'rm -rf "${LWTEMPDIR}"' EXIT
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Copy $1 over $2 unless they already have identical content.
# If $2 exists and differs, ask the user before overwriting (default: no).
# Arguments: $1 - source file; $2 - destination file
function cmp_overwrite() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    file1="$1"
    file2="$2"
    # Run cmp directly; the original "if ! $(cmp ...)" form only worked by
    # accident (it relied on the exit status of an empty command substitution).
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Copy the OSM kubernetes pod manifests into the docker work dir and drop the
# ng-prometheus manifest (not deployed by this installer).
# Globals read: OSM_DEVOPS, OSM_DOCKER_WORK_DIR
function generate_k8s_manifest_files() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    #Kubernetes resources
    sudo cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    sudo rm -f ${OSM_DOCKER_WORK_DIR}/osm_pods/ng-prometheus.yaml
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Stage prometheus, grafana and prometheus-exporter config files into the
# docker work dir (docker swarm deployment only).
# Globals read: OSM_DEVOPS, OSM_DOCKER_WORK_DIR
function generate_prometheus_grafana_files() {
    #this only works with docker swarm
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Prometheus files
    sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
    # Grafana files
    sudo mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
    # Prometheus Exporters files
    sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Generate (or update in place) the per-module .env files under
# $OSM_DOCKER_WORK_DIR: lcm, ro/ro-db, keystone/keystone-db, nbi, mon, pol.
# Existing files are backed up with a '~' suffix first; keys already present
# are updated via sed, missing keys are appended via tee.
# Globals read: OSM_DOCKER_WORK_DIR, OSM_DATABASE_COMMONKEY, OSM_VCA_* , DEFAULT_IP
# NOTE(review): several fi/else terminators were lost in the corrupted source;
# the grep-then-append/sed-update pattern is reconstructed uniformly.
function generate_docker_env_files() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Doing a backup of existing env files"
    sudo cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            sudo sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | sudo tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    echo "Finished generation of docker env files"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#creates secrets from env files which will be used by containers
# Globals read: OSM_STACK_NAME (used as namespace), OSM_DOCKER_WORK_DIR
function kube_secrets(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#deploys osm pods and services
# Globals read: OSM_STACK_NAME (namespace), OSM_K8S_WORK_DIR (manifest dir)
function deploy_osm_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#deploy charmed services
# Creates a juju model on the K8s cloud and deploys the mongodb-k8s charm.
# Globals read: OSM_STACK_NAME, OSM_VCA_K8S_CLOUDNAME
function deploy_charmed_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Deploy the optional PLA (placement) module: rewrite its volume path and
# apply its manifests into the OSM namespace.
# Globals read: OSM_NAMESPACE_VOL, OSM_DOCKER_WORK_DIR, OSM_STACK_NAME
function deploy_osm_pla_service() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # corresponding to namespace_vol
    sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Point the K8s manifests of the given modules at the configured registry/tag
# by rewriting 'opensourcemano/<module>:*' image references in place.
# Arguments: $1 - docker tag; $2.. - module names (lowercase)
# Globals read: INSTALL_PLA, DOCKER_REGISTRY_URL, DOCKER_USER,
#               OSM_DOCKER_WORK_DIR, OSM_K8S_WORK_DIR
function parse_yaml() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            # PLA lives outside OSM_K8S_WORK_DIR and is only patched when installed
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
                sudo sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
            sudo sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
535 function update_manifest_files
() {
536 osm_services
="nbi lcm ro pol mon ng-ui keystone pla prometheus"
538 for module
in $osm_services; do
539 module_upper
="${module^^}"
540 if ! echo $TO_REBUILD |
grep -q $module_upper ; then
541 list_of_services
="$list_of_services $module"
544 if [ ! "$OSM_DOCKER_TAG" == "12" ]; then
545 parse_yaml
$OSM_DOCKER_TAG $list_of_services
547 if [ -n "$MODULE_DOCKER_TAG" ]; then
548 parse_yaml
$MODULE_DOCKER_TAG $list_of_services_to_rebuild
550 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
function namespace_vol() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Services whose manifests mount a hostPath volume that must be
    # retargeted to this namespace's directory.
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    # $osm_services deliberately unquoted: word-splitting yields one
    # iteration per service name.
    for osm in $osm_services; do
        sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" "$OSM_K8S_WORK_DIR/$osm.yaml"
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
562 function add_local_k8scluster
() {
563 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
564 /usr
/bin
/osm
--all-projects vim-create \
565 --name _system-osm-vim \
566 --account_type dummy \
567 --auth_url http
://dummy \
568 --user osm
--password osm
--tenant osm \
569 --description "dummy" \
570 --config '{management_network_name: mgmt}'
571 /usr
/bin
/osm
--all-projects k8scluster-add \
572 --creds ${HOME}/.kube
/config \
573 --vim _system-osm-vim \
574 --k8s-nets '{"net1": null}' \
576 --description "OSM Internal Cluster" \
578 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
function configure_apt_proxy() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # NOTE(review): the $1 assignment was reconstructed — callers invoke
    # configure_apt_proxy $APT_PROXY_URL, so $1 is the proxy URL.
    OSM_APT_PROXY=$1
    OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
    echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
    if [ ! -f "${OSM_APT_PROXY_FILE}" ]; then
        # BUGFIX: the heredoc was previously redirected to ${OSM_APT_PROXY}
        # (the proxy URL itself) instead of the apt configuration file.
        sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
EOF"
    else
        # File already exists: rewrite the Proxy directive in place.
        sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" "${OSM_APT_PROXY_FILE}"
    fi
    sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
    track prereq apt_proxy_configured_ok
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
598 function install_osm
() {
599 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
602 track start release
$RELEASE none none docker_tag
$OSM_DOCKER_TAG none none installation_type
$OSM_INSTALLATION_TYPE none none
604 track checks checkingroot_ok
605 [ "$USER" == "root" ] && FATAL
"You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
606 track checks noroot_ok
608 [ -z "$ASSUME_YES" ] && ! ask_user
"The installation will do the following
609 1. Install and configure LXD
612 4. Disable swap space
613 5. Install and initialize Kubernetes
615 Do you want to proceed (Y/n)? " y
&& echo "Cancelled!" && exit 1
616 track checks proceed_ok
618 echo "Installing OSM"
620 echo "Determining IP address of the interface with the default route"
621 DEFAULT_IF
=$
(ip route list|
awk '$1=="default" {print $5; exit}')
622 [ -z "$DEFAULT_IF" ] && DEFAULT_IF
=$
(route
-n |
awk '$1~/^0.0.0.0/ {print $8; exit}')
623 [ -z "$DEFAULT_IF" ] && FATAL
"Not possible to determine the interface with the default route 0.0.0.0"
624 DEFAULT_IP
=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
625 [ -z "$DEFAULT_IP" ] && FATAL
"Not possible to determine the IP address of the interface with the default route"
627 # configure apt proxy
628 [ -n "$APT_PROXY_URL" ] && configure_apt_proxy
$APT_PROXY_URL
630 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
631 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
632 LXD_INSTALL_OPTS
="-D ${OSM_DEVOPS} -i ${DEFAULT_IP} ${DEBUG_INSTALL}"
633 $OSM_DEVOPS/installers
/install_lxd.sh
${LXD_INSTALL_OPTS} || FATAL_TRACK lxd
"install_lxd.sh failed"
636 track prereq prereqok_ok
638 if [ ! -n "$INSTALL_NODOCKER" ]; then
639 DOCKER_CE_OPTS
="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
640 [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
641 $OSM_DEVOPS/installers
/install_docker_ce.sh
${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce
"install_docker_ce.sh failed"
644 track docker_ce docker_ce_ok
646 echo "Creating folders for installation"
647 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && sudo mkdir
-p $OSM_DOCKER_WORK_DIR
648 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && sudo mkdir
-p $OSM_DOCKER_WORK_DIR/osm_pla
649 sudo
cp -b $OSM_DEVOPS/installers
/docker
/cluster-config.yaml
$OSM_DOCKER_WORK_DIR/cluster-config.yaml
651 $OSM_DEVOPS/installers
/install_kubeadm_cluster.sh
-i ${DEFAULT_IP} -d ${OSM_DOCKER_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
652 FATAL_TRACK k8scluster
"install_kubeadm_cluster.sh failed"
653 track k8scluster k8scluster_ok
655 JUJU_OPTS
="-D ${OSM_DEVOPS} -s ${OSM_STACK_NAME} -i ${DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}"
656 [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS
="$JUJU_OPTS -H ${OSM_VCA_HOST}"
657 [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS
="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
658 [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS
="$JUJU_OPTS -L ${LXD_CRED_FILE}"
659 [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS
="$JUJU_OPTS -K ${CONTROLLER_NAME}"
660 $OSM_DEVOPS/installers
/install_juju.sh
${JUJU_OPTS} || FATAL_TRACK juju
"install_juju.sh failed"
664 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
665 OSM_DATABASE_COMMONKEY
=$
(generate_secret
)
666 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL
"Cannot generate common db secret"
669 # Deploy OSM services
670 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
671 track docker_images docker_images_ok
673 generate_k8s_manifest_files
674 track osm_files manifest_files_ok
675 generate_docker_env_files
676 track osm_files env_files_ok
678 deploy_charmed_services
679 track deploy_osm deploy_charmed_services_ok
681 track deploy_osm kube_secrets_ok
682 update_manifest_files
683 track deploy_osm update_manifest_files_ok
685 track deploy_osm namespace_vol_ok
687 track deploy_osm deploy_osm_services_k8s_ok
688 if [ -n "$INSTALL_PLA"]; then
689 # optional PLA install
690 deploy_osm_pla_service
691 track deploy_osm deploy_osm_pla_ok
693 if [ -n "$INSTALL_K8S_MONITOR" ]; then
694 # install OSM MONITORING
695 install_k8s_monitoring
696 track deploy_osm install_k8s_monitoring_ok
699 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
700 track osmclient osmclient_ok
702 echo -e "Checking OSM health state..."
703 $OSM_DEVOPS/installers
/osm_health.sh
-s ${OSM_STACK_NAME} -k || \
704 (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
705 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
706 track healthchecks osm_unhealthy didnotconverge
)
707 track healthchecks after_healthcheck_ok
710 track final_ops add_local_k8scluster_ok
712 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-12.0
-twelve/README2.txt
&> /dev
/null
715 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
719 function install_to_openstack
() {
720 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
723 FATAL
"OpenStack installer requires a valid external network name"
726 # Install Pip for Python3
727 sudo apt
install -y python3-pip python3-venv
728 sudo
-H LC_ALL
=C python3
-m pip
install -U pip
730 # Create a venv to avoid conflicts with the host installation
731 python3
-m venv
$OPENSTACK_PYTHON_VENV
733 source $OPENSTACK_PYTHON_VENV/bin
/activate
735 # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
736 python
-m pip
install -U wheel
737 python
-m pip
install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
739 # Install the Openstack cloud module (ansible>=2.10)
740 ansible-galaxy collection
install openstack.cloud
742 export ANSIBLE_CONFIG
="$OSM_DEVOPS/installers/openstack/ansible.cfg"
744 OSM_INSTALLER_ARGS
="${REPO_ARGS[@]}"
746 ANSIBLE_VARS
="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
748 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
749 ANSIBLE_VARS
+=" key_file=$OPENSTACK_SSH_KEY_FILE"
752 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
753 ANSIBLE_VARS
+=" userdata_file=$OPENSTACK_USERDATA_FILE"
756 # Execute the Ansible playbook based on openrc or clouds.yaml
759 ansible-playbook
-e installer_args
="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
760 $OSM_DEVOPS/installers
/openstack
/site.yml
762 ansible-playbook
-e installer_args
="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
763 -e cloud_name
=$1 $OSM_DEVOPS/installers
/openstack
/site.yml
769 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
function install_k8s_monitoring() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # install OSM monitoring: make the helper scripts executable and run the
    # entry point. $OSM_DEVOPS is quoted; the *.sh glob stays outside the
    # quotes so it still expands.
    sudo chmod +x "$OSM_DEVOPS"/installers/k8s/*.sh
    sudo "$OSM_DEVOPS"/installers/k8s/install_osm_k8s_monitoring.sh || \
        FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Print every installer configuration variable, one VAR=value per line.
# Used by --showopts for debugging. Consistency fix: the three
# OPENSTACK_* lines used the odd `echo "VAR"="$VAR"` form; normalized to
# `echo "VAR=$VAR"` like every other line (output is byte-identical).
function dump_vars() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "APT_PROXY_URL=$APT_PROXY_URL"
    echo "DEVELOP=$DEVELOP"
    echo "DEBUG_INSTALL=$DEBUG_INSTALL"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
    echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
    echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "OSM_DOCKER_WORK_DIR=$OSM_DOCKER_WORK_DIR"
    echo "OSM_K8S_WORK_DIR=$OSM_K8S_WORK_DIR"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "Install from specific refspec (-b): $COMMIT_ID"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Split DOCKER_REGISTRY_URL of the form "user:password@host[:port]" into
# DOCKER_REGISTRY_USER, DOCKER_REGISTRY_PASSWORD and a bare host[:port]
# left in DOCKER_REGISTRY_URL.
function parse_docker_registry_url() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Peel the credentials (everything before '@') apart first; the URL
    # itself is overwritten last because the first two commands read it.
    DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,parts,"@"); split(parts[1],creds,":"); print creds[1]}')
    DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,parts,"@"); split(parts[1],creds,":"); print creds[2]}')
    DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,parts,"@"); print parts[2]}')
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
842 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
843 echo "** Trapped CTRL-C"
844 FATAL
"User stopped the installation"
845 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
858 INSTALL_FROM_SOURCE
=""
864 LXD_REPOSITORY_BASE
="https://osm-download.etsi.org/repository/osm/lxd"
865 LXD_REPOSITORY_PATH
=""
866 INSTALL_LIGHTWEIGHT
="y"
867 INSTALL_TO_OPENSTACK
=""
868 OPENSTACK_OPENRC_FILE_OR_CLOUD
=""
869 OPENSTACK_PUBLIC_NET_NAME
=""
870 OPENSTACK_ATTACH_VOLUME
="false"
871 OPENSTACK_SSH_KEY_FILE
=""
872 OPENSTACK_USERDATA_FILE
=""
873 OPENSTACK_VM_NAME
="server-osm"
874 OPENSTACK_PYTHON_VENV
="$HOME/.virtual-envs/osm"
880 INSTALL_K8S_MONITOR
=""
881 INSTALL_NOHOSTCLIENT
=""
882 INSTALL_CACHELXDIMAGES
=""
887 OSM_VCA_CLOUDNAME
="localhost"
888 OSM_VCA_K8S_CLOUDNAME
="k8scloud"
892 REPOSITORY_KEY
="OSM%20ETSI%20Release%20Key.gpg"
893 REPOSITORY_BASE
="https://osm-download.etsi.org/repository/osm/debian"
894 OSM_WORK_DIR
="/etc/osm"
895 OSM_DOCKER_WORK_DIR
="/etc/osm/docker"
896 OSM_K8S_WORK_DIR
="${OSM_DOCKER_WORK_DIR}/osm_pods"
897 OSM_HOST_VOL
="/var/lib/osm"
898 OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
899 OSM_DOCKER_TAG
=latest
900 DOCKER_USER
=opensourcemano
903 KIWIGRID_K8S_SIDECAR_TAG
="1.15.6"
904 PROMETHEUS_TAG
=v2.28
.1
906 PROMETHEUS_NODE_EXPORTER_TAG
=0.18.1
907 PROMETHEUS_CADVISOR_TAG
=latest
909 OSM_DATABASE_COMMONKEY
=
910 ELASTIC_VERSION
=6.4.2
911 ELASTIC_CURATOR_VERSION
=5.5.4
912 POD_NETWORK_CIDR
=10.244.0.0/16
913 K8S_MANIFEST_DIR
="/etc/kubernetes/manifests"
914 RE_CHECK
='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
918 OSM_INSTALLATION_TYPE
="Default"
920 while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o
; do
923 APT_PROXY_URL
=${OPTARG}
930 REPOSITORY
="${OPTARG}"
931 REPO_ARGS
+=(-r "$REPOSITORY")
934 REPOSITORY_KEY
="${OPTARG}"
935 REPO_ARGS
+=(-k "$REPOSITORY_KEY")
938 REPOSITORY_BASE
="${OPTARG}"
939 REPO_ARGS
+=(-u "$REPOSITORY_BASE")
943 REPO_ARGS
+=(-R "$RELEASE")
946 OSM_DEVOPS
="${OPTARG}"
950 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
953 INSTALL_TO_OPENSTACK
="y"
954 if [ -n "${OPTARG}" ]; then
955 OPENSTACK_OPENRC_FILE_OR_CLOUD
="${OPTARG}"
957 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
962 OPENSTACK_SSH_KEY_FILE
="${OPTARG}"
965 OPENSTACK_USERDATA_FILE
="${OPTARG}"
968 OPENSTACK_PUBLIC_NET_NAME
="${OPTARG}"
971 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD
="$TO_REBUILD NG-UI" && continue
972 [ "${OPTARG}" == "NBI" ] && TO_REBUILD
="$TO_REBUILD NBI" && continue
973 [ "${OPTARG}" == "LCM" ] && TO_REBUILD
="$TO_REBUILD LCM" && continue
974 [ "${OPTARG}" == "RO" ] && TO_REBUILD
="$TO_REBUILD RO" && continue
975 [ "${OPTARG}" == "MON" ] && TO_REBUILD
="$TO_REBUILD MON" && continue
976 [ "${OPTARG}" == "POL" ] && TO_REBUILD
="$TO_REBUILD POL" && continue
977 [ "${OPTARG}" == "PLA" ] && TO_REBUILD
="$TO_REBUILD PLA" && continue
978 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD
="$TO_REBUILD osmclient" && continue
979 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD
="$TO_REBUILD KAFKA" && continue
980 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD
="$TO_REBUILD MONGO" && continue
981 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS" && continue
982 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
983 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD
="$TO_REBUILD KEYSTONE-DB" && continue
984 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD
="$TO_REBUILD GRAFANA" && continue
985 [ "${OPTARG}" == "NONE" ] && TO_REBUILD
="$TO_REBUILD NONE" && continue
988 OSM_VCA_HOST
="${OPTARG}"
991 OSM_VCA_SECRET
="${OPTARG}"
994 OSM_STACK_NAME
="${OPTARG}" && [[ ! "${OPTARG}" =~
$RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
997 OSM_DOCKER_TAG
="${OPTARG}"
998 REPO_ARGS
+=(-t "$OSM_DOCKER_TAG")
1001 DOCKER_USER
="${OPTARG}"
1004 OSM_VCA_PUBKEY
=$
(cat ${OPTARG})
1007 OSM_VCA_APIPROXY
="${OPTARG}"
1010 LXD_CLOUD_FILE
="${OPTARG}"
1013 LXD_CRED_FILE
="${OPTARG}"
1016 CONTROLLER_NAME
="${OPTARG}"
1019 DOCKER_REGISTRY_URL
="${OPTARG}"
1022 DOCKER_PROXY_URL
="${OPTARG}"
1025 MODULE_DOCKER_TAG
="${OPTARG}"
1028 [ "${OPTARG}" == "help" ] && usage
&& exit 0
1029 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE
="y" && PULL_IMAGES
="" && continue
1030 [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL
="--debug" && continue
1031 [ "${OPTARG}" == "develop" ] && DEVELOP
="y" && continue
1032 [ "${OPTARG}" == "uninstall" ] && UNINSTALL
="y" && continue
1033 [ "${OPTARG}" == "update" ] && UPDATE
="y" && continue
1034 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE
="y" && continue
1035 [ "${OPTARG}" == "test" ] && TEST_INSTALLER
="y" && continue
1036 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD
="y" && continue
1037 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD
="y" && continue
1038 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER
="y" && continue
1039 [ "${OPTARG}" == "showopts" ] && SHOWOPTS
="y" && continue
1040 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS
="y" && continue
1041 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU
="--nojuju" && continue
1042 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD
="y" && continue
1043 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT
="y" && continue
1044 [ "${OPTARG}" == "pullimages" ] && continue
1045 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
1046 [ "${OPTARG}" == "charmed" ] && CHARMED
="y" && OSM_INSTALLATION_TYPE
="Charmed" && continue
1047 [ "${OPTARG}" == "bundle" ] && continue
1048 [ "${OPTARG}" == "k8s" ] && continue
1049 [ "${OPTARG}" == "lxd" ] && continue
1050 [ "${OPTARG}" == "lxd-cred" ] && continue
1051 [ "${OPTARG}" == "microstack" ] && continue
1052 [ "${OPTARG}" == "overlay" ] && continue
1053 [ "${OPTARG}" == "only-vca" ] && continue
1054 [ "${OPTARG}" == "small-profile" ] && continue
1055 [ "${OPTARG}" == "vca" ] && continue
1056 [ "${OPTARG}" == "ha" ] && continue
1057 [ "${OPTARG}" == "tag" ] && continue
1058 [ "${OPTARG}" == "registry" ] && continue
1059 [ "${OPTARG}" == "pla" ] && INSTALL_PLA
="y" && continue
1060 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME
="true" && continue
1061 [ "${OPTARG}" == "nocachelxdimages" ] && continue
1062 [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES
="--cachelxdimages" && continue
1063 echo -e "Invalid option: '--$OPTARG'\n" >&2
1067 echo "Option -$OPTARG requires an argument" >&2
1071 echo -e "Invalid option: '-$OPTARG'\n" >&2
1086 source $OSM_DEVOPS/common
/all_funcs
1088 [ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
1089 [ -n "$SHOWOPTS" ] && dump_vars
&& exit 0
1091 # Uninstall if "--uninstall"
1092 if [ -n "$UNINSTALL" ]; then
1093 if [ -n "$CHARMED" ]; then
1094 ${OSM_DEVOPS}/installers
/charmed_uninstall.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@" || \
1095 FATAL_TRACK charmed_uninstall
"charmed_uninstall.sh failed"
1097 ${OSM_DEVOPS}/installers
/uninstall_osm.sh
"$@" || \
1098 FATAL_TRACK community_uninstall
"uninstall_osm.sh failed"
1104 # Charmed installation
1105 if [ -n "$CHARMED" ]; then
1106 export OSM_TRACK_INSTALLATION_ID
="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
1107 track start release
$RELEASE none none docker_tag
$OSM_DOCKER_TAG none none installation_type
$OSM_INSTALLATION_TYPE none none
1108 ${OSM_DEVOPS}/installers
/charmed_install.sh
--tag $OSM_DOCKER_TAG "$@" || \
1109 FATAL_TRACK charmed_install
"charmed_install.sh failed"
1110 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-12.0
-twelve/README2.txt
&> /dev
/null
1111 track end installation_type
$OSM_INSTALLATION_TYPE
1116 # Installation to Openstack
1117 if [ -n "$INSTALL_TO_OPENSTACK" ]; then
1118 install_to_openstack
$OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
1123 # Community_installer
1124 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
1125 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD |
grep -q NONE
&& FATAL
"Incompatible option: -m NONE cannot be used with other -m options"
1126 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL
"Incompatible option: -m PLA cannot be used without --pla option"
1128 # if develop, we force master
1129 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID
="master"
1131 check_packages
"git wget curl tar snapd"
1133 sudo snap
install jq
1134 if [ -z "$OSM_DEVOPS" ]; then
1135 if [ -n "$TEST_INSTALLER" ]; then
1136 echo -e "\nUsing local devops repo for OSM installation"
1137 OSM_DEVOPS
="$(dirname $(realpath $(dirname $0)))"
1139 echo -e "\nCreating temporary dir for OSM installation"
1140 OSM_DEVOPS
="$(mktemp -d -q --tmpdir "installosm.XXXXXX
")"
1141 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1143 git clone https
://osm.etsi.org
/gerrit
/osm
/devops.git
$OSM_DEVOPS
1145 if [ -z "$COMMIT_ID" ]; then
1146 echo -e "\nGuessing the current stable release"
1147 LATEST_STABLE_DEVOPS
=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1148 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1150 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1151 COMMIT_ID
="tags/$LATEST_STABLE_DEVOPS"
1153 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1155 git
-C $OSM_DEVOPS checkout
$COMMIT_ID
1159 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR
="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1160 OSM_K8S_WORK_DIR
="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1161 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1162 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1164 #Installation starts here
1165 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-12.0
-twelve/README.txt
&> /dev
/null
1166 export OSM_TRACK_INSTALLATION_ID
="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"