# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
17 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
18 echo -e "usage: $0 [OPTIONS]"
19 echo -e "Install OSM from binaries or source code (by default, from binaries)"
21 echo -e " -h / --help: print this help"
22 echo -e " -y: do not prompt for confirmation, assumes yes"
23 echo -e " -r <repo>: use specified repository name for osm packages"
24 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
25 echo -e " -u <repo base>: use specified repository url for osm packages"
26 echo -e " -k <repo key>: use specified repository public key url"
27 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
28 echo -e " -b master (main dev branch)"
29 echo -e " -b v2.0 (v2.0 branch)"
30 echo -e " -b tags/v1.1.0 (a specific tag)"
32 echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
33 echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
34 echo -e " -H <VCA host> use specific juju host controller IP"
35 echo -e " -S <VCA secret> use VCA/juju secret key"
36 echo -e " -P <VCA pubkey> use VCA/juju public key file"
37 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
38 echo -e " --pla: install the PLA module for placement support"
39 echo -e " --ng-sa: install Airflow and Pushgateway to get VNF and NS status (experimental)"
40 echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
41 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor)"
42 echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
43 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
44 echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
45 echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
46 echo -e " -D <devops path> use local devops installation path"
47 echo -e " -w <work dir> Location to store runtime installation"
48 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
49 echo -e " -l: LXD cloud yaml file"
50 echo -e " -L: LXD credentials yaml file"
51 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
52 echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
53 echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
54 echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
55 echo -e " --debug: debug mode"
56 echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
57 echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
58 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
59 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
60 echo -e " --nojuju: do not juju, assumes already installed"
61 echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
62 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
63 echo -e " --nohostclient: do not install the osmclient"
64 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
65 echo -e " --source: install OSM from source code using the latest stable tag"
66 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
67 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
68 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
69 echo -e " --volume: create a VM volume when installing to OpenStack"
70 echo -e " --showopts: print chosen options and exit (only for debugging)"
71 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
72 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
73 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
74 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
75 echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
76 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
77 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
78 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
79 echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
80 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
81 echo -e " [--tag]: Docker image tag. (--charmed option)"
82 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
83 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
86 # takes a juju/accounts.yaml file and returns the password specific
87 # for a controller. I wrote this using only bash tools to minimize
88 # additions of other packages
89 function parse_juju_password
{
90 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
91 password_file
="${HOME}/.local/share/juju/accounts.yaml"
92 local controller_name
=$1
93 local s
='[[:space:]]*' w
='[a-zA-Z0-9_-]*' fs
=$
(echo @|
tr @
'\034')
94 sed -ne "s|^\($s\):|\1|" \
95 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
96 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
97 awk -F$fs -v controller
=$controller_name '{
98 indent = length($1)/2;
100 for (i in vname) {if (i > indent) {delete vname[i]}}
101 if (length($3) > 0) {
102 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
103 if (match(vn,controller) && match($2,"password")) {
108 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
111 function set_vca_variables
() {
112 OSM_VCA_CLOUDNAME
="lxd-cloud"
113 [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME
="localhost"
114 if [ -z "$OSM_VCA_HOST" ]; then
115 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
116 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
117 [ -z "$OSM_VCA_HOST" ] && FATAL
"Cannot obtain juju controller IP address"
119 if [ -z "$OSM_VCA_SECRET" ]; then
120 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$OSM_STACK_NAME)
121 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$CONTROLLER_NAME)
122 [ -z "$OSM_VCA_SECRET" ] && FATAL
"Cannot obtain juju secret"
124 if [ -z "$OSM_VCA_PUBKEY" ]; then
125 OSM_VCA_PUBKEY
=$
(cat $HOME/.local
/share
/juju
/ssh
/juju_id_rsa.pub
)
126 [ -z "$OSM_VCA_PUBKEY" ] && FATAL
"Cannot obtain juju public key"
128 if [ -z "$OSM_VCA_CACERT" ]; then
129 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
130 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
131 [ -z "$OSM_VCA_CACERT" ] && FATAL
"Cannot obtain juju CA certificate"
135 function generate_secret
() {
136 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
137 head /dev
/urandom |
tr -dc A-Za-z0-9 |
head -c 32
138 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
141 function check_packages
() {
143 echo -e "Checking required packages: ${NEEDED_PACKAGES}"
144 for PACKAGE
in ${NEEDED_PACKAGES} ; do
146 if [ $?
-ne 0 ]; then
147 echo -e "Package ${PACKAGE} is not installed."
148 echo -e "Updating apt-cache ..."
150 echo -e "Installing ${PACKAGE} ..."
151 sudo apt-get
install -y ${PACKAGE} || FATAL
"failed to install ${PACKAGE}"
154 echo -e "Required packages are present: ${NEEDED_PACKAGES}"
158 # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
159 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
160 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
161 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
162 read -e -p "$1" USER_CONFIRMATION
164 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
165 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
166 [ "${USER_CONFIRMATION,,}" == "yes" ] ||
[ "${USER_CONFIRMATION,,}" == "y" ] && return 0
167 [ "${USER_CONFIRMATION,,}" == "no" ] ||
[ "${USER_CONFIRMATION,,}" == "n" ] && return 1
168 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
170 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
173 function install_osmclient
(){
174 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
175 CLIENT_RELEASE
=${RELEASE#"-R "}
176 CLIENT_REPOSITORY_KEY
="OSM%20ETSI%20Release%20Key.gpg"
177 CLIENT_REPOSITORY
=${REPOSITORY#"-r "}
178 CLIENT_REPOSITORY_BASE
=${REPOSITORY_BASE#"-u "}
179 key_location
=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
180 curl
$key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE
=1 apt-key add
-
181 sudo add-apt-repository
-y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
183 sudo apt-get
install -y python3-pip
184 sudo
-H LC_ALL
=C python3
-m pip
install -U pip
185 sudo
-H LC_ALL
=C python3
-m pip
install -U python-magic pyangbind verboselogs
186 sudo apt-get
install -y python3-osm-im python3-osmclient
187 if [ -f /usr
/lib
/python
3/dist-packages
/osm_im
/requirements.txt
]; then
188 python3
-m pip
install -r /usr
/lib
/python
3/dist-packages
/osm_im
/requirements.txt
190 if [ -f /usr
/lib
/python
3/dist-packages
/osmclient
/requirements.txt
]; then
191 sudo apt-get
install -y libcurl4-openssl-dev libssl-dev libmagic1
192 python3
-m pip
install -r /usr
/lib
/python
3/dist-packages
/osmclient
/requirements.txt
194 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME
=`lxc list | awk '($2=="SO-ub"){print $6}'`
195 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME
=`lxc list | awk '($2=="RO"){print $6}'`
196 echo -e "\nOSM client installed"
197 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
198 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
199 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
200 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
202 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
203 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
204 echo " export OSM_HOSTNAME=<OSM_host>"
206 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
210 function docker_login
() {
211 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
213 [ -z "${DEBUG_INSTALL}" ] || DEBUG
"Docker registry user: ${DOCKER_REGISTRY_USER}"
214 sg docker
-c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD} --password-stdin"
215 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
218 function generate_docker_images
() {
219 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
220 echo "Pulling and generating docker images"
221 [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login
223 echo "Pulling docker images"
225 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q KAFKA
; then
226 sg docker
-c "docker pull wurstmeister/zookeeper" || FATAL
"cannot get zookeeper docker image"
227 sg docker
-c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL
"cannot get kafka docker image"
230 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q PROMETHEUS
; then
231 sg docker
-c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL
"cannot get prometheus docker image"
234 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q PROMETHEUS-CADVISOR
; then
235 sg docker
-c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL
"cannot get prometheus cadvisor docker image"
238 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q GRAFANA
; then
239 sg docker
-c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL
"cannot get grafana docker image"
240 sg docker
-c "docker pull kiwigrid/k8s-sidecar:${KIWIGRID_K8S_SIDECAR_TAG}" || FATAL
"cannot get kiwigrid k8s-sidecar docker image"
243 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q NBI ||
echo $TO_REBUILD |
grep -q KEYSTONE-DB
; then
244 sg docker
-c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL
"cannot get keystone-db docker image"
247 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q RO
; then
248 sg docker
-c "docker pull mysql:5" || FATAL
"cannot get mysql docker image"
251 if [ -n "$PULL_IMAGES" ]; then
252 echo "Pulling OSM docker images"
253 for module
in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient
; do
254 module_lower
=${module,,}
255 if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
258 module_tag
="${OSM_DOCKER_TAG}"
259 if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD |
grep -q $module ; then
260 module_tag
="${MODULE_DOCKER_TAG}"
262 echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
263 sg docker
-c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL
"cannot pull $module docker image"
266 _build_from
=$COMMIT_ID
267 [ -z "$_build_from" ] && _build_from
="latest"
268 echo "OSM Docker images generated from $_build_from"
269 LWTEMPDIR
="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX
")"
270 trap 'rm -rf "${LWTEMPDIR}"' EXIT
271 for module
in MON POL NBI KEYSTONE RO LCM NG-UI PLA
; do
272 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q ${module} ; then
273 module_lower
=${module,,}
274 if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
277 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/$module
278 git
-C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
279 sg docker
-c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
282 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q osmclient
; then
283 BUILD_ARGS
+=(--build-arg REPOSITORY
="$REPOSITORY")
284 BUILD_ARGS
+=(--build-arg RELEASE
="$RELEASE")
285 BUILD_ARGS
+=(--build-arg REPOSITORY_KEY
="$REPOSITORY_KEY")
286 BUILD_ARGS
+=(--build-arg REPOSITORY_BASE
="$REPOSITORY_BASE")
287 sg docker
-c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
289 echo "Finished generation of docker images"
292 echo "Finished pulling and generating docker images"
293 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
296 function cmp_overwrite
() {
297 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
300 if ! $
(cmp "${file1}" "${file2}" >/dev
/null
2>&1); then
301 if [ -f "${file2}" ]; then
302 ask_user
"The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
304 cp -b ${file1} ${file2}
307 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
310 function generate_k8s_manifest_files() {
311 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
312 #Kubernetes resources
313 sudo cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
314 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
317 function generate_docker_env_files() {
318 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
319 echo "Doing a backup of existing env files
"
320 sudo cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
321 sudo cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
322 sudo cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
323 sudo cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
324 sudo cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
325 sudo cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
326 sudo cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
327 sudo cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
328 if [ -n "${INSTALL_NGSA}" ]; then
329 sudo cp $OSM_DOCKER_WORK_DIR/ngsa.env{,~}
332 echo "Generating docker env files
"
334 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
335 echo "OSMLCM_DATABASE_COMMONKEY
=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
338 if ! grep -Fq "OSMLCM_VCA_HOST
" $OSM_DOCKER_WORK_DIR/lcm.env; then
339 echo "OSMLCM_VCA_HOST
=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
341 sudo sed -i "s|OSMLCM_VCA_HOST.
*|OSMLCM_VCA_HOST
=$OSM_VCA_HOST|g
" $OSM_DOCKER_WORK_DIR/lcm.env
344 if ! grep -Fq "OSMLCM_VCA_SECRET
" $OSM_DOCKER_WORK_DIR/lcm.env; then
345 echo "OSMLCM_VCA_SECRET
=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
347 sudo sed -i "s|OSMLCM_VCA_SECRET.
*|OSMLCM_VCA_SECRET
=$OSM_VCA_SECRET|g
" $OSM_DOCKER_WORK_DIR/lcm.env
350 if ! grep -Fq "OSMLCM_VCA_PUBKEY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
351 echo "OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
353 sudo sed -i "s|OSMLCM_VCA_PUBKEY.
*|OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
356 if ! grep -Fq "OSMLCM_VCA_CACERT
" $OSM_DOCKER_WORK_DIR/lcm.env; then
357 echo "OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
359 sudo sed -i "s|OSMLCM_VCA_CACERT.
*|OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
362 if [ -n "$OSM_VCA_APIPROXY" ]; then
363 if ! grep -Fq "OSMLCM_VCA_APIPROXY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
364 echo "OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
366 sudo sed -i "s|OSMLCM_VCA_APIPROXY.
*|OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
370 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE
" $OSM_DOCKER_WORK_DIR/lcm.env; then
371 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
374 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env
; then
375 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | sudo
tee -a $OSM_DOCKER_WORK_DIR/lcm.env
378 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env
; then
379 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/lcm.env
381 sudo
sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
384 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env
; then
385 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/lcm.env
387 sudo
sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
391 MYSQL_ROOT_PASSWORD
=$
(generate_secret
)
392 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env
]; then
393 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo
tee $OSM_DOCKER_WORK_DIR/ro-db.env
395 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env
]; then
396 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo
tee $OSM_DOCKER_WORK_DIR/ro.env
398 if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env
; then
399 echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/ro.env
403 KEYSTONE_DB_PASSWORD
=$
(generate_secret
)
404 SERVICE_PASSWORD
=$
(generate_secret
)
405 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env
]; then
406 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo
tee $OSM_DOCKER_WORK_DIR/keystone-db.env
408 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env
]; then
409 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo
tee $OSM_DOCKER_WORK_DIR/keystone.env
410 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |sudo
tee -a $OSM_DOCKER_WORK_DIR/keystone.env
411 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo
tee -a $OSM_DOCKER_WORK_DIR/keystone.env
415 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env
]; then
416 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo
tee $OSM_DOCKER_WORK_DIR/nbi.env
417 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/nbi.env
421 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env
]; then
422 echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
423 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
424 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
427 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env
; then
428 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
430 sudo
sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
433 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env
; then
434 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
436 sudo
sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
439 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env
; then
440 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
442 sudo
sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
445 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env
; then
446 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
448 sudo
sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
452 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env
]; then
453 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | sudo
tee -a $OSM_DOCKER_WORK_DIR/pol.env
457 if [ -n "${INSTALL_NGSA}" ] && [ ! -f $OSM_DOCKER_WORK_DIR/ngsa.env
]; then
458 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/ngsa.env
461 echo "Finished generation of docker env files"
462 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
465 #creates secrets from env files which will be used by containers
466 function kube_secrets
(){
467 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
468 kubectl create ns
$OSM_STACK_NAME
469 kubectl create secret generic lcm-secret
-n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
470 kubectl create secret generic mon-secret
-n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
471 kubectl create secret generic nbi-secret
-n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
472 kubectl create secret generic ro-db-secret
-n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
473 kubectl create secret generic ro-secret
-n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
474 kubectl create secret generic keystone-secret
-n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
475 kubectl create secret generic pol-secret
-n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
476 if [ -n "${INSTALL_NGSA}" ]; then
477 kubectl create secret generic ngsa-secret
-n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ngsa.env
479 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
482 #deploys osm pods and services
483 function deploy_osm_services
() {
484 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
485 kubectl apply
-n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
486 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
489 #deploy charmed services
490 function deploy_charmed_services
() {
491 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
492 juju add-model
$OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
493 juju deploy ch
:mongodb-k8s
-m $OSM_STACK_NAME
494 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
497 function deploy_osm_pla_service
() {
498 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
499 # corresponding to deploy_osm_services
500 kubectl apply
-n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
501 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
504 function install_osm_ngsa_service
() {
505 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
506 $OSM_DEVOPS/installers
/install_ngsa.sh
-d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
507 FATAL_TRACK install_osm_ngsa_service
"install_ngsa.sh failed"
508 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
511 function parse_yaml
() {
512 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
516 for module
in $services; do
517 if [ "$module" == "pla" ]; then
518 if [ -n "$INSTALL_PLA" ]; then
519 echo "Updating K8s manifest file from opensourcemano\/pla:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/pla:${TAG}"
520 sudo
sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
524 if [ "$module" == "ng-prometheus
" ]; then
527 echo "Updating K8s manifest
file from opensourcemano\
/${image}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${image}:${TAG}"
528 sudo sed -i "s
#opensourcemano/${image}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${image}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
531 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
534 function update_manifest_files
() {
535 osm_services
="nbi lcm ro pol mon ng-ui keystone pla prometheus ng-prometheus"
537 for module
in $osm_services; do
538 module_upper
="${module^^}"
539 if ! echo $TO_REBUILD |
grep -q $module_upper ; then
540 list_of_services
="$list_of_services $module"
543 if [ ! "$OSM_DOCKER_TAG" == "12" ]; then
544 parse_yaml
$OSM_DOCKER_TAG $list_of_services
546 if [ -n "$MODULE_DOCKER_TAG" ]; then
547 parse_yaml
$MODULE_DOCKER_TAG $list_of_services_to_rebuild
549 # The manifest for prometheus is prometheus.yaml or ng-prometheus.yaml, depending on the installation option
550 if [ -n "$INSTALL_NGSA" ]; then
551 sudo
rm -f ${OSM_K8S_WORK_DIR}/prometheus.yaml
553 sudo
rm -f ${OSM_K8S_WORK_DIR}/ng-prometheus.yaml
555 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
558 function namespace_vol
() {
559 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
560 # List of services with a volume mounted in path /var/lib/osm
562 for osm
in $osm_services; do
563 if [ -f "$OSM_K8S_WORK_DIR/$osm.yaml" ] ; then
564 sudo
sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
567 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
570 function add_local_k8scluster
() {
571 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
572 /usr
/bin
/osm
--all-projects vim-create \
573 --name _system-osm-vim \
574 --account_type dummy \
575 --auth_url http
://dummy \
576 --user osm
--password osm
--tenant osm \
577 --description "dummy" \
578 --config '{management_network_name: mgmt}'
579 /usr
/bin
/osm
--all-projects k8scluster-add \
580 --creds ${HOME}/.kube
/config \
581 --vim _system-osm-vim \
582 --k8s-nets '{"net1": null}' \
584 --description "OSM Internal Cluster" \
586 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
# Configures an apt proxy for the host in /etc/apt/apt.conf.d/osm-apt and
# refreshes the package index; used for air-gapped installations (-a option).
# Arguments: $1 - apt proxy URL
# Globals (read): DEBUG_INSTALL; (written): OSM_APT_PROXY, OSM_APT_PROXY_FILE
function configure_apt_proxy() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    OSM_APT_PROXY=$1  # NOTE(review): reconstructed from caller passing $APT_PROXY_URL — confirm
    OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
    echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
    if [ ! -f "${OSM_APT_PROXY_FILE}" ]; then
        # First-time setup: write the apt proxy snippet.
        # FIX: redirect to ${OSM_APT_PROXY_FILE} (the config file path), not to
        # ${OSM_APT_PROXY} (the proxy URL), which would create a bogus file.
        sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
EOF"
    else
        # File already exists: update the Proxy line in place.
        sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" "${OSM_APT_PROXY_FILE}"
    fi
    sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
    track prereq apt_proxy_configured_ok
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
606 function install_osm
() {
607 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
610 track start release
$RELEASE none none docker_tag
$OSM_DOCKER_TAG none none installation_type
$OSM_INSTALLATION_TYPE none none
612 track checks checkingroot_ok
613 [ "$USER" == "root" ] && FATAL
"You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
614 track checks noroot_ok
616 [ -z "$ASSUME_YES" ] && ! ask_user
"The installation will do the following
617 1. Install and configure LXD
620 4. Disable swap space
621 5. Install and initialize Kubernetes
623 Do you want to proceed (Y/n)? " y
&& echo "Cancelled!" && exit 1
624 track checks proceed_ok
626 echo "Installing OSM"
628 echo "Determining IP address of the interface with the default route"
629 DEFAULT_IF
=$
(ip route list|
awk '$1=="default" {print $5; exit}')
630 [ -z "$DEFAULT_IF" ] && DEFAULT_IF
=$
(route
-n |
awk '$1~/^0.0.0.0/ {print $8; exit}')
631 [ -z "$DEFAULT_IF" ] && FATAL
"Not possible to determine the interface with the default route 0.0.0.0"
632 DEFAULT_IP
=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
633 [ -z "$DEFAULT_IP" ] && FATAL
"Not possible to determine the IP address of the interface with the default route"
635 # configure apt proxy
636 [ -n "$APT_PROXY_URL" ] && configure_apt_proxy
$APT_PROXY_URL
638 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
639 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
640 LXD_INSTALL_OPTS
="-D ${OSM_DEVOPS} -i ${DEFAULT_IP} ${DEBUG_INSTALL}"
641 $OSM_DEVOPS/installers
/install_lxd.sh
${LXD_INSTALL_OPTS} || FATAL_TRACK lxd
"install_lxd.sh failed"
644 track prereq prereqok_ok
646 if [ ! -n "$INSTALL_NODOCKER" ]; then
647 DOCKER_CE_OPTS
="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
648 [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
649 $OSM_DEVOPS/installers
/install_docker_ce.sh
${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce
"install_docker_ce.sh failed"
652 track docker_ce docker_ce_ok
654 echo "Creating folders for installation"
655 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && sudo mkdir
-p $OSM_DOCKER_WORK_DIR
656 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && sudo mkdir
-p $OSM_DOCKER_WORK_DIR/osm_pla
657 sudo
cp -b $OSM_DEVOPS/installers
/docker
/cluster-config.yaml
$OSM_DOCKER_WORK_DIR/cluster-config.yaml
659 $OSM_DEVOPS/installers
/install_kubeadm_cluster.sh
-i ${DEFAULT_IP} -d ${OSM_DOCKER_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
660 FATAL_TRACK k8scluster
"install_kubeadm_cluster.sh failed"
661 track k8scluster k8scluster_ok
663 JUJU_OPTS
="-D ${OSM_DEVOPS} -s ${OSM_STACK_NAME} -i ${DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}"
664 [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS
="$JUJU_OPTS -H ${OSM_VCA_HOST}"
665 [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS
="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
666 [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS
="$JUJU_OPTS -L ${LXD_CRED_FILE}"
667 [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS
="$JUJU_OPTS -K ${CONTROLLER_NAME}"
668 $OSM_DEVOPS/installers
/install_juju.sh
${JUJU_OPTS} || FATAL_TRACK juju
"install_juju.sh failed"
672 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
673 OSM_DATABASE_COMMONKEY
=$
(generate_secret
)
674 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL
"Cannot generate common db secret"
677 # Deploy OSM services
678 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
679 track docker_images docker_images_ok
681 generate_k8s_manifest_files
682 track osm_files manifest_files_ok
683 generate_docker_env_files
684 track osm_files env_files_ok
686 deploy_charmed_services
687 track deploy_osm deploy_charmed_services_ok
689 track deploy_osm kube_secrets_ok
690 update_manifest_files
691 track deploy_osm update_manifest_files_ok
693 track deploy_osm namespace_vol_ok
695 track deploy_osm deploy_osm_services_k8s_ok
696 if [ -n "$INSTALL_PLA" ]; then
697 # optional PLA install
698 deploy_osm_pla_service
699 track deploy_osm deploy_osm_pla_ok
701 if [ -n "$INSTALL_K8S_MONITOR" ]; then
702 # install OSM MONITORING
703 install_k8s_monitoring
704 track deploy_osm install_k8s_monitoring_ok
706 if [ -n "$INSTALL_NGSA" ]; then
707 # optional NG-SA install
708 install_osm_ngsa_service
709 track deploy_osm install_osm_ngsa_ok
712 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
713 track osmclient osmclient_ok
715 echo -e "Checking OSM health state..."
716 $OSM_DEVOPS/installers
/osm_health.sh
-s ${OSM_STACK_NAME} -k || \
717 (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
718 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
719 track healthchecks osm_unhealthy didnotconverge
)
720 track healthchecks after_healthcheck_ok
723 track final_ops add_local_k8scluster_ok
725 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-12.0
-twelve/README2.txt
&> /dev
/null
728 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
732 function install_to_openstack
() {
733 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
736 FATAL
"OpenStack installer requires a valid external network name"
739 # Install Pip for Python3
740 sudo apt
install -y python3-pip python3-venv
741 sudo
-H LC_ALL
=C python3
-m pip
install -U pip
743 # Create a venv to avoid conflicts with the host installation
744 python3
-m venv
$OPENSTACK_PYTHON_VENV
746 source $OPENSTACK_PYTHON_VENV/bin
/activate
748 # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
749 python
-m pip
install -U wheel
750 python
-m pip
install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
752 # Install the Openstack cloud module (ansible>=2.10)
753 ansible-galaxy collection
install openstack.cloud
755 export ANSIBLE_CONFIG
="$OSM_DEVOPS/installers/openstack/ansible.cfg"
757 OSM_INSTALLER_ARGS
="${REPO_ARGS[@]}"
759 ANSIBLE_VARS
="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
761 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
762 ANSIBLE_VARS
+=" key_file=$OPENSTACK_SSH_KEY_FILE"
765 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
766 ANSIBLE_VARS
+=" userdata_file=$OPENSTACK_USERDATA_FILE"
769 # Execute the Ansible playbook based on openrc or clouds.yaml
772 ansible-playbook
-e installer_args
="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
773 $OSM_DEVOPS/installers
/openstack
/site.yml
775 ansible-playbook
-e installer_args
="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
776 -e cloud_name
=$1 $OSM_DEVOPS/installers
/openstack
/site.yml
782 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
# Installs the optional OSM K8s monitoring stack by delegating to the
# install_osm_k8s_monitoring.sh helper; aborts the installation on failure.
# Globals (read): DEBUG_INSTALL, OSM_DEVOPS
function install_k8s_monitoring() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # install OSM monitoring
    # Make all helper scripts executable first (glob is intentionally unquoted)
    sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Prints every installer configuration variable as NAME=value, one per line,
# for the --showopts option. Read-only; output goes to stdout.
function dump_vars(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    local cfg_var
    # Same variables, same order as the historical one-echo-per-line version;
    # ${!cfg_var} expands the variable whose name is held in cfg_var.
    for cfg_var in APT_PROXY_URL DEVELOP DEBUG_INSTALL DOCKER_NOBUILD \
        DOCKER_PROXY_URL DOCKER_REGISTRY_URL DOCKER_USER INSTALL_CACHELXDIMAGES \
        INSTALL_FROM_SOURCE INSTALL_K8S_MONITOR INSTALL_LIGHTWEIGHT INSTALL_LXD \
        INSTALL_NGSA INSTALL_NODOCKER INSTALL_NOJUJU INSTALL_NOLXD INSTALL_ONLY \
        INSTALL_PLA INSTALL_TO_OPENSTACK INSTALL_VIMEMU NO_HOST_PORTS \
        OPENSTACK_PUBLIC_NET_NAME OPENSTACK_OPENRC_FILE_OR_CLOUD \
        OPENSTACK_ATTACH_VOLUME OPENSTACK_SSH_KEY_FILE OPENSTACK_USERDATA_FILE \
        OPENSTACK_VM_NAME OSM_DEVOPS OSM_DOCKER_TAG OSM_DOCKER_WORK_DIR \
        OSM_HELM_WORK_DIR OSM_K8S_WORK_DIR OSM_STACK_NAME OSM_VCA_HOST \
        OSM_VCA_PUBKEY OSM_VCA_SECRET OSM_WORK_DIR PULL_IMAGES RECONFIGURE \
        RELEASE REPOSITORY REPOSITORY_BASE REPOSITORY_KEY SHOWOPTS \
        TEST_INSTALLER TO_REBUILD UNINSTALL UPDATE; do
        printf '%s=%s\n' "$cfg_var" "${!cfg_var}"
    done
    echo "Install from specific refspec (-b): $COMMIT_ID"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Splits DOCKER_REGISTRY_URL, given as "user:password@host[:port]", into
# DOCKER_REGISTRY_USER, DOCKER_REGISTRY_PASSWORD and a bare host[:port]
# left back in DOCKER_REGISTRY_URL.
function parse_docker_registry_url() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    local full_url="$DOCKER_REGISTRY_URL"
    # awk keeps the original edge-case behavior (e.g. no '@' present),
    # so the splitting is deliberately not done with parameter expansion.
    DOCKER_REGISTRY_USER=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}' <<<"$full_url")
    DOCKER_REGISTRY_PASSWORD=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}' <<<"$full_url")
    DOCKER_REGISTRY_URL=$(awk '{split($1,a,"@"); print a[2]}' <<<"$full_url")
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
857 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
858 echo "** Trapped CTRL-C"
859 FATAL
"User stopped the installation"
860 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
873 INSTALL_FROM_SOURCE
=""
880 LXD_REPOSITORY_BASE
="https://osm-download.etsi.org/repository/osm/lxd"
881 LXD_REPOSITORY_PATH
=""
882 INSTALL_LIGHTWEIGHT
="y"
883 INSTALL_TO_OPENSTACK
=""
884 OPENSTACK_OPENRC_FILE_OR_CLOUD
=""
885 OPENSTACK_PUBLIC_NET_NAME
=""
886 OPENSTACK_ATTACH_VOLUME
="false"
887 OPENSTACK_SSH_KEY_FILE
=""
888 OPENSTACK_USERDATA_FILE
=""
889 OPENSTACK_VM_NAME
="server-osm"
890 OPENSTACK_PYTHON_VENV
="$HOME/.virtual-envs/osm"
896 INSTALL_K8S_MONITOR
=""
897 INSTALL_NOHOSTCLIENT
=""
898 INSTALL_CACHELXDIMAGES
=""
903 OSM_VCA_CLOUDNAME
="localhost"
904 OSM_VCA_K8S_CLOUDNAME
="k8scloud"
908 REPOSITORY_KEY
="OSM%20ETSI%20Release%20Key.gpg"
909 REPOSITORY_BASE
="https://osm-download.etsi.org/repository/osm/debian"
910 OSM_WORK_DIR
="/etc/osm"
911 OSM_DOCKER_WORK_DIR
="${OSM_WORK_DIR}/docker"
912 OSM_K8S_WORK_DIR
="${OSM_DOCKER_WORK_DIR}/osm_pods"
913 OSM_HELM_WORK_DIR
="${OSM_WORK_DIR}/helm"
914 OSM_HOST_VOL
="/var/lib/osm"
915 OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
916 OSM_DOCKER_TAG
=latest
917 DOCKER_USER
=opensourcemano
920 KIWIGRID_K8S_SIDECAR_TAG
="1.15.6"
921 PROMETHEUS_TAG
=v2.28
.1
923 PROMETHEUS_NODE_EXPORTER_TAG
=0.18.1
924 PROMETHEUS_CADVISOR_TAG
=latest
926 OSM_DATABASE_COMMONKEY
=
927 ELASTIC_VERSION
=6.4.2
928 ELASTIC_CURATOR_VERSION
=5.5.4
929 POD_NETWORK_CIDR
=10.244.0.0/16
930 K8S_MANIFEST_DIR
="/etc/kubernetes/manifests"
931 RE_CHECK
='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
935 OSM_INSTALLATION_TYPE
="Default"
937 while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o
; do
940 APT_PROXY_URL
=${OPTARG}
947 REPOSITORY
="${OPTARG}"
948 REPO_ARGS
+=(-r "$REPOSITORY")
951 REPOSITORY_KEY
="${OPTARG}"
952 REPO_ARGS
+=(-k "$REPOSITORY_KEY")
955 REPOSITORY_BASE
="${OPTARG}"
956 REPO_ARGS
+=(-u "$REPOSITORY_BASE")
960 REPO_ARGS
+=(-R "$RELEASE")
963 OSM_DEVOPS
="${OPTARG}"
967 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
970 INSTALL_TO_OPENSTACK
="y"
971 if [ -n "${OPTARG}" ]; then
972 OPENSTACK_OPENRC_FILE_OR_CLOUD
="${OPTARG}"
974 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
979 OPENSTACK_SSH_KEY_FILE
="${OPTARG}"
982 OPENSTACK_USERDATA_FILE
="${OPTARG}"
985 OPENSTACK_PUBLIC_NET_NAME
="${OPTARG}"
988 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD
="$TO_REBUILD NG-UI" && continue
989 [ "${OPTARG}" == "NBI" ] && TO_REBUILD
="$TO_REBUILD NBI" && continue
990 [ "${OPTARG}" == "LCM" ] && TO_REBUILD
="$TO_REBUILD LCM" && continue
991 [ "${OPTARG}" == "RO" ] && TO_REBUILD
="$TO_REBUILD RO" && continue
992 [ "${OPTARG}" == "MON" ] && TO_REBUILD
="$TO_REBUILD MON" && continue
993 [ "${OPTARG}" == "POL" ] && TO_REBUILD
="$TO_REBUILD POL" && continue
994 [ "${OPTARG}" == "PLA" ] && TO_REBUILD
="$TO_REBUILD PLA" && continue
995 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD
="$TO_REBUILD osmclient" && continue
996 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD
="$TO_REBUILD KAFKA" && continue
997 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD
="$TO_REBUILD MONGO" && continue
998 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS" && continue
999 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1000 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD
="$TO_REBUILD KEYSTONE-DB" && continue
1001 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD
="$TO_REBUILD GRAFANA" && continue
1002 [ "${OPTARG}" == "NONE" ] && TO_REBUILD
="$TO_REBUILD NONE" && continue
1005 OSM_VCA_HOST
="${OPTARG}"
1008 OSM_VCA_SECRET
="${OPTARG}"
1011 OSM_STACK_NAME
="${OPTARG}" && [[ ! "${OPTARG}" =~
$RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1014 OSM_DOCKER_TAG
="${OPTARG}"
1015 REPO_ARGS
+=(-t "$OSM_DOCKER_TAG")
1018 DOCKER_USER
="${OPTARG}"
1021 OSM_VCA_PUBKEY
=$
(cat ${OPTARG})
1024 OSM_VCA_APIPROXY
="${OPTARG}"
1027 LXD_CLOUD_FILE
="${OPTARG}"
1030 LXD_CRED_FILE
="${OPTARG}"
1033 CONTROLLER_NAME
="${OPTARG}"
1036 DOCKER_REGISTRY_URL
="${OPTARG}"
1039 DOCKER_PROXY_URL
="${OPTARG}"
1042 MODULE_DOCKER_TAG
="${OPTARG}"
1045 [ "${OPTARG}" == "help" ] && usage
&& exit 0
1046 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE
="y" && PULL_IMAGES
="" && continue
1047 [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL
="--debug" && continue
1048 [ "${OPTARG}" == "develop" ] && DEVELOP
="y" && continue
1049 [ "${OPTARG}" == "uninstall" ] && UNINSTALL
="y" && continue
1050 [ "${OPTARG}" == "update" ] && UPDATE
="y" && continue
1051 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE
="y" && continue
1052 [ "${OPTARG}" == "test" ] && TEST_INSTALLER
="y" && continue
1053 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD
="y" && continue
1054 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD
="y" && continue
1055 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER
="y" && continue
1056 [ "${OPTARG}" == "showopts" ] && SHOWOPTS
="y" && continue
1057 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS
="y" && continue
1058 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU
="--nojuju" && continue
1059 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD
="y" && continue
1060 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT
="y" && continue
1061 [ "${OPTARG}" == "pullimages" ] && continue
1062 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
1063 [ "${OPTARG}" == "charmed" ] && CHARMED
="y" && OSM_INSTALLATION_TYPE
="Charmed" && continue
1064 [ "${OPTARG}" == "bundle" ] && continue
1065 [ "${OPTARG}" == "k8s" ] && continue
1066 [ "${OPTARG}" == "lxd" ] && continue
1067 [ "${OPTARG}" == "lxd-cred" ] && continue
1068 [ "${OPTARG}" == "microstack" ] && continue
1069 [ "${OPTARG}" == "overlay" ] && continue
1070 [ "${OPTARG}" == "only-vca" ] && continue
1071 [ "${OPTARG}" == "small-profile" ] && continue
1072 [ "${OPTARG}" == "vca" ] && continue
1073 [ "${OPTARG}" == "ha" ] && continue
1074 [ "${OPTARG}" == "tag" ] && continue
1075 [ "${OPTARG}" == "registry" ] && continue
1076 [ "${OPTARG}" == "pla" ] && INSTALL_PLA
="y" && continue
1077 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA
="y" && continue
1078 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME
="true" && continue
1079 [ "${OPTARG}" == "nocachelxdimages" ] && continue
1080 [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES
="--cachelxdimages" && continue
1081 echo -e "Invalid option: '--$OPTARG'\n" >&2
1085 echo "Option -$OPTARG requires an argument" >&2
1089 echo -e "Invalid option: '-$OPTARG'\n" >&2
1104 source $OSM_DEVOPS/common
/all_funcs
1106 [ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
1107 [ -n "$SHOWOPTS" ] && dump_vars
&& exit 0
1109 # Uninstall if "--uninstall"
1110 if [ -n "$UNINSTALL" ]; then
1111 if [ -n "$CHARMED" ]; then
1112 ${OSM_DEVOPS}/installers
/charmed_uninstall.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@" || \
1113 FATAL_TRACK charmed_uninstall
"charmed_uninstall.sh failed"
1115 ${OSM_DEVOPS}/installers
/uninstall_osm.sh
"$@" || \
1116 FATAL_TRACK community_uninstall
"uninstall_osm.sh failed"
1122 # Charmed installation
1123 if [ -n "$CHARMED" ]; then
1124 export OSM_TRACK_INSTALLATION_ID
="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
1125 track start release
$RELEASE none none docker_tag
$OSM_DOCKER_TAG none none installation_type
$OSM_INSTALLATION_TYPE none none
1126 ${OSM_DEVOPS}/installers
/charmed_install.sh
--tag $OSM_DOCKER_TAG "$@" || \
1127 FATAL_TRACK charmed_install
"charmed_install.sh failed"
1128 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-12.0
-twelve/README2.txt
&> /dev
/null
1129 track end installation_type
$OSM_INSTALLATION_TYPE
1134 # Installation to Openstack
1135 if [ -n "$INSTALL_TO_OPENSTACK" ]; then
1136 install_to_openstack
$OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
1141 # Community installer
1142 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
1143 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD |
grep -q NONE
&& FATAL
"Incompatible option: -m NONE cannot be used with other -m options"
1144 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL
"Incompatible option: -m PLA cannot be used without --pla option"
1146 # if develop, we force master
1147 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID
="master"
1149 check_packages
"git wget curl tar snapd"
1151 sudo snap
install jq
1152 if [ -z "$OSM_DEVOPS" ]; then
1153 if [ -n "$TEST_INSTALLER" ]; then
1154 echo -e "\nUsing local devops repo for OSM installation"
1155 OSM_DEVOPS
="$(dirname $(realpath $(dirname $0)))"
1157 echo -e "\nCreating temporary dir for OSM installation"
1158 OSM_DEVOPS
="$(mktemp -d -q --tmpdir "installosm.XXXXXX
")"
1159 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1161 git clone https
://osm.etsi.org
/gerrit
/osm
/devops.git
$OSM_DEVOPS
1163 if [ -z "$COMMIT_ID" ]; then
1164 echo -e "\nGuessing the current stable release"
1165 LATEST_STABLE_DEVOPS
=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1166 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1168 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1169 COMMIT_ID
="tags/$LATEST_STABLE_DEVOPS"
1171 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1173 git
-C $OSM_DEVOPS checkout
$COMMIT_ID
1177 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR
="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1178 OSM_K8S_WORK_DIR
="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1179 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1180 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1182 #Installation starts here
1183 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-12.0
-twelve/README.txt
&> /dev
/null
1184 export OSM_TRACK_INSTALLATION_ID
="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"