This fixes the logging in the Juju k8s code base,
which wasn't showing up in the LCM logs. The
logging now follows the same standard as the rest
of the LCM code base.
Change-Id: I569d2bc1e3c50e73ac9af129f1d59775b89cb3ca
Signed-off-by: Dominik Fleischmann <dominik.fleischmann@canonical.com>
(cherry picked from commit 2f2832cc214c058d3fce343cdd459ccddd770dad)
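
For context, a minimal sketch of the logging convention this change adopts. The class name, the injected-logger fallback, and the setup comment are illustrative assumptions; only the self.log.debug() calls mirror the diff below:

    import logging

    class ExampleConnector:
        # Sketch of LCM-style logging (assumed pattern, not the real class).
        def __init__(self, log=None):
            # Reuse an injected logger when one is provided, so messages
            # propagate to the handlers that feed the LCM logs; fall back
            # to a module-level logger otherwise (assumption).
            self.log = log or logging.getLogger(__name__)
            self.log.debug('Initializing K8S Juju connector')
            # ... connector setup would go here ...
            self.log.debug('K8S Juju connector initialized')

Unlike print(), which writes only to stdout, log.debug() routes through the configured logging handlers, which is why these messages were missing from the LCM logs before this change.
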
from juju.model import Model
from juju.errors import JujuAPIError, JujuError
from n2vc.k8s_conn import K8sConnector
import os
db: object,
kubectl_command: str = '/usr/bin/kubectl',
juju_command: str = '/usr/bin/juju',
on_update_db=None,
):
"""
- self.info('Initializing K8S Juju connector')
+ self.log.debug('Initializing K8S Juju connector')
self.authenticated = False
self.models = {}
- self.log = logging.getLogger(__name__)
self.juju_command = juju_command
self.juju_secret = ""
- self.info('K8S Juju connector initialized')
+ self.log.debug('K8S Juju connector initialized')
"""Initialization"""
async def init_env(
"""Initialization"""
async def init_env(
# Name the new k8s cloud
k8s_cloud = "k8s-{}".format(cluster_uuid)
- print("Adding k8s cloud {}".format(k8s_cloud))
+ self.log.debug("Adding k8s cloud {}".format(k8s_cloud))
await self.add_k8s(k8s_cloud, k8s_creds)
# Bootstrap Juju controller
- print("Bootstrapping...")
+ self.log.debug("Bootstrapping...")
await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer)
- print("Bootstrap done.")
+ self.log.debug("Bootstrap done.")
# Get the controller information
# Parse ~/.local/share/juju/controllers.yaml
# controllers.testing.api-endpoints|ca-cert|uuid
- print("Getting controller endpoints")
+ self.log.debug("Getting controller endpoints")
with open(os.path.expanduser(
"~/.local/share/juju/controllers.yaml"
)) as f:
# Parse ~/.local/share/juju/accounts
# controllers.testing.user|password
- print("Getting accounts")
+ self.log.debug("Getting accounts")
with open(os.path.expanduser(
"~/.local/share/juju/accounts.yaml"
)) as f:
self.juju_user = controller['user']
self.juju_secret = controller['password']
- print("user: {}".format(self.juju_user))
- print("secret: {}".format(self.juju_secret))
- print("endpoint: {}".format(self.juju_endpoint))
- print("ca-cert: {}".format(self.juju_ca_cert))
-
# raise Exception("EOL")
self.juju_public_key = None
# Store the cluster configuration so it
# can be used for subsequent calls
- print("Setting config")
+ self.log.debug("Setting config")
await self.set_config(cluster_uuid, config)
else:
# Destroy the model
namespace = self.get_namespace(cluster_uuid)
if await self.has_model(namespace):
- print("[reset] Destroying model")
+ self.log.debug("[reset] Destroying model")
await self.controller.destroy_model(
namespace,
destroy_storage=True
)
# Disconnect from the controller
- print("[reset] Disconnecting controller")
+ self.log.debug("[reset] Disconnecting controller")
await self.logout()
# Destroy the controller (via CLI)
- print("[reset] Destroying controller")
+ self.log.debug("[reset] Destroying controller")
await self.destroy_controller(cluster_uuid)
- print("[reset] Removing k8s cloud")
+ self.log.debug("[reset] Removing k8s cloud")
k8s_cloud = "k8s-{}".format(cluster_uuid)
await self.remove_cloud(k8s_cloud)
except Exception as ex:
- print("Caught exception during reset: {}".format(ex))
+ self.log.debug("Caught exception during reset: {}".format(ex))
"""
if not self.authenticated:
"""
if not self.authenticated:
- print("[install] Logging in to the controller")
+ self.log.debug("[install] Logging in to the controller")
await self.login(cluster_uuid)
##
# Raise named exception that the bundle could not be found
raise Exception()
- print("[install] deploying {}".format(bundle))
+ self.log.debug("[install] deploying {}".format(bundle))
await model.deploy(bundle)
# Get the application
if atomic:
# applications = model.applications
- print("[install] Applications: {}".format(model.applications))
+ self.log.debug("[install] Applications: {}".format(model.applications))
for name in model.applications:
- print("[install] Waiting for {} to settle".format(name))
+ self.log.debug("[install] Waiting for {} to settle".format(name))
application = model.applications[name]
try:
# It's not enough to wait for all units to be active;
# the application status needs to be active as well.
- print("Waiting for all units to be active...")
+ self.log.debug("Waiting for all units to be active...")
await model.block_until(
lambda: all(
unit.agent_status == 'idle'
- print("All units active.")
+ self.log.debug("All units active.")
except concurrent.futures._base.TimeoutError:
- print("[install] Timeout exceeded; resetting cluster")
+ self.log.debug("[install] Timeout exceeded; resetting cluster")
await self.reset(cluster_uuid)
return False
# Wait for the application to be active
if model.is_connected():
- print("[install] Disconnecting model")
+ self.log.debug("[install] Disconnecting model")
await model.disconnect()
return kdu_instance
"""
# TODO: This should be returned in an agreed-upon format
for name in bundle['applications']:
"""
# TODO: This should be returned in an agreed-upon format
for name in bundle['applications']:
- print(model.applications)
+ self.log.debug(model.applications)
application = model.applications[name]
+ self.log.debug(application)
path = bundle['applications'][name]['charm']
"""
cmd = [self.juju_command, "add-k8s", "--local", cloud_name]
"""
cmd = [self.juju_command, "add-k8s", "--local", cloud_name]
process = await asyncio.create_subprocess_exec(
*cmd,
return_code = process.returncode
- print("add-k8s return code: {}".format(return_code))
+ self.log.debug("add-k8s return code: {}".format(return_code))
if return_code > 0:
raise Exception(stderr)
"""
cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid, "--config", "controller-service-type=loadbalancer"]
"""
cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid, "--config", "controller-service-type=loadbalancer"]
- print("Bootstrapping controller {} in cloud {}".format(
+ self.log.debug("Bootstrapping controller {} in cloud {}".format(
cluster_uuid, cloud_name
))
self.authenticated = True
self.log.debug("JujuApi: Logged into controller")
except Exception as ex:
self.log.debug("Caught exception: {}".format(ex))
pass
else:
async def logout(self):
"""Logout of the Juju controller."""
+ self.log.debug("[logout]")
if not self.authenticated:
return False
for model in self.models:
- print("Logging out of model {}".format(model))
+ self.log.debug("Logging out of model {}".format(model))
await self.models[model].disconnect()
if self.controller:
cluster_config = "{}/{}.yaml".format(self.fs.path, cluster_uuid)
if not os.path.exists(cluster_config):
- print("Writing config to {}".format(cluster_config))
+ self.log.debug("Writing config to {}".format(cluster_config))
with open(cluster_config, 'w') as f:
f.write(yaml.dump(config, Dumper=yaml.Dumper))