Add Kafka charm

Change-Id: Ie1e55c79d5cab9bd51bed91f378abce37f31218b
Signed-off-by: sousaedu <eduardo.sousa@canonical.com>
diff --git a/installers/charm/kafka-k8s/reactive/kafka.py b/installers/charm/kafka-k8s/reactive/kafka.py
new file mode 100644
index 0000000..72ff5bc
--- /dev/null
+++ b/installers/charm/kafka-k8s/reactive/kafka.py
@@ -0,0 +1,139 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+from charms.layer.caas_base import pod_spec_set
+from charms.reactive import when, when_not, hook
+from charms.reactive import endpoint_from_flag
+from charms.reactive.flags import set_flag, clear_flag
+from charmhelpers.core.hookenv import log, metadata, config
+from charms import layer
+from charms.osm.k8s import get_service_ip
+
+
+@hook("upgrade-charm")
+@when("leadership.is_leader")
+def upgrade():
+    """Drop the configured flag on charm upgrade so the leader re-runs configure()."""
+    clear_flag("kafka-k8s.configured")
+
+
+@when("config.changed")
+@when("leadership.is_leader")
+def restart():
+    """Drop the configured flag on any config change so the pod spec is rebuilt."""
+    clear_flag("kafka-k8s.configured")
+
+
+@when_not("zookeeper.ready")
+@when("leadership.is_leader")
+def waiting_for_zookeeper():
+    """Report waiting status on the leader until the zookeeper relation is ready."""
+    layer.status.waiting("Waiting for Zookeeper to be ready")
+
+
+@when("zookeeper.ready")
+@when_not("kafka-k8s.configured")
+@when("leadership.is_leader")
+def configure():
+    """Build and apply the Kafka pod spec once zookeeper is available.
+
+    Derives the zookeeper connection URI from the relation data and the
+    charm config, renders the pod-spec template and submits it to Juju.
+    Runs only on the leader while "kafka-k8s.configured" is not set; the
+    flag is raised on success so this handler does not run again until
+    upgrade()/restart() clear it.
+    """
+    layer.status.maintenance("Configuring kafka container")
+    try:
+        zookeeper = endpoint_from_flag("zookeeper.ready")
+        zk_units = zookeeper.zookeepers()
+        # Only the first unit's relation data is inspected; its port is
+        # reused for every unit below (all units presumably share it).
+        zk_unit = zk_units[0]
+        if zk_unit["port"]:
+            cfg = config()
+            zookeeper_uri = ""
+            # Base name of the zookeeper pods; combined with the service
+            # name from config to form per-pod DNS names (looks like the
+            # k8s StatefulSet "<pod>-<i>.<service>" scheme — see below).
+            zk_pod_base_name = "zookeeper-k8s"
+            zk_service_name = cfg.get("zookeeper-service-name")
+            zk_num = cfg.get("zookeeper-units")
+            if zk_num == 1:
+                # Single unit: address it directly via relation host/port.
+                zookeeper_uri = "{}:{}".format(zk_unit["host"], zk_unit["port"])
+            else:
+                # Multiple units: build "zookeeper-k8s-0.<svc>:<port>,..."
+                for i in range(0, zk_num):
+                    if i:
+                        zookeeper_uri += ","
+                    zookeeper_uri += "{}-{}.{}:{}".format(
+                        zk_pod_base_name, i, zk_service_name, zk_unit["port"]
+                    )
+            spec = make_pod_spec(zookeeper_uri)
+            log("set pod spec:\n{}".format(spec))
+            pod_spec_set(spec)
+            set_flag("kafka-k8s.configured")
+        # NOTE(review): when zk_unit["port"] is falsy this returns silently
+        # and the unit stays in "maintenance" — confirm that is intended.
+    except Exception as e:
+        # Any failure surfaces as blocked status instead of crashing the hook.
+        layer.status.blocked("k8s spec failed to deploy: {}".format(e))
+
+
+@when("kafka-k8s.configured")
+def set_kafka_active():
+    """Report active status once the pod spec has been applied."""
+    layer.status.active("ready")
+
+
+@when("zookeeper.ready")
+@when_not("leadership.is_leader")
+def non_leaders_active():
+    """Mark non-leader units active once zookeeper is ready (only the leader configures)."""
+    layer.status.active("ready")
+
+
+@when("kafka-k8s.configured")
+@when("kafka.joined", "zookeeper.ready")
+def serve_client():
+    """Publish connection details to units related over the kafka interface.
+
+    Sends the kafka service IP and port plus the zookeeper unit list to
+    the remote side, then clears "kafka.joined" so this handler does not
+    resend until the interface layer raises the flag again.
+    """
+    layer.status.maintenance("Sending kafka configuration")
+    try:
+        kafka = endpoint_from_flag("kafka.joined")
+        zookeeper = endpoint_from_flag("zookeeper.ready")
+        if zookeeper and kafka:
+            # Cluster IP of the k8s service named "kafka".
+            service_ip = get_service_ip("kafka")
+            if service_ip:
+                kafka.send_connection(
+                    get_kafka_port(), service_ip,
+                )
+                kafka.send_zookeepers(zookeeper.zookeepers())
+                clear_flag("kafka.joined")
+    except Exception as e:
+        # Best effort: failure is logged, not fatal; the handler will run
+        # again on the next dispatch while the flags remain set.
+        log("Fail sending kafka configuration: {}".format(e))
+
+
+def make_pod_spec(zookeeper_uri):
+    """Render the Kubernetes pod specification for the Kafka container.
+
+    Args:
+        zookeeper_uri (str): Comma-separated "host:port" zookeeper entries.
+    Returns:
+        str: Pod specification YAML, rendered from the bundled template.
+    """
+
+    # NOTE(review): relative path assumes the hook executes with the charm
+    # directory as the working directory — confirm.
+    with open("reactive/spec_template.yaml") as spec_file:
+        pod_spec_template = spec_file.read()
+
+    md = metadata()
+    cfg = config()
+
+    data = {
+        "name": md.get("name"),
+        "docker_image": cfg.get("image"),
+        "port": get_kafka_port(),
+        "zookeeper_uri": zookeeper_uri,
+    }
+    # Merging the full config exposes every config key (e.g.
+    # "advertised-port", "kafka-units", "num-partitions") as a
+    # %-style placeholder in the template.
+    data.update(cfg)
+    return pod_spec_template % data
+
+
+def get_kafka_port():
+    """Return the Kafka port from the "advertised-port" config option."""
+    cfg = config()
+    return cfg.get("advertised-port")
diff --git a/installers/charm/kafka-k8s/reactive/spec_template.yaml b/installers/charm/kafka-k8s/reactive/spec_template.yaml
new file mode 100644
index 0000000..f083084
--- /dev/null
+++ b/installers/charm/kafka-k8s/reactive/spec_template.yaml
@@ -0,0 +1,140 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+version: 2
+containers:
+  - name: %(name)s
+    image: %(docker_image)s
+    ports:
+      - containerPort: %(advertised-port)s
+        protocol: TCP
+    config:
+      ALLOW_ANONYMOUS_LOGIN: 'yes'
+      ENABLE_AUTO_EXTEND: "true"
+      KAFKA_ADVERTISED_HOST_NAME: %(advertised-hostname)s
+      KAFKA_ADVERTISED_PORT: %(advertised-port)s
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
+      KAFKA_RESERVED_BROKER_MAX_ID: "999999999"
+    command:
+      - sh
+      - -c
+      - "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \
+        --override listeners=PLAINTEXT://:%(advertised-port)s \
+        --override zookeeper.connect=%(zookeeper_uri)s \
+        --override log.dir=/var/lib/kafka \
+        --override auto.create.topics.enable=true \
+        --override auto.leader.rebalance.enable=true \
+        --override background.threads=10 \
+        --override compression.type=producer \
+        --override delete.topic.enable=false \
+        --override leader.imbalance.check.interval.seconds=300 \
+        --override leader.imbalance.per.broker.percentage=10 \
+        --override log.flush.interval.messages=9223372036854775807 \
+        --override log.flush.offset.checkpoint.interval.ms=60000 \
+        --override log.flush.scheduler.interval.ms=9223372036854775807 \
+        --override log.retention.bytes=-1 \
+        --override log.retention.hours=168 \
+        --override log.roll.hours=168 \
+        --override log.roll.jitter.hours=0 \
+        --override log.segment.bytes=1073741824 \
+        --override log.segment.delete.delay.ms=60000 \
+        --override message.max.bytes=1000012 \
+        --override min.insync.replicas=1 \
+        --override num.io.threads=8 \
+        --override num.network.threads=%(kafka-units)s \
+        --override num.recovery.threads.per.data.dir=1 \
+        --override num.replica.fetchers=1 \
+        --override offset.metadata.max.bytes=4096 \
+        --override offsets.commit.required.acks=-1 \
+        --override offsets.commit.timeout.ms=5000 \
+        --override offsets.load.buffer.size=5242880 \
+        --override offsets.retention.check.interval.ms=600000 \
+        --override offsets.retention.minutes=1440 \
+        --override offsets.topic.compression.codec=0 \
+        --override offsets.topic.num.partitions=50 \
+        --override offsets.topic.replication.factor=%(kafka-units)s \
+        --override offsets.topic.segment.bytes=104857600 \
+        --override queued.max.requests=500 \
+        --override quota.consumer.default=9223372036854775807 \
+        --override quota.producer.default=9223372036854775807 \
+        --override replica.fetch.min.bytes=1 \
+        --override replica.fetch.wait.max.ms=500 \
+        --override replica.high.watermark.checkpoint.interval.ms=5000 \
+        --override replica.lag.time.max.ms=10000 \
+        --override replica.socket.receive.buffer.bytes=65536 \
+        --override replica.socket.timeout.ms=30000 \
+        --override request.timeout.ms=30000 \
+        --override socket.receive.buffer.bytes=102400 \
+        --override socket.request.max.bytes=104857600 \
+        --override socket.send.buffer.bytes=102400 \
+        --override unclean.leader.election.enable=true \
+        --override zookeeper.session.timeout.ms=6000 \
+        --override zookeeper.set.acl=false \
+        --override broker.id.generation.enable=true \
+        --override connections.max.idle.ms=600000 \
+        --override controlled.shutdown.enable=true \
+        --override controlled.shutdown.max.retries=3 \
+        --override controlled.shutdown.retry.backoff.ms=5000 \
+        --override controller.socket.timeout.ms=30000 \
+        --override default.replication.factor=1 \
+        --override fetch.purgatory.purge.interval.requests=1000 \
+        --override group.max.session.timeout.ms=300000 \
+        --override group.min.session.timeout.ms=6000 \
+        --override log.cleaner.backoff.ms=15000 \
+        --override log.cleaner.dedupe.buffer.size=134217728 \
+        --override log.cleaner.delete.retention.ms=86400000 \
+        --override log.cleaner.enable=true \
+        --override log.cleaner.io.buffer.load.factor=0.9 \
+        --override log.cleaner.io.buffer.size=524288 \
+        --override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \
+        --override log.cleaner.min.cleanable.ratio=0.5 \
+        --override log.cleaner.min.compaction.lag.ms=0 \
+        --override log.cleaner.threads=1 \
+        --override log.cleanup.policy=delete \
+        --override log.index.interval.bytes=4096 \
+        --override log.index.size.max.bytes=10485760 \
+        --override log.message.timestamp.difference.max.ms=9223372036854775807 \
+        --override log.message.timestamp.type=CreateTime \
+        --override log.preallocate=false \
+        --override log.retention.check.interval.ms=300000 \
+        --override max.connections.per.ip=2147483647 \
+        --override num.partitions=%(num-partitions)s \
+        --override producer.purgatory.purge.interval.requests=1000 \
+        --override replica.fetch.backoff.ms=1000 \
+        --override replica.fetch.max.bytes=1048576 \
+        --override replica.fetch.response.max.bytes=10485760 \
+        --override reserved.broker.max.id=1000 "
+    kubernetes:
+      readinessProbe:
+        tcpSocket:
+          port: %(advertised-port)s
+        timeoutSeconds: 5
+        periodSeconds: 5
+        initialDelaySeconds: 10
+      livenessProbe:
+        exec:
+          command:
+          - sh
+          - -c
+          - "/opt/kafka/bin/kafka-broker-api-versions.sh --bootstrap-server=localhost:%(advertised-port)s"
+        initialDelaySeconds: 60
+        timeoutSeconds: 10
+        periodSeconds: 5