services:
- elasticsearch:
- image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.2.3
- volumes:
- - ./elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- environment:
- ES_JAVA_OPTS: "-Xmx256m -Xms256m"
+ filebeat:
+ image: docker.elastic.co/beats/filebeat:${ELASTIC_VERSION:-6.4.2}
+ hostname: "{{.Node.Hostname}}-filebeat"
+ user: root
networks:
- elk
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ - /var/lib/docker/containers/:/var/lib/docker/containers/:ro
+ - ./filebeat.yml:/usr/share/filebeat/filebeat.yml
+ command: ["--strict.perms=false"]
- logstash:
- image: docker.elastic.co/logstash/logstash-oss:6.2.3
+ metricbeat:
+ image: docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION:-6.4.2}
+ user: root
+ deploy:
+ mode: global
volumes:
- - ./logstash.yml:/usr/share/logstash/config/logstash.yml:ro
- - ./logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro
+ - /proc:/hostfs/proc:ro
+ - /sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro
+ - /:/hostfs:ro
+ - ./metricbeat.yml:/usr/share/metricbeat/metricbeat.yml
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+ command: --strict.perms=false -e -c /usr/share/metricbeat/metricbeat.yml -system.hostfs=/hostfs # -e flag to log to stderr and disable syslog/file output
+ networks:
+ - elk
+ depends_on: ['elasticsearch', 'kibana']
+
+ elasticsearch:
+ image: docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION:-6.4.2}
+ volumes:
+ - ./elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
+ ports:
+ - "9200:9200"
environment:
- LS_JAVA_OPTS: "-Xmx256m -Xms256m"
+ ES_JAVA_OPTS: "-Xmx256m -Xms256m"
networks:
- elk
- depends_on:
- - elasticsearch
kibana:
- image: docker.elastic.co/kibana/kibana-oss:6.2.3
+ image: docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION:-6.4.2}
volumes:
- ./kibana.yml:/usr/share/kibana/config/kibana.yml:ro
ports:
- - "${OSM_ELK_PORTS:-5601:5601}"
+ - "5601:5601"
networks:
- elk
depends_on:
- elasticsearch
networks:
elk:
external:
- name: ${OSM_NETWORK:-netOSM}
+ name: ${OSM_NETWORK:-netosm}
cluster.name: "docker-cluster"
discovery.type: single-node
discovery.zen.minimum_master_nodes: 1
+bootstrap.memory_lock: true
--- /dev/null
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+######################## Filebeat Configuration ############################
+
+filebeat.inputs:
+#------------------------------ Docker input --------------------------------
+# Experimental: Docker input reads and parses `json-file` logs from Docker
+- type: docker
+ enabled: true
+ # Combine partial lines flagged by `json-file` format
+ #combine_partials: true
+
+ # Use this to read from all containers, replace * with a container id to read from one:
+ containers:
+ # stream: all # can be all, stdout or stderr
+ ids:
+ - '*'
+
+processors:
+- add_docker_metadata: ~
+#
+# The following example enriches each event with host metadata.
+#
+#processors:
+#- add_host_metadata:
+# netinfo.enabled: false
+#
+
+#-------------------------- Elasticsearch output -------------------------------
+output.elasticsearch:
+ # Boolean flag to enable or disable the output module.
+ #enabled: true
+
+ # Array of hosts to connect to.
+ # Scheme and port can be left out and will be set to the default (http and 9200)
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+ # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+ hosts: ["elasticsearch:9200"]
+
+
+logging.to_files: true
+++ /dev/null
-input {
- tcp {
- port => 5000
- }
- kafka {
- bootstrap_servers => 'kafka:9092'
- topics => ["alarm_response", "lcm_pm"]
- }
-}
-
-## Add your filters / logstash plugins configuration here
-
-output {
- elasticsearch {
- hosts => "elasticsearch:9200"
- }
-}
+++ /dev/null
----
-path.config: /usr/share/logstash/pipeline
-http.host: "0.0.0.0"
--- /dev/null
+metricbeat.modules:
+- module: docker
+ metricsets: ["container", "cpu", "diskio", "healthcheck", "info", "memory", "network"]
+ hosts: ["unix:///var/run/docker.sock"]
+ period: 30s
+ enabled: true
+- module: mongodb
+ hosts: ["mongo"]
+ period: 30s
+ enabled: true
+- module: mysql
+ metricsets: ["status"]
+ hosts: ["tcp(ro-db:3306)/"]
+ username: root
+ password: "YjkzMDkxMzJhNGJiNzA0YjFiZTI5MzYw"
+ period: 30s
+ enabled: true
+- module: kafka
+ metricsets: ["consumergroup", "partition"]
+ period: 30s
+ hosts: ["kafka:9092"]
+ enabled: true
+- module: system
+ enabled: true
+ period: 30s
+ metricsets:
+ - cpu # CPU usage
+ - load # CPU load averages
+ - memory # Memory usage
+ - network # Network IO
+ - process # Per process metrics
+ - process_summary # Process summary
+ - uptime # System Uptime
+ #- core # Per CPU core usage
+ - diskio # Disk IO
+ - filesystem # File system usage for each mountpoint
+ - fsstat # File system summary metrics
+ #- raid # Raid
+ #- socket # Sockets and connection info (linux only)
+ processes: ['.*']
+ cpu.metrics: ["percentages"] # The other available options are normalized_percentages and ticks.
+ core.metrics: ["percentages"] # The other available option is ticks.
+
+setup.dashboards.enabled: false
+
+setup.kibana.host: "kibana"
+setup.kibana.protocol: "http"
+setup.kibana.username: ""
+setup.kibana.password: ""
+
+fields:
+ env: dev
+
+output.elasticsearch:
+ hosts: ["elasticsearch:9200"]
+
+logging.level: debug
+logging.to_files: true
+logging.files:
+ path: /var/log/metricbeat
+ name: metricbeat
+ keepfiles: 7
+ permissions: 0644