Skip to content

Instantly share code, notes, and snippets.

@zhiyu0729
Created September 3, 2018 08:40
Show Gist options
  • Select an option

  • Save zhiyu0729/156128f54118dc400c1d68e5cbbeb61b to your computer and use it in GitHub Desktop.

Select an option

Save zhiyu0729/156128f54118dc400c1d68e5cbbeb61b to your computer and use it in GitHub Desktop.
Kubernetes Fluentd → Elasticsearch: one index per namespace
# ServiceAccount used by the fluentd-es DaemonSet pods.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd-es
  namespace: kube-system
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
# Read-only access to namespaces and pods, needed by the
# kubernetes_metadata filter to enrich log records.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - ""
    resources:
      - "namespaces"
      - "pods"
    verbs:
      - "get"
      - "watch"
      - "list"
---
# Grants the fluentd-es ClusterRole to the fluentd-es ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: fluentd-es
    namespace: kube-system
    apiGroup: ""
roleRef:
  kind: ClusterRole
  name: fluentd-es
  # rbac.authorization.k8s.io/v1 requires the explicit API group here;
  # an empty string is rejected by validation.
  apiGroup: rbac.authorization.k8s.io
---
# Fluentd configuration, mounted into the DaemonSet pods at
# /etc/fluent/config.d. Each data key becomes one config file.
kind: ConfigMap
apiVersion: v1
metadata:
  name: fluentd-es-config-v0.1.5
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
data:
  system.conf: |-
    <system>
      root_dir /tmp/fluentd-buffers/
    </system>
  containers.input.conf: |-
    <match fluent.**>
      # this tells fluentd to not output its log on stdout
      @type null
    </match>
    # Tail container logs written by the container runtime under
    # /var/log/containers, parsing either docker JSON lines or the
    # CRI "time stream log" text format.
    <source>
      @id fluentd-containers.log
      @type tail
      path /var/log/containers/*.log
      pos_file /var/log/es-containers.log.pos
      tag raw.kubernetes.*
      read_from_head true
      <parse>
        @type multi_format
        <pattern>
          format json
          time_key time
          time_format %Y-%m-%dT%H:%M:%S.%NZ
        </pattern>
        <pattern>
          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
          time_format %Y-%m-%dT%H:%M:%S.%N%:z
        </pattern>
      </parse>
    </source>
    # Detect exceptions in the log output and forward them as one log entry.
    <match raw.kubernetes.**>
      @id raw.kubernetes
      @type detect_exceptions
      remove_tag_prefix raw
      message log
      stream stream
      multiline_flush_interval 5
      max_bytes 500000
      max_lines 1000
    </match>
    # filter log
    # <match kubernetes.var.log.containers.**kube-system**.log>
    #   @type null
    # </match>
    # Parse the "log" field as JSON where possible, merging its keys
    # into the record.
    <filter kubernetes.**>
      @type parser
      key_name log
      reserve_data true
      remove_key_name_field true
      <parse>
        @type json
      </parse>
    </filter>
  forward.input.conf: |-
    # Takes the messages sent over TCP
    <source>
      @type forward
    </source>
  output.conf: |-
    # Enriches records with Kubernetes metadata
    <filter kubernetes.**>
      @type kubernetes_metadata
    </filter>
    # <filter **>
    #   @type record_transformer
    #   enable_ruby
    #   <record>
    #     index_name mtlab-aws-${record['kubernetes']['namespace_name']}
    #   </record>
    # </filter>
    # want to log
    # <filter kubernetes.**>
    #   @type grep
    #   <regexp>
    #     key $.kubernetes.labels.fluentd
    #     pattern true
    #   </regexp>
    # </filter>
    # no want to log: drop records whose pod label fluentd=false
    <filter kubernetes.**>
      @type grep
      <exclude>
        key $.kubernetes.labels.fluentd
        pattern false
      </exclude>
    </filter>
    # Ship everything to Elasticsearch; elasticsearch_dynamic lets
    # logstash_prefix embed the record's namespace, giving one index
    # series per namespace.
    <match **>
      @id elasticsearch
      @type elasticsearch_dynamic
      @log_level info
      type_name fluentd
      include_tag_key true
      host 172.31.38.25
      port 9200
      # host "#{ENV['FLUENT_ELASTICSEARCH_HOST']}"
      # port "#{ENV['FLUENT_ELASTICSEARCH_PORT']}"
      # scheme "#{ENV['FLUENT_ELASTICSEARCH_SCHEME'] || 'http'}"
      # ssl_verify "#{ENV['FLUENT_ELASTICSEARCH_SSL_VERIFY'] || 'true'}"
      # user "#{ENV['FLUENT_ELASTICSEARCH_USER']}" # remove these lines if not needed
      # password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}" # remove these lines if not needed
      logstash_prefix newbis-${record['kubernetes']['namespace_name']}
      logstash_format true
      <buffer>
        @type file
        path /var/log/fluentd-buffers/kubernetes.system.buffer
        flush_mode interval
        retry_type exponential_backoff
        flush_thread_count 2
        flush_interval 5s
        retry_forever
        retry_max_interval 30
        chunk_limit_size 2M
        queue_limit_length 8
        overflow_action block
      </buffer>
    </match>
---
# Runs one fluentd-elasticsearch pod per node labelled
# beta.kubernetes.io/fluentd-ds-ready=true, tailing host log
# directories and shipping to Elasticsearch.
kind: DaemonSet
# extensions/v1beta1 was deprecated and removed in Kubernetes 1.16;
# apps/v1 is the stable DaemonSet API and requires spec.selector.
apiVersion: apps/v1
metadata:
  name: fluentd-es-v2.2.0
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    version: v2.2.0
spec:
  selector:
    matchLabels:
      k8s-app: fluentd-es
      version: v2.2.0
  template:
    metadata:
      # NOTE(review): removed export-only artifacts from the original
      # dump (bare "creationTimestamp:" and "templateGeneration: 22");
      # neither is valid input on create under apps/v1.
      labels:
        k8s-app: fluentd-es
        kubernetes.io/cluster-service: "true"
        version: v2.2.0
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
    spec:
      volumes:
        - name: varlog
          hostPath:
            path: /var/log
            type: ""
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
            type: ""
        # - name: newbis-docker
        #   hostPath:
        #     path: /data/newbis_docker/containers
        #     type: ""
        - name: libsystemddir
          hostPath:
            path: /usr/lib64
            type: ""
        - name: config-volume
          configMap:
            name: fluentd-es-config-v0.1.5
            defaultMode: 420
      containers:
        - name: fluentd-es
          image: k8s.gcr.io/fluentd-elasticsearch:v2.2.0
          env:
            - name: FLUENTD_ARGS
              value: --no-supervisor -qq
          resources:
            limits:
              cpu: "500m"
              memory: 1Gi
            requests:
              cpu: "0"
              memory: "0"
          volumeMounts:
            - name: varlog
              mountPath: /var/log
            - name: varlibdockercontainers
              readOnly: true
              mountPath: /var/lib/docker/containers
            # - name: newbis-docker
            #   readOnly: true
            #   mountPath: /data/newbis_docker/containers
            - name: libsystemddir
              readOnly: true
              mountPath: /host/lib
            - name: config-volume
              mountPath: /etc/fluent/config.d
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      nodeSelector:
        beta.kubernetes.io/fluentd-ds-ready: "true"
      serviceAccountName: fluentd-es
      serviceAccount: fluentd-es
      securityContext: {}
      # imagePullSecrets:
      #   - name: mtlab
      schedulerName: default-scheduler
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  revisionHistoryLimit: 5
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment