<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<!-- *** APPENDERS *** -->
<!-- Also acts as a fallback appender for the KafkaAppender -->
<appender class="ch.qos.logback.core.ConsoleAppender" name="STDOUT">
<encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<provider class="net.logstash.logback.composite.loggingevent.ArgumentsJsonProvider" />
<timestamp />
<pattern>
<pattern>
{
"logger":"%logger",
"thread":"%thread",
"level": "%level",
"traceId": "%X{X-B3-TraceId:-}",
"spanId": "%X{X-B3-SpanId:-}",
"message":"%replace(%message){'null',''}",
"json_message":"#asJson{%message}",
"exception": "%ex"
}
</pattern>
</pattern>
</providers>
</encoder>
<target>System.out</target>
</appender>
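<!-- For illustration only (an assumption about the rendered output, not taken from this repository):
     a single INFO event passed through the encoder above would come out roughly as one line of JSON,
     with the timestamp provider contributing the @timestamp field, e.g.
     {"@timestamp":"2024-01-01T12:00:00.000Z","logger":"com.example.Main","thread":"main","level":"INFO",
      "traceId":"4bf92f3577b34da6","spanId":"00f067aa0ba902b7","message":"Started application",
      "json_message":"","exception":""}
-->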
<appender class="ch.qos.logback.core.ConsoleAppender" name="STDERR">
<encoder>
<pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
<target>System.err</target>
</appender>
<!-- This is the kafkaAppender -->
<!-- This example configuration is the least reliable under
failure conditions, but it will never block your application -->
<appender class="com.github.rahulsinghai.logback.kafka.KafkaAppender" name="very-relaxed-and-fast-kafka-appender">
<!-- Default encoder: a common `PatternLayoutEncoder` that encodes every log message as a UTF-8 encoded string -->
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
<!-- use async delivery. the application threads are not blocked by logging -->
<deliveryStrategy class="com.github.rahulsinghai.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
<!-- Alternative: block and wait indefinitely until the Kafka producer is able to send the message -->
<!--deliveryStrategy class="com.github.rahulsinghai.logback.kafka.delivery.BlockingDeliveryStrategy" >
<timeout>0</timeout>
</deliveryStrategy-->
<!-- Optional parameter to use a fixed partition -->
<!-- <partition>0</partition> -->
<!-- Optional parameter to include the log timestamp in the Kafka message -->
<!-- <appendTimestamp>true</appendTimestamp> -->
<!-- we don't care how the log messages will be partitioned -->
<keyingStrategy class="com.github.rahulsinghai.logback.kafka.keying.NoKeyKeyingStrategy"/>
<!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
<!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
<!-- bootstrap.servers is the only mandatory producerConfig -->
<producerConfig>bootstrap.servers=localhost:9092</producerConfig>
<!-- The number of acknowledgments the producer requires the leader to have received before considering a request complete -->
<!-- Default is all (1 in clients before Kafka 3.0); 0 -> don't wait for any broker to acknowledge receipt of a batch -->
<producerConfig>acks=0</producerConfig>
<!-- Default is 16384 bytes; producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition -->
<producerConfig>batch.size=32768</producerConfig>
<!-- Default is 0; wait up to 1000ms and collect log messages before sending them as a batch -->
<producerConfig>linger.ms=1000</producerConfig>
<!-- Default is 33554432 (32 MB); total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server the producer will block for max.block.ms after which it will throw an exception. -->
<producerConfig>buffer.memory=33554432</producerConfig>
<!-- Default is 60000 (1 minute); 0 -> even if the producer buffer runs full, do not block the application but start to drop messages -->
<producerConfig>max.block.ms=0</producerConfig>
<!-- Default is 30000 (30 seconds); the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. -->
<producerConfig>request.timeout.ms=5000</producerConfig>
<!-- Default is 2147483647; Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. -->
<producerConfig>retries=0</producerConfig>
<!-- Default is true since Kafka 3.0 (false in earlier clients); the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream -->
<producerConfig>enable.idempotence=false</producerConfig>
<!-- Default is 5; The maximum number of unacknowledged requests the client will send on a single connection before blocking. Note that if this config is set to be greater than 1 and enable.idempotence is set to false, there is a risk of message re-ordering after a failed send due to retries (i.e., if retries are enabled). -->
<producerConfig>max.in.flight.requests.per.connection=5</producerConfig>
<!-- Default is 120000 (2 minutes); An upper bound on the time to report success or failure after a call to send() returns. It should be greater than or equal to the sum of request.timeout.ms and linger.ms-->
<producerConfig>delivery.timeout.ms=7000</producerConfig>
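<!-- Sanity check on the values chosen above: 7000 (delivery.timeout.ms) >= 5000 (request.timeout.ms) + 1000 (linger.ms) -->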
<!-- define a client-id that you use to identify yourself against the kafka broker -->
<producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-kafka-appender</producerConfig>
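<!-- HOSTNAME and CONTEXT_NAME are context properties provided by logback itself; CONTEXT_NAME can be customised with a top-level <contextName> element -->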
<!-- Kerberized Kafka cluster -->
<clientJaasConfPath>/kafka_2.13-2.8.1/jaas.conf</clientJaasConfPath>
<kerb5ConfPath>/kafka_2.13-2.8.1/krb5.conf</kerb5ConfPath>
<producerConfig>security.protocol=SASL_PLAINTEXT</producerConfig>
<producerConfig>sasl.mechanism=GSSAPI</producerConfig>
<producerConfig>sasl.kerberos.service.name=kafka</producerConfig>
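<!-- A minimal jaas.conf for GSSAPI could look like the sketch below; the keytab path and principal
     are illustrative placeholders, not values shipped with this repository:
     KafkaClient {
       com.sun.security.auth.module.Krb5LoginModule required
       useKeyTab=true
       storeKey=true
       keyTab="/etc/security/keytabs/logging-client.keytab"
       principal="logging-client@EXAMPLE.COM";
     };
-->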
<topic>boring-logs</topic>
<!-- There is no fallback <appender-ref> here by default: if this appender cannot deliver, it drops its messages. -->
<!-- Uncommenting the line below adds a fallback appender for when Kafka is not available. -->
<!-- <appender-ref ref="STDOUT"/> -->
</appender>
<!-- This example configuration is more restrictive and tries to ensure that every message
is eventually delivered in order (as long as the logging application stays alive) -->
<appender class="com.github.rahulsinghai.logback.kafka.KafkaAppender"
name="very-restrictive-kafka-appender">
<appender-ref ref="STDERR"/>
<!-- block the logging application thread if the kafka appender cannot keep up with sending the log messages -->
<deliveryStrategy
class="com.github.rahulsinghai.logback.kafka.delivery.BlockingDeliveryStrategy">
<!-- wait indefinitely until the Kafka producer is able to send the message -->
<timeout>0</timeout>
</deliveryStrategy>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
<!-- ensure that every message sent by the executing host goes to the same partition -->
<keyingStrategy
class="com.github.rahulsinghai.logback.kafka.keying.HostNameKeyingStrategy"/>
<!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
<!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
<!-- bootstrap.servers is the only mandatory producerConfig -->
<producerConfig>bootstrap.servers=localhost:9092</producerConfig>
<!-- restrict the producer's total buffer memory to 8 MB (default is 32 MB) -->
<producerConfig>buffer.memory=8388608</producerConfig>
<!-- If the Kafka broker is not online when we try to log, block until it becomes available.
max.block.ms replaces metadata.fetch.timeout.ms, which was removed from the producer client long ago -->
<producerConfig>max.block.ms=99999999999</producerConfig>
<!-- define a client-id that you use to identify yourself against the kafka broker -->
<producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-restrictive</producerConfig>
<!-- use gzip to compress each batch of log messages. valid values: none, gzip, snappy, lz4, zstd -->
<producerConfig>compression.type=gzip</producerConfig>
<!-- Every log message that cannot be sent to Kafka is logged to STDERR via the fallback appender-ref above -->
<topic>important-logs</topic>
</appender>
<!-- Logs asynchronously. It acts solely as an event dispatcher and must therefore reference another appender in order to do anything useful. -->
<!-- Drops events of level TRACE, DEBUG and INFO when its queue is nearly full -->
<appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
<!-- discardingThreshold is an absolute number of free queue slots, not a percentage: with the value 20,
events of level TRACE, DEBUG and INFO are dropped once fewer than 20 of the 256 slots (about 8% of
capacity) remain, keeping only events of level WARN and ERROR. The logback default is queueSize/5 -->
<discardingThreshold>20</discardingThreshold>
<queueSize>256</queueSize>
<!-- if neverBlock is set to true, the async appender discards messages when its internal queue is full -->
<neverBlock>true</neverBlock>
<!-- Mandatory: AsyncAppender requires another appender-ref to which logs will be forwarded asynchronously -->
<appender-ref ref="very-relaxed-and-fast-kafka-appender" />
</appender>
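<!-- Worked example: to discard at the conventional 80%-full mark instead, set discardingThreshold to 256/5 = 51 free slots -->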
<!-- *** LOGGERS *** -->
<!-- All logs generated by the Main class go to the KafkaAppender; with additivity=false they are not forwarded to the parent/root logger -->
<!-- If these logs should also appear on the pod console, set additivity to true -->
<!-- Individual classes can be given their own loggers at debug level -->
<logger
name="io.github.rahulsinghai.logback.kafka.appender.example.Main" level="debug" additivity="false">
<appender-ref ref="ASYNC" />
</logger>
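<!-- A common precaution (an addition here, not from the original file): route the Kafka client's own
logs to a non-Kafka appender so they cannot feed back into the Kafka appenders, e.g.: -->
<!--
<logger name="org.apache.kafka" level="warn" additivity="false">
    <appender-ref ref="STDOUT"/>
</logger>
-->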
<!-- Don't set the root level below "info" when a Kafka appender is attached: the Kafka client itself logs at debug level, which could feed its own messages back into the appender. For other appenders, you can set a lower log level here: -->
<root level="info">
<appender-ref ref="STDOUT"/>
<appender-ref ref="very-restrictive-kafka-appender"/>
</root>
</configuration>