Skip to content

Commit

Permalink
Issue #000 fix: Fixed logger appender for LMS-service (#2494)
Browse files Browse the repository at this point in the history
  • Loading branch information
Pradyumna authored Apr 29, 2021
1 parent 9eedae9 commit a64156e
Showing 1 changed file with 69 additions and 35 deletions.
104 changes: 69 additions & 35 deletions ansible/roles/stack-sunbird/templates/lms-service_logback.xml
Original file line number Diff line number Diff line change
Expand Up @@ -15,21 +15,70 @@
<appender-ref ref="STDOUT" />
</appender>

<appender name="queryLoggerAppender" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="net.logstash.logback.encoder.LogstashEncoder">
<layout class="ch.qos.logback.contrib.json.classic.JsonLayout">
<timestampFormat>yyyy-MM-dd'T'HH:mm:ss.SSSX</timestampFormat>
<timestampFormatTimezoneId>Etc/UTC</timestampFormatTimezoneId>
<fieldNames>
<timestamp>timestamp</timestamp>
<message>msg</message>
<logger>lname</logger>
<thread>tname</thread>
<levelValue>[ignore]</levelValue>
<version>[ignore]</version>
</fieldNames>
</layout>

<logger name="play" level="INFO" />
<logger name="defaultLogger" level="INFO" />
<!-- Telemetry Loggers-->

<root level="INFO">
<appender-ref ref="ASYNCSTDOUT" />
</root>


<appender name="kafka-appender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%msg</pattern>
</encoder>

<topic>${sunbird_env_name}.telemetry.raw</topic>
<!-- NoKeyKeyingStrategy: records are sent without a key, so the broker spreads them across partitions (no per-host partition affinity) -->
<keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy" />
<!-- AsynchronousDeliveryStrategy: hands records to the Kafka producer without blocking the logging thread -->
<deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />

<!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
<!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
<!-- bootstrap.servers is the only mandatory producerConfig -->
<producerConfig>bootstrap.servers=${kafka_urls}</producerConfig>
<!-- fire-and-forget: don't wait for a broker to ack the reception of a batch (log events may be lost on broker failure) -->
<producerConfig>acks=0</producerConfig>
<!-- wait up to 15000ms (15s) and collect log messages before sending them as a batch -->
<producerConfig>linger.ms=15000</producerConfig>
<!-- even if the producer buffer runs full, do not block the application but start to drop messages -->
<producerConfig>max.block.ms=0</producerConfig>
<!-- define a client-id that you use to identify yourself against the kafka broker -->
<producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>

<!-- there is no fallback <appender-ref>. If this appender cannot deliver, it will drop its messages. -->

</appender>

<appender name="query-kafka-appender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%msg</pattern>
</encoder>

<topic>${sunbird_env_name}.db.query.events</topic>
<!-- NoKeyKeyingStrategy: records are sent without a key, so the broker spreads them across partitions (no per-host partition affinity) -->
<keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy" />
<!-- AsynchronousDeliveryStrategy: hands records to the Kafka producer without blocking the logging thread -->
<deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />

<!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
<!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
<!-- bootstrap.servers is the only mandatory producerConfig -->
<producerConfig>bootstrap.servers=${kafka_urls}</producerConfig>
<!-- fire-and-forget: don't wait for a broker to ack the reception of a batch (log events may be lost on broker failure) -->
<producerConfig>acks=0</producerConfig>
<!-- wait up to 15000ms (15s) and collect log messages before sending them as a batch -->
<producerConfig>linger.ms=15000</producerConfig>
<!-- even if the producer buffer runs full, do not block the application but start to drop messages -->
<producerConfig>max.block.ms=0</producerConfig>
<!-- define a client-id that you use to identify yourself against the kafka broker -->
<producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>

<!-- there is no fallback <appender-ref>. If this appender cannot deliver, it will drop its messages. -->

</appender>

<appender name="defaultLoggerAppender" class="ch.qos.logback.core.ConsoleAppender">
Expand Down Expand Up @@ -59,32 +108,17 @@
<encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder"/>
</appender>

<logger name="org.sunbird" level="INFO">
<appender-ref ref="customLoggerAppender"/>
</logger>

<logger name="org.sunbird" level="DEBUG">
<appender-ref ref="customLoggerAppender"/>
<logger name="TelemetryEventLogger" level="INFO">
<appender-ref ref="kafka-appender" />
</logger>

<logger name="org.sunbird" level="WARN">
<appender-ref ref="customLoggerAppender"/>
<logger name="queryLogger" level="DEBUG">
<appender-ref ref="query-kafka-appender" />
</logger>

<logger name="org.sunbird" level="ERROR">
<logger name="org.sunbird" level="INFO">
<appender-ref ref="customLoggerAppender"/>
</logger>

<logger name="play" level="INFO" />
<logger name="defaultLogger" level="INFO" />
<!-- Telemetry Loggers-->
<logger name="TelemetryEventLogger" level="INFO" />
<logger name="queryLogger" level="DEBUG">
<appender-ref ref="queryLoggerAppender" />
</logger>

<root level="INFO">
<appender-ref ref="ASYNCSTDOUT" />
</root>


</configuration>

0 comments on commit a64156e

Please sign in to comment.