From 4bfe6f1fcda524329fa0997b853ef48791d6d7db Mon Sep 17 00:00:00 2001 From: Ruslan Gainutdinov Date: Fri, 10 May 2024 11:28:34 +0300 Subject: [PATCH] Version 6.0.0 compatible with Graylog 6.0.0 --- pom.xml | 24 +- run-graylog | 8 +- run-graylog-mac | 12 +- sample-graylog.conf | 607 ++++++++++++------ .../graylog2/plugin/SyslogOutput.java | 4 +- .../graylog2/plugin/test/TestFullSender.java | 209 +++--- .../graylog2/plugin/test/TestTransparent.java | 322 +++++----- .../graylog2/plugin/TestMessageFactory.java | 7 + 8 files changed, 705 insertions(+), 488 deletions(-) create mode 100644 src/test/java/org/graylog2/plugin/TestMessageFactory.java diff --git a/pom.xml b/pom.xml index 7204032..fdca7d6 100644 --- a/pom.xml +++ b/pom.xml @@ -22,8 +22,9 @@ true true true - 4.2.6 - 0.9.60 + 6.0.0 + 0.9.61 + 7.0.0 @@ -51,6 +52,25 @@ 1.7.21 provided + + com.google.inject + guice + ${guice.version} + provided + + + com.google.inject.extensions + guice-assistedinject + ${guice.version} + provided + + + + org.graylog2.repackaged + uuid + 3.2.1 + provided + diff --git a/run-graylog b/run-graylog index 54ee821..3b941ec 100755 --- a/run-graylog +++ b/run-graylog @@ -1,6 +1,6 @@ #!/bin/bash HERE=$PWD -GL=~/Downloads/graylog-4.0.8 +GL=~/Downloads/graylog-6.0.0 TT=$GL/tmp mkdir -p $TT sudo umount $TT @@ -9,7 +9,7 @@ export JAVA_OPTS="-Djava.io.tmpdir=$TT" rm -Rf $GL/data mkdir -p $GL/data mvn package -DskipTests -cp target/graylog-output-syslog-4.0.8.jar $GL/plugin +cp target/graylog-output-syslog-6.0.0.jar $GL/plugin export GRAYLOG_CONF=$GL/graylog.conf sudo sysctl -w vm.max_map_count=262144 @@ -18,12 +18,12 @@ docker run --name elastic -p 9200:9200 -e "discovery.type=single-node" \ -e "cluster.routing.allocation.disk.threshold_enabled=false" \ -d elasticsearch:7.10.1 docker rm -f mongo -docker run --name mongo -p 27017:27017 -d mongo:3.6 +docker run --name mongo -p 27017:27017 -d mongo:5.0 docker start elastic docker start mongo sleep 10 $GL/bin/graylogctl run ## Run two consoles 
additionally: -## docker run -it -p 514:514/udp -p 514:514/tcp -p 601:601 --name syslog-ng balabit/syslog-ng:latest +## docker run -it -p 514:514/udp -p 514:514/tcp -p 601:601 --name syslog-ng balabit/syslog-ng:latest ## docker exec syslog-ng tail -f /var/log/messages \ No newline at end of file diff --git a/run-graylog-mac b/run-graylog-mac index f900390..cebae60 100755 --- a/run-graylog-mac +++ b/run-graylog-mac @@ -1,10 +1,12 @@ #!/bin/bash +set -e HERE=$PWD -VER=4.2.6 -if [ ! -f graylog-$VER.tgz ]; then - curl -O graylog-$VER.tgz $URL -fi +VER=6.0.0 URL=https://downloads.graylog.org/releases/graylog/graylog-$VER.tgz +if [ ! -f "graylog-$VER.tgz" ]; then + echo "Downloading $URL" + curl -L -o graylog-$VER.tgz $URL +fi if [ ! -d graylog-$VER ]; then tar -xzvf graylog-$VER.tgz fi @@ -27,7 +29,7 @@ docker run --name elastic -p 9200:9200 -e "discovery.type=single-node" \ -d elasticsearch:7.10.1 docker rm -f mongo -docker run --name mongo -p 27017:27017 -d mongo:3.6 +docker run --name mongo -p 27017:27017 -d mongo:5.0 docker start elastic docker start mongo sleep 10 diff --git a/sample-graylog.conf b/sample-graylog.conf index 3406107..0a38a40 100644 --- a/sample-graylog.conf +++ b/sample-graylog.conf @@ -6,7 +6,7 @@ # Characters that cannot be directly represented in this encoding can be written using Unicode escapes # as defined in https://docs.oracle.com/javase/specs/jls/se8/html/jls-3.html#jls-3.3, using the \u prefix. # For example, \u002c. -# +# # * Entries are generally expected to be a single line of the form, one of the following: # # propertyName=propertyValue @@ -14,7 +14,7 @@ # # * White space that appears between the property name and property value is ignored, # so the following are equivalent: -# +# # name=Stephen # name = Stephen # @@ -34,17 +34,17 @@ # Los Angeles # # This is equivalent to targetCities=Detroit,Chicago,Los Angeles (white space at the beginning of lines is ignored). 
-# +# # * The characters newline, carriage return, and tab can be inserted with characters \n, \r, and \t, respectively. -# +# # * The backslash character must be escaped as a double backslash. For example: -# +# # path=c:\\docs\\doc1 # # If you are running more than one instances of Graylog server you have to select one of these -# instances as master. The master will perform some periodical tasks that non-masters won't perform. -is_master = true +# instances as leader. The leader will perform some periodical tasks that non-leaders won't perform. +is_leader = true # The auto-generated node ID will be stored in this file and read after restarts. It is a good idea # to use an absolute file path here if you are starting Graylog server from init scripts or similar. @@ -52,10 +52,12 @@ node_id_file = ./node-id # You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters. # Generate one by using for example: pwgen -N 1 -s 96 +# ATTENTION: This value must be the same on all Graylog nodes in the cluster. +# Changing this value after installation will render all user sessions and encrypted values in the database invalid. (e.g. encrypted access tokens) password_secret = Dk3OjYXJQFFXXsKLTkSqzii4otbpZjsWo54O5ItU7pZ0fyoMeht1byGhOW8cRUNxL7tat6NeCrHELiyLPnD1InYIty4DCu7b # The default root user is named 'admin' -#root_username = admin +root_username = admin # You MUST specify a hash password for the root user (which you only need to initially set up the # system and in case you lose connectivity to your authentication backend) @@ -76,95 +78,114 @@ root_password_sha2 = a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a # Default is UTC #root_timezone = UTC -# Set plugin directory here (relative or absolute) -plugin_dir = plugin - -# REST API listen URI. Must be reachable by other Graylog server nodes if you run a cluster. 
-# When using Graylog Collectors, this URI will be used to receive heartbeat messages and must be accessible for all collectors. -rest_listen_uri = http://127.0.0.1:9000/api/ - -# REST API transport address. Defaults to the value of rest_listen_uri. Exception: If rest_listen_uri -# is set to a wildcard IP address (0.0.0.0) the first non-loopback IPv4 system address is used. -# If set, this will be promoted in the cluster discovery APIs, so other nodes may try to connect on -# this address and it is used to generate URLs addressing entities in the REST API. (see rest_listen_uri) -# You will need to define this, if your Graylog server is running behind a HTTP proxy that is rewriting -# the scheme, host name or URI. -# This must not contain a wildcard address (0.0.0.0). -#rest_transport_uri = http://192.168.1.1:9000/api/ - -# Enable CORS headers for REST API. This is necessary for JS-clients accessing the server directly. -# If these are disabled, modern browsers will not be able to retrieve resources from the server. -# This is enabled by default. Uncomment the next line to disable it. -#rest_enable_cors = false - -# Enable GZIP support for REST API. This compresses API responses and therefore helps to reduce -# overall round trip times. This is enabled by default. Uncomment the next line to disable it. -#rest_enable_gzip = false +# Set the bin directory here (relative or absolute) +# This directory contains binaries that are used by the Graylog server. +# Default: bin +bin_dir = bin -# Enable HTTPS support for the REST API. This secures the communication with the REST API with -# TLS to prevent request forgery and eavesdropping. This is disabled by default. Uncomment the -# next line to enable it. -#rest_enable_tls = true +# Set the data directory here (relative or absolute) +# This directory is used to store Graylog server state. +data_dir = data -# The X.509 certificate chain file in PEM format to use for securing the REST API. 
-#rest_tls_cert_file = /path/to/graylog.crt - -# The PKCS#8 private key file in PEM format to use for securing the REST API. -#rest_tls_key_file = /path/to/graylog.key +# Set plugin directory here (relative or absolute) +plugin_dir = plugin -# The password to unlock the private key used for securing the REST API. -#rest_tls_key_password = secret +############### +# HTTP settings +############### -# The maximum size of the HTTP request headers in bytes. -#rest_max_header_size = 8192 +#### HTTP bind address +# +# The network interface used by the Graylog HTTP interface. +# +# This network interface must be accessible by all Graylog nodes in the cluster and by all clients +# using the Graylog web interface. +# +# If the port is omitted, Graylog will use port 9000 by default. +# +# Default: 127.0.0.1:9000 +http_bind_address = 127.0.0.1:9000 +#http_bind_address = [2001:db8::1]:9000 -# The size of the thread pool used exclusively for serving the REST API. -#rest_thread_pool_size = 16 +#### HTTP publish URI +# +# The HTTP URI of this Graylog node which is used to communicate with the other Graylog nodes in the cluster and by all +# clients using the Graylog web interface. +# +# The URI will be published in the cluster discovery APIs, so that other Graylog nodes will be able to find and connect to this Graylog node. +# +# This configuration setting has to be used if this Graylog node is available on another network interface than $http_bind_address, +# for example if the machine has multiple network interfaces or is behind a NAT gateway. +# +# If $http_bind_address contains a wildcard IPv4 address (0.0.0.0), the first non-loopback IPv4 address of this machine will be used. +# This configuration setting *must not* contain a wildcard address! +# +# Default: http://$http_bind_address/ +#http_publish_uri = http://192.168.1.1:9000/ -# Comma separated list of trusted proxies that are allowed to set the client address with X-Forwarded-For -# header. May be subnets, or hosts. 
-#trusted_proxies = 127.0.0.1/32, 0:0:0:0:0:0:0:1/128 +#### External Graylog URI +# +# The public URI of Graylog which will be used by the Graylog web interface to communicate with the Graylog REST API. +# +# The external Graylog URI usually has to be specified, if Graylog is running behind a reverse proxy or load-balancer +# and it will be used to generate URLs addressing entities in the Graylog REST API (see $http_bind_address). +# +# When using Graylog Collector, this URI will be used to receive heartbeat messages and must be accessible for all collectors. +# +# This setting can be overridden on a per-request basis with the "X-Graylog-Server-URL" HTTP request header. +# +# Default: $http_publish_uri +#http_external_uri = -# Enable the embedded Graylog web interface. -# Default: true -#web_enable = false +#### Enable CORS headers for HTTP interface +# +# This allows browsers to make Cross-Origin requests from any origin. +# This is disabled for security reasons and typically only needed if running graylog +# with a separate server for frontend development. +# +# Default: false +#http_enable_cors = false -# Web interface listen URI. -# Configuring a path for the URI here effectively prefixes all URIs in the web interface. This is a replacement -# for the application.context configuration parameter in pre-2.0 versions of the Graylog web interface. -#web_listen_uri = http://127.0.0.1:9000/ +#### Enable GZIP support for HTTP interface +# +# This compresses API responses and therefore helps to reduce +# overall round trip times. This is enabled by default. Uncomment the next line to disable it. +#http_enable_gzip = false -# Web interface endpoint URI. This setting can be overriden on a per-request basis with the X-Graylog-Server-URL header. -# Default: $rest_transport_uri -#web_endpoint_uri = +# The maximum size of the HTTP request headers in bytes. +#http_max_header_size = 8192 -# Enable CORS headers for the web interface. 
This is necessary for JS-clients accessing the server directly. -# If these are disabled, modern browsers will not be able to retrieve resources from the server. -#web_enable_cors = false +# The size of the thread pool used exclusively for serving the HTTP interface. +#http_thread_pool_size = 64 -# Enable/disable GZIP support for the web interface. This compresses HTTP responses and therefore helps to reduce -# overall round trip times. This is enabled by default. Uncomment the next line to disable it. -#web_enable_gzip = false +################ +# HTTPS settings +################ -# Enable HTTPS support for the web interface. This secures the communication of the web browser with the web interface -# using TLS to prevent request forgery and eavesdropping. -# This is disabled by default. Uncomment the next line to enable it and see the other related configuration settings. -#web_enable_tls = true +#### Enable HTTPS support for the HTTP interface +# +# This secures the communication with the HTTP interface with TLS to prevent request forgery and eavesdropping. +# +# Default: false +#http_enable_tls = true -# The X.509 certificate chain file in PEM format to use for securing the web interface. -#web_tls_cert_file = /path/to/graylog-web.crt +# The X.509 certificate chain file in PEM format to use for securing the HTTP interface. +#http_tls_cert_file = /path/to/graylog.crt -# The PKCS#8 private key file in PEM format to use for securing the web interface. -#web_tls_key_file = /path/to/graylog-web.key +# The PKCS#8 private key file in PEM format to use for securing the HTTP interface. +#http_tls_key_file = /path/to/graylog.key -# The password to unlock the private key used for securing the web interface. -#web_tls_key_password = secret +# The password to unlock the private key used for securing the HTTP interface. +#http_tls_key_password = secret -# The maximum size of the HTTP request headers in bytes. 
-#web_max_header_size = 8192 +# If set to "true", Graylog will periodically investigate indices to figure out which fields are used in which streams. +# It will make field list in Graylog interface show only fields used in selected streams, but can decrease system performance, +# especially on systems with great number of streams and fields. +stream_aware_field_types=false -# The size of the thread pool used exclusively for serving the web interface. -#web_thread_pool_size = 16 +# Comma separated list of trusted proxies that are allowed to set the client address with X-Forwarded-For +# header. May be subnets, or hosts. +#trusted_proxies = 127.0.0.1/32, 0:0:0:0:0:0:0:1/128 # List of Elasticsearch hosts Graylog should connect to. # Need to be specified as a comma-separated list of valid URIs for the http ports of your elasticsearch nodes. @@ -172,14 +193,25 @@ rest_listen_uri = http://127.0.0.1:9000/api/ # requires authentication. # # Default: http://127.0.0.1:9200 -#elasticsearch_hosts = http://node1:9200,http://user:password@node2:19200 +#elasticsearch_hosts = http://node1:9200,http://user:password@node2:9200 + +# Maximum number of attempts to connect to elasticsearch on boot for the version probe. +# +# Default: 0, retry indefinitely with the given delay until a connection could be established +#elasticsearch_version_probe_attempts = 5 -# Maximum amount of time to wait for successfull connection to Elasticsearch HTTP port. +# Waiting time in between connection attempts for elasticsearch_version_probe_attempts +# +# Default: 5s +#elasticsearch_version_probe_delay = 5s + +# Maximum amount of time to wait for successful connection to Elasticsearch HTTP port. # # Default: 10 Seconds #elasticsearch_connect_timeout = 10s # Maximum amount of time to wait for reading back a response from an Elasticsearch server. +# (e. g. 
during search, index creation, or index time-range calculations) # # Default: 60 seconds #elasticsearch_socket_timeout = 60s @@ -192,14 +224,14 @@ rest_listen_uri = http://127.0.0.1:9000/api/ # Maximum number of total connections to Elasticsearch. # -# Default: 20 -#elasticsearch_max_total_connections = 20 +# Default: 200 +#elasticsearch_max_total_connections = 200 # Maximum number of total connections per Elasticsearch route (normally this means per # elasticsearch server). # -# Default: 2 -#elasticsearch_max_total_connections_per_route = 2 +# Default: 20 +#elasticsearch_max_total_connections_per_route = 20 # Maximum number of times Graylog will retry failed requests to Elasticsearch. # @@ -225,37 +257,106 @@ rest_listen_uri = http://127.0.0.1:9000/api/ # Default: 30s # elasticsearch_discovery_frequency = 30s +# Set the default scheme when connecting to Elasticsearch discovered nodes +# +# Default: http (available options: http, https) +#elasticsearch_discovery_default_scheme = http + # Enable payload compression for Elasticsearch requests. # # Default: false #elasticsearch_compression_enabled = true -# Graylog will use multiple indices to store documents in. You can configured the strategy it uses to determine -# when to rotate the currently active write index. -# It supports multiple rotation strategies: +# Enable use of "Expect: 100-continue" Header for Elasticsearch index requests. +# If this is disabled, Graylog cannot properly handle HTTP 413 Request Entity Too Large errors. +# +# Default: true +#elasticsearch_use_expect_continue = true + +# Graylog uses Index Sets to manage settings for groups of indices. The default options for index sets are configurable +# for each index set in Graylog under System > Configuration > Index Set Defaults. +# The following settings are used to initialize in-database defaults on the first Graylog server startup. +# Specify these values if you want the Graylog server and indices to start with specific settings. 
+ +# The prefix for the Default Graylog index set. +# +#elasticsearch_index_prefix = graylog + +# The name of the index template for the Default Graylog index set. +# +#elasticsearch_template_name = graylog-internal + +# The prefix for the for graylog event indices. +# +#default_events_index_prefix = gl-events + +# The prefix for graylog system event indices. +# +#default_system_events_index_prefix = gl-system-events + +# Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea. +# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom +# Elasticsearch documentation: https://www.elastic.co/guide/en/elasticsearch/reference/2.3/analysis.html +# Note that this setting only takes effect on newly created indices. +# +#elasticsearch_analyzer = standard + +# How many Elasticsearch shards and replicas should be used per index? +# +#elasticsearch_shards = 1 +#elasticsearch_replicas = 0 + +# Maximum number of attempts to connect to datanode on boot. +# Default: 0, retry indefinitely with the given delay until a connection could be established +#datanode_startup_connection_attempts = 5 + +# Waiting time in between connection attempts for datanode_startup_connection_attempts +# +# Default: 5s +# datanode_startup_connection_delay = 5s + +# Disable the optimization of Elasticsearch indices after index cycling. This may take some load from Elasticsearch +# on heavily used systems with large indices, but it will decrease search performance. The default is to optimize +# cycled indices. +# +#disable_index_optimization = true + +# Optimize the index down to <= index_optimization_max_num_segments. A higher number may take some load from Elasticsearch +# on heavily used systems with large indices, but it will decrease search performance. The default is 1. 
+# +#index_optimization_max_num_segments = 1 + +# Time interval to trigger a full refresh of the index field types for all indexes. This will query ES for all indexes +# and populate any missing field type information to the database. +# +#index_field_type_periodical_full_refresh_interval = 5m + +# You can configure the default strategy used to determine when to rotate the currently active write index. +# Multiple rotation strategies are supported, the default being "time-size-optimizing": +# - "time-size-optimizing" tries to rotate daily, while focussing on optimal sized shards. +# The global default values can be configured with +# "time_size_optimizing_retention_min_lifetime" and "time_size_optimizing_retention_max_lifetime". # - "count" of messages per index, use elasticsearch_max_docs_per_index below to configure # - "size" per index, use elasticsearch_max_size_per_index below to configure -# valid values are "count", "size" and "time", default is "count" +# - "time" interval between index rotations, use elasticsearch_max_time_per_index to configure +# A strategy may be disabled by specifying the optional enabled_index_rotation_strategies list and excluding that strategy. # -# ATTENTION: These settings have been moved to the database in 2.0. When you upgrade, make sure to set these -# to your previous 1.x settings so they will be migrated to the database! -rotation_strategy = count +#enabled_index_rotation_strategies = count,size,time,time-size-optimizing + +# The default index rotation strategy to use. +#rotation_strategy = time-size-optimizing # (Approximate) maximum number of documents in an Elasticsearch index before a new index # is being created, also see no_retention and elasticsearch_max_number_of_indices. # Configure this if you used 'rotation_strategy = count' above. # -# ATTENTION: These settings have been moved to the database in 2.0. When you upgrade, make sure to set these -# to your previous 1.x settings so they will be migrated to the database! 
-elasticsearch_max_docs_per_index = 20000000 +#elasticsearch_max_docs_per_index = 20000000 # (Approximate) maximum size in bytes per Elasticsearch index on disk before a new index is being created, also see -# no_retention and elasticsearch_max_number_of_indices. Default is 1GB. +# no_retention and elasticsearch_max_number_of_indices. Default is 30GB. # Configure this if you used 'rotation_strategy = size' above. # -# ATTENTION: These settings have been moved to the database in 2.0. When you upgrade, make sure to set these -# to your previous 1.x settings so they will be migrated to the database! -#elasticsearch_max_size_per_index = 1073741824 +#elasticsearch_max_size_per_index = 32212254720 # (Approximate) maximum time before a new Elasticsearch index is being created, also see # no_retention and elasticsearch_max_number_of_indices. Default is 1 day. @@ -268,72 +369,67 @@ elasticsearch_max_docs_per_index = 20000000 # 12h = 12 hours # Permitted suffixes are: d for day, h for hour, m for minute, s for second. # -# ATTENTION: These settings have been moved to the database in 2.0. When you upgrade, make sure to set these -# to your previous 1.x settings so they will be migrated to the database! #elasticsearch_max_time_per_index = 1d -# Disable checking the version of Elasticsearch for being compatible with this Graylog release. -# WARNING: Using Graylog with unsupported and untested versions of Elasticsearch may lead to data loss! -#elasticsearch_disable_version_check = true +# Controls whether empty indices are rotated. Only applies to the "time" rotation_strategy. +# +#elasticsearch_rotate_empty_index_set=false -# Disable message retention on this node, i. e. disable Elasticsearch index rotation. -#no_retention = false +# Provides a hard upper limit for the retention period of any index set at configuration time. +# +# This setting is used to validate the value a user chooses for the maximum number of retained indexes, when configuring +# an index set. 
However, it is only in effect, when a time-based rotation strategy is chosen. +# +# If a rotation strategy other than time-based is selected and/or no value is provided for this setting, no upper limit +# for index retention will be enforced. This is also the default. + +# Default: none +#max_index_retention_period = P90d -# How many indices do you want to keep? +# Optional upper bound on elasticsearch_max_time_per_index # -# ATTENTION: These settings have been moved to the database in 2.0. When you upgrade, make sure to set these -# to your previous 1.x settings so they will be migrated to the database! -elasticsearch_max_number_of_indices = 20 +#elasticsearch_max_write_index_age = 1d + +# Disable message retention on this node, i. e. disable Elasticsearch index rotation. +#no_retention = false # Decide what happens with the oldest indices when the maximum number of indices is reached. -# The following strategies are availble: +# The following strategies are available: # - delete # Deletes the index completely (Default) # - close # Closes the index and hides it from the system. Can be re-opened later. # -# ATTENTION: These settings have been moved to the database in 2.0. When you upgrade, make sure to set these -# to your previous 1.x settings so they will be migrated to the database! -retention_strategy = delete +#retention_strategy = delete -# How many Elasticsearch shards and replicas should be used per index? Note that this only applies to newly created indices. -# ATTENTION: These settings have been moved to the database in Graylog 2.2.0. When you upgrade, make sure to set these -# to your previous settings so they will be migrated to the database! -elasticsearch_shards = 4 -elasticsearch_replicas = 0 +# This configuration list limits the retention strategies available for user configuration via the UI +# The following strategies can be disabled: +# - delete # Deletes the index completely (Default) +# - close # Closes the index and hides it from the system. 
Can be re-opened later. +# - none # No operation is performed. The index stays open. (Not recommended) +# WARNING: At least one strategy must be enabled. Be careful when extending this list on existing installations! +disabled_retention_strategies = none,close -# Prefix for all Elasticsearch indices and index aliases managed by Graylog. +# How many indices do you want to keep for the delete and close retention types? # -# ATTENTION: These settings have been moved to the database in Graylog 2.2.0. When you upgrade, make sure to set these -# to your previous settings so they will be migrated to the database! -elasticsearch_index_prefix = graylog +#elasticsearch_max_number_of_indices = 20 -# Name of the Elasticsearch index template used by Graylog to apply the mandatory index mapping. -# Default: graylog-internal +# Disable checking the version of Elasticsearch for being compatible with this Graylog release. +# WARNING: Using Graylog with unsupported and untested versions of Elasticsearch may lead to data loss! # -# ATTENTION: These settings have been moved to the database in Graylog 2.2.0. When you upgrade, make sure to set these -# to your previous settings so they will be migrated to the database! -#elasticsearch_template_name = graylog-internal +#elasticsearch_disable_version_check = true # Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only -# be enabled with care. See also: http://docs.graylog.org/en/2.1/pages/queries.html +# be enabled with care. See also: https://docs.graylog.org/docs/query-language allow_leading_wildcard_searches = false # Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and # should only be enabled after making sure your Elasticsearch cluster has enough memory. allow_highlighting = false -# Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea. 
-# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom -# Elasticsearch documentation: https://www.elastic.co/guide/en/elasticsearch/reference/2.3/analysis.html -# Note that this setting only takes effect on newly created indices. -# -# ATTENTION: These settings have been moved to the database in Graylog 2.2.0. When you upgrade, make sure to set these -# to your previous settings so they will be migrated to the database! -elasticsearch_analyzer = standard - -# Global request timeout for Elasticsearch requests (e. g. during search, index creation, or index time-range -# calculations) based on a best-effort to restrict the runtime of Elasticsearch operations. -# Default: 1m -#elasticsearch_request_timeout = 1m +# Sets field value suggestion mode. The possible values are: +# 1. "off" - field value suggestions are turned off +# 2. "textual_only" - field values are suggested only for textual fields +# 3. "on" (default) - field values are suggested for all field types, even the types where suggestions are inefficient performance-wise +field_value_suggestion_mode = on # Global timeout for index optimization (force merge) requests. # Default: 1h @@ -341,8 +437,14 @@ elasticsearch_analyzer = standard # Maximum number of concurrently running index optimization (force merge) jobs. # If you are using lots of different index sets, you might want to increase that number. -# Default: 20 -#elasticsearch_index_optimization_jobs = 20 +# This value should be set lower than elasticsearch_max_total_connections_per_route, otherwise index optimization +# could deplete all the client connections to the search server and block new messages ingestion for prolonged +# periods of time. 
+# Default: 10 +#elasticsearch_index_optimization_jobs = 10 + +# Mute the logging-output of ES deprecation warnings during REST calls in the ES RestClient +#elasticsearch_mute_deprecation_warnings = true # Time interval for index range information cleanups. This setting defines how often stale index range information # is being purged from the database. @@ -367,27 +469,24 @@ output_flush_interval = 1 output_fault_count_threshold = 5 output_fault_penalty_seconds = 30 -# The number of parallel running processors. -# Raise this number if your buffers are filling up. -processbuffer_processors = 5 -outputbuffer_processors = 3 - -# The following settings (outputbuffer_processor_*) configure the thread pools backing each output buffer processor. -# See https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ThreadPoolExecutor.html for technical details - -# When the number of threads is greater than the core (see outputbuffer_processor_threads_core_pool_size), -# this is the maximum time in milliseconds that excess idle threads will wait for new tasks before terminating. -# Default: 5000 -#outputbuffer_processor_keep_alive_time = 5000 - -# The number of threads to keep in the pool, even if they are idle, unless allowCoreThreadTimeOut is set +# Number of process buffer processors running in parallel. +# By default, the value will be determined automatically based on the number of CPU cores available to the JVM, using +# the formula (<#cores> * 0.36 + 0.625) rounded to the nearest integer. +# Set this value explicitly to override the dynamically calculated value. Try raising the number if your buffers are +# filling up. +#processbuffer_processors = 5 + +# Number of output buffer processors running in parallel. +# By default, the value will be determined automatically based on the number of CPU cores available to the JVM, using +# the formula (<#cores> * 0.162 + 0.625) rounded to the nearest integer. 
+# Set this value explicitly to override the dynamically calculated value. Try raising the number if your buffers are +# filling up. +#outputbuffer_processors = 3 + +# The size of the thread pool in the output buffer processor. # Default: 3 #outputbuffer_processor_threads_core_pool_size = 3 -# The maximum number of threads to allow in the pool -# Default: 30 -#outputbuffer_processor_threads_max_pool_size = 30 - # UDP receive buffer size for all message inputs (e. g. SyslogUDPInput). #udp_recvbuffer_sizes = 1048576 @@ -409,20 +508,26 @@ processor_wait_strategy = blocking ring_size = 65536 inputbuffer_ring_size = 65536 -inputbuffer_processors = 2 inputbuffer_wait_strategy = blocking -# Enable the disk based message journal. +# Number of input buffer processors running in parallel. +#inputbuffer_processors = 2 + +# Manually stopped inputs are no longer auto-restarted. To re-enable the previous behavior, set auto_restart_inputs to true. +#auto_restart_inputs = true + +# Enable the message journal. message_journal_enabled = true -# The directory which will be used to store the message journal. The directory must me exclusively used by Graylog and +# The directory which will be used to store the message journal. The directory must be exclusively used by Graylog and # must not contain any other files than the ones created by Graylog itself. # # ATTENTION: -# If you create a seperate partition for the journal files and use a file system creating directories like 'lost+found' +# If you create a separate partition for the journal files and use a file system creating directories like 'lost+found' # in the root directory, you need to create a sub directory for your journal. # Otherwise Graylog will log an error message that the journal is corrupt and Graylog will not start. -message_journal_dir = data/journal +# Default: /journal +#message_journal_dir = data/journal # Journal hold messages before they could be written to Elasticsearch. 
# For a maximum of 12 hours or 5 GB whichever happens first. @@ -456,10 +561,6 @@ lb_recognition_period_seconds = 3 #stream_processing_timeout = 2000 #stream_processing_max_faults = 3 -# Length of the interval in seconds in which the alert conditions for all streams should be checked -# and alarms are being sent. -#alert_check_interval = 60 - # Since 0.21 the Graylog server supports pluggable output modules. This means a single message can be written to multiple # outputs. The next setting defines the timeout for a single output module, including the default output module where all # messages end up. @@ -467,8 +568,8 @@ lb_recognition_period_seconds = 3 # Time in milliseconds to wait for all message outputs to finish writing a single message. #output_module_timeout = 10000 -# Time in milliseconds after which a detected stale master node is being rechecked on startup. -#stale_master_timeout = 2000 +# Time in milliseconds after which a detected stale leader node is being rechecked on startup. +#stale_leader_timeout = 2000 # Time in milliseconds which Graylog is waiting for all threads to stop on shutdown. 
#shutdown_timeout = 30000 @@ -478,36 +579,47 @@ lb_recognition_period_seconds = 3 mongodb_uri = mongodb://localhost/graylog # Authenticate against the MongoDB server +# '+'-signs in the username or password need to be replaced by '%2B' #mongodb_uri = mongodb://grayloguser:secret@localhost:27017/graylog # Use a replica set instead of a single host -#mongodb_uri = mongodb://grayloguser:secret@localhost:27017,localhost:27018,localhost:27019/graylog +#mongodb_uri = mongodb://grayloguser:secret@localhost:27017,localhost:27018,localhost:27019/graylog?replicaSet=rs01 + +# DNS Seedlist https://docs.mongodb.com/manual/reference/connection-string/#dns-seedlist-connection-format +#mongodb_uri = mongodb+srv://server.example.org/graylog # Increase this value according to the maximum connections your MongoDB server can handle from a single client # if you encounter MongoDB connection problems. mongodb_max_connections = 1000 -# Number of threads allowed to be blocked by MongoDB connections multiplier. Default: 5 -# If mongodb_max_connections is 100, and mongodb_threads_allowed_to_block_multiplier is 5, -# then 500 threads can block. More than that and an exception will be thrown. -# http://api.mongodb.com/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier -mongodb_threads_allowed_to_block_multiplier = 5 - -# Drools Rule File (Use to rewrite incoming log messages) -# See: http://docs.graylog.org/en/2.1/pages/drools.html -#rules_file = /etc/graylog/server/rules.drl +# Maximum number of attempts to connect to MongoDB on boot for the version probe. 
+# +# Default: 0, retry indefinitely until a connection can be established +#mongodb_version_probe_attempts = 5 # Email transport #transport_email_enabled = false #transport_email_hostname = mail.example.com #transport_email_port = 587 #transport_email_use_auth = true -#transport_email_use_tls = true -#transport_email_use_ssl = true #transport_email_auth_username = you@example.com #transport_email_auth_password = secret -#transport_email_subject_prefix = [graylog] #transport_email_from_email = graylog@example.com +#transport_email_socket_connection_timeout = 10s +#transport_email_socket_timeout = 10s + +# Encryption settings +# +# ATTENTION: +# Using SMTP with STARTTLS *and* SMTPS at the same time is *not* possible. + +# Use SMTP with STARTTLS, see https://en.wikipedia.org/wiki/Opportunistic_TLS +#transport_email_use_tls = true + +# Use SMTP over SSL (SMTPS), see https://en.wikipedia.org/wiki/SMTPS +# This is deprecated on most SMTP services! +#transport_email_use_ssl = false + # Specify and uncomment this if you want to include links to the stream in your stream alert mails. # This should define the fully qualified base url to your web interface exactly the same way as it is accessed by your users. @@ -544,46 +656,113 @@ mongodb_threads_allowed_to_block_multiplier = 5 # - 10.0.*,*.example.com #http_non_proxy_hosts = -# Disable the optimization of Elasticsearch indices after index cycling. This may take some load from Elasticsearch -# on heavily used systems with large indices, but it will decrease search performance. The default is to optimize -# cycled indices. -# -# ATTENTION: These settings have been moved to the database in Graylog 2.2.0. When you upgrade, make sure to set these -# to your previous settings so they will be migrated to the database! -#disable_index_optimization = true - -# Optimize the index down to <= index_optimization_max_num_segments. 
A higher number may take some load from Elasticsearch
-# on heavily used systems with large indices, but it will decrease search performance. The default is 1.
-#
-# ATTENTION: These settings have been moved to the database in Graylog 2.2.0. When you upgrade, make sure to set these
-# to your previous settings so they will be migrated to the database!
-#index_optimization_max_num_segments = 1
-
-# The threshold of the garbage collection runs. If GC runs take longer than this threshold, a system notification
-# will be generated to warn the administrator about possible problems with the system. Default is 1 second.
-#gc_warning_threshold = 1s
-
 # Connection timeout for a configured LDAP server (e. g. ActiveDirectory) in milliseconds.
 #ldap_connection_timeout = 2000
 
-# Disable the use of SIGAR for collecting system stats
-#disable_sigar = false
+# Disable the use of a native system stats collector (currently OSHI)
+#disable_native_system_stats_collector = false
 
 # The default cache time for dashboard widgets. (Default: 10 seconds, minimum: 1 second)
 #dashboard_widget_default_cache_time = 10s
 
+# For some cluster-related REST requests, the node must query all other nodes in the cluster. This is the maximum number
+# of threads available for this. Increase it, if '/cluster/*' requests take long to complete.
+# Should be http_thread_pool_size * average_cluster_size if you have a high number of concurrent users.
+#proxied_requests_thread_pool_size = 64
+
+# The default HTTP call timeout for cluster-related REST requests. This timeout might be overridden for some
+# resources in code or other configuration values. (some cluster metrics resources use a lower timeout)
+#proxied_requests_default_call_timeout = 5s
+
+# The server is writing processing status information to the database on a regular basis. This setting controls how
+# often the data is written to the database.
+# Default: 1s (cannot be less than 1s)
+#processing_status_persist_interval = 1s
+
+# Configures the threshold for detecting outdated processing status records. Any records that haven't been updated
+# in the configured threshold will be ignored.
+# Default: 1m (one minute)
+#processing_status_update_threshold = 1m
+
+# Configures the journal write rate threshold for selecting processing status records. Any records that have a lower
+# one minute rate than the configured value might be ignored. (dependent on number of messages in the journal)
+# Default: 1
+#processing_status_journal_write_rate_threshold = 1
+
 # Automatically load content packs in "content_packs_dir" on the first start of Graylog.
-#content_packs_loader_enabled = true
+#content_packs_loader_enabled = false
 
 # The directory which contains content packs which should be loaded on the first start of Graylog.
+# Default: /contentpacks
 #content_packs_dir = data/contentpacks
 
 # A comma-separated list of content packs (files in "content_packs_dir") which should be applied on
 # the first start of Graylog.
 # Default: empty
-content_packs_auto_load = grok-patterns.json
+#content_packs_auto_install = grok-patterns.json
 
-# For some cluster-related REST requests, the node must query all other nodes in the cluster. This is the maximum number
-# of threads available for this. Increase it, if '/cluster/*' requests take long to complete.
-# Should be rest_thread_pool_size * average_cluster_size if you have a high number of concurrent users.
-proxied_requests_thread_pool_size = 32
+# The allowed TLS protocols for system wide TLS enabled servers. (e.g. message inputs, http interface)
+# Setting this to an empty value, leaves it up to system libraries and the used JDK to choose a default.
+# Default: TLSv1.2,TLSv1.3 (might be automatically adjusted to protocols supported by the JDK)
+#enabled_tls_protocols = TLSv1.2,TLSv1.3
+
+# Enable Prometheus exporter HTTP server.
+# Default: false
+#prometheus_exporter_enabled = false
+
+# IP address and port for the Prometheus exporter HTTP server.
+# Default: 127.0.0.1:9833
+#prometheus_exporter_bind_address = 127.0.0.1:9833
+
+# Path to the Prometheus exporter core mapping file. If this option is enabled, the full built-in core mapping is
+# replaced with the mappings in this file.
+# This file is monitored for changes and updates will be applied at runtime.
+# Default: none
+#prometheus_exporter_mapping_file_path_core = prometheus-exporter-mapping-core.yml
+
+# Path to the Prometheus exporter custom mapping file. If this option is enabled, the mappings in this file are
+# configured in addition to the built-in core mappings. The mappings in this file cannot overwrite any core mappings.
+# This file is monitored for changes and updates will be applied at runtime.
+# Default: none
+#prometheus_exporter_mapping_file_path_custom = prometheus-exporter-mapping-custom.yml
+
+# Configures the refresh interval for the monitored Prometheus exporter mapping files.
+# Default: 60s
+#prometheus_exporter_mapping_file_refresh_interval = 60s
+
+# Optional allowed paths for Graylog data files. If provided, certain operations in Graylog will only be permitted
+# if the data file(s) are located in the specified paths (for example, with the CSV File lookup adapter).
+# All subdirectories of indicated paths are allowed by default. This provides an additional layer of security,
+# and allows administrators to control where in the file system Graylog users can select files from.
+#allowed_auxiliary_paths = /etc/graylog/data-files,/etc/custom-allowed-path
+
+# Do not perform any preflight checks when starting Graylog
+# Default: false
+#skip_preflight_checks = false
+
+# Ignore any exceptions encountered when running migrations
+# Use with caution - skipping failing migrations may result in an inconsistent DB state.
+# Default: false +#ignore_migration_failures = false + +# Comma-separated list of notification types which should not emit a system event. +# Default: SIDECAR_STATUS_UNKNOWN which would create a new event whenever the status of a sidecar becomes "Unknown" +#system_event_excluded_types = SIDECAR_STATUS_UNKNOWN + +# RSS settings for content stream +#content_stream_rss_url = https://www.graylog.org/post +#content_stream_refresh_interval = 7d + +# Maximum value that can be set for an event limit. +# Default: 1000 +#event_definition_max_event_limit = 1000 + +# Optional limits on scheduling concurrency by job type. No more than the specified number of worker +# threads will be executing jobs of the specified type across the entire cluster. +# Default: no limitation +# Note: Monitor job queue metrics to avoid excessive backlog of unprocessed jobs when using this setting! +# Available job types in Graylog Open: +# check-for-cert-renewal-execution-v1 +# event-processor-execution-v1 +# notification-execution-v1 +#job_scheduler_concurrency_limits = event-processor-execution-v1:2,notification-execution-v1:2 diff --git a/src/main/java/com/wizecore/graylog2/plugin/SyslogOutput.java b/src/main/java/com/wizecore/graylog2/plugin/SyslogOutput.java index 88223d5..d8750c5 100644 --- a/src/main/java/com/wizecore/graylog2/plugin/SyslogOutput.java +++ b/src/main/java/com/wizecore/graylog2/plugin/SyslogOutput.java @@ -22,7 +22,7 @@ import org.graylog2.syslog4j.impl.message.processor.structured.StructuredSyslogMessageProcessor; import org.graylog2.syslog4j.impl.net.tcp.TCPNetSyslogConfig; import org.graylog2.syslog4j.impl.net.udp.UDPNetSyslogConfig; -import javax.inject.Inject; +import jakarta.inject.Inject; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -308,7 +308,9 @@ public ConfigurationRequest getRequestedConfiguration() { types.put("transparent", "transparent"); types.put("snare", "snare"); + // Make immutable map from types final Map formats = 
ImmutableMap.copyOf(types); + configurationRequest.addField(new DropdownField( "format", "Message format", "plain", formats, "Message format. For detailed explanation, see https://github.com/wizecore/graylog2-output-syslog", diff --git a/src/test/java/com/wizecore/graylog2/plugin/test/TestFullSender.java b/src/test/java/com/wizecore/graylog2/plugin/test/TestFullSender.java index 5d3d517..f83949c 100644 --- a/src/test/java/com/wizecore/graylog2/plugin/test/TestFullSender.java +++ b/src/test/java/com/wizecore/graylog2/plugin/test/TestFullSender.java @@ -1,103 +1,106 @@ -package com.wizecore.graylog2.plugin.test; - -import static org.junit.Assert.*; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.net.InetAddress; -import java.net.ServerSocket; -import java.net.Socket; -import java.util.Arrays; -import java.util.concurrent.Executors; - -import org.graylog2.plugin.Message; -import org.graylog2.syslog4j.Syslog; -import org.graylog2.syslog4j.SyslogConfigIF; -import org.graylog2.syslog4j.SyslogIF; -import org.graylog2.syslog4j.impl.net.tcp.TCPNetSyslogConfig; -import org.joda.time.DateTime; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import com.wizecore.graylog2.plugin.FullSender; - - -public class TestFullSender implements Runnable { - - - int portStart = 45000; - int portEnd = 45100; - int port = 0; - private ServerSocket listen; - private String hostname; - private int receivedSymbols; - - @Before - public void initTcpListener() throws IOException { - port = 0; - for (int i = portStart; i <= portEnd; i++) { - try { - InetAddress local = InetAddress.getLocalHost(); - hostname = local.getHostName(); - System.err.println("Trying to listen on tcp://" + hostname + ":" + i); - listen = new ServerSocket(i, 10, local); - Executors.newSingleThreadExecutor().execute(this); - port = i; - break; - } catch (IOException e) { - // Failed to create socket - } - } - if (port == 0) { - throw 
new IOException("Can`t bind to listen on one of ports " + portStart + "..." + portEnd); - } - } - - @After - public void countSymbols() throws IOException { - System.err.println("Received symbols total: " + receivedSymbols); - assertTrue(receivedSymbols >= 1024 * 16); - listen.close(); - } - - @Override - public void run() { - try { - Socket conn = listen.accept(); - BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream(), "UTF-8")); - String s = null; - while ((s = in.readLine()) != null) { - System.out.println(s); - receivedSymbols += s.length(); - } - conn.close(); - } catch (IOException e) { - - } - } - - @Test - public void testMessageTruncation() throws InterruptedException { - FullSender s = new FullSender(); - - SyslogConfigIF config = new TCPNetSyslogConfig(); - config.setHost(hostname); - config.setPort(port); - config.setTruncateMessage(true); - config.setMaxMessageLength(1024 * 16); - char[] buf = new char[16384]; - double decims = Math.ceil(buf.length / 10.0); - char[] dec = "1234567890".toCharArray(); - for (int i = 0; i < decims; i++) { - System.arraycopy(dec, 0, buf, i * 10, Math.min(buf.length - i * 10, 10)); - } - Message msg = new Message(new String(buf), "localhost", new DateTime()); - System.out.println("Original message: "); - System.out.println(msg); - SyslogIF syslog = Syslog.createInstance("tcp_" + System.currentTimeMillis(), config); - s.send(syslog, Syslog.LEVEL_INFO, msg); - Thread.sleep(1000); - } -} +package com.wizecore.graylog2.plugin.test; + +import static org.junit.Assert.*; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.InetAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.util.Arrays; +import java.util.concurrent.Executors; + +import org.graylog2.plugin.Message; +import org.graylog2.plugin.MessageFactory; +import org.graylog2.plugin.TestMessageFactory; +import org.graylog2.syslog4j.Syslog; +import 
org.graylog2.syslog4j.SyslogConfigIF; +import org.graylog2.syslog4j.SyslogIF; +import org.graylog2.syslog4j.impl.net.tcp.TCPNetSyslogConfig; +import org.joda.time.DateTime; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.wizecore.graylog2.plugin.FullSender; + + +public class TestFullSender implements Runnable { + + + int portStart = 45000; + int portEnd = 45100; + int port = 0; + private ServerSocket listen; + private String hostname; + private int receivedSymbols; + + @Before + public void initTcpListener() throws IOException { + port = 0; + for (int i = portStart; i <= portEnd; i++) { + try { + InetAddress local = InetAddress.getLocalHost(); + hostname = local.getHostName(); + System.err.println("Trying to listen on tcp://" + hostname + ":" + i); + listen = new ServerSocket(i, 10, local); + Executors.newSingleThreadExecutor().execute(this); + port = i; + break; + } catch (IOException e) { + // Failed to create socket + } + } + if (port == 0) { + throw new IOException("Can`t bind to listen on one of ports " + portStart + "..." 
+ portEnd); + } + } + + @After + public void countSymbols() throws IOException { + System.err.println("Received symbols total: " + receivedSymbols); + assertTrue(receivedSymbols >= 1024 * 16); + listen.close(); + } + + @Override + public void run() { + try { + Socket conn = listen.accept(); + BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream(), "UTF-8")); + String s = null; + while ((s = in.readLine()) != null) { + System.out.println(s); + receivedSymbols += s.length(); + } + conn.close(); + } catch (IOException e) { + + } + } + + @Test + public void testMessageTruncation() throws InterruptedException { + FullSender s = new FullSender(); + + SyslogConfigIF config = new TCPNetSyslogConfig(); + config.setHost(hostname); + config.setPort(port); + config.setTruncateMessage(true); + config.setMaxMessageLength(1024 * 16); + char[] buf = new char[16384]; + double decims = Math.ceil(buf.length / 10.0); + char[] dec = "1234567890".toCharArray(); + for (int i = 0; i < decims; i++) { + System.arraycopy(dec, 0, buf, i * 10, Math.min(buf.length - i * 10, 10)); + } + MessageFactory mf = TestMessageFactory.create(); + Message msg = mf.createMessage(new String(buf), "localhost", new DateTime()); + System.out.println("Original message: "); + System.out.println(msg); + SyslogIF syslog = Syslog.createInstance("tcp_" + System.currentTimeMillis(), config); + s.send(syslog, Syslog.LEVEL_INFO, msg); + Thread.sleep(1000); + } +} diff --git a/src/test/java/com/wizecore/graylog2/plugin/test/TestTransparent.java b/src/test/java/com/wizecore/graylog2/plugin/test/TestTransparent.java index 71ca2fe..8f0d01a 100644 --- a/src/test/java/com/wizecore/graylog2/plugin/test/TestTransparent.java +++ b/src/test/java/com/wizecore/graylog2/plugin/test/TestTransparent.java @@ -1,159 +1,163 @@ -package com.wizecore.graylog2.plugin.test; - -import com.eaio.uuid.UUID; -import com.wizecore.graylog2.plugin.TransparentSyslogSender; -import 
org.graylog2.inputs.converters.SyslogPriUtilities; -import org.graylog2.plugin.Message; -import org.graylog2.plugin.configuration.Configuration; -import org.graylog2.syslog4j.Syslog; -import org.graylog2.syslog4j.SyslogConfigIF; -import org.graylog2.syslog4j.SyslogIF; -import org.graylog2.syslog4j.impl.message.processor.SyslogMessageProcessor; -import org.graylog2.syslog4j.impl.net.tcp.TCPNetSyslogConfig; -import org.joda.time.DateTime; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.net.InetAddress; -import java.net.ServerSocket; -import java.net.Socket; -import java.util.Date; -import java.util.HashMap; -import java.util.concurrent.Executors; - - -public class TestTransparent implements Runnable { - int portStart = 45000; - int portEnd = 45100; - int port = 0; - private ServerSocket listen; - private String hostname; - private StringBuilder receivedSymbols = new StringBuilder(); - - @Before - public void initTcpListener() throws IOException { - port = 0; - for (int i = portStart; i <= portEnd; i++) { - try { - InetAddress local = InetAddress.getLocalHost(); - hostname = local.getHostName(); - System.err.println("Trying to listen on tcp://" + hostname + ":" + i); - listen = new ServerSocket(i, 10, local); - Executors.newSingleThreadExecutor().execute(this); - port = i; - break; - } catch (IOException e) { - // Failed to create socket - } - } - if (port == 0) { - throw new IOException("Can`t bind to listen on one of ports " + portStart + "..." 
+ portEnd); - } - } - - @After - public void countSymbols() throws IOException { - System.err.println("Received symbols total: " + receivedSymbols); - listen.close(); - } - - @Override - public void run() { - try { - Socket conn = listen.accept(); - BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream(), "UTF-8")); - String s = null; - while ((s = in.readLine()) != null) { - System.out.println(s); - receivedSymbols.append(s); - receivedSymbols.append("\n"); - } - conn.close(); - } catch (IOException e) { - - } - } - - @Test - public void testConvertGraylogPriority() { - int p = 86; - int f = SyslogPriUtilities.facilityFromPriority(p); - int l = SyslogPriUtilities.levelFromPriority(p); - System.out.println("Facility:" + f); - System.out.println("Level:" + l); - } - - @Test - public void testMessageHeader() throws InterruptedException { - TransparentSyslogSender s = new TransparentSyslogSender(new Configuration(new HashMap())); - - SyslogConfigIF config = new TCPNetSyslogConfig(); - config.setHost(hostname); - config.setPort(port); - config.setTruncateMessage(true); - config.setMaxMessageLength(1024 * 16); - HashMap fields = new HashMap<>(); - fields.put(Message.FIELD_MESSAGE, "localhost Hello, world!"); - fields.put(Message.FIELD_SOURCE, "localhost"); - fields.put(Message.FIELD_TIMESTAMP, DateTime.parse("2020-01-01T12:34:56.789")); - fields.put(Message.FIELD_ID, (new UUID()).toString()); - fields.put("facility", "security/authorization"); - Message msg = new Message(fields); - System.out.println("Original message: "); - System.out.println(msg); - SyslogIF syslog = Syslog.createInstance("tcp_" + System.currentTimeMillis(), config); - syslog.setMessageProcessor(new SyslogMessageProcessor() { - @Override - public String createSyslogHeader(int facility, int level, String localName, boolean sendLocalName, Date datetime) { - return ""; - } - - @Override - public String createSyslogHeader(int facility, int level, String localName, boolean 
sendLocalTimestamp, boolean sendLocalName) { - return ""; - } - }); - s.send(syslog, Syslog.LEVEL_INFO, msg); - Thread.sleep(1000); - } - - @Test - public void testFacilityNum() throws InterruptedException { - TransparentSyslogSender s = new TransparentSyslogSender(new Configuration(new HashMap())); - - SyslogConfigIF config = new TCPNetSyslogConfig(); - config.setHost(hostname); - config.setPort(port); - config.setTruncateMessage(true); - config.setMaxMessageLength(1024 * 16); - HashMap fields = new HashMap<>(); - fields.put(Message.FIELD_MESSAGE, "localhost Hello, world!"); - fields.put(Message.FIELD_SOURCE, "localhost"); - fields.put(Message.FIELD_TIMESTAMP, DateTime.parse("2020-01-01T12:34:56.789")); - fields.put(Message.FIELD_ID, (new UUID()).toString()); - fields.put("facility", "security/authorization"); - fields.put("facility_num", 10); - - Message msg = new Message(fields); - System.out.println("Original message: "); - System.out.println(msg); - SyslogIF syslog = Syslog.createInstance("tcp_" + System.currentTimeMillis(), config); - syslog.setMessageProcessor(new SyslogMessageProcessor() { - @Override - public String createSyslogHeader(int facility, int level, String localName, boolean sendLocalName, Date datetime) { - return ""; - } - - @Override - public String createSyslogHeader(int facility, int level, String localName, boolean sendLocalTimestamp, boolean sendLocalName) { - return ""; - } - }); - s.send(syslog, Syslog.LEVEL_INFO, msg); - Thread.sleep(1000); - } -} +package com.wizecore.graylog2.plugin.test; + +import com.eaio.uuid.UUID; +import com.wizecore.graylog2.plugin.TransparentSyslogSender; +import org.graylog2.inputs.converters.SyslogPriUtilities; +import org.graylog2.plugin.Message; +import org.graylog2.plugin.MessageFactory; +import org.graylog2.plugin.TestMessageFactory; +import org.graylog2.plugin.configuration.Configuration; +import org.graylog2.syslog4j.Syslog; +import org.graylog2.syslog4j.SyslogConfigIF; +import 
org.graylog2.syslog4j.SyslogIF; +import org.graylog2.syslog4j.impl.message.processor.SyslogMessageProcessor; +import org.graylog2.syslog4j.impl.net.tcp.TCPNetSyslogConfig; +import org.joda.time.DateTime; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.InetAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.util.Date; +import java.util.HashMap; +import java.util.concurrent.Executors; + + +public class TestTransparent implements Runnable { + int portStart = 45000; + int portEnd = 45100; + int port = 0; + private ServerSocket listen; + private String hostname; + private StringBuilder receivedSymbols = new StringBuilder(); + + @Before + public void initTcpListener() throws IOException { + port = 0; + for (int i = portStart; i <= portEnd; i++) { + try { + InetAddress local = InetAddress.getLocalHost(); + hostname = local.getHostName(); + System.err.println("Trying to listen on tcp://" + hostname + ":" + i); + listen = new ServerSocket(i, 10, local); + Executors.newSingleThreadExecutor().execute(this); + port = i; + break; + } catch (IOException e) { + // Failed to create socket + } + } + if (port == 0) { + throw new IOException("Can`t bind to listen on one of ports " + portStart + "..." 
+ portEnd); + } + } + + @After + public void countSymbols() throws IOException { + System.err.println("Received symbols total: " + receivedSymbols); + listen.close(); + } + + @Override + public void run() { + try { + Socket conn = listen.accept(); + BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream(), "UTF-8")); + String s = null; + while ((s = in.readLine()) != null) { + System.out.println(s); + receivedSymbols.append(s); + receivedSymbols.append("\n"); + } + conn.close(); + } catch (IOException e) { + + } + } + + @Test + public void testConvertGraylogPriority() { + int p = 86; + int f = SyslogPriUtilities.facilityFromPriority(p); + int l = SyslogPriUtilities.levelFromPriority(p); + System.out.println("Facility:" + f); + System.out.println("Level:" + l); + } + + @Test + public void testMessageHeader() throws InterruptedException { + TransparentSyslogSender s = new TransparentSyslogSender(new Configuration(new HashMap())); + + SyslogConfigIF config = new TCPNetSyslogConfig(); + config.setHost(hostname); + config.setPort(port); + config.setTruncateMessage(true); + config.setMaxMessageLength(1024 * 16); + HashMap fields = new HashMap<>(); + fields.put(Message.FIELD_MESSAGE, "localhost Hello, world!"); + fields.put(Message.FIELD_SOURCE, "localhost"); + fields.put(Message.FIELD_TIMESTAMP, DateTime.parse("2020-01-01T12:34:56.789")); + fields.put(Message.FIELD_ID, (new UUID()).toString()); + fields.put("facility", "security/authorization"); + MessageFactory mf = TestMessageFactory.create(); + Message msg = mf.createMessage(fields); + System.out.println("Original message: "); + System.out.println(msg); + SyslogIF syslog = Syslog.createInstance("tcp_" + System.currentTimeMillis(), config); + syslog.setMessageProcessor(new SyslogMessageProcessor() { + @Override + public String createSyslogHeader(int facility, int level, String localName, boolean sendLocalName, Date datetime) { + return ""; + } + + @Override + public String 
createSyslogHeader(int facility, int level, String localName, boolean sendLocalTimestamp, boolean sendLocalName) { + return ""; + } + }); + s.send(syslog, Syslog.LEVEL_INFO, msg); + Thread.sleep(1000); + } + + @Test + public void testFacilityNum() throws InterruptedException { + TransparentSyslogSender s = new TransparentSyslogSender(new Configuration(new HashMap())); + + SyslogConfigIF config = new TCPNetSyslogConfig(); + config.setHost(hostname); + config.setPort(port); + config.setTruncateMessage(true); + config.setMaxMessageLength(1024 * 16); + HashMap fields = new HashMap<>(); + fields.put(Message.FIELD_MESSAGE, "localhost Hello, world!"); + fields.put(Message.FIELD_SOURCE, "localhost"); + fields.put(Message.FIELD_TIMESTAMP, DateTime.parse("2020-01-01T12:34:56.789")); + fields.put(Message.FIELD_ID, (new UUID()).toString()); + fields.put("facility", "security/authorization"); + fields.put("facility_num", 10); + MessageFactory mf = TestMessageFactory.create(); + Message msg = mf.createMessage(fields); + + System.out.println("Original message: "); + System.out.println(msg); + SyslogIF syslog = Syslog.createInstance("tcp_" + System.currentTimeMillis(), config); + syslog.setMessageProcessor(new SyslogMessageProcessor() { + @Override + public String createSyslogHeader(int facility, int level, String localName, boolean sendLocalName, Date datetime) { + return ""; + } + + @Override + public String createSyslogHeader(int facility, int level, String localName, boolean sendLocalTimestamp, boolean sendLocalName) { + return ""; + } + }); + s.send(syslog, Syslog.LEVEL_INFO, msg); + Thread.sleep(1000); + } +} diff --git a/src/test/java/org/graylog2/plugin/TestMessageFactory.java b/src/test/java/org/graylog2/plugin/TestMessageFactory.java new file mode 100644 index 0000000..cb82512 --- /dev/null +++ b/src/test/java/org/graylog2/plugin/TestMessageFactory.java @@ -0,0 +1,7 @@ +package org.graylog2.plugin; + +public class TestMessageFactory { + public static MessageFactory 
create() { + return new DefaultMessageFactory(); + } +} \ No newline at end of file