Elastic SIEM #846

Open · wants to merge 19 commits into base: main

Changes from all commits
18 changes: 17 additions & 1 deletion .env
@@ -27,6 +27,22 @@ NEXTAUTH_SECRET=secret
RETRACED_HOST_URL=http://retraced-api:3000/auditlog
RETRACED_EXTERNAL_URL=http://localhost:3000/auditlog

# Export Logs
EXPORT_DATADOG_API_KEY=
EXPORT_WEBHOOK_URL=http://vector:9000
EXPORT_WEBHOOK_USERNAME=admin
EXPORT_WEBHOOK_PASSWORD=admin
EXPORT_DDSOURCE=local-dev-machine
EXPORT_DDTAGS="Audit-Logs, Retraced, BoxyHQ"
EXPORT_DATADOG_HOSTNAME="127.0.0.1"
EXPORT_SERVICE="Retraced-audit-logs"
EXPORT_DATADOG_REGION=us
EXPORT_DATADOG_SITE=datadoghq.com
# S3 Bucket
EXPORT_S3_BUCKET=
EXPORT_S3_REGION=
EXPORT_S3_ACCESS_KEY_ID=
EXPORT_S3_SECRET_ACCESS_KEY=
# OpenTelemetry
# https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/
# If you have any issues with using the otel exporter and want to enable debug logs
@@ -40,4 +56,4 @@ OTEL_EXPORTER_OTLP_METRICS_HEADERS=
GEOIPUPDATE_LICENSE_KEY=
GEOIPUPDATE_ACCOUNT_ID=
GEOIPUPDATE_USE_MMDB=
-GEOIPUPDATE_DB_DIR=/etc/mmdb
\ No newline at end of file
+GEOIPUPDATE_DB_DIR=/etc/mmdb
4 changes: 3 additions & 1 deletion .gitignore
@@ -106,5 +106,7 @@ test-results.xml
.env.development.local
.env.test.local
.env.production.local
vector/*
minio/*
mmdb/**/**
-GeoIP.conf
\ No newline at end of file
+GeoIP.conf
56 changes: 46 additions & 10 deletions docker-compose.yaml
@@ -131,16 +131,16 @@ services:
  #   networks:
  #     - retraced

-  # kibana:
-  #   image: docker.elastic.co/kibana/kibana:7.8.0
-  #   environment:
-  #     - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
-  #   networks:
-  #     - retraced
-  #   depends_on:
-  #     - elasticsearch
-  #   ports:
-  #     - 5601:5601
+  kibana:
+    image: docker.elastic.co/kibana/kibana:7.8.0
+    environment:
+      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
+    networks:
+      - retraced
+    depends_on:
+      - elasticsearch
+    ports:
+      - 5601:5601

  retraced-dev-bootstrap:
    build:
@@ -221,5 +221,41 @@ services:
    depends_on:
      - "retraced-api"
    restart: "always"

  vector:
    image: timberio/vector:0.X-alpine
    environment:
      - EXPORT_WEBHOOK_USERNAME=${EXPORT_WEBHOOK_USERNAME}
      - EXPORT_WEBHOOK_PASSWORD=${EXPORT_WEBHOOK_PASSWORD}
      - EXPORT_DDSOURCE=${EXPORT_DDSOURCE}
      - EXPORT_DDTAGS=${EXPORT_DDTAGS}
      - EXPORT_DATADOG_HOSTNAME=${EXPORT_DATADOG_HOSTNAME}
      - EXPORT_SERVICE=${EXPORT_SERVICE}
      - EXPORT_DATADOG_API_KEY=${EXPORT_DATADOG_API_KEY}
      - EXPORT_DATADOG_REGION=${EXPORT_DATADOG_REGION}
      - EXPORT_DATADOG_SITE=${EXPORT_DATADOG_SITE}
      - EXPORT_S3_BUCKET=${EXPORT_S3_BUCKET}
      - EXPORT_S3_REGION=${EXPORT_S3_REGION}
      - EXPORT_S3_ACCESS_KEY_ID=${EXPORT_S3_ACCESS_KEY_ID}
      - EXPORT_S3_SECRET_ACCESS_KEY=${EXPORT_S3_SECRET_ACCESS_KEY}
    volumes:
      - ./vector.toml:/etc/vector/vector.toml
      - ./vector/data:/var/lib/vector/
    networks:
      - retraced
    depends_on:
      - "minio"
      - "elasticsearch"

  minio:
    image: quay.io/minio/minio:latest
    ports:
      - "9002:9000"
      - "9001:9001"
    volumes:
      - ./minio/data:/data
    networks:
      - retraced
    command: server /data --console-address ":9001"

volumes:
  mmdb:
2 changes: 2 additions & 0 deletions src/_processor/workers/saveEventToElasticsearch.ts
@@ -2,6 +2,7 @@ import _ from "lodash";
import moment from "moment";
import { Clock } from "../common";
import { ClientWithRetry, getESWithRetry } from "../../persistence/elasticsearch";
import sendToWebhook from "../../ee/export/index";
import { instrumented, recordOtelHistogram } from "../../metrics/opentelemetry/instrumentation";

export class ElasticsearchSaver {
@@ -25,6 +26,7 @@ export class ElasticsearchSaver {
    const alias = `retraced.${jobObj.projectId}.${jobObj.environmentId}.current`;
    try {
      await this.esIndex(event, alias);
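      // Fire-and-forget export: sendToWebhook handles its own failures,
      // so a webhook error never sets e.retry or fails the Elasticsearch save.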
      sendToWebhook(event);
    } catch (e) {
      e.retry = true;
      throw e;
3 changes: 3 additions & 0 deletions src/config.ts
@@ -72,6 +72,9 @@
    env.RETRACED_NO_ANALYTICS ||
    process.env.DO_NOT_TRACK ||
    env.DO_NOT_TRACK,
  EXPORT_WEBHOOK_URL: process.env.EXPORT_WEBHOOK_URL || env.EXPORT_WEBHOOK_URL,
  EXPORT_WEBHOOK_USERNAME: process.env.EXPORT_WEBHOOK_USERNAME || env.EXPORT_WEBHOOK_USERNAME,
  EXPORT_WEBHOOK_PASSWORD: process.env.EXPORT_WEBHOOK_PASSWORD || env.EXPORT_WEBHOOK_PASSWORD,
  GEOIPUPDATE_USE_MMDB: process.env.GEOIPUPDATE_USE_MMDB || env.GEOIPUPDATE_USE_MMDB,
  GEOIPUPDATE_DB_DIR: process.env.GEOIPUPDATE_DB_DIR || env.GEOIPUPDATE_DB_DIR || "/etc/mmdb",
};
7 changes: 7 additions & 0 deletions src/ee/ENTERPRISE.md
@@ -0,0 +1,7 @@
# Enterprise Edition

Welcome to the Enterprise Edition ("/ee") of BoxyHQ.

The [/ee](https://github.com/retracedhq/retraced/tree/main/ee) subfolder is the place for all the **Enterprise** features for this repository.

> _❗ NOTE: This section is copyrighted (unlike the rest of our [repository](https://github.com/retracedhq/retraced)). You are not allowed to use this code without obtaining a proper [license](https://boxyhq.com/pricing) first.❗_
1 change: 1 addition & 0 deletions src/ee/LICENSE
@@ -0,0 +1 @@
The BoxyHQ Enterprise Edition (EE) license (the “EE License”)
3 changes: 3 additions & 0 deletions src/ee/export/Readme.md
@@ -0,0 +1,3 @@
# Export audit logs in real time using Vector

This feature uses Vector to export audit logs to Datadog and other destinations as they are indexed.
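
For a quick local check, here is a minimal sketch of posting one event to the Vector webhook source the same way `sendToWebhook` does. The event fields are illustrative (they mirror the Retraced fields read by the transforms in `vector.toml`), the credentials are the `.env` defaults, and it assumes `http://vector:9000` is reachable — i.e. it runs inside the compose network, since the `vector` service does not publish port 9000 to the host:

```ts
import axios from "axios";

// Illustrative Retraced-style event; field names mirror those consumed
// by the transforms in vector.toml (action, crud, actor, target, group, ...).
const sampleEvent = {
  action: "user.login",
  crud: "c",
  description: "User logged in",
  is_failure: false,
  component: "dashboard",
  source_ip: "127.0.0.1",
  actor: { id: "actor-1", name: "Jane Doe", href: "https://example.com/jane" },
  target: { id: "target-1", name: "dashboard", href: "https://example.com", type: "service" },
  group: { id: "group-1", name: "acme", href: "https://example.com/acme" },
};

// Endpoint and basic-auth values match EXPORT_WEBHOOK_* in .env.
axios
  .post("http://vector:9000", sampleEvent, {
    auth: { username: "admin", password: "admin" },
  })
  .then(() => console.log("event accepted by Vector"))
  .catch((err) => console.error("export failed:", err.message));
```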
25 changes: 25 additions & 0 deletions src/ee/export/index.ts
@@ -0,0 +1,25 @@
import axios from "axios";
import config from "../../config";
import { logger } from "../../logger";

export default function sendToWebhook(event: any): void {
  if (config.EXPORT_WEBHOOK_URL) {
    // Note: mutates the caller's event by stripping the raw payload.
    delete event.raw;
    axios
      .post(config.EXPORT_WEBHOOK_URL, event, {
        auth:
          config.EXPORT_WEBHOOK_USERNAME && config.EXPORT_WEBHOOK_PASSWORD
            ? {
                username: config.EXPORT_WEBHOOK_USERNAME,
                password: config.EXPORT_WEBHOOK_PASSWORD,
              }
            : undefined,
      })
      .then(() => {
        logger.info(`[VECTOR EXPORT] Sent to webhook`);
      })
      .catch(() => {
        logger.info(`[VECTOR EXPORT] Failed to send to webhook`);
      });
  }
}
128 changes: 128 additions & 0 deletions vector.toml
@@ -0,0 +1,128 @@
data_dir = "/var/lib/vector/"
[api]
enabled = true
# The data source that Vector will collect logs from
[sources.webhook]
type = "http_server" # The protocol to use
address = "0.0.0.0:9000" # The address to bind to
healthcheck = true # Enable built-in health checks
body_size_limit = "1mb" # Maximum size of request body
auth.password = "${EXPORT_WEBHOOK_PASSWORD}"
auth.username = "${EXPORT_WEBHOOK_USERNAME}"

# The transformation(s) to apply to each event for DataDog
[transforms.add_datadog_info]
type = "remap"
inputs = [ "webhook" ]
source = """
# Set the values of the output object
.ddsource = "${EXPORT_DDSOURCE}"
.ddtags = "${EXPORT_DDTAGS}"
.hostname = "${EXPORT_DATADOG_HOSTNAME}"
.service = "${EXPORT_SERVICE}"
"""

# The transformation(s) to apply to each event for s3
[transforms.s3_transform]
type = "remap"
inputs = [ "webhook" ]
source = """
# Set the values of the output object
. = parse_json!(.message)
"""

[transforms.ecs_transform]
type = "remap"
inputs = [ "webhook" ]
source = """
# Set the values of the output object
. = parse_json!(.message)
.host.name = "localhost"
.host.ip = .source_ip
.event.action = .action
.event.code = .crud
.event.module = .component
if .received == null {
  .event.received = now()
} else {
  .event.received = format_timestamp!(.received, format: "%+")
}
.event.dataset = "Audit Log"
.user.id = .actor.id
.user.name = .actor.name
.user.domain = .actor.href
.service.id = .target.id
.service.name = .target.name
.service.address = .target.href
.service.type = .target.type
.group.id = .group.id
.group.name = .group.name
.group.domain = .group.href
.source.ip = .source_ip
.message = .description
if .is_failure == true {
  .event.outcome = "failure"
} else {
  .event.outcome = "success"
}
if .created == null {
  ."@timestamp" = now()
} else {
  ."@timestamp" = format_timestamp!(.created, format: "%+")
}
del(.actor)
del(.target)
del(.received)
del(.action)
del(.crud)
del(.is_failure)
del(.component)
del(.group)
del(.source_ip)
del(.description)
del(.created)
del(.canonical_time)
"""

# The destination(s) to send the events to
[sinks.datadog_sink]
type = "datadog_logs"
inputs = [ "add_datadog_info" ]
default_api_key = "${EXPORT_DATADOG_API_KEY}"
compression = "gzip"
region = "${EXPORT_DATADOG_REGION}"
site = "${EXPORT_DATADOG_SITE}"
acknowledgements.enabled = true
healthcheck.enabled = true
request.concurrency = 10
request.rate_limit_duration_secs = 1
request.rate_limit_num = 10
buffer.type = "disk"
# 1GB
buffer.max_size = 1073741952

[sinks.s3_sink]
type = "aws_s3"
inputs = ["s3_transform"]
bucket = "test"
region = "${EXPORT_S3_REGION}"
endpoint = "http://minio:9000"
encoding.codec = "json"
acknowledgements.enabled = true
auth.access_key_id="${EXPORT_S3_ACCESS_KEY_ID}"
auth.secret_access_key="${EXPORT_S3_SECRET_ACCESS_KEY}"
batch.max_events = 1000

[sinks.ecs_sink]
type = "elasticsearch"
inputs = [ "ecs_transform" ]
acknowledgements.enabled = true
api_version = "v7"
auth.strategy = "basic"
auth.user = "elastic"
auth.password = "changeme"
buffer.type = "memory"
buffer.max_events = 10
bulk.action = "index"
bulk.index = "vector-%Y-%m-%d"
endpoints = ["http://elasticsearch:9200"]
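
For reference, a sketch of the ECS-style document that `ecs_transform` produces and `ecs_sink` indexes into the daily `vector-%Y-%m-%d` index. This is inferred from the remap assignments above, not an official mapping; the source field for each value is noted in comments:

```ts
// Shape of the exported document, as assembled by ecs_transform above.
interface ExportedAuditEvent {
  "@timestamp": string;                               // .created, or now()
  message: string;                                    // .description
  host: { name: string; ip: string };                 // "localhost", .source_ip
  event: {
    action: string;                                   // .action
    code: string;                                     // .crud
    module: string;                                   // .component
    received: string;                                 // .received, or now()
    dataset: "Audit Log";
    outcome: "success" | "failure";                   // from .is_failure
  };
  user: { id: string; name: string; domain: string }; // .actor
  service: { id: string; name: string; address: string; type: string }; // .target
  group: { id: string; name: string; domain: string }; // .group
  source: { ip: string };                             // .source_ip
}
```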