diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 0000000..261ea2e
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,71 @@
+name: Build documentation
+
+on:
+ push:
+ branches: [main]
+
+ # Only trigger workflow when documentation files are changed
+ paths:
+ - 'docs/**'
+ - 'mkdocs.yml'
+ - '.github/workflows/docs.yml'
+
+# Prevent this workflow from running concurrently with the helm-release.yml workflow
+concurrency:
+ group: "pages"
+ cancel-in-progress: false
+
+jobs:
+ docs:
+ runs-on: ubuntu-latest
+
+ permissions:
+ pages: write
+ id-token: write
+
+ environment:
+ name: github-pages
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Setup Python virtualenv
+ run: |
+          python -m venv env
+          source env/bin/activate
+          pip install --upgrade pip
+ pip install -r docs/requirements.txt
+
+ - name: Build documentation
+ run: |
+ source env/bin/activate
+ mkdocs build
+
+      - name: Add existing Helm repository index.yaml file
+ env:
+ GH_TOKEN: ${{ github.token }}
+ run: |
+ PAGES_URL=$(gh api \
+ -H "Accept: application/vnd.github+json" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ /repos/${{ github.repository }}/pages \
+ | jq -r '.html_url')
+
+ if [[ "$PAGES_URL" != "null" ]]; then
+ HTTP_STATUS=$(curl -sL -w '%{http_code}' "${PAGES_URL%/}/index.yaml" -o site/index.yaml)
+ if [[ "$HTTP_STATUS" != "200" ]]; then
+ rm site/index.yaml
+ fi
+ fi
+
+ - name: Setup Github pages
+ uses: actions/configure-pages@v4
+
+ - name: Create Github pages artifact
+ uses: actions/upload-pages-artifact@v3
+ with:
+ path: site
+
+ - name: Deploy documentation to Github pages
+ uses: actions/deploy-pages@v4
diff --git a/.github/workflows/helm-release.yml b/.github/workflows/helm-release.yml
index 73951f5..c1bff8d 100644
--- a/.github/workflows/helm-release.yml
+++ b/.github/workflows/helm-release.yml
@@ -14,17 +14,16 @@ on:
default: 'helm'
type: string
-env:
- PACKAGE_DIR: dist
-
-# Only allow one instance of this workflow to run at a time
+# Prevent this workflow from running concurrently with the docs.yml workflow
concurrency:
group: "pages"
- cancel-in-progress: true
+ cancel-in-progress: false
jobs:
verify:
+ name: Verify release
+
runs-on: ubuntu-latest
steps:
@@ -70,38 +69,42 @@ jobs:
false
fi
- release:
+ docs:
+ name: Build documentation
needs: verify
-
- # Provision a Github token with repository and pages write permissions
- permissions:
- contents: write
- pages: write
- id-token: write
-
- # Use the github-pages environment. The actions/deploy-pages workflow fails with a
- # "Invalid environment node id" error if an environment is not specified.
- # https://github.com/actions/deploy-pages/issues/271
- environment:
- name: github-pages
-
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- - name: Configure git
+ - name: Setup Python virtualenv
run: |
- git config user.name "$GITHUB_ACTOR"
- git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+          python -m venv env
+          source env/bin/activate
+          pip install --upgrade pip
+ pip install -r docs/requirements.txt
- - name: Create a git tag for the release
- uses: EndBug/add-and-commit@v9
+ - name: Build documentation
+ run: |
+ source env/bin/activate
+ mkdocs build
+
+ - name: Store built documentation artifacts
+ uses: actions/upload-artifact@v4
with:
- message: "Nemesis v${{ inputs.version }}"
- push: true
- tag: "v${{ inputs.version }}"
+ name: docs
+ path: site
+
+ helm:
+ name: Package Helm charts
+ needs: verify
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
- name: Install Helm
env:
@@ -121,17 +124,15 @@ jobs:
done
done
- - name: Package Helm charts
+ - name: Create Chart packages
env:
- PACKAGE_DIR: ${{ env.PACKAGE_DIR }}
CHARTS_DIR: ${{ inputs.charts_dir }}
run: |
- mkdir -p $PACKAGE_DIR
- find $CHARTS_DIR -maxdepth 2 -mindepth 2 -type f -name "Chart.yaml" -printf '%h\n' | xargs -I % bash -c "helm package -d $PACKAGE_DIR %"
+ mkdir -p dist
+ find $CHARTS_DIR -maxdepth 2 -mindepth 2 -type f -name "Chart.yaml" -printf '%h\n' | xargs -I % bash -c "helm package -d dist %"
- name: Pull in previous index.yaml file if it exists
env:
- PACKAGE_DIR: ${{ env.PACKAGE_DIR }}
GH_TOKEN: ${{ github.token }}
run: |
PAGES_URL=$(gh api \
@@ -141,34 +142,79 @@ jobs:
| jq -r '.html_url')
if [[ "$PAGES_URL" != "null" ]]; then
- HTTP_STATUS=$(curl -sL -w '%{http_code}' "${PAGES_URL%/}/index.yaml" -o ${PACKAGE_DIR}/index.yaml)
+ HTTP_STATUS=$(curl -sL -w '%{http_code}' "${PAGES_URL%/}/index.yaml" -o dist/index.yaml)
if [[ "$HTTP_STATUS" != "200" ]]; then
- rm ${PACKAGE_DIR}/index.yaml
+ rm dist/index.yaml
fi
fi
- name: Update Helm repository index.yaml file
env:
- PACKAGE_DIR: ${{ env.PACKAGE_DIR }}
CHART_BASE_URL: ${{ github.server_url }}/${{ github.repository }}/releases/download/v${{ inputs.version }}
run: |
- if [ -f ${PACKAGE_DIR}/index.yaml ]; then
- helm repo index $PACKAGE_DIR --merge ${PACKAGE_DIR}/index.yaml --url $CHART_BASE_URL
+ if [ -f dist/index.yaml ]; then
+ helm repo index dist --merge dist/index.yaml --url $CHART_BASE_URL
else
- helm repo index $PACKAGE_DIR --url $CHART_BASE_URL
+ helm repo index dist --url $CHART_BASE_URL
fi
- - name: Create Github release with the Helm charts
- env:
- PACKAGE_DIR: ${{ env.PACKAGE_DIR }}
- VERSION: v${{ inputs.version }}
- GH_TOKEN: ${{ github.token }}
- run: gh release create ${VERSION} -R ${{ github.repository }} -t "Nemesis $VERSION" -n "Nemesis $VERSION release" $PACKAGE_DIR/*.tgz
+ - name: Store Helm chart artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: charts
+ path: dist
- - name: Remove packaged Helm charts
- env:
- PACKAGE_DIR: ${{ env.PACKAGE_DIR }}
- run: rm -f ${PACKAGE_DIR}/*.tgz
+ release:
+ name: Publish and release files
+ needs:
+ - verify
+ - docs
+ - helm
+
+ # Provision a Github token with repository and pages write permissions
+ permissions:
+ contents: write
+ pages: write
+ id-token: write
+
+ # Use the github-pages environment. The actions/deploy-pages workflow fails with a
+ # "Invalid environment node id" error if an environment is not specified.
+ # https://github.com/actions/deploy-pages/issues/271
+ environment:
+ name: github-pages
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Configure git
+ run: |
+ git config user.name "$GITHUB_ACTOR"
+ git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+
+ - name: Create a git tag for the release
+ uses: EndBug/add-and-commit@v9
+ with:
+ message: "Nemesis v${{ inputs.version }}"
+ push: true
+ tag: "v${{ inputs.version }}"
+
+ - name: Download documentation site files
+ uses: actions/download-artifact@v4
+ with:
+ name: docs
+ path: site
+
+ - name: Download Helm chart files
+ uses: actions/download-artifact@v4
+ with:
+ name: charts
+ path: dist
+
+ - name: Merge Chart index.yaml file with documentation files
+ run: mv dist/index.yaml site/index.yaml
- name: Setup Github pages
uses: actions/configure-pages@v4
@@ -176,11 +222,17 @@ jobs:
- name: Create Github pages artifact
uses: actions/upload-pages-artifact@v3
with:
- path: ${{ env.PACKAGE_DIR }}
+ path: site
- - name: Deploy Helm chart repository to Github pages
+ - name: Deploy Github pages site
uses: actions/deploy-pages@v4
+ - name: Create Github release with the Helm charts
+ env:
+ VERSION: v${{ inputs.version }}
+ GH_TOKEN: ${{ github.token }}
+ run: gh release create ${VERSION} -R ${{ github.repository }} -t "Nemesis $VERSION" -n "Nemesis $VERSION release" dist/*.tgz
+
- name: Remove Github release and tag on failure
continue-on-error: true
if: ${{ failure() }}
diff --git a/.gitignore b/.gitignore
index bfde74b..5986748 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,4 +8,5 @@ __pycache__
nemesis.config
config.yml
submit_to_nemesis.yaml
-submit/
\ No newline at end of file
+submit/
+site
diff --git a/README.md b/README.md
index 6e69110..9c6b1b5 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-
+
@@ -33,15 +33,15 @@ Built on Kubernetes with scale in mind, our goal with Nemesis was to create a ce
Nemesis aims to automate a number of repetitive tasks operators encounter on engagements, empower operators’ analytic capabilities and collective knowledge, and create structured and unstructured data stores of as much operational data as possible to help guide future research and facilitate offensive data analysis.
-# Setup / Installation
+## Setup / Installation
Follow the [quickstart guide](docs/quickstart.md)
Or see the full [setup instructions](docs/setup.md)
-# Usage
+## Usage
See the [Nemesis Usage Guide](docs/usage_guide.md).
-# Contributing / Development Environment Setup
+## Contributing / Development Environment Setup
See [development.md](./docs/development.md)
## Further Reading
@@ -54,7 +54,7 @@ See [development.md](./docs/development.md)
| *On (Structured) Data* | Jul 26, 2023 | https://posts.specterops.io/on-structured-data-707b7d9876c6 |
-# Acknowledgments
+## Acknowledgments
Nemesis is built on large chunk of other people's work. Throughout the codebase we've provided citations, references, and applicable licenses for anything used or adapted from public sources. If we're forgotten proper credit anywhere, please let us know or submit a pull request!
diff --git a/docs/access_nemesis.md b/docs/access_nemesis.md
index 3f10b68..eed16a6 100644
--- a/docs/access_nemesis.md
+++ b/docs/access_nemesis.md
@@ -7,9 +7,9 @@ If you use Minikube, by default, services are not exposed anywhere outside of th
In the examples below, the following assumptions are made:
- Minikube server IP: `192.168.230.42`.
-- Nemesis's [`nemesisHttpServer` option](../helm/nemesis/values.yaml) is configured to be `https://192.168.230.42:7443/`
+- Nemesis's [`nemesisHttpServer` option](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml) is configured to be `https://192.168.230.42:7443/`
-To quickly setup an SSH port forward, you can use the [minikube_port_forward.sh](../scripts/minikube_port_forward.sh) script:
+To quickly setup an SSH port forward, you can use the [minikube_port_forward.sh](https://github.com/SpecterOps/Nemesis/blob/main/scripts/minikube_port_forward.sh) script:
```bash
cd Nemesis/scripts/
./minikube_port_forward.sh 7443
@@ -65,7 +65,7 @@ There's many ways you can do this (kubectl, SSH local port forward, Socat, IP ta
**SSH**
Using an SSH local port forward is our preferred method right now as it's simple to setup and proven reliable.
-Let's say you configure the [`nemesisHttpServer` option](../helm/nemesis/values.yaml#L8) to listen on port `:7443`. Running the following command on the k8s host will expose the Minikube's endpoint externally (output in Step 1) using an SSH local port forward:
+Let's say you configure the [`nemesisHttpServer` option](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml#L8) to listen on port `:7443`. Running the following command on the k8s host will expose the Minikube's endpoint externally (output in Step 1) using an SSH local port forward:
```bash
ssh -N -L :7443:192.168.49.2:30123
```
@@ -84,4 +84,4 @@ sudo setcap CAP_NET_BIND_SERVICE=+eip $(which kubectl)
```
## Accessing Nemesis via Docker Desktop
-Nemesis can run locally Docker Desktop. In that case, once Nemesis is deployed, you can access the nginx endpoint at `https://localhost/`.
\ No newline at end of file
+Nemesis can run locally on Docker Desktop. In that case, once Nemesis is deployed, you can access the nginx endpoint at `https://localhost/`.
diff --git a/docs/development.md b/docs/development.md
index cd72204..d8afc49 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -20,13 +20,13 @@ sudo unzip protoc-21.5-linux-x86_64.zip -d /usr/local/
**Also ensure you have minikube and skaffold setup from the [setup](./setup.md) guide.**
-# Running Nemesis during Dev
+## Running Nemesis during Dev
-If you're doing general development, if you set the **operation.environment** variable in [values.yaml](../helm/nemesis/values.yaml) to *test* which will deploy everything without persistent storage. Then running `skaffold dev -m nemesis` will build the images and kick everything off.
+If you're doing general development, if you set the **operation.environment** variable in [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml) to *test* which will deploy everything without persistent storage. Then running `skaffold dev -m nemesis` will build the images and kick everything off.
-If you want to perform remote debugging for the `enrichment` container (see [remote_debugging.md](remote_debugging.md)) set the **operation.environment** variable in [values.yaml](../helm/nemesis/values.yaml) to *development* for the Helm chart which will deploy everything but the `enrichment` container without persistent storage. Run `skaffold dev -m nemesis` (or `helm install nemesis ./helm/nemesis --timeout '45m'` to use the public images) and then launching `skaffold dev -m enrichment` via VS Code will kick off the separate chart for just the enrichment container.
+If you want to perform remote debugging for the `enrichment` container (see [remote_debugging.md](remote_debugging.md)) set the **operation.environment** variable in [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml) to *development* for the Helm chart which will deploy everything but the `enrichment` container without persistent storage. Run `skaffold dev -m nemesis` (or `helm install nemesis ./helm/nemesis --timeout '45m'` to use the public images) and then launching `skaffold dev -m enrichment` via VS Code will kick off the separate chart for just the enrichment container.
-# Service Development
+## Service Development
The recommended way to develop a new (or modify a current) service is with VS Code
and a remote workspace. This allows you to write and debug code without having to
@@ -51,7 +51,7 @@ Once the remote session has been established:
**Note:** If you want to reset your Poetry environment, [see this post](https://stackoverflow.com/a/70064450).
-# Building and Troubleshooting Docker Images
+## Building and Troubleshooting Docker Images
You can build and troubleshoot Nemesis's docker containers using the docker CLI. For example, to troublehshoot the enrichment image you can do the following:
1. Build the image and give it a name of "test"
@@ -69,7 +69,7 @@ To build the images inside of k8s, you can use skaffold:
skaffold build
```
-# Testing file processing
+## Testing file processing
One can test file processing using the `./scripts/submit_to_nemesis.sh` script. To configure the script, modify the settings in `./cmd/enrichment/enrichment/cli/submit_to_nemesis/submit_to_nemesis.yaml`.
The `./sample_files/` folder contains many examples of files that Nemesis can process. For example, to test Nemesis's ability to extract a .ZIP file and process all the files inside of the zip, configure the YAML file and then run (make sure to specify the absolute path):
@@ -80,7 +80,7 @@ The `./sample_files/` folder contains many examples of files that Nemesis can pr
To see a list of all command line arguments run `./scripts/submit_to_nemesis.sh -h`.
-# kubectl / kubernetes version skews
+## kubectl / kubernetes version skews
According to [kubernetes](https://kubernetes.io/releases/version-skew-policy/#kubectl) it's the best practice to keep kubectl and the kubernetes image used by minikube in sync. You can tell the versions of both with:
@@ -110,7 +110,7 @@ You can then specify the kubernetes imge pulled by minikube with:
minikube start --kubernetes-version v1.25.4
```
-# ./scripts/
+## ./scripts/
The following describes the files in the ./scripts/ directory:
diff --git a/docs/elasticsearch-kibana.md b/docs/elasticsearch-kibana.md
index dd6bbe7..aa5a271 100644
--- a/docs/elasticsearch-kibana.md
+++ b/docs/elasticsearch-kibana.md
@@ -1,4 +1,5 @@
-# Infra
+# ELK
+
Elastic is deployed using [the Elastic operator and is installed via helm](https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-install-helm.html). The operator runs inside the `elastic-system` namespace and its logs can be viewed with the following command:
```bash
kubectl logs -n elastic-system sts/elastic-operator
@@ -6,13 +7,13 @@ kubectl logs -n elastic-system sts/elastic-operator
The custom resource definitions that the helm chart installs can all be found [here](https://github.com/elastic/cloud-on-k8s/tree/2.3/config/samples). The helm chart can be found [here](https://github.com/elastic/cloud-on-k8s/tree/main/deploy).
-# Storage
+## Storage
-By default this Elasticsearch instance uses a persistent data store. The size of the datastore can be adjusted in the [values.yaml](../helm/nemesis/values.yaml) file by modifying the `storage: 20Gi` in the `elasticsearch` config section.
+By default this Elasticsearch instance uses a persistent data store. The size of the datastore can be adjusted in the [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml) file by modifying the `storage: 20Gi` in the `elasticsearch` config section.
-To use temporary storage that is wiped on every run, set "environment" to "test" at the top of [values.yaml](../helm/nemesis/values.yaml).
+To use temporary storage that is wiped on every run, set "environment" to "test" at the top of [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml).
-# Accessing Elastic/Kibana
+## Accessing Elastic/Kibana
To login to Elastic or Kibana, run `./scripts/get_service_credentials.sh` to get the basic auth credentials.
Example to confirm elastic is working:
@@ -22,11 +23,11 @@ export BASIC_AUTH_PASSWORD=$(kubectl get secret basic-auth -o jsonpath="{.data.p
curl -k -u "$BASIC_AUTH_USER:$BASIC_AUTH_PASSWORD" "https://localhost:8080/elastic/"
```
-# Troubleshooting
-## Helm can't install the elastic operator
+## Troubleshooting
+### Helm can't install the elastic operator
See https://github.com/elastic/cloud-on-k8s/issues/5325#issuecomment-1124682097. Confirmed that fix worked, as did upgrading minikube to at least v1.26.1
-## Elastic endpoints for troubleshooting
+### Elastic endpoints for troubleshooting
- Any issues with cluster allocation:
- https://localhost:8080/elastic/_cluster/allocation/explain
@@ -40,12 +41,12 @@ See https://github.com/elastic/cloud-on-k8s/issues/5325#issuecomment-1124682097.
By default, Elastic will not allocate additional shard if 90%+ of the hard disk in the pod is allocated. If Minikube fills up, the drive space will be reflected as also filled in the Elastic container. This causes Kibana to fail in object creation, and Kibana will be stuck in a non-functional startup look. The solution is to ssh into minikube with `minikube ssh` and then run `docker system prune` to free up resources.
-# Example queries:
+## Example queries:
An "easy" way to build elastic search queries is to do the search in Kibana's "Discover" page, click the "Inspect" button in the top right, and click on "Request":
![Getting ES query from Kibana](images/kibana-get-es-request.png)
-## 1 - Simple search via query string for a process name and message GUID:
+### 1 - Simple search via query string for a process name and message GUID:
```
curl -k -u nemesis:Qwerty12345 -XGET 'https://192.168.230.52:8080/elastic/process_category/_search?q=name:explorer.exe%20AND%20metadata.messageId:75f07c40-a4fe-4eb7-bfe6-4b1b04c79d4f&pretty'
```
@@ -53,7 +54,7 @@ curl -k -u nemesis:Qwerty12345 -XGET 'https://192.168.230.52:8080/elastic/proces
More info about the query string syntax [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#query-string-syntax).
-## Same search as above, but only returning the name/category fields
+### Same search as above, but only returning the name/category fields
```bash
curl -k -X POST -u nemesis:Qwerty12345 -H 'Content-Type: application/json' 'https://192.168.230.52:8080/elastic/process_category/_search?pretty' -d '
{
@@ -80,7 +81,7 @@ curl -k -X POST -u nemesis:Qwerty12345 -H 'Content-Type: application/json' 'http
'
```
-# Backup and Restore Data
+## Backup and Restore Data
**Use case:** If the minikube node ever needs to be deleted (e.g., something goes wrong to start fresh or you're done with an op) and you want to backup Elastic's data.
**Backup**
diff --git a/docs/hasura.md b/docs/hasura.md
index 73a83c5..19424d9 100644
--- a/docs/hasura.md
+++ b/docs/hasura.md
@@ -1,4 +1,4 @@
-# Overview
+# Hasura Overview
Nemesis uses Hasura to wrap the PostgreSQL backend to easily build a GraphQL and REST API for the structure Nemesis data model.
@@ -14,7 +14,7 @@ There is a [quickstart to Hasura queries here](https://hasura.io/docs/latest/que
## Scripting
-Hasura allows for _external_ queries and subscriptions to the backend schema, very similar to Mythic (in fact, this is what we do for the [Mythic Connector](../cmd/connectors/mythic-connector/README.md)!)
+Hasura allows for _external_ queries and subscriptions to the backend schema, very similar to Mythic (in fact, this is what we do for the [Mythic Connector](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/mythic-connector#readme)!)
### Queries
diff --git a/docs/images/logo.png b/docs/images/logo.png
new file mode 100644
index 0000000..0871b27
Binary files /dev/null and b/docs/images/logo.png differ
diff --git a/img/nemesis_white.png b/docs/images/nemesis_white.png
similarity index 100%
rename from img/nemesis_white.png
rename to docs/images/nemesis_white.png
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..43298b7
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,59 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## Overview
+
+Nemesis is an offensive data enrichment pipeline and operator support system.
+
+Built on Kubernetes with scale in mind, our goal with Nemesis was to create a centralized data processing platform that ingests data produced during offensive security assessments.
+
+Nemesis aims to automate a number of repetitive tasks operators encounter on engagements, empower operators’ analytic capabilities and collective knowledge, and create structured and unstructured data stores of as much operational data as possible to help guide future research and facilitate offensive data analysis.
+
+## Setup / Installation
+See the [setup instructions](setup.md).
+
+## Usage
+See the [Nemesis Usage Guide](usage_guide.md).
+
+## Contributing / Development Environment Setup
+See [development.md](development.md)
+
+## Further Reading
+
+| Post Name | Publication Date | Link |
+|---------------------------------------------|------------------|------------------------------------------------------------------------------------|
+| *Shadow Wizard Registry Gang: Structured Registry Querying* | Sep 5, 2023 | https://posts.specterops.io/shadow-wizard-registry-gang-structured-registry-querying-9a2fab62a26f |
+| *Hacking With Your Nemesis* | Aug 9, 2023 | https://posts.specterops.io/hacking-with-your-nemesis-7861f75fcab4 |
+| *Challenges In Post-Exploitation Workflows* | Aug 2, 2023 | https://posts.specterops.io/challenges-in-post-exploitation-workflows-2b3469810fe9 |
+| *On (Structured) Data* | Jul 26, 2023 | https://posts.specterops.io/on-structured-data-707b7d9876c6 |
+
+
+## Acknowledgments
+
+Nemesis is built on a large chunk of other people's work. Throughout the codebase we've provided citations, references, and applicable licenses for anything used or adapted from public sources. If we've forgotten proper credit anywhere, please let us know or submit a pull request!
+
+We also want to acknowledge Evan McBroom, Hope Walker, and Carlo Alcantara from SpecterOps for their help with the initial Nemesis concept and amazing feedback throughout the development process.
diff --git a/docs/kubernetes.md b/docs/kubernetes.md
index b7f0e1a..64749ee 100644
--- a/docs/kubernetes.md
+++ b/docs/kubernetes.md
@@ -1,4 +1,4 @@
-# Basics
+# Kubernetes Basics
| Description | command |
|----------------------------|--------------------------------------------------------------------------------------------------------|
@@ -12,15 +12,15 @@
May consider aliasing `kubectl` to `k` just so you don't have to type it out each time.
-# Pods keep restarting due to OOMError
+## Pods keep restarting due to OOMError
`OOOError` = Out of memory error
Reasons why it might happen include:
* Pod doesn't have enough memory. Solution: Look at the pod's k8s code and check the `resources` section and increase the memory in [the limits/request section](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
-* The kubernetes node doesn't have enough memory to deploy the pods. Solution: If you're using Minikube, make sure the VM has enough memory and that minikube is configured to start with more memory (see the [preq instructions](prerequisites.md) for Minikube for how to configure the memory).
+* The kubernetes node doesn't have enough memory to deploy the pods. Solution: If you're using Minikube, make sure the VM has enough memory and that minikube is configured to start with more memory (see the prereq instructions for Minikube for how to configure the memory).
* The application has a memory leak, and over time, consumes all the available memory dedicated to it. Solution: Fix the memory leak.
-# CrashLoopBackOff Error
+## CrashLoopBackOff Error
To get information about the pod: `kubectl describe pod [name] --namespace=[namespace]`
diff --git a/docs/nemesis_chart.md b/docs/nemesis_chart.md
index 816c865..f5da564 100644
--- a/docs/nemesis_chart.md
+++ b/docs/nemesis_chart.md
@@ -1,5 +1,5 @@
# Nemesis Helm Chart
-The [`nemesis` Helm chart](../helm/nemesis/) deploys Nemesis's services. You can run the chart with its default configuration using the following command:
+The [`nemesis` Helm chart](https://github.com/SpecterOps/Nemesis/tree/main/helm/nemesis) deploys Nemesis's services. You can run the chart with its default configuration using the following command:
```bash
helm install --repo https://specterops.github.io/Nemesis/ nemesis nemesis --timeout '45m' --set operation.nemesisHttpServer="https://192.168.6.9:443/"
@@ -30,7 +30,7 @@ $ curl -u $(kubectl get secret basic-auth -o jsonpath='{.data.username}' | base6
## Customizing the Deployment
-If you want customize the deployment (e.g., HTTP server URI, pod CPU/memory resources, Minio disk size), you need to download the `nemesis` chart's [values.yaml](../helm/nemesis/values.yaml) file, edit it, and then run the `nemesis` chart using the customize values. You can do so with the following commands:
+If you want to customize the deployment (e.g., HTTP server URI, pod CPU/memory resources, Minio disk size), you need to download the `nemesis` chart's [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml) file, edit it, and then run the `nemesis` chart using the customized values. You can do so with the following commands:
1. Download the quickstart chart's `values.yaml`:
```bash
diff --git a/docs/new_connector.md b/docs/new_connector.md
index 6bfdb16..827db14 100644
--- a/docs/new_connector.md
+++ b/docs/new_connector.md
@@ -11,13 +11,13 @@ Regardless of the connector actions, it will need to somehow save the following
| Project name | PROJECT-X |
| Expiration days (or date) | 100 (or 01/01/2024) |
-# Download Processing
+## Download Processing
File processing is the one flow that differs from other structured data ingestion. First, the file bytes need to be uploaded to Nemesis, and second, a metadata message needs to be posted to kick off processing.
-## Step 1 - File Upload
+### Step 1 - File Upload
-For a file to be processed, the raw file bytes first need to be posted to the correct API route for storage in the data lake. This is accomplished by POSTing the file bytes to the `https:///api/file` which returns a simple JSON response with an `object_id` field containing a UUID that references the uploaded file. For example, to do this in Python (as shown in [mythic-connector](../cmd/connectors/mythic-connector/sync.py)), you would run something like this:
+For a file to be processed, the raw file bytes first need to be posted to the correct API route for storage in the data lake. This is accomplished by POSTing the file bytes to the `https:///api/file` which returns a simple JSON response with an `object_id` field containing a UUID that references the uploaded file. For example, to do this in Python (as shown in [mythic-connector](https://github.com/SpecterOps/Nemesis/blob/main/cmd/connectors/mythic-connector/sync.py)), you would run something like this:
```python
basic = HTTPBasicAuth(NEMESIS_USERNAME, NEMESIS_PASSWORD)
@@ -32,9 +32,9 @@ curl -H "Content-Type: application/octet-stream" -v --user 'nemesis:Qwerty12345'
The `nemesis_file_id` is used in the `file_data` message in Step 2 below. This UUID is the unique reference for the file in Nemesis.
-## Step 2 - File Data Message
+### Step 2 - File Data Message
-After the file is uploaded to Nemesis, a [file_data](./odr/references/file_data.md) ODR message needs to be posted with file metadata information. The example from the [mythic-connector](../cmd/connectors/mythic-connector/sync.py) is:
+After the file is uploaded to Nemesis, a [file_data](odr/references/file_data.md) ODR message needs to be posted with file metadata information. The example from the [mythic-connector](https://github.com/SpecterOps/Nemesis/blob/main/cmd/connectors/mythic-connector/sync.py) is:
```python
metadata = {}
@@ -62,15 +62,15 @@ r = requests.request("POST", f"{NEMESIS_URL}/data", auth=basic, data=data, heade
*Note that timestamps need to be in ISO 8601 UTC form, e.g., 2023-08-01T22:51:35*
-# Other Structured Data
+## Other Structured Data
-For other types of structured data, only a single message needs to be posted to the `http:///api/data` API route, e.g. Step 2 in the downloading processing example. The `metadata["data_type"]` field should be one of the types defined in the [ODR](./odr/references/). The appropriate ODR document will also define the fields and structure needed for the datatype.
+For other types of structured data, only a single message needs to be posted to the `http:///api/data` API route, e.g. Step 2 in the downloading processing example. The `metadata["data_type"]` field should be one of the types defined in the [ODR](odr/references/). The appropriate ODR document will also define the fields and structure needed for the datatype.
Note that the "data" section of the message is an array of dictionaries, i.e., multiple instances of a datatype can be posted in a single message. For example, multiple process messages can exist in the single post.
-As an example, see the `handle_process()` function in the [mythic-connector](../cmd/connectors/mythic-connector/sync.py).
+As an example, see the `handle_process()` function in the [mythic-connector](https://github.com/SpecterOps/Nemesis/blob/main/cmd/connectors/mythic-connector/sync.py).
Example of many of the structured datatypes can be found in the `./sample_files/structured/` folder. Example of using these to submit process data:
```bash
curl -H "Content-Type: application/octet-stream" -k -v --user 'nemesis:Qwerty12345' --data-binary @./sample_files/structured/process_data.json https://192.168.230.42:8080/api/data
-```
\ No newline at end of file
+```
diff --git a/docs/odr/references/host_data/named_pipe.md b/docs/odr/references/host_data/named_pipe.md
index d8eaa16..a8fd240 100644
--- a/docs/odr/references/host_data/named_pipe.md
+++ b/docs/odr/references/host_data/named_pipe.md
@@ -18,4 +18,4 @@ Information about a Windows named pipe.
## Examples
-[named_pipes.json](../../../../sample_files/structured/named_pipes.json)
\ No newline at end of file
+[named_pipes.json](https://github.com/SpecterOps/Nemesis/blob/main/sample_files/structured/named_pipes.json)
diff --git a/docs/overview.md b/docs/overview.md
index 061ef0f..a2ea7ce 100644
--- a/docs/overview.md
+++ b/docs/overview.md
@@ -1,4 +1,4 @@
-# Goal
+# Overview
The goal of Nemesis is to create an extensible data-processing system for
Advesary Simulation operations which takes data collected from C2 agents and
@@ -40,7 +40,7 @@ with protobuf.
### ODR
The data that can be input into Nemesis is strictly defined in the [Operational Data
-Reference (ODR)](#odr/README.md).
+Reference (ODR)](odr/README.md).
#### ODR Protobuf
diff --git a/docs/postgres.md b/docs/postgres.md
index a31c1f6..3d989bc 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -1,17 +1,17 @@
-# Overview
+# PostgreSQL Overview
In addition to Elasticsearch for an unstructed/NoSQL approach, we are using PostgreSQL to store structured data such as DPAPI blobs/masterkeys/etc.
The database schema for Postgres is at `./helm/nemesis/files/postgres/nemesis.sql`. It mimics the Protobufs defined in ./packages/nemesis.proto, **but are not guaranteed to match!**
-We do not recommend interacting with Postgres directly- instead, use the [`/hasura/`](#hasura.md) endpoint
+We do not recommend interacting with Postgres directly- instead, use the [`/hasura/`](hasura.md) endpoint
-# Storage
+## Storage
-By default this PostgreSQL instance uses a persistent data store. The size of the datastore can be adjusted in [values.yaml](./helm/nemesis/values.yaml) by modifying the `storage: 15Gi` in the postgres section.
+By default this PostgreSQL instance uses a persistent data store. The size of the datastore can be adjusted in [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml) by modifying the `storage: 15Gi` in the postgres section.
-To use temporary storage that is wiped on every run, set the `operation.environment` value in [values.yaml](./helm/nemesis/values.yaml) to "test".
+To use temporary storage that is wiped on every run, set the `operation.environment` value in [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml) to "test".
## pgAdmin
-A pgAdmin interface is exposed at `NEMESIS_URL/pgadmin` with the credentials from [values.yaml](./helm/nemesis/values.yaml)
+A pgAdmin interface is exposed at `NEMESIS_URL/pgadmin` with the credentials from [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml)
diff --git a/docs/quickstart_chart.md b/docs/quickstart_chart.md
index 8ce1344..ce32c78 100644
--- a/docs/quickstart_chart.md
+++ b/docs/quickstart_chart.md
@@ -1,5 +1,5 @@
# Quickstart Helm Chart
-The purpose of the [`quickstart` Helm chart](../helm/quickstart/) is to configure and set secrets for each Nemesis service (e.g., usernames and passwords and ingress TLS certificates). You can run the quickstart chart with the following command:
+The purpose of the [`quickstart` Helm chart](https://github.com/SpecterOps/Nemesis/tree/main/helm/quickstart) is to configure and set secrets for each Nemesis service (e.g., usernames and passwords and ingress TLS certificates). You can run the quickstart chart with the following command:
```bash
helm install --repo https://specterops.github.io/Nemesis/ nemesis-quickstart quickstart
@@ -12,8 +12,8 @@ echo "Basic Auth Username: ${BASIC_AUTH_USER}"
echo "Basic Auth Password: ${BASIC_AUTH_PASSWORD}"
```
-# Customizing the Configuration
-If you want customize any of the services' secrets, you need to download the `quickstart` chart's [values.yaml](../helm/quickstart/values.yaml) file, edit it, and then run the `quickstart` chart using the customized values. You can do so with the following commands:
+## Customizing the Configuration
+If you want to customize any of the services' secrets, you need to download the `quickstart` chart's [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/quickstart/values.yaml) file, edit it, and then run the `quickstart` chart using the customized values. You can do so with the following commands:
1. Download the quickstart chart's `values.yaml`:
```bash
diff --git a/docs/rabbitmq.md b/docs/rabbitmq.md
index 56b3ee4..ef50cd0 100644
--- a/docs/rabbitmq.md
+++ b/docs/rabbitmq.md
@@ -1,25 +1,25 @@
-# Design decisions
+# RabbitMQ Overview
* Didn't go with the RabbitMQ operator because it doesn't play nicely with skaffold and is a fairly simple wrapper for the statefulset
* Doesn't delete pods/statefulsets during shutdown
* Didn't go with just the docker container because to scale up you need to deploy them one at a time (so used a statefulset).
-# Scaling
+## Scaling
Rabbitmq scales vertifcally better (so give it more CPU + RAM)
Add more replicas by editing `./helm/nemesis/templates/rabbitmq.service.yaml`.
-# Performance testing
+## Performance testing
```
kubectl run perf-test -it --rm --image=pivotalrabbitmq/perf-test -- --uri amqp://rabbit:Qwerty12345@nemesis-rabbitmq-svc
```
-# Plumber example usage:
+## Plumber example usage:
plumber read rabbit --address="amqp://nemesis:Qwerty12345@192.168.230.42:5672" --queue-name="file_data" --exchange-name="nemesis" --binding-key=" " --queue-durable -f
-# References
+## References
* https://blog.rabbitmq.com/posts/2020/08/deploying-rabbitmq-to-kubernetes-whats-involved/
* https://github.com/GoogleCloudPlatform/click-to-deploy/tree/master/k8s/rabbitmq
\ No newline at end of file
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..bfcc47b
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,3 @@
+mkdocs==1.5.3
+mkdocs-material==9.5.17
+pygments==2.17.2
diff --git a/docs/requirements_docker_desktop.md b/docs/requirements_docker_desktop.md
index 7926007..f3021b3 100644
--- a/docs/requirements_docker_desktop.md
+++ b/docs/requirements_docker_desktop.md
@@ -52,7 +52,7 @@ helm repo add bitnami https://charts.bitnami.com/bitnami
Run `helm install --repo https://specterops.github.io/Nemesis/ nemesis-quickstart quickstart`
- If you want to edit any of the password values for Nemesis, edit them in [values.yaml](../helm/quickstart/values.yaml).
+ If you want to edit any of the password values for Nemesis, edit them in [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/quickstart/values.yaml)
```
helm show values --repo https://specterops.github.io/Nemesis/ nemesis > quickstart-values.yaml
diff --git a/docs/setup.md b/docs/setup.md
index b4ba140..fdbfb3f 100644
--- a/docs/setup.md
+++ b/docs/setup.md
@@ -1,5 +1,6 @@
# Nemesis Installation and Setup
-1. Ensure the [requisite software/hardware is installed](./requirements.md).
+
+1. Ensure the [requisite software/hardware is installed](requirements.md).
2. Run the [`quickstart` Helm chart](quickstart_chart.md) to configure Nemesis's services and secrets.
@@ -11,28 +12,34 @@
If you run into any issues, please see [troubleshooting.md](troubleshooting.md) for common errors/issues.
-# Data Ingestion
+
+## Data Ingestion
+
Once Nemesis is running, data first needs to be ingested into the platform. Ingestion into Nemesis can occur in muliple ways, including
+
* [Auto-ingesting data from C2 platorms.](#nemesis-c2-connector-setup)
* Manually uploading files on the "File Upload" page in the Nemesis's Dashboard UI.
-* Using the [submit_to_nemesis](./submit_to_nemesis.md) CLI tool to submit files.
+* Using the [submit_to_nemesis](submit_to_nemesis.md) CLI tool to submit files.
* Writing custom tools to interact with [Nemesis's API](new_connector.md).
-## Nemesis C2 Connector Setup
+
+### Nemesis C2 Connector Setup
+
Nemesis includes connectors for various C2 platorms. The connectors hook into the C2 platforms and transfer data automatically into Nemesis. The `./cmd/connectors/` folder contains the following C2 connectors:
-- [Cobalt Strike](../cmd/connectors/cobaltstrike-nemesis-connector/README.md)
-- [Mythic](../cmd/connectors/mythic-connector/README.md)
-- [Sliver](../cmd/connectors/sliver-connector/README.md)
-- [OST Stage1](../cmd/connectors/stage1-connector/README.md)
-- [Metasploit](../cmd/connectors/metasploit-connector/README.md)
-- [Chrome Extension](../cmd/connectors/chrome-extension/README.md)
+- [Cobalt Strike](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/cobaltstrike-nemesis-connector#readme)
+- [Mythic](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/mythic-connector#readme)
+- [Sliver](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/sliver-connector#readme)
+- [OST Stage1](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/stage1-connector#readme)
+- [Metasploit](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/metasploit-connector#readme)
+- [Chrome Extension](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/chrome-extension#readme)
***Note: not all connectors have the same level of completeness! We intended to show the range of connectors possible, but there is not yet feature parity.***
-If you'd like to ingest data from another platform, see the documentation for [adding a new connector](./new_connector.md).
+If you'd like to ingest data from another platform, see the documentation for [adding a new connector](new_connector.md).
-# Nemesis Service Endpoints
+
+## Nemesis Service Endpoints
All Nemesis services are exposed through a single HTTP endpoint (defined in the NEMESIS_HTTP_SERVER environment variable) protected by HTTP basic auth credentials configured through the `BASIC_AUTH_USER` and `BASIC_AUTH_PASSWORD` settings.
@@ -53,12 +60,14 @@ To see a basic landing page with exposed services, go to http `NEMESIS_HTTP_SERV
| yara | /yara/ | N/A | N/A |
| crack-list | /crack-list/ | N/A | N/A |
-# (Optional) Install logging and monitoring services by running the following:
+
+## (Optional) Install logging and monitoring services by running the following:
```bash
helm install --repo https://specterops.github.io/Nemesis/ monitoring monitoring
```
-# (Optional) Install Metrics Server
+
+## (Optional) Install Metrics Server
Metrics Server is available but not installed by default. Enable it with the following:
```bash
@@ -72,35 +81,37 @@ metricsServer:
enabled: true
```
-If you have not installed Nemesis yet, see [Nemesis Chart](./nemesis_chart.md) or upgrade the installation:
+If you have not installed Nemesis yet, see [Nemesis Chart](nemesis_chart.md) or upgrade the installation:
```bash
helm upgrade --repo https://specterops.github.io/Nemesis/ [chart name] nemesis
```
-# (Optional) Changing Persistent File Storage
+
+## (Optional) Changing Persistent File Storage
Elasticsearch, PostgreSQL, and Minio (if using instead of AWS S3) have persistent storage volumes in the cluster.
-## File Storage Backend
-Nemesis can use AWS S3 (in conjunction with KMS for file encryption) for file storage by modifying the `storage` setting in [values.yaml](../helm/nemesis/values.yaml) and configuring the `aws` block.
+### File Storage Backend
+
+Nemesis can use AWS S3 (in conjunction with KMS for file encryption) for file storage by modifying the `storage` setting in [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml) and configuring the `aws` block.
By default, Nemesis uses Minio for file storage with a default storage size of `30Gi`.
-To change the size, modify the `minio.persistence.size` value in [values.yaml](../helm/nemesis/values.yaml) file.
+To change the size, modify the `minio.persistence.size` value in [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml) file.
-## Elasticsearch
+### Elasticsearch
-The default storage size is 20Gi. To change this, modify the `elasticsearch.storage` value in [values.yaml](../helm/nemesis/values.yaml).
+The default storage size is 20Gi. To change this, modify the `elasticsearch.storage` value in [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml).
-## PostgreSQL
+### PostgreSQL
-The default storage size is 20Gi. To change this, modify the `postgres.storage` value in [values.yaml](../helm/nemesis/values.yaml).
+The default storage size is 20Gi. To change this, modify the `postgres.storage` value in [values.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/values.yaml).
-# (Optional) Change Nemesis's Listening Port
+## (Optional) Change Nemesis's Listening Port
Create the `traefik-config.yaml` manifest with the following content:
@@ -119,15 +130,18 @@ spec:
exposedPort: 8443
```
-# (Optional) Deleting Running Pods
+## (Optional) Deleting Running Pods
+
-## Using Helm
+### Using Helm
`helm uninstall nemesis && kubectl delete all --all -n default`
-## Using Skaffold
+
+### Using Skaffold
`skaffold delete`
-# (Optional) Running Helm local charts
+
+## (Optional) Running Helm local charts
If you do not want to run the Helm charts hosted on `https://specterops.github.io/Nemesis/`, you can run them locally. For example:
```bash
helm install nemesis-quickstart ./helm/quickstart
@@ -136,10 +150,9 @@ helm install nemesis-monitoring ./helm/monitoring
```
-# Troubleshooting, Common Errors, and Support
-
+## Troubleshooting, Common Errors, and Support
-## Need additional help?
+### Need additional help?
If you run into any issues, please see [troubleshooting.md](troubleshooting.md) for common errors/issues.
Otherwise, [file an issue](https://github.com/SpecterOps/Nemesis/issues) or feel free to ask questions in the [#nemesis-chat channel](https://bloodhoundhq.slack.com/archives/C05KN15CCGP) in the Bloodhound Slack ([click here to join](https://ghst.ly/BHSlack)).
diff --git a/docs/stylesheets/colors.css b/docs/stylesheets/colors.css
new file mode 100644
index 0000000..57a719a
--- /dev/null
+++ b/docs/stylesheets/colors.css
@@ -0,0 +1,145 @@
+[data-md-color-scheme="nemesis-dark"] {
+ color-scheme: dark;
+
+ --md-primary-fg-color: #262730;
+
+ --md-hue: 210;
+
+ --md-default-fg-color: hsla(var(--md-hue), 15%, 90%, 0.82);
+ --md-default-fg-color--light: hsla(var(--md-hue), 15%, 90%, 0.56);
+ --md-default-fg-color--lighter: hsla(var(--md-hue), 15%, 90%, 0.32);
+ --md-default-fg-color--lightest: hsla(var(--md-hue), 15%, 90%, 0.12);
+ --md-default-bg-color: #0E1116;
+ --md-default-bg-color--light: hsla(var(--md-hue), 15%, 14%, 0.54);
+ --md-default-bg-color--lighter: hsla(var(--md-hue), 15%, 14%, 0.26);
+ --md-default-bg-color--lightest: hsla(var(--md-hue), 15%, 14%, 0.07);
+
+ --md-code-fg-color: hsla(var(--md-hue), 18%, 86%, 0.82);
+ --md-code-bg-color: hsla(var(--md-hue), 15%, 18%, 1);
+
+ --md-code-hl-number-color: #619a8c;
+ --md-code-hl-special-color: var(--md-code-hl-number-color);
+ --md-code-hl-function-color: #5382db;
+ --md-code-hl-string-color: #61a242;
+ --md-code-hl-operator-color: #965822;
+ --md-code-hl-name-color: #e4e4e4;
+ --md-code-hl-keyword-color: #4b74c6;
+ --md-code-hl-punctuation-color: #808080;
+ --md-code-hl-constant-color: hsla(250, 62%, 70%, 1);
+ --md-code-hl-comment-color: rgb(98, 103, 119);
+ --md-code-hl-variable-color: #ffffff;
+ --md-code-hl-color: hsla(218, 100%, 58%, 1);
+ --md-code-hl-color--light: hsla(218, 100%, 58%, 0.1);
+ --md-code-hl-generic-color: var(--md-default-fg-color--light);
+
+ --md-typeset-color: var(--md-default-fg-color);
+ --md-typeset-a-color: #498AFB;
+
+ --md-typeset-kbd-color: hsla(var(--md-hue), 15%, 90%, 0.12);
+ --md-typeset-kbd-accent-color: hsla(var(--md-hue), 15%, 90%, 0.2);
+ --md-typeset-kbd-border-color: hsla(var(--md-hue), 15%, 14%, 1);
+
+ --md-typeset-mark-color: hsla(218, 100%, 63%, 0.3);
+
+ --md-typeset-table-color: hsla(var(--md-hue), 15%, 95%, 0.12);
+ --md-typeset-table-color--light: hsla(var(--md-hue), 15%, 95%, 0.035);
+
+ --md-admonition-fg-color: var(--md-default-fg-color);
+ --md-admonition-bg-color: var(--md-default-bg-color);
+
+ --md-footer-bg-color: hsla(var(--md-hue), 15%, 10%, 0.87);
+ --md-footer-bg-color--dark: hsla(var(--md-hue), 15%, 8%, 1);
+
+ --md-shadow-z1:
+ 0 0.2rem 0.5rem hsla(0, 0%, 0%, 0.05),
+ 0 0 0.05rem hsla(0, 0%, 0%, 0.1);
+
+ --md-shadow-z2:
+ 0 0.2rem 0.5rem hsla(0, 0%, 0%, 0.25),
+ 0 0 0.05rem hsla(0, 0%, 0%, 0.25);
+
+ --md-shadow-z3:
+ 0 0.2rem 0.5rem hsla(0, 0%, 0%, 0.4),
+ 0 0 0.05rem hsla(0, 0%, 0%, 0.35);
+
+ img[src$="#only-light"],
+ img[src$="#gh-light-mode-only"] {
+ display: none;
+ }
+}
+
+[data-md-color-scheme="nemesis-light"] {
+ color-scheme: light;
+
+ --md-primary-fg-color: #262730;
+
+ --md-hue: 225deg;
+
+ --md-default-fg-color: hsla(0, 0%, 0%, 0.87);
+ --md-default-fg-color--light: hsla(0, 0%, 0%, 0.54);
+ --md-default-fg-color--lighter: hsla(0, 0%, 0%, 0.32);
+ --md-default-fg-color--lightest: hsla(0, 0%, 0%, 0.07);
+ --md-default-bg-color: hsla(0, 0%, 100%, 1);
+ --md-default-bg-color--light: hsla(0, 0%, 100%, 0.7);
+ --md-default-bg-color--lighter: hsla(0, 0%, 100%, 0.3);
+ --md-default-bg-color--lightest: hsla(0, 0%, 100%, 0.12);
+
+ --md-code-fg-color: hsla(200, 18%, 26%, 1);
+ --md-code-bg-color: hsla(200, 0%, 96%, 1);
+
+ --md-code-hl-color: hsla(218, 100%, 63%, 1);
+ --md-code-hl-color--light: hsla(218, 100%, 63%, 0.1);
+
+ --md-code-hl-number-color: hsla(0, 67%, 50%, 1);
+ --md-code-hl-special-color: hsla(340, 83%, 47%, 1);
+ --md-code-hl-function-color: hsla(291, 45%, 50%, 1);
+ --md-code-hl-constant-color: hsla(250, 63%, 60%, 1);
+ --md-code-hl-keyword-color: hsla(219, 54%, 51%, 1);
+ --md-code-hl-string-color: hsla(150, 63%, 30%, 1);
+ --md-code-hl-name-color: var(--md-code-fg-color);
+ --md-code-hl-operator-color: var(--md-default-fg-color--light);
+ --md-code-hl-punctuation-color: var(--md-default-fg-color--light);
+ --md-code-hl-comment-color: var(--md-default-fg-color--light);
+ --md-code-hl-generic-color: var(--md-default-fg-color--light);
+ --md-code-hl-variable-color: var(--md-default-fg-color--light);
+
+ --md-typeset-color: var(--md-default-fg-color);
+
+ --md-typeset-a-color: #498AFB;
+
+ --md-typeset-del-color: hsla(6, 90%, 60%, 0.15);
+ --md-typeset-ins-color: hsla(150, 90%, 44%, 0.15);
+
+ --md-typeset-kbd-color: hsla(0, 0%, 98%, 1);
+ --md-typeset-kbd-accent-color: hsla(0, 100%, 100%, 1);
+ --md-typeset-kbd-border-color: hsla(0, 0%, 72%, 1);
+
+ --md-typeset-mark-color: hsla(60, 100%, 50%, 0.5);
+
+ --md-typeset-table-color: hsla(0, 0%, 0%, 0.12);
+ --md-typeset-table-color--light: hsla(0, 0%, 0%, 0.035);
+
+ --md-admonition-fg-color: var(--md-default-fg-color);
+ --md-admonition-bg-color: var(--md-default-bg-color);
+
+ --md-warning-fg-color: hsla(0, 0%, 0%, 0.87);
+ --md-warning-bg-color: hsla(60, 100%, 80%, 1);
+
+ --md-footer-fg-color: hsla(0, 0%, 100%, 1);
+ --md-footer-fg-color--light: hsla(0, 0%, 100%, 0.7);
+ --md-footer-fg-color--lighter: hsla(0, 0%, 100%, 0.45);
+ --md-footer-bg-color: hsla(0, 0%, 0%, 0.87);
+ --md-footer-bg-color--dark: hsla(0, 0%, 0%, 0.32);
+
+ --md-shadow-z1:
+ 0 0.2rem 0.5rem hsla(0, 0%, 0%, 0.05),
+ 0 0 0.05rem hsla(0, 0%, 0%, 0.1);
+
+ --md-shadow-z2:
+ 0 0.2rem 0.5rem hsla(0, 0%, 0%, 0.1),
+ 0 0 0.05rem hsla(0, 0%, 0%, 0.25);
+
+ --md-shadow-z3:
+ 0 0.2rem 0.5rem hsla(0, 0%, 0%, 0.2),
+ 0 0 0.05rem hsla(0, 0%, 0%, 0.35);
+}
diff --git a/docs/submit_to_nemesis.md b/docs/submit_to_nemesis.md
index f5999f2..c9c594c 100644
--- a/docs/submit_to_nemesis.md
+++ b/docs/submit_to_nemesis.md
@@ -1,11 +1,12 @@
-# Overview of submit_to_nemesis
+# submit_to_nemesis
+
`submit_to_nemesis` is a CLI tool used to upload files to Nemesis. Its targeted audience is operators who want to upload files using the CLI and Nemesis developers who want to quickly test sample files.
-# Docker
+## Docker
-If you want to use the pre-build Docker container to submit artifacts to Nemesis, run [monitor_folder_docker.sh](../scripts/monitor_folder_docker.sh). The only requirement for the script is Docker and wget.
+If you want to use the pre-built Docker container to submit artifacts to Nemesis, run [monitor_folder_docker.sh](https://github.com/SpecterOps/Nemesis/blob/main/scripts/monitor_folder_docker.sh). The only requirements for the script are Docker and wget.
-# Requirements
+## Requirements
Install with the instructions below.
@@ -14,7 +15,7 @@ Python, Pyenv, and Poetry
To get Nemesis running, Python 3.11.2 is needed, as well as Pyenv/Poetry.
-## Install Pyenv
+### Install Pyenv
**Purpose:** Manages python environments in a sane way.
1. Install the [relevant prereqs specified by PyEnv](https://github.com/pyenv/pyenv/wiki#suggested-build-environment).
@@ -44,7 +45,7 @@ eval "$(pyenv init -)"
**Validation:** Running `python3 --version` should show version 3.11.2.
-## Install Poetry
+### Install Poetry
**Purpose:** Python package and dependency management tool.
```bash
python3 -c 'from urllib.request import urlopen; print(urlopen("https://install.python-poetry.org").read().decode())' | python3 -
@@ -59,7 +60,7 @@ Restart your shell.
**Validation:** Running `poetry --version` from the shell should output the current version.
-## Install Poetry Environment for Artifact Submission
+### Install Poetry Environment for Artifact Submission
**Purpose:** Install the Poetry environment for ./scripts/submit_to_nemesis.sh
`./scripts/submit_to_nemesis.sh` uses code from a Nemesis module that needs its Poetry environment installed first.
@@ -69,10 +70,10 @@ poetry -C ./cmd/enrichment/ install
```
-# Configuring
-To use `submit_to_nemesis`, one must edit the YAML configuration file found in `cmd/enrichment/enrichment/cli/submit_to_nemesis/submit_to_nemesis.yaml` ([link to YAML file](../cmd/enrichment/enrichment/cli/submit_to_nemesis/submit_to_nemesis.yaml)). This config file includes the credentials to authenticate to Nemesis, the location of the Nemesis server, and information about the operation that Nemesis will tag each uploaded file with (operator name, project, network, etc.).
+## Configuring
+To use `submit_to_nemesis`, one must edit the YAML configuration file found in `cmd/enrichment/enrichment/cli/submit_to_nemesis/submit_to_nemesis.yaml` ([link to YAML file](https://github.com/SpecterOps/Nemesis/blob/main/cmd/enrichment/enrichment/cli/submit_to_nemesis/submit_to_nemesis.yaml)). This config file includes the credentials to authenticate to Nemesis, the location of the Nemesis server, and information about the operation that Nemesis will tag each uploaded file with (operator name, project, network, etc.).
-# Usage
+## Usage
Once configured, in the root Nemesis directory run
```
./scripts/submit_to_nemesis.sh -h
@@ -100,4 +101,4 @@ Below are some example usage scenarios:
* Stress test the Nemesis installation by submitting a folder of files 100 times with 30 workers:
```
./scripts/submit_to_nemesis.sh --folder sample_files/ -w 30 -r 100
-```
\ No newline at end of file
+```
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 4f3c224..3e2ba1b 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -26,7 +26,7 @@ Then, reinstall everything but specify a higher timeout period (e.g., 90 minutes
helm install --repo https://specterops.github.io/Nemesis/ nemesis nemesis --timeout '90m'`
```
-While Nemesis is deploying, you can quickly monitor deployed pods by running the [watch_pods.sh](../scripts/watch_pods.sh) script. If the image is still pulling, usually the pod's status will be `ContainerCreating` or `Init: #/#`. You can run `kubectl describe pods ` to view some details about the pod, and if it's still pulling the image there will be an event similar to this:
+While Nemesis is deploying, you can quickly monitor deployed pods by running the [watch_pods.sh](https://github.com/SpecterOps/Nemesis/blob/main/scripts/watch_pods.sh) script. If the image is still pulling, usually the pod's status will be `ContainerCreating` or `Init: #/#`. You can run `kubectl describe pods ` to view some details about the pod, and if it's still pulling the image there will be an event similar to this:
```
Events:
Type Reason Age From Message
@@ -69,4 +69,4 @@ If minikube can connect to the internet but DNS isn't working, add the following
## Need additional help?
-Please [file an issue](https://github.com/SpecterOps/Nemesis/issues) or feel free to ask questions in the [#nemesis-chat channel](https://bloodhoundhq.slack.com/archives/C05KN15CCGP) in the Bloodhound Slack ([click here to join](https://ghst.ly/BHSlack)).
\ No newline at end of file
+Please [file an issue](https://github.com/SpecterOps/Nemesis/issues) or feel free to ask questions in the [#nemesis-chat channel](https://bloodhoundhq.slack.com/archives/C05KN15CCGP) in the Bloodhound Slack ([click here to join](https://ghst.ly/BHSlack)).
diff --git a/docs/usage_guide.md b/docs/usage_guide.md
index bcc0e57..7663e57 100644
--- a/docs/usage_guide.md
+++ b/docs/usage_guide.md
@@ -21,28 +21,29 @@ For a general overview of the Nemesis project structure, see [overview.md](overv
- [Elasticsearch/Kibana](#elasticsearchkibana)
-# Data Ingestion
+## Data Ingestion
Once Nemesis is running, data first needs to be ingested into the platform. Ingestion into Nemesis can occur in muliple ways, including:
+
* [Auto-ingesting data from C2 platorms.](#nemesis-c2-connector-setup)
* [Manually uploading files on the "File Upload" page in the Nemesis's Dashboard UI.](#manual-file-upload)
* Using the [submit_to_nemesis](submit_to_nemesis.md) CLI tool to submit files.
* Writing custom tools to interact with [Nemesis's API](new_connector.md).
-## Nemesis C2 Connector Setup
+### Nemesis C2 Connector Setup
Nemesis includes connectors for various C2 platorms. The connectors hook into the C2 platforms and transfer data automatically into Nemesis. The `./cmd/connectors/` folder contains the following C2 connectors:
-- [Cobalt Strike](../cmd/connectors/cobaltstrike-nemesis-connector/README.md)
-- [Mythic](../cmd/connectors/mythic-connector/README.md)
-- [Sliver](../cmd/connectors/sliver-connector/README.md)
-- [OST Stage1](../cmd/connectors/stage1-connector/README.md)
-- [Metasploit](../cmd/connectors/metasploit-connector/README.md)
-- [Chrome Extension](../cmd/connectors/chrome-extension/README.md)
+- [Cobalt Strike](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/cobaltstrike-nemesis-connector#readme)
+- [Mythic](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/mythic-connector#readme)
+- [Sliver](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/sliver-connector#readme)
+- [OST Stage1](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/stage1-connector#readme)
+- [Metasploit](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/metasploit-connector#readme)
+- [Chrome Extension](https://github.com/SpecterOps/Nemesis/tree/main/cmd/connectors/chrome-extension#readme)
***Note: not all connectors have the same level of completeness! We intended to show the range of connectors possible, but there is not yet feature parity.***
-# Nemesis Dashboard
+## Nemesis Dashboard
The main method for operators/analysts to interact with Nemesis data is through the Nemesis Dashboard. The dashboard can be accessed at `http://NEMESIS_IP:8080/dashboard/`. The initial display shows details about the number of processed files:
@@ -65,7 +66,7 @@ For each file entry, the file path, download timestamp, size (in bytes), SHA1 ha
The top icons by each file will let you download a file, view the raw file bytes in a new browser tab, optionally view a PDF of the file (if applicable) in a new browser tab, download the decompiled .NET source code for an assembly (if applicable), and view the [File Details](#file-details) for the file.
-### File Triage
+#### File Triage
At the top right of each file card, there is a **Triage** section with 👍 , 👎, and ❓ icons. These icons correspond to "Interesting", "Not Interesting", and "Unknown", respectively. When an icon is clicked, the triage value for the file is stored in the Nemesis backend and the file card is hidden (hidden cards can be reshown via the search filters). The selected icon will be reflected in the [File Details](#file-details) page for the file. This default behavior allows for multiple operators/analysts to easily triage large numbers of files without duplicated effort.
@@ -73,7 +74,7 @@ Additionally, comments can be saved for the file, which are also persistently sa
![Nemesis Dashboard File Notes](images/nemesis-dashboard-files-notes.png)
-## File Details
+### File Details
The **File Details** page for an individual file will display the same file card (containing path/timestamp/size/SHA1/magic type/tags/comments, triage icons, and comments) as [Files](#files) page. Beneath the card the bytes of the file (or plaintext if not binary) are displayed in a [Monaco editor](https://microsoft.github.io/monaco-editor/), a Visual Studio Code-light component that contains appropriate syntax highlighting and VSCode shortcuts.
@@ -91,7 +92,7 @@ Likewise, if there are any Yara matches, a **Yara Matches** tab will appear. Thi
![Nemesis Dashboard File Notes](images/nemesis-dashboard-file-viewer-yara.png)
-## Manual File Upload
+### Manual File Upload
Files can be manually uploaded through the Nemesis dashboard via the `File Upload` tab on the left navigation bar. After navigating to the page, information such as the operator ID, ..., needs to be completed. This information is saved via browser cookies and does not need to be entered every time. The "Original File Path" is optional but recommended. Files can be dragged/dropped into the upload modal, and on successful submission Nemesis will display the following message:
@@ -99,25 +100,25 @@ Files can be manually uploaded through the Nemesis dashboard via the `File Uploa
The file will then be displayed in the [Files](#files) page as soon as it's done processing.
-## Document Search
+### Document Search
Nemesis will extract plaintext from any files that can have ASCII/Unicode text extracted and indexes the text into the Elasticsearch backend. The **Document Search** page on the left navigation bar allows operators/analysts to search through any indexed text. Search can accomplished via **Full Document Search** and **Snippet Search**.
-### Full Document Search
+#### Full Document Search
The default search will match the supplied search term(s) in indexed plaintext documents, displaying paginated searches along with some details about the orignating document:
![Nemesis Dashboard File Search](images/nemesis-dashboard-document-search-full.png)
-#### Source Code Search
+##### Source Code Search
The **Source Code Search** tab on the top of the page functions similarly to the raw text search, but only searches indexed source code documents, as opposed to "regular" plaintext documents. To search source code, change the search index in the dropdown search filters to `source_code`:
![Nemesis Dashboard Source Code Search](images/nemesis-dashboard-document-search-source-code.png)
-### Snippet Search
+#### Snippet Search
-The **Snippet Search** tab operates a bit differently. In addition to being normally indexed in Elasticsearch, all text extracted from plaintext documents by Nemesis are also broken into chunks and run through a small [embedding model](https://www.elastic.co/what-is/vector-embedding) to produce fixed-length vector embeddings. The model currently being used is [gte-tiny](https://huggingface.co/TaylorAI/gte-tiny) but this can be modified in the [nlp.deployment.yaml](./helm/nemesis/templates/nlp.deployment.yaml) of the NLP container. These embeddings are stored in Elasticsearch along with the associated chunked text, allowing for [sematic search](https://en.wikipedia.org/wiki/Semantic_search) over indexed text.
+The **Snippet Search** tab operates a bit differently. In addition to being normally indexed in Elasticsearch, all text extracted from plaintext documents by Nemesis are also broken into chunks and run through a small [embedding model](https://www.elastic.co/what-is/vector-embedding) to produce fixed-length vector embeddings. The model currently being used is [gte-tiny](https://huggingface.co/TaylorAI/gte-tiny) but this can be modified in the [nlp.yaml](https://github.com/SpecterOps/Nemesis/blob/main/helm/nemesis/templates/nlp.yaml) of the NLP container. These embeddings are stored in Elasticsearch along with the associated chunked text, allowing for [semantic search](https://en.wikipedia.org/wiki/Semantic_search) over indexed text.
In addition, we also exploit the BM25 text search of Elasticsearch over the sparse indexed text. The two lists of results are fused with [Reciprocal Rank Fusion (RRF)](https://learn.microsoft.com/en-us/azure/search/hybrid-search-ranking) and the reordered list of snippets is presented to the user:
@@ -127,7 +128,7 @@ If you want to _only_ use the more traditional/fuzzy BM25 search and now the vec
See [this Twitter thread for more background on this approach](https://x.com/harmj0y/status/1757511877255471299).
-### Search Filtering
+#### Search Filtering
Both "Full Document Search" and "Snippet Search" allow for file paths/patterns to include or exclude in searches. These can be wildcard paths or extensions, and multiple `|` delineated terms can be specified.
@@ -139,13 +140,13 @@ To exclude .pdfs from searching:
![Nemesis Dashboard Exclude Filtering](images/nemesis-dashboard-document-search-exclude-filter.png)
-# Alerting
+## Alerting
If Slack alerting is enabled, alerts on "interesting" files (e.g., parsed credentials, Nosey Parker hits, DPAPI data discovery, etc.) will be pushed to the configuered Slack webhook/channel with **Nemesis** as the bot user. These messages will contain the alert name, sanitized file name, file SHA1, download timestamp, agent ID, message details, and a link to the [file details](#file-details) in the dashboard:
![Nemesis Slack Alerting](images/nemesis-slack-alerting.png)
-# Elasticsearch/Kibana
+## Elasticsearch/Kibana
Navigating to `http://NEMESIS_IP:8080/kibana/` will lead to the main Kibana dashboard for all indexed data (creds are set in `nemesis.config`)
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000..6d6b833
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,90 @@
+site_name: Nemesis Documentation
+site_url: https://specterops.github.io/Nemesis/
+repo_url: https://github.com/SpecterOps/Nemesis
+repo_name: SpecterOps/Nemesis
+edit_uri: edit/main/docs/
+
+plugins:
+ - search
+
+
+exclude_docs: |
+ /requirements.txt
+
+nav:
+ - Nemesis: index.md
+ - Quickstart: quickstart.md
+ - Installation:
+ - Setup: setup.md
+ - Requirements: requirements.md
+ - Unsupported Platforms:
+ - Docker Desktop: requirements_docker_desktop.md
+ - Minikube: requirements_minikube.md
+ - Quickstart Chart: quickstart_chart.md
+ - Nemesis Chart: nemesis_chart.md
+ - Usage:
+ - usage_guide.md
+ - overview.md
+ - access_nemesis.md
+ - elasticsearch-kibana.md
+ - hasura.md
+ - kubernetes.md
+ - postgres.md
+ - rabbitmq.md
+ - submit_to_nemesis.md
+ - troubleshooting.md
+ - Developer:
+ - development.md
+ - new_connector.md
+ - new_seatbelt_datatype_ingestion.md
+ - new_service.md
+ - new_odr_datatype.md
+ - remote_debugging.md
+ - Operational Data References:
+ - odr/README.md
+ - References:
+ - odr/references/authentication_data.md
+ - odr/references/cookie.md
+ - odr/references/file_data.md
+ - odr/references/file_information.md
+ - odr/references/network_connection.md
+ - odr/references/path_list.md
+ - odr/references/raw_data.md
+ - Host Data:
+ - odr/references/host_data/named_pipe.md
+ - odr/references/host_data/process.md
+ - odr/references/host_data/registry_value.md
+ - odr/references/host_data/service.md
+
+theme:
+ name: material
+ language: en
+ logo: images/logo.png
+ favicon: images/logo.png
+
+ features:
+ - navigation.sections
+
+ icon:
+ repo: fontawesome/brands/github
+
+ palette:
+ - media: "(prefers-color-scheme: light)"
+ scheme: nemesis-light
+ toggle:
+ icon: material/brightness-7
+ name: Switch to dark mode
+
+ - media: "(prefers-color-scheme: dark)"
+ scheme: nemesis-dark
+ toggle:
+ icon: material/brightness-4
+ name: Switch to light mode
+
+extra_css:
+ - stylesheets/colors.css
+
+markdown_extensions:
+ - pymdownx.superfences
+ - pymdownx.highlight:
+ use_pygments: true
\ No newline at end of file