diff --git a/.github/workflows/build-and-push.yml b/.github/workflows/build-and-push.yml deleted file mode 100644 index f1974b03..00000000 --- a/.github/workflows/build-and-push.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Build and push container image - -env: - IMAGE_NAME: "rapidast" - IMAGE_TAGS: "${{ github.sha }}" - IMAGE_REGISTRY: quay.io/redhatproductsecurity - IMAGE_REGISTRY_USER: ${{ secrets.IMAGE_REGISTRY_USER }} - IMAGE_REGISTRY_PASSWORD: ${{ secrets.IMAGE_REGISTRY_PASSWORD }} - -on: - push: - branches: ["development", "main"] - -jobs: - - build-and-push: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - # https://github.com/redhat-actions/buildah-build#readme - - name: Build container image - id: build-image - uses: redhat-actions/buildah-build@v2 - with: - image: ${{ env.IMAGE_NAME }} - tags: ${{ env.IMAGE_TAGS }} - dockerfiles: | - ./containerize/Containerfile - - # https://github.com/redhat-actions/push-to-registry#readme - - name: Push to registry - id: push-image - uses: redhat-actions/push-to-registry@v2 - with: - image: ${{ steps.build-image.outputs.image }} - tags: ${{ steps.build-image.outputs.tags }} - registry: ${{ env.IMAGE_REGISTRY }} - username: ${{ env.IMAGE_REGISTRY_USER }} - password: ${{ env.IMAGE_REGISTRY_PASSWORD }} diff --git a/.github/workflows/build-image.yml b/.github/workflows/build-image.yml deleted file mode 100644 index e1ce7f9e..00000000 --- a/.github/workflows/build-image.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Build container image - -env: - IMAGE_NAME: "rapidast" - IMAGE_TAGS: "${{ github.sha }}" - -on: - pull_request: - branches: ["development", "main"] - -jobs: - - build-image: - - runs-on: ubuntu-latest - - # https://github.com/redhat-actions/buildah-build#readme - steps: - - uses: actions/checkout@v4 - - name: Build container image - uses: redhat-actions/buildah-build@v2 - with: - image: ${{ env.IMAGE_NAME }} - tags: ${{ env.IMAGE_TAGS }} - dockerfiles: | - ./containerize/Containerfile diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 96005c4c..c9f8e8a1 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -10,8 +10,7 @@ permissions: contents: read jobs: - test: - + lint: runs-on: ubuntu-latest steps: @@ -24,9 +23,23 @@ jobs: run: | python3 -m ensurepip --upgrade pip install --no-cache-dir -r requirements.txt -r requirements-dev.txt - - name: Test with pytest - run: | - pytest - name: Lint with pre-commit hook run: | pre-commit run --all-files --show-diff-on-failure + + unit-tests: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.9 + uses: actions/setup-python@v3 + with: + python-version: "3.9" + - name: Install dependencies + run: | + python3 -m ensurepip --upgrade + pip install --no-cache-dir -r requirements.txt -r requirements-dev.txt + - name: Test with pytest + run: | + pytest tests diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 88488353..19fd0c2f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,20 +1,21 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.2.0 + rev: v4.6.0 hooks: - id: trailing-whitespace language_version: python3 + exclude: \.csv$ - id: end-of-file-fixer - id: debug-statements language_version: python3 - repo: https://github.com/asottile/reorder_python_imports - rev: v3.0.1 + rev: v3.14.0 hooks: - id: reorder-python-imports language_version: python3 - repo: https://github.com/psf/black - rev: 23.1.0 + rev: 23.12.1 hooks: - 
id: black args: @@ -26,7 +27,7 @@ repos: require_serial: true - repo: https://github.com/asottile/pyupgrade - rev: v2.32.0 + rev: v2.38.4 hooks: - id: pyupgrade language_version: python3 @@ -37,6 +38,7 @@ repos: # W0603 - Using the global statement # C0114,C0115,C0116 - docstring checks. Disabled because of pydocstyle checks # W0107 - unnecessary pass + # W0511: fixme # W0702: No exception type(s) specified (bare-except) # R0801: Similar lines in 2 files. Disabled because it flags any file even those which are unrelated # R1705: Unnecessary "elif" after "return", remove the leading "el" from "elif" (no-else-return) @@ -44,13 +46,13 @@ repos: - repo: https://github.com/PyCQA/pylint #rev: v3.0.3 - rev: v2.17.4 + rev: v2.17.7 hooks: - id: pylint exclude: ^tests/ args: - --max-line-length=120 - --min-public-methods=0 - - --good-names=q,f,fp,i,e - - --disable=E0401,W1201,W1203,C0114,C0115,C0116,C0411,W0107,W0702,R0801,R1705,R1710 + - --good-names=o,w,q,f,fp,i,e + - --disable=E0401,W1201,W1203,C0114,C0115,C0116,C0411,W0107,W0511,W0702,R0801,R1705,R1710 language_version: python3 diff --git a/.tekton/integration-test.yaml b/.tekton/integration-test.yaml new file mode 100644 index 00000000..0d62bb1f --- /dev/null +++ b/.tekton/integration-test.yaml @@ -0,0 +1,225 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Pipeline +metadata: + name: rapidast-e2e +spec: + params: + - name: repo_url + default: github.com/RedHatProductSecurity/rapidast + - name: revision + default: "development" + - description: 'Snapshot of the application' + name: SNAPSHOT + default: |- + '{ + "components": [ + { + "name":"rapidast", + "containerImage": "quay.io/redhatproductsecurity/rapidast:latest", + "source":{ + "git":{ + "url":"git@github.com:RedHatProductSecurity/rapidast.git", + "revision":"development", + } + } + } + ] + }' + type: string + + tasks: + - name: parse-metadata + taskRef: + resolver: git + params: + - name: url + value: https://github.com/konflux-ci/integration-examples + - name: revision + value: main + - name: pathInRepo + value: tasks/test_metadata.yaml + params: + - name: SNAPSHOT + value: $(params.SNAPSHOT) + + - name: provision-eaas-space-nessus + runAfter: + - parse-metadata + taskRef: + resolver: git + params: + - name: url + value: https://github.com/konflux-ci/build-definitions.git + - name: revision + value: main + - name: pathInRepo + value: task/provision-env-with-ephemeral-namespace/0.1/provision-env-with-ephemeral-namespace.yaml + params: + - name: KONFLUXNAMESPACE + value: $(context.pipelineRun.namespace) + - name: PIPELINERUN_NAME + value: $(context.pipelineRun.name) + - name: PIPELINERUN_UID + value: $(context.pipelineRun.uid) + + - name: copy-nessus-secret + runAfter: + - provision-eaas-space-nessus + taskSpec: + steps: + - name: copy-nessus-secret + image: registry.redhat.io/openshift4/ose-cli:latest + env: + - name: KUBECONFIG + value: /tmp/kubeconfig + - name: EAAS_KUBECONFIG_VALUE + valueFrom: + secretKeyRef: + name: $(tasks.provision-eaas-space-nessus.results.secretRef) + key: kubeconfig + workingDir: /workspace + script: | + #!/bin/bash -ex + + # initial request will default to in-cluster k8s config + oc whoami + oc get secret sfowler-nessus-pull-secret -o yaml > /tmp/nessus-pull-secret.yaml + sed '/namespace:/d' /tmp/nessus-pull-secret.yaml > /tmp/new-secret.yaml + + # second request should use newly provisioned eaas creds + namespace + echo "$EAAS_KUBECONFIG_VALUE" > "$KUBECONFIG" + oc whoami + oc apply -f /tmp/new-secret.yaml + + - name: provision-eaas-space + runAfter: + - 
parse-metadata + taskRef: + resolver: git + params: + - name: url + value: https://github.com/konflux-ci/build-definitions.git + - name: revision + value: main + - name: pathInRepo + value: task/provision-env-with-ephemeral-namespace/0.1/provision-env-with-ephemeral-namespace.yaml + params: + - name: KONFLUXNAMESPACE + value: $(context.pipelineRun.namespace) + - name: PIPELINERUN_NAME + value: $(context.pipelineRun.name) + - name: PIPELINERUN_UID + value: $(context.pipelineRun.uid) + + # XXX integrations tests can't reference Tasks in the same PR AFAICT + # so need to repeat them inline, rather than define in a separate file + - name: run-e2e-tests + runAfter: + - provision-eaas-space + taskSpec: + volumes: + - name: credentials + emptyDir: {} + results: + - name: TEST_RESULTS + description: e2e test results + steps: + + # XXX not supported to use workspaces in integration tests + - name: clone-repository + image: quay.io/konflux-ci/git-clone:latest + script: | + git config --global --add safe.directory /workspace + git clone "$(tasks.parse-metadata.results.source-git-url)" /workspace + pushd /workspace + git checkout "$(tasks.parse-metadata.results.source-git-revision)" + + - name: test + image: registry.redhat.io/openshift4/ose-cli:latest + env: + - name: KUBECONFIG + value: /tmp/kubeconfig + - name: KUBECONFIG_VALUE + valueFrom: + secretKeyRef: + name: $(tasks.provision-eaas-space.results.secretRef) + key: kubeconfig + - name: RAPIDAST_CLEANUP + value: "false" # namespace will be cleaned up automatically + - name: RAPIDAST_IMAGE + value: $(tasks.parse-metadata.results.component-container-image) + - name: RAPIDAST_SERVICEACCOUNT + value: namespace-manager # created by provision-env-with-ephemeral-namespace + workingDir: /workspace + volumeMounts: + - name: credentials + mountPath: /credentials + script: | + #!/bin/bash -ex + + echo "$KUBECONFIG_VALUE" > "$KUBECONFIG" + oc whoami + + yum install -y python3.12 git + python3.12 -m ensurepip + pip3 install -r requirements.txt -r requirements-dev.txt + pytest -s e2e-tests/test_integration.py --json-report --json-report-summary --json-report-file $(results.TEST_RESULTS.path) + cat $(results.TEST_RESULTS.path) + + - name: run-e2e-tests-nessus + runAfter: + - copy-nessus-secret + taskSpec: + volumes: + - name: credentials + emptyDir: {} + results: + - name: TEST_RESULTS + description: e2e test results + steps: + + # XXX not supported to use workspaces in integration tests: + # * https://issues.redhat.com/browse/STONEINTG-895 + - name: clone-repository + image: quay.io/konflux-ci/git-clone:latest + script: | + git config --global --add safe.directory /workspace + git clone "$(tasks.parse-metadata.results.source-git-url)" /workspace + pushd /workspace + git checkout "$(tasks.parse-metadata.results.source-git-revision)" + + - name: test + image: registry.redhat.io/openshift4/ose-cli:latest + env: + - name: KUBECONFIG + value: /tmp/kubeconfig + - name: KUBECONFIG_VALUE + valueFrom: + secretKeyRef: + name: $(tasks.provision-eaas-space-nessus.results.secretRef) + key: kubeconfig + - name: RAPIDAST_CLEANUP + value: "false" # namespace will be cleaned up automatically + - name: RAPIDAST_IMAGE + value: $(tasks.parse-metadata.results.component-container-image) + - name: RAPIDAST_SERVICEACCOUNT + value: namespace-manager # created by provision-env-with-ephemeral-namespace + workingDir: /workspace + volumeMounts: + - name: credentials + mountPath: /credentials + script: | + #!/bin/bash -ex + + echo "$KUBECONFIG_VALUE" > "$KUBECONFIG" + oc whoami + + # XXX 
temp! + oc get secret sfowler-nessus-pull-secret + + yum install -y python3.12 git + python3.12 -m ensurepip + pip3 install -r requirements.txt -r requirements-dev.txt + pytest -sv e2e-tests/test_nessus.py --json-report --json-report-summary --json-report-file $(results.TEST_RESULTS.path) + cat $(results.TEST_RESULTS.path) diff --git a/.tekton/rapidast-pull-request.yaml b/.tekton/rapidast-pull-request.yaml new file mode 100644 index 00000000..3dcd24fc --- /dev/null +++ b/.tekton/rapidast-pull-request.yaml @@ -0,0 +1,475 @@ +apiVersion: tekton.dev/v1 +kind: PipelineRun +metadata: + annotations: + build.appstudio.openshift.io/repo: https://github.com/RedHatProductSecurity/rapidast?rev={{revision}} + build.appstudio.redhat.com/commit_sha: '{{revision}}' + build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' + build.appstudio.redhat.com/target_branch: '{{target_branch}}' + pipelinesascode.tekton.dev/max-keep-runs: "3" + pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && (target_branch == "development" || target_branch == "main") + creationTimestamp: null + labels: + appstudio.openshift.io/application: rapidast + appstudio.openshift.io/component: rapidast + pipelines.appstudio.openshift.io/type: build + name: rapidast-on-pull-request + namespace: secaut-tenant +spec: + params: + - name: git-url + value: '{{source_url}}' + - name: revision + value: '{{revision}}' + - name: output-image + value: quay.io/redhatproductsecurity/rapidast:on-pr-{{revision}} + - name: image-expires-after + value: 7d + - name: dockerfile + value: containerize/Containerfile + - name: skip-checks + value: "true" + pipelineSpec: + description: | + This pipeline is ideal for building container images from a Containerfile while maintaining trust after pipeline customization. + + _Uses `buildah` to create a container image leveraging [trusted artifacts](https://konflux-ci.dev/architecture/ADR/0036-trusted-artifacts.html). It also optionally creates a source image and runs some build-time tests. Information is shared between tasks using OCI artifacts instead of PVCs. EC will pass the [`trusted_task.trusted`](https://enterprisecontract.dev/docs/ec-policies/release_policy.html#trusted_task__trusted) policy as long as all data used to build the artifact is generated from trusted tasks. + This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/repository/konflux-ci/tekton-catalog/pipeline-docker-build-oci-ta?tab=tags)_ + finally: + - name: show-sbom + params: + - name: IMAGE_URL + value: $(tasks.build-image-index.results.IMAGE_URL) + taskRef: + params: + - name: name + value: show-sbom + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:52f8b96b96ce4203d4b74d850a85f963125bf8eef0683ea5acdd80818d335a28 + - name: kind + value: task + resolver: bundles + params: + - description: Source Repository URL + name: git-url + type: string + - default: "" + description: Revision of the Source Repository + name: revision + type: string + - description: Fully Qualified Output Image + name: output-image + type: string + - default: . + description: Path to the source code of an application's component from where + to build image. 
+ name: path-context + type: string + - default: Dockerfile + description: Path to the Dockerfile inside the context specified by parameter + path-context + name: dockerfile + type: string + - default: "false" + description: Force rebuild image + name: rebuild + type: string + - default: "false" + description: Skip checks against built image + name: skip-checks + type: string + - default: "false" + description: Execute the build with network isolation + name: hermetic + type: string + - default: "" + description: Build dependencies to be prefetched by Cachi2 + name: prefetch-input + type: string + - default: "" + description: Image tag expiration time, time values could be something like + 1h, 2d, 3w for hours, days, and weeks, respectively. + name: image-expires-after + - default: "false" + description: Build a source image. + name: build-source-image + type: string + - default: "false" + description: Add built image into an OCI image index + name: build-image-index + type: string + - default: [] + description: Array of --build-arg values ("arg=value" strings) for buildah + name: build-args + type: array + - default: "" + description: Path to a file with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file + name: build-args-file + type: string + results: + - description: "" + name: IMAGE_URL + value: $(tasks.build-image-index.results.IMAGE_URL) + - description: "" + name: IMAGE_DIGEST + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - description: "" + name: CHAINS-GIT_URL + value: $(tasks.clone-repository.results.url) + - description: "" + name: CHAINS-GIT_COMMIT + value: $(tasks.clone-repository.results.commit) + tasks: + - name: init + params: + - name: image-url + value: $(params.output-image) + - name: rebuild + value: $(params.rebuild) + - name: skip-checks + value: $(params.skip-checks) + taskRef: + params: + - name: name + value: init + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:f239f38bba3a8351c8cb0980fde8e2ee477ded7200178b0f45175e4006ff1dca + - name: kind + value: task + resolver: bundles + - name: clone-repository + params: + - name: url + value: $(params.git-url) + - name: revision + value: $(params.revision) + - name: ociStorage + value: $(params.output-image).git + - name: ociArtifactExpiresAfter + value: $(params.image-expires-after) + runAfter: + - init + taskRef: + params: + - name: name + value: git-clone-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:d1e63ec00bed1c9f0f571fa76b4da570be49a7c255c610544a461495230ba1b1 + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.init.results.build) + operator: in + values: + - "true" + workspaces: + - name: basic-auth + workspace: git-auth + - name: prefetch-dependencies + params: + - name: input + value: $(params.prefetch-input) + - name: SOURCE_ARTIFACT + value: $(tasks.clone-repository.results.SOURCE_ARTIFACT) + - name: ociStorage + value: $(params.output-image).prefetch + - name: ociArtifactExpiresAfter + value: $(params.image-expires-after) + runAfter: + - clone-repository + taskRef: + params: + - name: name + value: prefetch-dependencies-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.1@sha256:3c11f5de6a0281bf93857f0c85bbbdfeda4cc118337da273fef0c138bda5eebb + - name: kind + value: task + resolver: bundles + workspaces: + - name: git-basic-auth + workspace: git-auth + - name: netrc + workspace: netrc + - name: 
build-container + params: + - name: IMAGE + value: $(params.output-image) + - name: DOCKERFILE + value: $(params.dockerfile) + - name: CONTEXT + value: $(params.path-context) + - name: HERMETIC + value: $(params.hermetic) + - name: PREFETCH_INPUT + value: $(params.prefetch-input) + - name: IMAGE_EXPIRES_AFTER + value: $(params.image-expires-after) + - name: COMMIT_SHA + value: $(tasks.clone-repository.results.commit) + - name: BUILD_ARGS + value: + - $(params.build-args[*]) + - name: BUILD_ARGS_FILE + value: $(params.build-args-file) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + - name: CACHI2_ARTIFACT + value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT) + runAfter: + - prefetch-dependencies + taskRef: + params: + - name: name + value: buildah-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-buildah-oci-ta:0.2@sha256:54da71db2ae94d02c0e2662db11d399880e240bcc6a2ae1b3c8e2e9af9298415 + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.init.results.build) + operator: in + values: + - "true" + - name: build-image-index + params: + - name: IMAGE + value: $(params.output-image) + - name: COMMIT_SHA + value: $(tasks.clone-repository.results.commit) + - name: IMAGE_EXPIRES_AFTER + value: $(params.image-expires-after) + - name: ALWAYS_BUILD_INDEX + value: $(params.build-image-index) + - name: IMAGES + value: + - $(tasks.build-container.results.IMAGE_URL)@$(tasks.build-container.results.IMAGE_DIGEST) + runAfter: + - build-container + taskRef: + params: + - name: name + value: build-image-index + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:715fa1fd7a8ebe0da552730e564eef340717b6346f1690ebe06685a252fe88bc + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.init.results.build) + operator: in + values: + - "true" + - name: build-source-image + params: + - name: BINARY_IMAGE + value: $(params.output-image) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + - name: CACHI2_ARTIFACT + value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: source-build-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.1@sha256:ac1f140a8906754f534f647b6b9d76c570e680d8cb8b8f3496f0e0d0fb133351 + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.init.results.build) + operator: in + values: + - "true" + - input: $(params.build-source-image) + operator: in + values: + - "true" + - name: deprecated-base-image-check + params: + - name: IMAGE_URL + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: IMAGE_DIGEST + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: deprecated-image-check + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.4@sha256:443ffa897ee35e416a0bfd39721c68cbf88cfa5c74c843c5183218d0cd586e82 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: clair-scan + params: + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: clair-scan + - name: bundle + value: 
quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:90e371fe7ec2288259a906bc1fd49c53b8b97a0b0b02da0893fb65e3be2a5801 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: ecosystem-cert-preflight-checks + params: + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: ecosystem-cert-preflight-checks + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.1@sha256:5131cce0f93d0b728c7bcc0d6cee4c61d4c9f67c6d619c627e41e3c9775b497d + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: sast-snyk-check + params: + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + - name: CACHI2_ARTIFACT + value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: sast-snyk-check-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.3@sha256:699cfad1caaa4060f0a6de5d5fb376bf2eb90967d89ec4ffef328fd358ac966d + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: clamav-scan + params: + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: clamav-scan + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.1@sha256:1981b5aa330a4d59f59d760e54a36ebd596948abf6a36e45e103d4fd82ecbcf3 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: apply-tags + params: + - name: IMAGE + value: $(tasks.build-image-index.results.IMAGE_URL) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: apply-tags + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.1@sha256:87fd7fc0e937aad1a8db9b6e377d7e444f53394dafde512d68adbea6966a4702 + - name: kind + value: task + resolver: bundles + - name: push-dockerfile + params: + - name: IMAGE + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: IMAGE_DIGEST + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: DOCKERFILE + value: $(params.dockerfile) + - name: CONTEXT + value: $(params.path-context) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: push-dockerfile-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:eee2eb7b5ce2e55dde37114fefe842080c8a8e443dcc2ccf324cfb22b0453db4 + - name: kind + value: task + resolver: bundles + - name: rpms-signature-scan + params: + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: rpms-signature-scan + - name: bundle + value: 
quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:0c9667fba291af05997397a32e5e938ccaa46e93a2e14bad228e64a6427c5545 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + workspaces: + - name: git-auth + optional: true + - name: netrc + optional: true + taskRunTemplate: {} + workspaces: + - name: git-auth + secret: + secretName: '{{ git_auth_secret }}' +status: {} diff --git a/.tekton/rapidast-push.yaml b/.tekton/rapidast-push.yaml new file mode 100644 index 00000000..4f600587 --- /dev/null +++ b/.tekton/rapidast-push.yaml @@ -0,0 +1,501 @@ +apiVersion: tekton.dev/v1 +kind: PipelineRun +metadata: + annotations: + build.appstudio.openshift.io/repo: https://github.com/RedHatProductSecurity/rapidast?rev={{revision}} + build.appstudio.redhat.com/commit_sha: '{{revision}}' + build.appstudio.redhat.com/target_branch: '{{target_branch}}' + pipelinesascode.tekton.dev/max-keep-runs: "3" + pipelinesascode.tekton.dev/on-cel-expression: event == "push" && (target_branch == "development" || target_branch == "main") + creationTimestamp: null + labels: + appstudio.openshift.io/application: rapidast + appstudio.openshift.io/component: rapidast + pipelines.appstudio.openshift.io/type: build + name: rapidast-on-push + namespace: secaut-tenant +spec: + params: + - name: git-url + value: '{{source_url}}' + - name: revision + value: '{{revision}}' + - name: output-image + value: quay.io/redhatproductsecurity/rapidast:{{revision}} + - name: dockerfile + value: containerize/Containerfile + pipelineSpec: + description: | + This pipeline is ideal for building container images from a Containerfile while maintaining trust after pipeline customization. + + _Uses `buildah` to create a container image leveraging [trusted artifacts](https://konflux-ci.dev/architecture/ADR/0036-trusted-artifacts.html). It also optionally creates a source image and runs some build-time tests. Information is shared between tasks using OCI artifacts instead of PVCs. EC will pass the [`trusted_task.trusted`](https://enterprisecontract.dev/docs/ec-policies/release_policy.html#trusted_task__trusted) policy as long as all data used to build the artifact is generated from trusted tasks. + This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/repository/konflux-ci/tekton-catalog/pipeline-docker-build-oci-ta?tab=tags)_ + finally: + - name: show-sbom + params: + - name: IMAGE_URL + value: $(tasks.build-image-index.results.IMAGE_URL) + taskRef: + params: + - name: name + value: show-sbom + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:52f8b96b96ce4203d4b74d850a85f963125bf8eef0683ea5acdd80818d335a28 + - name: kind + value: task + resolver: bundles + params: + - description: Source Repository URL + name: git-url + type: string + - default: "" + description: Revision of the Source Repository + name: revision + type: string + - description: Fully Qualified Output Image + name: output-image + type: string + - default: . + description: Path to the source code of an application's component from where + to build image. 
+ name: path-context + type: string + - default: Dockerfile + description: Path to the Dockerfile inside the context specified by parameter + path-context + name: dockerfile + type: string + - default: "false" + description: Force rebuild image + name: rebuild + type: string + - default: "false" + description: Skip checks against built image + name: skip-checks + type: string + - default: "false" + description: Execute the build with network isolation + name: hermetic + type: string + - default: "" + description: Build dependencies to be prefetched by Cachi2 + name: prefetch-input + type: string + - default: "" + description: Image tag expiration time, time values could be something like + 1h, 2d, 3w for hours, days, and weeks, respectively. + name: image-expires-after + - default: "false" + description: Build a source image. + name: build-source-image + type: string + - default: "false" + description: Add built image into an OCI image index + name: build-image-index + type: string + - default: [] + description: Array of --build-arg values ("arg=value" strings) for buildah + name: build-args + type: array + - default: "" + description: Path to a file with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file + name: build-args-file + type: string + results: + - description: "" + name: IMAGE_URL + value: $(tasks.build-image-index.results.IMAGE_URL) + - description: "" + name: IMAGE_DIGEST + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - description: "" + name: CHAINS-GIT_URL + value: $(tasks.clone-repository.results.url) + - description: "" + name: CHAINS-GIT_COMMIT + value: $(tasks.clone-repository.results.commit) + tasks: + - name: init + params: + - name: image-url + value: $(params.output-image) + - name: rebuild + value: $(params.rebuild) + - name: skip-checks + value: $(params.skip-checks) + taskRef: + params: + - name: name + value: init + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:f239f38bba3a8351c8cb0980fde8e2ee477ded7200178b0f45175e4006ff1dca + - name: kind + value: task + resolver: bundles + - name: clone-repository + params: + - name: url + value: $(params.git-url) + - name: revision + value: $(params.revision) + - name: ociStorage + value: $(params.output-image).git + - name: ociArtifactExpiresAfter + value: $(params.image-expires-after) + runAfter: + - init + taskRef: + params: + - name: name + value: git-clone-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:d1e63ec00bed1c9f0f571fa76b4da570be49a7c255c610544a461495230ba1b1 + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.init.results.build) + operator: in + values: + - "true" + workspaces: + - name: basic-auth + workspace: git-auth + - name: prefetch-dependencies + params: + - name: input + value: $(params.prefetch-input) + - name: SOURCE_ARTIFACT + value: $(tasks.clone-repository.results.SOURCE_ARTIFACT) + - name: ociStorage + value: $(params.output-image).prefetch + - name: ociArtifactExpiresAfter + value: $(params.image-expires-after) + runAfter: + - clone-repository + taskRef: + params: + - name: name + value: prefetch-dependencies-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.1@sha256:3c11f5de6a0281bf93857f0c85bbbdfeda4cc118337da273fef0c138bda5eebb + - name: kind + value: task + resolver: bundles + workspaces: + - name: git-basic-auth + workspace: git-auth + - name: netrc + workspace: netrc + - name: 
build-container + params: + - name: IMAGE + value: $(params.output-image) + - name: DOCKERFILE + value: $(params.dockerfile) + - name: CONTEXT + value: $(params.path-context) + - name: HERMETIC + value: $(params.hermetic) + - name: PREFETCH_INPUT + value: $(params.prefetch-input) + - name: IMAGE_EXPIRES_AFTER + value: $(params.image-expires-after) + - name: COMMIT_SHA + value: $(tasks.clone-repository.results.commit) + - name: BUILD_ARGS + value: + - $(params.build-args[*]) + - name: BUILD_ARGS_FILE + value: $(params.build-args-file) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + - name: CACHI2_ARTIFACT + value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT) + runAfter: + - prefetch-dependencies + taskRef: + params: + - name: name + value: buildah-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-buildah-oci-ta:0.2@sha256:54da71db2ae94d02c0e2662db11d399880e240bcc6a2ae1b3c8e2e9af9298415 + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.init.results.build) + operator: in + values: + - "true" + - name: build-image-index + params: + - name: IMAGE + value: $(params.output-image) + - name: COMMIT_SHA + value: $(tasks.clone-repository.results.commit) + - name: IMAGE_EXPIRES_AFTER + value: $(params.image-expires-after) + - name: ALWAYS_BUILD_INDEX + value: $(params.build-image-index) + - name: IMAGES + value: + - $(tasks.build-container.results.IMAGE_URL)@$(tasks.build-container.results.IMAGE_DIGEST) + runAfter: + - build-container + taskRef: + params: + - name: name + value: build-image-index + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:715fa1fd7a8ebe0da552730e564eef340717b6346f1690ebe06685a252fe88bc + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.init.results.build) + operator: in + values: + - "true" + - name: build-source-image + params: + - name: BINARY_IMAGE + value: $(params.output-image) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + - name: CACHI2_ARTIFACT + value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: source-build-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.1@sha256:ac1f140a8906754f534f647b6b9d76c570e680d8cb8b8f3496f0e0d0fb133351 + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.init.results.build) + operator: in + values: + - "true" + - input: $(params.build-source-image) + operator: in + values: + - "true" + - name: deprecated-base-image-check + params: + - name: IMAGE_URL + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: IMAGE_DIGEST + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: deprecated-image-check + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.4@sha256:443ffa897ee35e416a0bfd39721c68cbf88cfa5c74c843c5183218d0cd586e82 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: clair-scan + params: + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: clair-scan + - name: bundle + value: 
quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:90e371fe7ec2288259a906bc1fd49c53b8b97a0b0b02da0893fb65e3be2a5801 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: ecosystem-cert-preflight-checks + params: + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: ecosystem-cert-preflight-checks + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.1@sha256:5131cce0f93d0b728c7bcc0d6cee4c61d4c9f67c6d619c627e41e3c9775b497d + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: sast-snyk-check + params: + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + - name: CACHI2_ARTIFACT + value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: sast-snyk-check-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.3@sha256:699cfad1caaa4060f0a6de5d5fb376bf2eb90967d89ec4ffef328fd358ac966d + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: clamav-scan + params: + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: clamav-scan + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.1@sha256:1981b5aa330a4d59f59d760e54a36ebd596948abf6a36e45e103d4fd82ecbcf3 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: select-tags + description: select the tags used for apply-tags + runAfter: + - build-image-index + params: + - name: TARGET_BRANCH + value: "{{target_branch}}" + taskSpec: + params: + - name: TARGET_BRANCH + type: string + results: + - name: TAGS + type: array + default: [] + steps: + - name: select-tags-based-on-branch + image: registry.access.redhat.com/ubi9/ubi-minimal:9.4-1194@sha256:73f7dcacb460dad137a58f24668470a5a2e47378838a0190eef0ab532c6e8998 + script: | + #!/bin/bash -ex + + case $(params.TARGET_BRANCH) in + "main") echo '["latest"]' > $(results.TAGS.path) ;; + "development") echo '["development"]' > $(results.TAGS.path) ;; + *) echo '[]' > $(results.TAGS.path) ;; + esac + + echo "selected addtional tags:" + tee < $(results.TAGS.path) + - name: apply-tags + params: + - name: IMAGE + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: ADDITIONAL_TAGS + value: $(tasks.select-tags.results.TAGS) + runAfter: + - select-tags + taskRef: + params: + - name: name + value: apply-tags + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.1@sha256:87fd7fc0e937aad1a8db9b6e377d7e444f53394dafde512d68adbea6966a4702 + - name: kind + value: task + resolver: bundles + - name: push-dockerfile + params: + - name: IMAGE + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: IMAGE_DIGEST + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: DOCKERFILE + value: 
$(params.dockerfile) + - name: CONTEXT + value: $(params.path-context) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: push-dockerfile-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:eee2eb7b5ce2e55dde37114fefe842080c8a8e443dcc2ccf324cfb22b0453db4 + - name: kind + value: task + resolver: bundles + - name: rpms-signature-scan + params: + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: rpms-signature-scan + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:0c9667fba291af05997397a32e5e938ccaa46e93a2e14bad228e64a6427c5545 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + workspaces: + - name: git-auth + optional: true + - name: netrc + optional: true + taskRunTemplate: {} + workspaces: + - name: git-auth + secret: + secretName: '{{ git_auth_secret }}' +status: {} diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000..3b338ced --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,2 @@ +## all changes will assign members from below team as reviewers +* @RedHatProductSecurity/rapidast-admin diff --git a/README.md b/README.md index d362a68b..f212106a 100644 --- a/README.md +++ b/README.md @@ -1,48 +1,61 @@ # RapiDAST -RapiDAST(Rapid DAST) is an open-source security testing tool that automates the process of DAST(Dynamic Application Security Testing) security testing and streamlines the integration of security into your development workflow. It is designed to help Developers and/or QA engineers rapidly and effectively identify low-hanging security vulnerabilities in your applications, ideally in CI/CD pipelines. This will help your organization to move towards DevSecOps with the shift-left approach. +![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/redhatproductsecurity/rapidast/run-tests.yml?branch=development&logo=github&label=CI) ![GitHub License](https://img.shields.io/github/license/redhatproductsecurity/rapidast) -RapiDAST provides values as follows: +RapiDAST (Rapid DAST) is an open-source security testing tool that automates DAST ([Dynamic Application Security Testing](https://owasp.org/www-project-devsecops-guideline/latest/02b-Dynamic-Application-Security-Testing)) and streamlines the integration of security testing into development workflows. It is designed to help Developers and/or QA engineers rapidly and effectively identify low-hanging security vulnerabilities in your applications, ideally in CI/CD pipelines. RapiDAST is for organizations implementing DevSecOps with a shift-left approach. 
-- Ease of use and simple automation of HTTP/API scanning, fully working in CLI with a yaml configuration, taking advantage of [ZAP](https://www.zaproxy.org/)
-- Ability to run automated DAST scanning to suit various users' needs via custom container images
-- HTML, JSON and XML report generation
-- Integration with reporting solutions such as [OWASP DefectDojo](https://owasp.org/www-project-defectdojo/)
+RapiDAST provides:
+
+- Simplified HTTP/API security scanning using [ZAP]
+- Command-line execution with yaml configuration, suitable for integration in CI/CD pipelines
+- Ability to run automated DAST scanning with pre-built or custom container images
+- HTML, JSON and XML report generation
+- Integration with reporting solutions such as [OWASP DefectDojo](https://owasp.org/www-project-defectdojo/)
+
+RapiDAST is used for testing applications, and should not be used on production systems.

-# Getting Started
-
-## Prerequisites
+## Quickstart

-- `python` >= 3.6.8 (3.7 for MacOS/Darwin)
-- `podman` >= 3.0.1
-  + required when you want to run scanners from their container images, rather than installing them to your host.
-- See `requirements.txt` for a list of required python libraries
+Quickly set up RapiDAST to scan a target application. See [Workflow](#workflow) for more information.
+
+1. Create a minimal config file for the target application, see [Configuration](#configuration) section for details
+2. Run RapiDAST with the config file, either in a container or from source code

 ### OS Support

-Linux and MacOS`*` are supported.
+Linux and MacOS are both supported; however, running RapiDAST in a container is currently supported only on Linux. See [MacOS Configuration](#macos) section for more details.

-#### Note regarding MacOS and ZAP
+### Run in container (Linux only)

-RapiDAST supports executing ZAP on the MacOS host directly only.
+Run the pre-built [rapidast container image](https://quay.io/repository/redhatproductsecurity/rapidast), which includes scanners like [ZAP]. Not compatible with config files using `general.container.type` set to `podman`.

-To run RapiDAST on MacOS(See the Configuration section below for more details on configuration):
+**Prerequisites**

-* Set `general.container.type: "none"` or `scanners.zap.container.type: "none"` in the configuration.
-* Configure `scanners.zap.container.parameters.executable` to the installation path of the `zap.sh` command, because it is not available in the PATH. Usually, its path is `/Applications/ZAP.app/Contents/Java/zap.sh` on MacOS.
+- `docker` / `podman` (>= v3.0.1)

-Example:
-
-```yaml
-scanners:
-  zap:
-    container:
-      type: none
-      parameters:
-        executable: "/Applications/ZAP.app/Contents/Java/zap.sh"
+**Run**
+
+```sh
+$ podman run -v ./config.yaml:/opt/rapidast/config/config.yaml:Z quay.io/redhatproductsecurity/rapidast:latest ./rapidast.py
 ```

-## Installation
+**Note**
+
+* Sample config is very minimal and has no [Authentication](#authentication) enabled
+* The `:Z` option is only necessary on RHEL/CentOS/Fedora systems with SELinux enabled
+* To retrieve scan results, add a volume mount like `-v ./results/:/opt/rapidast/results/:Z`. The permissions of the `./results/` directory may need to be modified first with a command like `chmod o+w ./results/` to be writable by the rapidast user in the container.
+
+### Run from source
+
+Install dependencies and run RapiDAST directly on a host machine. Unless using the config setting of `general.container.type: podman`, scanners like [ZAP] are expected to be installed on the host system.
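If installing scanners on the host is not desirable, the same source checkout can delegate scanner execution to `podman` instead. A minimal sketch, using the container options described under [Configuration](#configuration); the `image` override is optional and the image name below is a placeholder, since sensible defaults are provided for each scanner:

```yaml
general:
  container:
    type: "podman" # scanners run from their container images instead of the host

scanners:
  zap:
    container:
      # optional per-scanner override; placeholder image name for illustration
      image: "example.org/zap:latest"
```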
+
+**Prerequisites**
+
+- `python` >= 3.6.8 (3.7 for MacOS/Darwin)
+- `podman` >= 3.0.1
+  + required when you want to run scanners from their container images, rather than installing them to your host.
+- See `requirements.txt` for a list of required python libraries
+
+**Setup**

 Clone the repository.
 ```
@@ -62,15 +75,24 @@ Install the project requirements.
 (venv) $ pip install -r requirements.txt
 ```

-# Usage
+**Run**
+
+Run the RapiDAST script:
+```
+$ ./rapidast.py --config 
+```
+
+**Note**
+* Example minimum config expects scanners like [ZAP] to be available on the host, and will fail if not found. See [Execution Environments](#choosing-the-execution-environment) section for more info
+* Results will be written to the `./results/` directory

 ## Workflow

 This section summarizes the basic workflow as follows:

-1. Create a configuration file for testing the application. See the 'configuration' section below for more information.
-2. Optionally, an environment file may be added, e.g., to separate the secrets from the configuration file.
-3. Run RapiDAST and get the results.
-   - First run with passive scanning only which can save your time at the initial scanning phase. There are various situations that can cause an issue, not only from scanning set up but also from your application or test environment. Active scanning takes a long time in general.
+1. Create a configuration file for testing the application. See the [configuration](#configuration) section below for more information.
+   - Optionally, an [environment file](#advanced-configuration) may be added, e.g., to separate the secrets from the configuration file.
+2. Run RapiDAST and get the results.
+   - First run with passive scanning only, which can save time at the initial scanning phase. There are various situations that can cause an issue, not only from scanning set up but also from your application or test environment. Active scanning takes a long time in general.
   - Once passive scanning has run successfully, run another scan with active scanning enabled in the configuration file.

 ## Configuration

@@ -82,16 +104,36 @@ The configuration file is presented as YAML, and contains several main entries:
+  Each scanner can override an entry from `general` by creating an entry with the same name
 - `scanners` : list of scanners, and their configuration

-See templates in the `config/` directory for examples and ideas.
+See templates in the [config](./config/) directory for examples and ideas.
+
+- `config-template-zap-tiny.yaml` : describes a bare minimum configuration, without authentication options.
 - `config-template-zap-simple.yaml` : describes a generic/minimal use of the ZAP scanner (i.e.: the minimum set of options to get a ZAP scan from RapiDAST)
 - `config-template-zap-mac.yaml` : describes a minimal use of the ZAP scanner on an Apple Mac environment
 - `config-template-zap-long.yaml` : describes a more extensive use of ZAP (all configuration options are presented)
 - `config-template-multi-scan.yaml` : describes how to combine multiple scanners in a single configuration
 - `config-template-generic-scan.yaml` : describes the use of the generic scanner

+### Basic Example
+
+Example bare minimum [config file](./config/config-template-zap-tiny.yaml), without any [Authentication](#authentication) options, and passive scanning only:
+
+```yaml
+config:
+  configVersion: 5
+
+application:
+  shortName: "example-1.0"
+  url: "https://example.com" # root URL of the application
+
+scanners:
+  zap:
+    apiScan:
+      apis:
+        apiUrl: "https://example.com/api/v1/swagger.json" # URL to application openAPI spec
+```
+
 ### Authentication

-Authentication is configured in the `general` entry, as it can be applied to multiple scanning options. Currently, Authentication is applied to ZAP scanning only. In the long term it may be applied to other scanning configurations.
+Authentication is configured in the `general` entry, as it can be applied to multiple scanning options. Currently, authentication is applied to [ZAP] scanning only. In the long term it may be applied to other scanning configurations.

 Supported options:

@@ -137,6 +179,25 @@ This method uses firefox in the background to load a login page and fill in user
 * `loginPageUrl`: the URL to the login page (either the full URL, or relative to the `application.url` value)
 * `verifyUrl`: a URL that "proves" the user is authenticated (either the full URL, or relative to the `application.url` value). This URL must return a success if the user is correctly authenticated, and an error otherwise.

+### MacOS
+
+RapiDAST supports executing scanners like [ZAP] directly on the MacOS host only.
+
+To run RapiDAST on MacOS (see the Configuration section below for more details on configuration):
+
+* Set `general.container.type: "none"` or `scanners.zap.container.type: "none"` in the configuration.
+* Configure `scanners.zap.container.parameters.executable` to the installation path of the `zap.sh` command, because it is not available in the PATH. Usually, its path is `/Applications/ZAP.app/Contents/Java/zap.sh` on MacOS.
+
+Example:
+
+```yaml
+scanners:
+  zap:
+    container:
+      type: none
+      parameters:
+        executable: "/Applications/ZAP.app/Contents/Java/zap.sh"
+```

 ### Advanced configuration

 To avoid this, RapiDAST proposes 2 ways to provide a value for a given configuration entry:
 - Create an entry in the configuration file (this is the usual method)
 - Create an entry in the configuration file pointing to the environment variable that actually contains the data, by appending `_from_var` to the entry name: `general.authentication.parameters.rtoken_from_var=RTOKEN` (in this example, the token value is provided by the `$RTOKEN` environment variable)
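As a sketch, the dotted `_from_var` example above corresponds to the following YAML (the `token_endpoint` value here is illustrative, and `RTOKEN` must be present in RapiDAST's environment at run time):

```yaml
general:
  authentication:
    type: "oauth2_rtoken"
    parameters:
      client_id: "cloud-services"
      token_endpoint: "https://sso.example.com/token" # illustrative endpoint
      rtoken_from_var: "RTOKEN" # token value is read from the $RTOKEN environment variable
```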
+
 #### Running several instances of a scanner

 It is possible to run a scanner several times with different configurations. This is done by adding a different identifier to each scan, by appending `_` to the scanner name.
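For example, a single configuration could run ZAP twice against different API specifications. A sketch; the `zap_unauthenticated` and `zap_authenticated` identifiers are arbitrary illustrations of this naming convention (compare the `nessus_foobar` entry used in the e2e test ConfigMap later in this change):

```yaml
scanners:
  # first ZAP instance
  zap_unauthenticated:
    apiScan:
      apis:
        apiUrl: "https://example.com/api/v1/swagger.json"

  # second ZAP instance, with its own overrides (e.g. a different scope or authentication)
  zap_authenticated:
    apiScan:
      apis:
        apiUrl: "https://example.com/api/v2/swagger.json"
```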
@@ -271,7 +333,7 @@ See https://documentation.defectdojo.com/integrations/importing/#api for more in
 Once you have created a configuration file, you can run a scan with it.

 ```sh
-$ rapidast.py --config ""
+$ ./rapidast.py --config ""
 ```

 There are more options.
@@ -293,18 +355,17 @@ options:

 ### Choosing the execution environment

-Set `general.container.type` to select a runtime (default: 'none')
+Set `general.container.type` to select an environment (default: `none`)
+
++ `none` (default):
+  - Run a RapiDAST scan with scanners that are installed on the same host OR run RapiDAST in a container (scanners are to be installed in the same container image)
+  - __Warning__: without a container layer, RapiDAST may have to modify the host's file system, such as the tools configuration to fit its needs. For example: the ZAP plugin has to copy the policy file used in ZAP's user config directory (`~/.ZAP`)
-Accepted values are as follows:
 + `podman`:
-  - Set when you want to run scanners with their container images and use `podman` to run them.
-  - RapiDAST must not run inside a container.
+  - Run scanners as separate containers using `podman`
+  - RapiDAST must not run inside a container
   - Select the image to load from `scanner..container.image` (sensible defaults are provided for each scanner)
-+ `none`:
-  - Set when you want to run a RapiDAST scan with scanners that are installed on the host or you want to build the RapiDAST container image(scanners are to be built in the same image) and run a scan with it.
-  - __Warning__: without a container layer, RapiDAST may have to modify the host's file system, such as the tools configuration to fit its needs. For example: the ZAP plugin has to copy the policy file used in ZAP's user config directory (`~/.ZAP`)

 It is also possible to set the container type for each scanner differently by setting `scanners..container.type` under a certain scanner configuration. Then the scanner will run from its image, regardless of the `general.container.type` value.
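A minimal sketch of such a per-scanner override, with everything defaulting to the host while ZAP alone runs from its container image:

```yaml
general:
  container:
    type: "none" # default for all scanners

scanners:
  zap:
    container:
      type: "podman" # this scanner runs from its image regardless of the default above
```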
@@ -388,9 +449,9 @@ Example: `podman pod create --userns=keep-id:uid=1000,gid=1000 myApp_Pod`

 This is useful for debugging. Set `scanners.zap.miscOptions.enableUI: True` (default: False). Then, the ZAP desktop will run with GUI on your host and show the progress of scanning.

-+ Disable add-on updates:
++ Enable add-on updates:

-Set `scanners.zap.miscOptions.updateAddons: False` (default: True). Then, ZAP will update its addons first and run the scan.
+Set `scanners.zap.miscOptions.updateAddons: True` (default: False). ZAP will first update its addons and then run the scan.

 + Install additional addons:

@@ -653,3 +714,5 @@ If you encounter any issues or have questions, please [open an issue](https://gi
 Contribution to the project is more than welcome. See [CONTRIBUTING.md](./CONTRIBUTING.md)
+
+[ZAP]: #ZAP
diff --git a/config/config-template-nessus.yaml b/config/config-template-nessus.yaml
new file mode 100644
index 00000000..39d5997f
--- /dev/null
+++ b/config/config-template-nessus.yaml
@@ -0,0 +1,42 @@
+config:
+  # WARNING: `configVersion` indicates the schema version of the config file.
+  # This value tells RapiDAST what schema should be used to read this configuration.
+  # Therefore you should only change it if you update the configuration to a newer schema
+  configVersion: 5
+
+  # all the results of all scanners will be stored under that location
+  # base_results_dir: "./results"
+
+# `application` contains data related to the application, not to the scans.
+application:
+  shortName: "nessus-test-1.0"
+  # url: "" # XXX unused for nessus
+
+# `general` is a section that will be applied to all scanners.
+# Any scanner can override a value by creating an entry of the same name in their own configuration
+general:
+
+  # XXX auth section not yet used by nessus scanner
+  # remove `authentication` entirely for unauthenticated connection
+  # authentication:
+  #   type: "oauth2_rtoken"
+  #   parameters:
+  #     client_id: "cloud-services"
+  #     token_endpoint: ""
+  #     # rtoken_from_var: "RTOKEN" # referring to a env defined in general.environ.envFile
+  #     #preauth: false # set to true to pregenerate a token, and stick to it (no refresh)
+
+# `scanners' is a section that configures scanning options
+scanners:
+  nessus:
+    server:
+      url: https://nessus-example.com/ # URL of Nessus instance
+      username: foo # OR username_from_var: NESSUS_USER
+      password: bar # OR password_from_var: NESSUS_PASSWORD
+    scan:
+      name: test-scan # name of new scan to create
+      folder: test-folder # name of folder to contain the scan
+      policy: "py-test" # policy used for scan
+      # timeout: 600 # timeout in seconds to complete scan
+      targets:
+        - 127.0.0.1
diff --git a/config/config-template-zap-long.yaml b/config/config-template-zap-long.yaml
index 79051f56..b4773952 100644
--- a/config/config-template-zap-long.yaml
+++ b/config/config-template-zap-long.yaml
@@ -188,7 +188,7 @@ scanners:
       # EnableUI (default: false), requires a compatible runtime (e.g.: `type: none`)
       enableUI: False

-      # Defaults to True, set False to prevent auto update of ZAP plugins
+      # Defaults to False, set True to force auto update of ZAP plugins
       updateAddons: True

       # List (comma-separated string or list) of additional addons to install
diff --git a/config/config-template-zap-tiny.yaml b/config/config-template-zap-tiny.yaml
new file mode 100644
index 00000000..ac8ff646
--- /dev/null
+++ b/config/config-template-zap-tiny.yaml
@@ -0,0 +1,12 @@
+config:
+  configVersion: 5
+
+application:
+  shortName: "example-1.0"
+  url: "https://example.com" # root URL of the application
+
+scanners:
+  zap:
+    apiScan:
+      apis:
+        apiUrl: "https://example.com/api/v1/swagger.json" # URL to application openAPI spec
diff --git a/e2e-tests/conftest.py b/e2e-tests/conftest.py
new file mode 100644
index 00000000..0595cc46
--- /dev/null
+++ b/e2e-tests/conftest.py
@@ -0,0 +1,166 @@
+import logging
+import os
+import shutil
+import tempfile
+import time
+from functools import partial
+
+import certifi
+from kubernetes import client
+from kubernetes import config
+from kubernetes import utils
+from kubernetes import watch
+from kubernetes.client.rest import ApiException
+
+NAMESPACE = os.getenv("RAPIDAST_NAMESPACE", "")  # e.g. rapidast--pipeline
+SERVICEACCOUNT = os.getenv("RAPIDAST_SERVICEACCOUNT", "pipeline")  # name of ServiceAccount used in rapidast pod
+RAPIDAST_IMAGE = os.getenv("RAPIDAST_IMAGE", "quay.io/redhatproductsecurity/rapidast:development")
+# delete resources created by tests
+RAPIDAST_CLEANUP = os.getenv("RAPIDAST_CLEANUP", "True").lower() in ("true", "1", "t", "y", "yes")
+
+MANIFESTS = "e2e-tests/manifests"
+
+
+# monkeypatch certifi so that internal CAs are trusted
+def where():
+    return os.getenv("REQUESTS_CA_BUNDLE", "/etc/pki/tls/certs/ca-bundle.crt")
+
+
+certifi.where = where
+
+
+def wait_until_ready(**kwargs):
+    corev1 = client.CoreV1Api()
+    timeout = kwargs.pop("timeout", 120)
+
+    start_time = time.time()
+
+    while time.time() - start_time < timeout:
+        time.sleep(2)
+        try:
+            pods = corev1.list_namespaced_pod(namespace=NAMESPACE, **kwargs)
+        except client.ApiException as e:
+            logging.error(f"Error checking pod status: {e}")
+            return False
+
+        if len(pods.items) != 1:
+            raise RuntimeError(f"Unexpected number of pods {len(pods.items)} matching: {kwargs}")
+        pod = pods.items[0]
+
+        # Check if pod is ready by looking at conditions
+        if pod.status.conditions:
+            for condition in pod.status.conditions:
+                if condition.type == "Ready":
+                    logging.info(f"{pod.metadata.name} Ready={condition.status}")
+                    if condition.status == "True":
+                        return True
+    return False
+
+
+# simulates: $ oc logs -f | tee
+def tee_log(pod_name: str, filename: str):
+    corev1 = client.CoreV1Api()
+    w = watch.Watch()
+    with open(filename, "w", encoding="utf-8") as f:
+        for e in w.stream(corev1.read_namespaced_pod_log, name=pod_name, namespace=NAMESPACE):
+            if not isinstance(e, str):
+                continue  # Watch.stream() can yield non-string types
+            f.write(e + "\n")
+            print(e)
+
+
+def render_manifests(input_dir, output_dir):
+    shutil.copytree(input_dir, output_dir, dirs_exist_ok=True)
+    logging.info(f"rendering manifests in {output_dir}")
+    logging.info(f"using serviceaccount {SERVICEACCOUNT}")
+    # XXX should probably replace this with something like kustomize
+    for filepath in os.scandir(output_dir):
+        with open(filepath, "r", encoding="utf-8") as f:
+            contents = f.read()
+            contents = contents.replace("${IMAGE}", RAPIDAST_IMAGE)
+            contents = contents.replace("${SERVICEACCOUNT}", SERVICEACCOUNT)
+        with open(filepath, "w", encoding="utf-8") as f:
+            f.write(contents)
+
+
+def setup_namespace():
+    global NAMESPACE  # pylint: disable=W0603
+    # only try to create a namespace if env is set
+    if NAMESPACE == "":
+        NAMESPACE = get_current_namespace()
+    else:
+        create_namespace(NAMESPACE)
+    logging.info(f"using namespace '{NAMESPACE}'")
+
+
+def get_current_namespace() -> str:
+    try:
+        # Load the kubeconfig
+        config.load_config()
+
+        # Get the kube config object
+        _, active_context = config.list_kube_config_contexts()
+
+        # Return the namespace from current context
+        if active_context and "namespace" in active_context["context"]:
+            return active_context["context"]["namespace"]
+        return "default"
+
+    except config.config_exception.ConfigException:
+        # If running inside a pod
+        try:
+            with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r", encoding="utf-8") as f:
+                return f.read().strip()
+        except FileNotFoundError:
+            return "default"
{namespace_name}") + namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace_name)) + corev1.create_namespace(namespace) + else: + raise e + except Exception as e: # pylint: disable=W0718 + logging.error(f"error reading namespace {namespace_name}: {e}") + + +def new_kclient(): + config.load_config() + return client.ApiClient() + + +class TestBase: + _teardowns = [] + + @classmethod + def setup_class(cls): + cls.tempdir = tempfile.mkdtemp() + cls.kclient = new_kclient() + render_manifests(MANIFESTS, cls.tempdir) + logging.info(f"testing with image: {RAPIDAST_IMAGE}") + setup_namespace() + + @classmethod + def teardown_class(cls): + # TODO teardown should really occur after each test, so the the + # resource count does not grown until quota reached + if RAPIDAST_CLEANUP: + for func in cls._teardowns: + logging.debug(f"calling {func}") + func() + # XXX oobtukbe does not clean up after itself + os.system(f"kubectl delete Task/vulnerable -n {NAMESPACE}") + + def create_from_yaml(self, path: str): + # delete resources in teardown method later + self._teardowns.append(partial(os.system, f"kubectl delete -f {path} -n {NAMESPACE}")) + o = utils.create_from_yaml(self.kclient, path, namespace=NAMESPACE, verbose=True) + logging.debug(o) diff --git a/e2e-tests/manifests/nessus-deployment.yaml b/e2e-tests/manifests/nessus-deployment.yaml new file mode 100644 index 00000000..5168cdca --- /dev/null +++ b/e2e-tests/manifests/nessus-deployment.yaml @@ -0,0 +1,47 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nessus + labels: + app: nessus +spec: + replicas: 1 + selector: + matchLabels: + app: nessus + template: + metadata: + labels: + app: nessus + spec: + imagePullSecrets: + - name: sfowler-nessus-pull-secret + containers: + - name: nessus + command: + - /opt/nessus/sbin/nessus-service + - --no-root + env: + - name: AUTO_UPDATE + value: "no" + image: quay.io/sfowler/nessus@sha256:5881d6928e52d6c536634aeba0bbb7d5aac2b53e77c17f725e4e5aff0054f772 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8834 + readinessProbe: + exec: + command: + - /bin/bash + - -c + - | + #!/bin/bash + + # curl -ks https://0.0.0.0:8834/server/status | python3 -c 'import sys, json; json.load(sys.stdin)["code"] == 200 or sys.exit(1)' + curl -ks https://0.0.0.0:8834/server/status | python3 -c 'import sys, json; json.load(sys.stdin)["detailed_status"]["login_status"] == "allow" or sys.exit(1)' + initialDelaySeconds: 20 + periodSeconds: 10 + failureThreshold: 32 + resources: + limits: + cpu: 1500m + memory: 4Gi diff --git a/e2e-tests/manifests/nessus-service.yaml b/e2e-tests/manifests/nessus-service.yaml new file mode 100644 index 00000000..36116f21 --- /dev/null +++ b/e2e-tests/manifests/nessus-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: nessus + name: nessus +spec: + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - port: 8834 + protocol: TCP + targetPort: 8834 + selector: + app: nessus + sessionAffinity: None + type: ClusterIP diff --git a/e2e-tests/manifests/rapidast-nessus-configmap.yaml b/e2e-tests/manifests/rapidast-nessus-configmap.yaml new file mode 100644 index 00000000..fc4d3a8b --- /dev/null +++ b/e2e-tests/manifests/rapidast-nessus-configmap.yaml @@ -0,0 +1,51 @@ +apiVersion: v1 +data: + config.yaml: |+ + config: + # WARNING: `configVersion` indicates the schema version of the config file. + # This value tells RapiDAST what schema should be used to read this configuration. 
+ # Therefore you should only change it if you update the configuration to a newer schema + configVersion: 5 + + # all the results of all scanners will be stored under that location + # base_results_dir: "./results" + + # `application` contains data related to the application, not to the scans. + application: + shortName: "nessus-test-1.0" + # url: "" # XXX unused for nessus + + # `general` is a section that will be applied to all scanners. + # Any scanner can override a value by creating an entry of the same name in their own configuration + general: + # container: + # type: podman + + # remove `authentication` entirely for unauthenticated connection + authentication: + type: "oauth2_rtoken" + parameters: + client_id: "cloud-services" + token_endpoint: "" + # rtoken_from_var: "RTOKEN" # referring to an env defined in general.environ.envFile + #preauth: false # set to true to pregenerate a token, and stick to it (no refresh) + + # `scanners' is a section that configures scanning options + scanners: + nessus_foobar: + server: + # url: https://10.0.108.143:8834/ # URL of Nessus instance + url: https://nessus:8834/ # URL of Nessus instance + username_from_var: NESSUS_USER # Nessus credentials + password_from_var: NESSUS_PASSWORD + scan: + name: nessus-test # name of new scan to create + folder: nessus-tests # name of folder to contain the scan + policy: "discovery" # policy used for scan + timeout: 600 # timeout limit in seconds to complete scan + targets: + - 127.0.0.1 + +kind: ConfigMap +metadata: + name: rapidast-nessus diff --git a/e2e-tests/manifests/rapidast-nessus-pod.yaml b/e2e-tests/manifests/rapidast-nessus-pod.yaml new file mode 100644 index 00000000..5c928b1d --- /dev/null +++ b/e2e-tests/manifests/rapidast-nessus-pod.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Pod +metadata: + name: rapidast-nessus +spec: + containers: + - command: + - bash + - -c + - ./rapidast.py + env: + - name: HOME + value: /opt/rapidast + - name: NESSUS_USER + value: admin + - name: NESSUS_PASSWORD + value: foobar + image: ${IMAGE} # quay.io/redhatproductsecurity/rapidast:latest + imagePullPolicy: Always + name: rapidast-nessus + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 125m + memory: 256Mi + volumeMounts: + - name: config-volume + mountPath: /opt/rapidast/config + securityContext: + supplementalGroups: [1000] # "dast" group, necessary to write to /opt/rapidast/results if no PVC mounted + volumes: + - name: config-volume + configMap: + name: rapidast-nessus + restartPolicy: Never diff --git a/e2e-tests/manifests/rapidast-oobtkube-configmap.yaml b/e2e-tests/manifests/rapidast-oobtkube-configmap.yaml new file mode 100644 index 00000000..0648aecf --- /dev/null +++ b/e2e-tests/manifests/rapidast-oobtkube-configmap.yaml @@ -0,0 +1,46 @@ +apiVersion: v1 +data: + config.yaml: |+ + config: + configVersion: 5 + + # `application` contains data related to the application, not to the scans. + application: + shortName: "oobttest" + + general: + container: + # currently supported: `podman` and `none` + type: "none" + + scanners: + generic_1: + # results: + # An absolute path to file or directory where results are stored on the host.
+ # if it is "*stdout" or unspecified, the command's standard output will be selected + results: "/tmp/oobtkube.sarif.json" # if None or "*stdout", the command's standard output is selected + # toolDir: scanners/generic/tools + inline: "python3 oobtkube.py --log-level debug -d 60 -p 6000 -i rapidast-oobtkube -f /opt/rapidast/config/cr_example.yaml | tee /tmp/oobtkube.sarif.json" + # XXX using tekton Task because it has: + # - a resource type that has a .spec field (required by oobtkube script) + # - fields in .spec that are arbitrary string (.spec.description) + # this could be replaced later with something more common like a ConfigMap, + # once oobtkube can test non .spec values + cr_example.yaml: |+ + apiVersion: tekton.dev/v1 + kind: Task + metadata: + name: vulnerable + spec: + description: foobar + params: + - name: foo + type: string + steps: + - image: foo + name: foo + script: foo + +kind: ConfigMap +metadata: + name: rapidast-oobtkube diff --git a/e2e-tests/manifests/rapidast-oobtkube-pod.yaml b/e2e-tests/manifests/rapidast-oobtkube-pod.yaml new file mode 100644 index 00000000..bd344915 --- /dev/null +++ b/e2e-tests/manifests/rapidast-oobtkube-pod.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + name: rapidast-oobtkube + labels: + app: rapidast-oobtkube +spec: + containers: + - command: + - bash + - -c + - ./rapidast.py + env: + - name: HOME + value: /opt/rapidast + image: ${IMAGE} # quay.io/redhatproductsecurity/rapidast:latest + imagePullPolicy: Always + name: rapidast-oobtkube + resources: + limits: + cpu: 0.5 + memory: 1Gi + volumeMounts: + - name: config-volume + mountPath: /opt/rapidast/config + serviceAccountName: ${SERVICEACCOUNT} # oobtkube needs perms to query API server + securityContext: + supplementalGroups: [1000] # "dast" group, necessary to write to /opt/rapidast/results if no PVC mounted + volumes: + - name: config-volume + configMap: + name: rapidast-oobtkube + # - name: results-volume + # persistentVolumeClaim: + # claimName: rapidast-results + restartPolicy: Never diff --git a/e2e-tests/manifests/rapidast-oobtkube-service.yaml b/e2e-tests/manifests/rapidast-oobtkube-service.yaml new file mode 100644 index 00000000..5cf4ccda --- /dev/null +++ b/e2e-tests/manifests/rapidast-oobtkube-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: rapidast-oobtkube + name: rapidast-oobtkube +spec: + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - port: 6000 + protocol: TCP + targetPort: 6000 + selector: + app: rapidast-oobtkube + sessionAffinity: None + type: ClusterIP diff --git a/e2e-tests/manifests/rapidast-trivy-configmap.yaml b/e2e-tests/manifests/rapidast-trivy-configmap.yaml new file mode 100644 index 00000000..f0fcdca3 --- /dev/null +++ b/e2e-tests/manifests/rapidast-trivy-configmap.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +data: + config.yaml: |+ + config: + # WARNING: `configVersion` indicates the schema version of the config file. + # This value tells RapiDAST what schema should be used to read this configuration. + # Therefore you should only change it if you update the configuration to a newer schema + # It is intended to keep backward compatibility (newer RapiDAST running an older config) + configVersion: 5 + + # `application` contains data related to the application, not to the scans. 
+ application: + shortName: "my-cluster" + + # `scanners' is a section that configures scanning options + scanners: + generic_trivy: + # results: + # An absolute path to file or directory where results are stored on the host. + # if it is "*stdout" or unspecified, the command's standard output will be selected + # When container.type is 'podman', this needs to be used along with the container.volumes configuration below + # If the result needs to be sent to DefectDojo, this must be a SARIF format file + #results: "/path/to/results" + + # Example: scan a k8s cluster for misconfiguration issue + # - See https://aquasecurity.github.io/trivy/v0.49/docs/target/kubernetes/ for more information on 'trivy k8s' scan + # - scanners/generic/tools/convert_trivy_k8s_to_sarif.py converts the Trivy json result to the SARIF format + # 'inline' is used when container.type is not 'podman' + # 'toolDir' specifies the default directory where inline scripts are located + #toolDir: scanners/generic/tools + inline: "trivy k8s -n $(cat /run/secrets/kubernetes.io/serviceaccount/namespace) pod --scanners=misconfig --report all --format json --skip-policy-update | convert_trivy_k8s_to_sarif.py" + + container: + parameters: + # Optional: list of expected return codes, anything else will be considered as an error. by default: [0] + validReturns: [ 0 ] + +kind: ConfigMap +metadata: + name: rapidast-trivy diff --git a/e2e-tests/manifests/rapidast-trivy-pod.yaml b/e2e-tests/manifests/rapidast-trivy-pod.yaml new file mode 100644 index 00000000..e82f4fdf --- /dev/null +++ b/e2e-tests/manifests/rapidast-trivy-pod.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + name: rapidast-trivy +spec: + containers: + - command: + - bash + - -c + - ./rapidast.py + env: + - name: HOME + value: /opt/rapidast + image: ${IMAGE} # quay.io/redhatproductsecurity/rapidast:latest + imagePullPolicy: Always + name: rapidast-trivy + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 125m + memory: 256Mi + volumeMounts: + - name: config-volume + mountPath: /opt/rapidast/config + serviceAccountName: ${SERVICEACCOUNT} # trivy needs perms to query API server + securityContext: + supplementalGroups: [1000] # "dast" group, necessary to write to /opt/rapidast/results if no PVC mounted + volumes: + - name: config-volume + configMap: + name: rapidast-trivy + # - name: results-volume + # persistentVolumeClaim: + # claimName: rapidast-results + restartPolicy: Never diff --git a/e2e-tests/manifests/rapidast-vapi-configmap.yaml b/e2e-tests/manifests/rapidast-vapi-configmap.yaml new file mode 100644 index 00000000..be5fe8b5 --- /dev/null +++ b/e2e-tests/manifests/rapidast-vapi-configmap.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +data: + config.yaml: |+ + config: + configVersion: 5 + + # `application` contains data related to the application, not to the scans. 
+ application: + shortName: "v5-none-release-test" + url: "http://vapi:5000" + + scanners: + zap: + # define a scan through the ZAP scanner + apiScan: + apis: + apiUrl: "http://vapi:5000/docs/openapi.json" + + passiveScan: + # optional list of passive rules to disable + disabledRules: "2,10015,10027,10096,10024,10098,10023,10105" + + activeScan: + policy: API-scan-minimal + + container: + parameters: + executable: "zap.sh" + + miscOptions: + # enableUI (default: false), requires a compatible runtime (e.g.: flatpak or no containment) + #enableUI: True + # Defaults to False, set True to force auto update of ZAP plugins + updateAddons: False + # additionalAddons: ascanrulesBeta + # If set to True and authentication is oauth2_rtoken and api.apiUrl is set, download the API outside of ZAP + oauth2OpenapiManualDownload: False + +kind: ConfigMap +metadata: + name: rapidast-vapi diff --git a/e2e-tests/manifests/rapidast-vapi-pod.yaml b/e2e-tests/manifests/rapidast-vapi-pod.yaml new file mode 100644 index 00000000..a5d7a957 --- /dev/null +++ b/e2e-tests/manifests/rapidast-vapi-pod.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + name: rapidast-vapi +spec: + containers: + - command: + - bash + - -c + - "./rapidast.py && cat results/*/*/zap/zap-report.json" # ugly, but saves needing a PVC to retrieve .json file after execution + # - "./rapidast.py --log-level debug && sleep infinity" # keep the pod alive so we can exec into it to parse reports + env: + - name: HOME + value: /opt/rapidast + image: ${IMAGE} # quay.io/redhatproductsecurity/rapidast:latest + imagePullPolicy: Always + name: rapidast-vapi + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 250m + memory: 512Mi + volumeMounts: + - name: config-volume + mountPath: /opt/rapidast/config + # - name: results-volume + # mountPath: /opt/rapidast/results + securityContext: + supplementalGroups: [1000] # "dast" group, necessary to write to /opt/rapidast/results if no PVC mounted + volumes: + - name: config-volume + configMap: + name: rapidast-vapi + # - name: results-volume + # persistentVolumeClaim: + # claimName: rapidast-results + restartPolicy: Never +# --- +# apiVersion: v1 +# kind: PersistentVolumeClaim +# metadata: +# name: rapidast-results +# spec: +# accessModes: +# - ReadWriteOnce +# volumeMode: Filesystem +# resources: +# requests: +# storage: 1Gi diff --git a/e2e-tests/manifests/task-controller-deployment.yaml b/e2e-tests/manifests/task-controller-deployment.yaml new file mode 100644 index 00000000..dc8e4266 --- /dev/null +++ b/e2e-tests/manifests/task-controller-deployment.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + name: task-controller + labels: + app: task-controller +spec: + replicas: 1 + selector: + matchLabels: + app: task-controller + template: + metadata: + labels: + app: task-controller + spec: + containers: + # simulates a custom controller that monitors for tekton Tasks named "vulnerable" + # and tries to execute a deliberate command injection + # Tasks are chosen only because they have a field in .spec that can be arbitrary string + - command: + - bash + - -c + - | + while true; do + sleep 2 + sh -c "$(oc get task/vulnerable -o=jsonpath='{.spec.description}')" + done + image: registry.redhat.io/openshift4/ose-cli:latest + imagePullPolicy: Always + name: task-controller + serviceAccountName: ${SERVICEACCOUNT} # required to read Tasks from API server diff --git a/e2e-tests/manifests/vapi-deployment.yaml 
b/e2e-tests/manifests/vapi-deployment.yaml new file mode 100644 index 00000000..7b41f204 --- /dev/null +++ b/e2e-tests/manifests/vapi-deployment.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + name: vapi + labels: + app: vapi +spec: + replicas: 1 + selector: + matchLabels: + app: vapi + template: + metadata: + labels: + app: vapi + spec: + containers: + # command is custom because: + # - the default entrypoint runs the frontend, and per rapidast-consolidated-e2e.sh we only want the backend + # - running start_backend.sh on its own is not sufficient, a sleep is needed to keep the pod alive + - command: + - bash + - -c + - . start_backend.sh && sleep infinity + image: quay.io/sfowler/vapi:latest + imagePullPolicy: Always + name: vapi diff --git a/e2e-tests/manifests/vapi-service.yaml b/e2e-tests/manifests/vapi-service.yaml new file mode 100644 index 00000000..fe64da8b --- /dev/null +++ b/e2e-tests/manifests/vapi-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: vapi + name: vapi +spec: + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - port: 5000 + protocol: TCP + targetPort: 5000 + selector: + app: vapi + sessionAffinity: None + type: ClusterIP diff --git a/e2e-tests/test_integration.py b/e2e-tests/test_integration.py new file mode 100644 index 00000000..5ed027b6 --- /dev/null +++ b/e2e-tests/test_integration.py @@ -0,0 +1,61 @@ +import json +import os +import re + +from conftest import tee_log # pylint: disable=E0611 +from conftest import TestBase # pylint: disable=E0611 +from conftest import wait_until_ready # pylint: disable=E0611 + + +class TestRapiDAST(TestBase): + def test_vapi(self): + """Test rapidast finds the expected number of findings in VAPI""" + self.create_from_yaml(f"{self.tempdir}/vapi-deployment.yaml") + self.create_from_yaml(f"{self.tempdir}/vapi-service.yaml") + wait_until_ready(label_selector="app=vapi") + + self.create_from_yaml(f"{self.tempdir}/rapidast-vapi-configmap.yaml") + self.create_from_yaml(f"{self.tempdir}/rapidast-vapi-pod.yaml") + wait_until_ready(field_selector="metadata.name=rapidast-vapi") + + logfile = os.path.join(self.tempdir, "rapidast-vapi.log") + tee_log("rapidast-vapi", logfile) + + # XXX relies on rapidast-vapi pod cat-ing the result json file after execution + with open(logfile, "r", encoding="utf-8") as f: + logs = f.read() + pattern = r"^{\s*$.*$" + matches = re.findall(pattern, logs, re.MULTILINE | re.DOTALL) + assert matches, f"{logfile} did not contain expected json results" + results = json.loads(matches[0]) + + assert len(results["site"][0]["alerts"]) == 3 + + def test_trivy(self): + self.create_from_yaml(f"{self.tempdir}/rapidast-trivy-configmap.yaml") + self.create_from_yaml(f"{self.tempdir}/rapidast-trivy-pod.yaml") + wait_until_ready(field_selector="metadata.name=rapidast-trivy") + + logfile = os.path.join(self.tempdir, "rapidast-trivy.log") + tee_log("rapidast-trivy", logfile) + + expected_line = "INFO:scanner: 'generic_trivy' completed successfully" + with open(logfile, "r", encoding="utf-8") as f: + logs = f.read() + assert expected_line in logs, f"{logfile} does not contain expected line: {expected_line}" + + def test_oobtkube(self): + self.create_from_yaml(f"{self.tempdir}/task-controller-deployment.yaml") + + self.create_from_yaml(f"{self.tempdir}/rapidast-oobtkube-configmap.yaml") + self.create_from_yaml(f"{self.tempdir}/rapidast-oobtkube-service.yaml") +
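+ # the Service is created before the rapidast pod: the payload injected into the
+ # vulnerable Task calls back to the oobtkube listener on port 6000 (see the
+ # configmap's oobtkube.py -p/-i options), so presumably it must be resolvable first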
self.create_from_yaml(f"{self.tempdir}/rapidast-oobtkube-pod.yaml") + wait_until_ready(field_selector="metadata.name=rapidast-oobtkube") + + logfile = os.path.join(self.tempdir, "rapidast-oobtkube.log") + tee_log("rapidast-oobtkube", logfile) + + expected_line = "RESULT: OOB REQUEST DETECTED" + with open(logfile, "r", encoding="utf-8") as f: + logs = f.read() + assert expected_line in logs, f"{logfile} does not contain expected line: {expected_line}" diff --git a/e2e-tests/test_nessus.py b/e2e-tests/test_nessus.py new file mode 100644 index 00000000..950ab703 --- /dev/null +++ b/e2e-tests/test_nessus.py @@ -0,0 +1,20 @@ +import os + +from conftest import tee_log # pylint: disable=E0611 +from conftest import TestBase # pylint: disable=E0611 +from conftest import wait_until_ready # pylint: disable=E0611 + + +class TestNessus(TestBase): + def test_nessus(self): + """Test rapidast find expected number of findings in VAPI""" + self.create_from_yaml(f"{self.tempdir}/nessus-deployment.yaml") + self.create_from_yaml(f"{self.tempdir}/nessus-service.yaml") + wait_until_ready(label_selector="app=nessus", timeout=300) # nessus is slow to pull and start + + self.create_from_yaml(f"{self.tempdir}/rapidast-nessus-configmap.yaml") + self.create_from_yaml(f"{self.tempdir}/rapidast-nessus-pod.yaml") + wait_until_ready(field_selector="metadata.name=rapidast-nessus") + + logfile = os.path.join(self.tempdir, "rapidast-nessus.log") + tee_log("rapidast-nessus", logfile) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..6dbd43fb --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,2 @@ +[tool.ruff] +line-length = 120 diff --git a/rapidast.py b/rapidast.py index a03d0372..c0e3855a 100755 --- a/rapidast.py +++ b/rapidast.py @@ -6,6 +6,8 @@ import re import sys from datetime import datetime +from typing import Any +from typing import Dict from urllib import request import yaml @@ -20,6 +22,9 @@ pp = pprint.PrettyPrinter(indent=4) +DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "rapidast-defaults.yaml") + + def load_environment(config): """Load the environment variables based on the config set in config.environ""" source = config.get("config.environ.envFile") @@ -56,6 +61,12 @@ def load_config_file(config_file_location: str): return open(config_file_location, mode="r", encoding="utf-8") +def load_config(config_file_location: str) -> Dict[str, Any]: + return yaml.safe_load(load_config_file(config_file_location)) + + +# pylint: disable=R0911 +# too many return statements def run_scanner(name, config, args, scan_exporter): """given the config `config`, runs scanner `name`. 
Returns: @@ -85,13 +96,21 @@ def run_scanner(name, config, args, scan_exporter): # Part 1: create a instance based on configuration try: scanner = class_(config, name) - except OSError as excp: - logging.error(excp) + except OSError as e: + logging.error(f"Caught exception: {e}") + logging.error(f"Ignoring failed Scanner `{name}` of type `{typ}`") + return 1 + except RuntimeError as e: + logging.error(f"Caught exception: {e}") logging.error(f"Ignoring failed Scanner `{name}` of type `{typ}`") return 1 # Part 2: setup the environment (e.g.: spawn a server) - scanner.setup() + try: + scanner.setup() + except Exception as excp: # pylint: disable=W0718 + logging.error(f"Failed to set up the scanner: {excp}") + scanner.state = scanners.State.ERROR logging.debug(scanner) @@ -125,6 +144,64 @@ def run_scanner(name, config, args, scan_exporter): return 0 +def dump_redacted_config(config_file_location: str, destination_dir: str) -> bool: + """ + Redacts sensitive parameters from a configuration file and writes the redacted + version to a destination directory + + Args: + config_file_location: The file path to the source configuration file + destination_dir: The directory where the redacted configuration file should be saved + + """ + logging.info(f"Starting the redaction and dumping process for the configuration file: {config_file_location}") + + try: + if not os.path.exists(destination_dir): + os.makedirs(destination_dir) + logging.info(f"Created destination directory: {destination_dir}") + + config = yaml.safe_load(load_config_file(config_file_location)) + + logging.info(f"Redacting sensitive information from configuration {config_file_location}") + for key in config.keys(): + if not isinstance(config[key], dict): + continue + if config[key].get("authentication") and config[key]["authentication"].get("parameters"): + for param in config[key]["authentication"]["parameters"]: + config[key]["authentication"]["parameters"][param] = "*****" + + dest = os.path.join(destination_dir, os.path.basename(config_file_location)) + logging.info(f"Saving redacted configuration to {dest}") + with open(dest, "w", encoding="utf-8") as file: + yaml.dump(config, file) + + logging.info("Redacted configuration saved successfully") + return True + + except (FileNotFoundError, yaml.YAMLError, IOError) as e: + logging.error(f"Error occurred while dumping redacted config: {e}") + return False + + +def dump_rapidast_redacted_configs(main_config_file_location: str, destination_dir: str): + """ + Dumps redacted versions of the main and default configuration files to the destination directory. + + Args: + main_config_file_location: The file path to the main configuration file. + destination_dir: The directory where the redacted configuration files should be saved. + """ + if not dump_redacted_config(main_config_file_location, destination_dir): + logging.error("Failed to dump configuration. Exiting.") + sys.exit(2) + + if os.path.exists(DEFAULT_CONFIG_FILE): + if not dump_redacted_config(DEFAULT_CONFIG_FILE, destination_dir): + logging.error("Failed to dump configuration. Exiting.") + sys.exit(2) + + def run(): parser = argparse.ArgumentParser( description="Runs various DAST scanners against a defined target, as configured by a configuration file." @@ -153,27 +230,31 @@ def run(): args.loglevel = args.loglevel.upper() add_logging_level("VERBOSE", logging.DEBUG + 5) logging.basicConfig(format="%(levelname)s:%(message)s", level=args.loglevel) - logging.debug(f"log level set to debug. 
Config file: '{parser.parse_args().config_file}'") + config_file = parser.parse_args().config_file + + logging.debug(f"log level set to debug. Config file: '{config_file}'") # Load config file try: - config = configmodel.RapidastConfigModel(yaml.safe_load(load_config_file(parser.parse_args().config_file))) + config = configmodel.RapidastConfigModel(yaml.safe_load(load_config_file(config_file))) except yaml.YAMLError as exc: - raise RuntimeError(f"YAML error in config {parser.parse_args().config_file}':\n {str(exc)}") from exc + raise RuntimeError(f"YAML error in config {config_file}':\n {str(exc)}") from exc + + full_result_dir_path = get_full_result_dir_path(config) + dump_rapidast_redacted_configs(config_file, full_result_dir_path) # Optionally adds default if file exists (will not overwrite existing entries) - default_conf = os.path.join(os.path.dirname(__file__), "rapidast-defaults.yaml") - if os.path.exists(default_conf): - logging.info(f"Loading defaults from: {default_conf}") + if os.path.exists(DEFAULT_CONFIG_FILE): + logging.info(f"Loading defaults from: {DEFAULT_CONFIG_FILE}") try: - config.merge(yaml.safe_load(load_config_file(default_conf)), preserve=True) + config.merge(yaml.safe_load(load_config_file(DEFAULT_CONFIG_FILE)), preserve=True) except yaml.YAMLError as exc: - raise RuntimeError(f"YAML error in config {default_conf}':\n {str(exc)}") from exc + raise RuntimeError(f"YAML error in config {DEFAULT_CONFIG_FILE}':\n {str(exc)}") from exc # Update to latest config schema if need be config = configmodel.converter.update_to_latest_config(config) - config.set("config.results_dir", get_full_result_dir_path(config)) + config.set("config.results_dir", full_result_dir_path) logging.debug(f"The entire loaded configuration is as follow:\n=====\n{pp.pformat(config)}\n=====") diff --git a/requirements-dev.txt b/requirements-dev.txt index 3993dc3e..20e11a9f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -4,3 +4,5 @@ pytest >= 7.2.1 black requests pre-commit == 3.7.1 +kubernetes +pytest-json-report diff --git a/requirements.txt b/requirements.txt index c64861c3..1d1a7a57 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,3 +2,5 @@ python-dotenv >= 1.0.0 pyyaml >= 6.0 requests >= 2.27.1 google.cloud.storage >= 2.17.0 +git+https://github.com/sfowl/py-nessus-pro.git@41abc31 # custom fork without selenium +dacite >= 1.8.1 diff --git a/scanners/__init__.py b/scanners/__init__.py index c0f5df05..1de6228b 100644 --- a/scanners/__init__.py +++ b/scanners/__init__.py @@ -20,12 +20,13 @@ class State(Enum): class RapidastScanner: - def __init__(self, config, ident): + def __init__(self, config: configmodel.RapidastConfigModel, ident: str): self.ident = ident self.config = config self.state = State.UNCONFIGURED self.results_dir = os.path.join(self.config.get("config.results_dir", default="results"), self.ident) + os.makedirs(self.results_dir, exist_ok=True) # When requested to create a temporary file or directory, it will be a subdir of # this temporary directory diff --git a/scanners/generic/generic.py b/scanners/generic/generic.py index 6347e5fe..ef7c5274 100644 --- a/scanners/generic/generic.py +++ b/scanners/generic/generic.py @@ -65,7 +65,7 @@ def postprocess(self): logging.info(f"Extracting report, storing in {self.results_dir}") result = self.my_conf("results") try: - os.makedirs(self.results_dir) + os.makedirs(self.results_dir, exist_ok=True) if os.path.isdir(result): shutil.copytree(result, self.results_dir, dirs_exist_ok=True) else: diff --git 
a/scanners/nessus/__init__.py b/scanners/nessus/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/scanners/nessus/nessus_none.py b/scanners/nessus/nessus_none.py new file mode 100644 index 00000000..834f53f8 --- /dev/null +++ b/scanners/nessus/nessus_none.py @@ -0,0 +1,189 @@ +import json +import logging +import time +from dataclasses import dataclass +from dataclasses import field +from os import listdir +from os import path +from typing import Any +from typing import Dict +from typing import List +from typing import Optional + +import dacite +import requests.exceptions +from py_nessus_pro import PyNessusPro + +from configmodel import RapidastConfigModel +from scanners import RapidastScanner +from scanners import State +from scanners.authentication_factory import generic_authentication_factory +from scanners.nessus.tools.convert_nessus_csv_to_sarif import convert_csv_to_sarif + + +@dataclass +class NessusAuthenticationConfig: + type: str + parameters: Dict[str, Any] + + +@dataclass +class NessusServerConfig: + url: str + username: str + password: str + + +@dataclass +class NessusScanConfig: + name: str + policy: str + targets: List[str] + folder: str = field(default="rapidast") + timeout: int = field(default=600) # seconds + + def targets_as_str(self) -> str: + return " ".join(self.targets) + + +@dataclass +class NessusConfig: + authentication: Optional[NessusAuthenticationConfig] + server: NessusServerConfig + scan: NessusScanConfig + + +# XXX required by ./rapidast.py +CLASSNAME = "Nessus" + +END_STATUSES = [ + "completed", + "canceled", + "imported", + "aborted", +] + + +class Nessus(RapidastScanner): + def __init__(self, config: RapidastConfigModel, ident: str = "nessus"): + super().__init__(config, ident) + self._nessus_client: Optional[PyNessusPro] = None + self._scan_id: Optional[int] = None + nessus_config_section = config.subtree_to_dict(f"scanners.{ident}") + if nessus_config_section is None: + raise ValueError("'scanners.nessus' section not in config") + + # XXX self.config is already a dict with raw config values + self.cfg = dacite.from_dict(data_class=NessusConfig, data=nessus_config_section) + self._sleep_interval: int = 10 + + self.authenticated = self.authentication_factory() + + self._connect() + + def _connect(self): + logging.debug(f"Connecting to nessus instance at {self.cfg.server.url}") + try: + self._nessus_client = PyNessusPro( + self.cfg.server.url, + self.cfg.server.username, + self.cfg.server.password, + log_level="debug", + ) + except requests.exceptions.RequestException as e: + logging.error(f"Failed to connect to {self.cfg.server.url}: {e}") + raise + + @property + def nessus_client(self) -> PyNessusPro: + if self._nessus_client is None: + raise RuntimeError(f"Nessus client not connected: {self.state}") + return self._nessus_client + + @property + def scan_id(self) -> int: + if self._scan_id is None: + raise RuntimeError("scan_id is None") + return self._scan_id + + @generic_authentication_factory() + def authentication_factory(self): + """This is the default function, attached to error reporting""" + raise RuntimeError( + f"The authentication option is not supported. 
" + f"Input - type: {self.cfg.authentication.type}, params: {self.cfg.authentication.parameters}" + ) + + @authentication_factory.register(None) + def authentication_set_anonymous(self): + """No authentication: don't do anything""" + logging.info("Nessus scan not configured with any auth") + return False + + def setup(self): + logging.debug(f"Creating new scan named {self.cfg.scan.folder}/{self.cfg.scan.name}") + self._scan_id = self.nessus_client.new_scan( + name=self.cfg.scan.name, + targets=self.cfg.scan.targets_as_str(), + folder=self.cfg.scan.folder, + create_folder=True, + ) + + if self._scan_id < 0: + raise RuntimeError(f"Unexpected scan_id {self.scan_id}") + + # only user-created scan policies seem to be identified and must be + # created with the name used in the config as a prerequisite + if self.cfg.scan.policy: + logging.debug(f"Setting scan policy to {self.cfg.scan.policy}") + self.nessus_client.set_scan_policy(scan_id=self.scan_id, policy=self.cfg.scan.policy) + + self.state = State.READY + + def run(self): + if self.state != State.READY: + raise RuntimeError(f"[nessus] unexpected state: READY != {self.state}") + # State that we want the scan to launch immediately + logging.debug("Launching scan") + self.nessus_client.set_scan_launch_now(scan_id=self.scan_id, launch_now=True) + + # Tell nessus to create and launch the scan + self.nessus_client.post_scan(scan_id=self.scan_id) + + # Wait for the scan to complete + start = time.time() + while self.nessus_client.get_scan_status(self.scan_id)["status"] not in END_STATUSES: + if time.time() - start > self.cfg.scan.timeout: + logging.error(f"Timeout {self.cfg.scan.timeout}s reached waiting for scan to complete") + self.state = State.ERROR + break + + time.sleep(self._sleep_interval) + logging.debug(f"Waiting {self._sleep_interval}s for scan to finish") + logging.info(self.nessus_client.get_scan_status(self.scan_id)) + + def postprocess(self): + # After scan is complete, download report in csv, nessus, and html format + # Path and any folders must already exist in this implementation + logging.debug("Retrieving scan reports") + scan_reports = self.nessus_client.get_scan_reports(self.scan_id, self.results_dir) + logging.debug(scan_reports) + # Get filename + csv_files = [file for file in listdir(scan_reports) if file.endswith(".csv")] + for file in csv_files: + sarif_output = convert_csv_to_sarif(path.join(scan_reports, file)) + # Save sarif file + with open( + path.join(scan_reports, file.replace(".csv", "-sarif.json")), + "w", + encoding="utf-8", + ) as output: + json.dump(sarif_output, output, indent=2) + + if not self.state == State.ERROR: + self.state = State.PROCESSED + + def cleanup(self): + logging.debug("cleaning up") + if not self.state == State.PROCESSED: + raise RuntimeError(f"[nessus] unexpected state: PROCESSED != {self.state}") diff --git a/scanners/nessus/nessus_podman.py b/scanners/nessus/nessus_podman.py new file mode 100644 index 00000000..d0f748d3 --- /dev/null +++ b/scanners/nessus/nessus_podman.py @@ -0,0 +1,6 @@ +CLASSNAME = "Nessus" + + +class Nessus: + def __init__(self, *args): + raise RuntimeError("nessus scanner is not supported with 'general.container.type=podman' config option") diff --git a/scanners/nessus/tools/convert_nessus_csv_to_sarif.py b/scanners/nessus/tools/convert_nessus_csv_to_sarif.py new file mode 100644 index 00000000..94d6bb12 --- /dev/null +++ b/scanners/nessus/tools/convert_nessus_csv_to_sarif.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 +""" +# Convert a Nessus CSV report to SARIF 
format (stdout). +# A usage example (see options in the code): +# $ convert_nessus_csv_to_sarif.py [-f <file>] [--log-level=DEBUG] +# If `-f` is absent, or its value is `-`, CSV data will be read from STDIN +# +""" +import argparse +import csv +import json +import logging +import re +import sys + + +def map_level(risk): + """ + Map severity to match SARIF level property + """ + if risk in ("Critical", "High"): + return "error" + + if risk == "Medium": + return "warning" + + if risk == "Low": + return "note" + + return "none" + + +def nessus_info(field_name, entry): + """ + Extract scan details from Nessus Plugin 19506 + """ + # Match the field name with RegEx, then split it to extract + # the value. Finally, strip all surrounding whitespace + try: + result = re.compile(field_name + ".*\n").search(entry)[0].split(":")[1].strip() + except TypeError: + return "DNE" + return result + + +def is_file(file_name): + """ + Bool to determine if filename was provided + """ + return file_name is not None and file_name != "-" + + +def uri(host, port): + """ + Format URI from host and port + """ + target = host + # Ignore port if 0 + if port != "0": + target = target + ":" + port + return target + + +def convert_csv_to_sarif(csv_file): + """ + Convert CSV data to SARIF format. + """ + + # Start of template. Nessus and version provided as default values to be replaced + sarif_template = { + "version": "2.1.0", + "runs": [ + { + "tool": {"driver": {"name": "Nessus", "version": "10.8", "rules": []}}, + "results": [], + } + ], + } + + rule_ids = set() + + # Log whether input will be read from a file or from STDIN + if is_file(csv_file): + logging.debug("Reading input from: %s", csv_file) + else: + logging.debug("Reading input from STDIN") + + with open(csv_file, newline="", encoding="utf-8") if is_file(csv_file) else sys.stdin as report: + reader = csv.DictReader(report) + for row in reader: + if row["Plugin ID"] == "19506": + # This Plugin contains lots of details about scan to populate SARIF tool property + sarif_template["runs"][0]["tool"]["driver"]["name"] = nessus_info( + "Scanner edition used", row["Plugin Output"] + ) + sarif_template["runs"][0]["tool"]["driver"]["version"] = nessus_info( + "Nessus version", row["Plugin Output"] + ) + # Adding fullname to include policy + full_name = ( + nessus_info("Scanner edition used", row["Plugin Output"]), + nessus_info("Nessus version", row["Plugin Output"]), + nessus_info("Scan policy used", row["Plugin Output"]), + ) + sarif_template["runs"][0]["tool"]["driver"][ + "fullName" + ] = f"{full_name[0]} {full_name[1]} {full_name[2]} Policy" + + if row["Plugin ID"] not in rule_ids: + new_rule = { + "id": row["Plugin ID"], + "name": row["Name"], + "shortDescription": {"text": row["Description"]}, + } + sarif_template["runs"][0]["tool"]["driver"]["rules"].append(new_rule) + rule_ids.add(row["Plugin ID"]) + + artifact_location = uri(row["Host"], row["Port"]) + + new_report = { + "ruleId": row["Plugin ID"], + "level": map_level(row["Risk"]), + "message": {"text": f"{row['Plugin Output']}\n\nSolution: {row['Solution']}"}, + "locations": [{"physicalLocation": {"artifactLocation": {"uri": artifact_location}}}], + } + + sarif_template["runs"][0]["results"].append(new_report) + + return sarif_template + + +def main(): + """ + Parses arguments before converting Nessus CSV report to SARIF JSON format + """ + # Parse command-line arguments + parser = argparse.ArgumentParser(description="Convert Nessus CSV report to SARIF JSON format.") + parser.add_argument( + "-f", + "--filename",
type=str, + required=False, + default=None, + help="Path to Nessus CSV file (if absent or '-': read from STDIN)", + ) + parser.add_argument( + "--log-level", + dest="loglevel", + choices=["DEBUG", "VERBOSE", "INFO", "WARNING", "ERROR", "CRITICAL"], + default="INFO", + help="Level of verbosity", + ) + + args = parser.parse_args() + + logging.basicConfig(format="%(levelname)s:%(message)s", level=args.loglevel) + + sarif_data = convert_csv_to_sarif(args.filename) + + # Print the SARIF data + print(json.dumps(sarif_data, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/scanners/zap/scripts/export-site-tree.js b/scanners/zap/scripts/export-site-tree.js new file mode 100644 index 00000000..59f2569d --- /dev/null +++ b/scanners/zap/scripts/export-site-tree.js @@ -0,0 +1,66 @@ +/** + * Script to traverse the site tree and export node information to a JSON file + * + * This script retrieves the root of the site tree from the current ZAP session, + * traverses each child node, and collects relevant information such as node name, + * HTTP method, and status code. The collected data is then written to a JSON file + * named 'zap-site-tree.json' in the session's results directory + */ + +var File = Java.type('java.io.File'); +var FileWriter = Java.type('java.io.FileWriter'); +var BufferedWriter = Java.type('java.io.BufferedWriter'); + +const defaultFileName = "zap-site-tree.json"; + +try { + var fileName = org.zaproxy.zap.extension.script.ScriptVars.getGlobalVar('siteTreeFileName') || defaultFileName; + +} catch (e) { + var fileName = defaultFileName; + print("Error retrieving 'siteTreeFileName': " + e.message + ". Using default value: '" + defaultFileName); +} + +function listChildren(node, resultList) { + for (var j = 0; j < node.getChildCount(); j++) { + listChildren(node.getChildAt(j), resultList); + } + + if (node.getChildCount() == 0) { + var href = node.getHistoryReference(); + var nodeInfo = {}; + nodeInfo["name"] = node.getHierarchicNodeName(); + + if (href != null) { + nodeInfo["method"] = href.getMethod(); + nodeInfo["status"] = href.getStatusCode(); + } else { + nodeInfo["method"] = "No History Reference"; + nodeInfo["status"] = "No History Reference"; + } + + resultList.push(nodeInfo); + } +} + +try { + var root = model.getSession().getSiteTree().getRoot(); + var resultList = []; + + listChildren(root, resultList); + + var jsonOutput = JSON.stringify(resultList, null, 4); + + var defaultResultsDir = model.getSession().getSessionFolder(); + var outputFilePath = new File(defaultResultsDir, fileName).getAbsolutePath(); + + var file = new File(outputFilePath); + var writer = new BufferedWriter(new FileWriter(file)); + writer.write(jsonOutput); + writer.close(); + + print("Site tree data has been written to: " + outputFilePath); + +} catch (e) { + print("An error occurred: " + e); +} diff --git a/scanners/zap/zap.py b/scanners/zap/zap.py index c4a9b5bc..20f624de 100644 --- a/scanners/zap/zap.py +++ b/scanners/zap/zap.py @@ -1,3 +1,4 @@ +# pylint: disable=too-many-lines import glob import logging import os @@ -5,8 +6,10 @@ import re import shutil import tarfile +import xml.etree.ElementTree as ET from base64 import urlsafe_b64encode from collections import namedtuple +from pathlib import Path import yaml @@ -33,6 +36,8 @@ class Zap(RapidastScanner): REPORTS_SUBDIR = "reports" + SITE_TREE_FILENAME = "zap-site-tree.json" + ## FUNCTIONS def __init__(self, config, ident): logging.debug("Initializing ZAP scanner") @@ -87,7 +92,7 @@ def postprocess(self): logging.debug(f"reports_dir: 
{reports_dir}") logging.info(f"Extracting report, storing in {self.results_dir}") - shutil.copytree(reports_dir, self.results_dir) + shutil.copytree(reports_dir, self.results_dir, dirs_exist_ok=True) logging.info("Saving the session as evidence") with tarfile.open(f"{self.results_dir}/session.tar.gz", "w:gz") as tar: @@ -98,9 +103,25 @@ def postprocess(self): # log path is like '/tmp/rapidast_*/zap.log' tar.add(log, f"evidences/zap_logs/{log.split('/')[-1]}") - # Calling parent RapidastScanner postprocess + self._copy_site_tree() + super().postprocess() + def _copy_site_tree(self): + """ + Copies the site tree JSON file from the host working directory to the results directory. + """ + site_tree_path = os.path.join(self.host_work_dir, f"session_data/{self.SITE_TREE_FILENAME}") + + if os.path.exists(site_tree_path): + try: + logging.info(f"Copying site tree from {site_tree_path} to {self.results_dir}") + shutil.copy(site_tree_path, self.results_dir) + except Exception as e: # pylint: disable=broad-except + logging.error(f"Failed to copy site tree: {e}") + else: + logging.warning(f"Site tree not found at {site_tree_path}") + def data_for_defect_dojo(self): """Returns a tuple containing: 1) Metadata for the test (dictionary) @@ -132,7 +153,7 @@ def get_update_command(self): *self._get_standard_options(), "-cmd", ] - if self.my_conf("miscOptions.updateAddons", default=True): + if self.my_conf("miscOptions.updateAddons"): command.append("-addonupdate") addons = self.my_conf("miscOptions.additionalAddons", default=[]) @@ -172,6 +193,9 @@ def _setup_zap_cli(self): """ self.zap_cli.extend(self._get_standard_options()) + # Addon update has already been done, if enabled. Prevent a new check for update + self.zap_cli.append("-silent") + # Create a session, to store them as evidence self.zap_cli.extend(["-newsession", f"{self.container_work_dir}/session_data/session"]) @@ -345,6 +369,7 @@ def _setup_zap_automation(self): self._setup_passive_wait() self._setup_report() self._setup_summary() + self._setup_export_site_tree() # The AF should now be setup and ready to be written self._save_automation_file() @@ -364,6 +389,51 @@ def _setup_import_urls(self): job["parameters"]["fileName"] = dest self.automation_config["jobs"].append(job) + def _setup_export_site_tree(self): + scripts_dir = self.container_scripts_dir + site_tree_file_name_add = { + "name": "export-site-tree-filename-global-var-add", + "type": "script", + "parameters": { + "action": "add", + "type": "standalone", + "name": "export-site-tree-filename-global-var", + "engine": "ECMAScript : Graal.js", + "inline": f""" + org.zaproxy.zap.extension.script.ScriptVars.setGlobalVar('siteTreeFileName','{self.SITE_TREE_FILENAME}') + """, + }, + } + self.automation_config["jobs"].append(site_tree_file_name_add) + site_tree_file_name_run = { + "name": "export-site-tree-filename-global-var-run", + "type": "script", + "parameters": {"action": "run", "type": "standalone", "name": "export-site-tree-filename-global-var"}, + } + self.automation_config["jobs"].append(site_tree_file_name_run) + setup = { + "name": "export-site-tree-add", + "type": "script", + "parameters": { + "action": "add", + "type": "standalone", + "engine": "ECMAScript : Graal.js", + "name": "export-site-tree", + "file": f"{scripts_dir}/export-site-tree.js", + }, + } + self.automation_config["jobs"].append(setup) + run = { + "name": "export-site-tree-run", + "type": "script", + "parameters": { + "action": "run", + "type": "standalone", + "name": "export-site-tree", + }, + } + 
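+ # note: AF script jobs come in add/run pairs: the "add" job registers the
+ # script with ZAP, and a separate "run" job executes it (same pattern as the
+ # filename-global-var jobs above)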
self.automation_config["jobs"].append(run) + def _append_slash_to_url(self, url): # For some unknown reason, ZAP appears to behave weirdly if the URL is just the hostname without '/' if not url.endswith("/"): @@ -473,7 +543,7 @@ def _setup_ajax_spider(self): # Set some RapiDAST-centric defaults # Unless overwritten, browser should be Firefox-headless, since RapiDAST only has that if not job["parameters"].get("browserId"): - job["parameters"]["policy"] = "firefox-headless" + job["parameters"]["browserId"] = "firefox-headless" # Add to includePaths to the context if params.get("url"): @@ -569,6 +639,10 @@ def _setup_active_scan(self): if not job["parameters"].get("policy"): job["parameters"]["policy"] = "API-scan-minimal" + validate_active_scan_policy( + policy_path=Path(MODULE_DIR) / "policies" / f"{job['parameters']['policy']}.policy", + ) + self.automation_config["jobs"].append(job) def _construct_report_af(self, report_format): @@ -958,3 +1032,57 @@ def ensure_list(entry): automation_config["env"]["contexts"] = [] automation_config["env"]["contexts"].append({"name": context}) return ensure_default(automation_config["env"]["contexts"][-1]) + + +class PolicyFileNotFoundError(FileNotFoundError): + """Raised when the policy file is not found.""" + + +class MissingConfigurationNodeError(RuntimeError): + """Raised when the root node is missing""" + + +class MissingPolicyNodeError(RuntimeError): + """Raised when the node inside is missing""" + + +class MismatchedPolicyNameError(RuntimeError): + """Raised when the node content does not match the filename""" + + +class InvalidXMLFileError(RuntimeError): + """Raised when the policy file is not a valid XML""" + + +def validate_active_scan_policy(policy_path: Path): + policy_name = policy_path.stem + + logging.info(f"Starting validation of ZAP active scan policy: '{policy_path}'") + + if not policy_path.is_file(): + raise PolicyFileNotFoundError( + f"Policy '{policy_name}' not found in '{policy_path.parent}' directory. " + f"Please check the policy name in the configuration" + ) + + try: + tree = ET.parse(policy_path) + root = tree.getroot() + + if not root.tag or root.tag != "configuration": + raise MissingConfigurationNodeError(f"Missing node in '{policy_name}.policy'") + + policy_node = root.find("policy") + if policy_node is None: + raise MissingPolicyNodeError(f"Missing node inside in '{policy_name}.policy'") + + if policy_node.text.strip() != policy_name: + raise MismatchedPolicyNameError( + f"The node in '{policy_name}' does not match the filename. 
" + f"Expected '{policy_name}', but found '{policy_node.text.strip()}'" + ) + + except ET.ParseError as exc: + raise InvalidXMLFileError(f"Policy file '{policy_path}' is not a valid XML file") from exc + + logging.info(f"Validation successful for policy file: '{policy_path}'") diff --git a/scanners/zap/zap_none.py b/scanners/zap/zap_none.py index bb171103..2e6f7c5d 100644 --- a/scanners/zap/zap_none.py +++ b/scanners/zap/zap_none.py @@ -8,7 +8,6 @@ from .zap import MODULE_DIR from .zap import Zap from scanners import State -from scanners.downloaders import anonymous_download from scanners.path_translators import make_mapping_for_scanner CLASSNAME = "ZapNone" @@ -104,8 +103,6 @@ def run(self): if not self.state == State.READY: raise RuntimeError("[ZAP SCANNER]: ERROR, not ready to run") - self._check_plugin_status() - # temporary workaround: cleanup addon state # see https://github.com/zaproxy/zaproxy/issues/7590#issuecomment-1308909500 statefile = f"{self.host_home_dir}/add-ons-state.xml" @@ -258,7 +255,7 @@ def _handle_plugins(self): command = self.get_update_command() if not command: - logging.debug("Skpping addon handling: no install, no update") + logging.debug("Skipping addon handling: no install, no update") return # manually specify directory command.extend(["-dir", self.container_home_dir]) @@ -271,48 +268,6 @@ def _handle_plugins(self): f"ZAP did not handle the addon requirements correctly, and exited with code {result.returncode}" ) - def _check_plugin_status(self): - """MacOS workaround for "The mandatory add-on was not found" error - See https://github.com/zaproxy/zaproxy/issues/7703 - """ - logging.info("Zap: verifying the viability of ZAP") - - command = [self.my_conf("container.parameters.executable")] - command.extend(self._get_standard_options()) - command.extend(["-dir", self.container_home_dir]) - command.append("-cmd") - - logging.debug(f"ZAP create home command: {command}") - result = subprocess.run(command, check=False, capture_output=True) - if result.returncode == 0: - logging.debug("ZAP appears to be in a correct state") - elif result.stderr.find(bytes("The mandatory add-on was not found:", "ascii")) > 0: - logging.info("Missing mandatory plugins. Fixing") - url_root = "https://github.com/zaproxy/zap-extensions/releases/download" - anonymous_download( - url=f"{url_root}/callhome-v0.6.0/callhome-release-0.6.0.zap", - dest=f"{self.host_home_dir}/plugin/callhome-release-0.6.0.zap", - proxy=self.my_conf("proxy", default=None), - ) - anonymous_download( - url=f"{url_root}/network-v0.9.0/network-beta-0.9.0.zap", - dest=f"{self.host_home_dir}/plugin/network-beta-0.9.0.zap", - proxy=self.my_conf("proxy", default=None), - ) - logging.info("Workaround: installing all addons") - - command = [self.my_conf("container.parameters.executable")] - command.extend(self._get_standard_options()) - command.extend(["-dir", self.container_home_dir]) - command.append("-cmd") - command.append("-addoninstallall") - - logging.debug(f"ZAP: installing all addons: {command}") - result = subprocess.run(command, check=False) - - else: - logging.warning(f"ZAP appears to be in a incorrect state. Error: {result.stderr}") - def _create_home_if_needed(self): """Some tools (most notably: ZAP's Ajax Spider with Firefox) require a writable home directory. When RapiDAST is run in Openshift, the user's home is /, which is not writable. 
diff --git a/scanners/zap/zap_podman.py b/scanners/zap/zap_podman.py index 131a97fe..0c769afc 100644 --- a/scanners/zap/zap_podman.py +++ b/scanners/zap/zap_podman.py @@ -103,7 +103,26 @@ def run(self): if not self.state == State.READY: raise RuntimeError("[ZAP SCANNER]: ERROR, not ready to run") - cli = self._handle_plugins() + # zap_podman's _handle_plugins simply returns the CLI to run as a list + # (empty if no update is needed), to be assembled with the scan command + # in a single `sh` wrapper + plugins_cmd = self._handle_plugins() + if plugins_cmd: + # We need to merge the update and the scan in a single `sh` wrapped + # command, split by `;` + # 1) protect & turn the update command into a string + full_cmd_as_string = self._zap_cli_list_to_str_for_sh(plugins_cmd) + # 2) Add a separator + full_cmd_as_string += "; " + # 3) protect & turn the scan command into a string + full_cmd_as_string += self._zap_cli_list_to_str_for_sh(self.zap_cli) + + cli = ["sh", "-c", full_cmd_as_string] + + else: + # No update: we can run a single scan command + cli = self.zap_cli + cli = self.podman.get_complete_cli(cli) # DO STUFF @@ -199,19 +218,14 @@ def _handle_plugins(self): By running a separate instance of ZAP prior to the real scan. This is required because some addons require a restart of ZAP. - In "podman" mode, we have to run both the plugin command and - the scan command in the same run. So we inject that in shell. + In "podman" mode, we have to run both the plugin command and the scan + command in the same run, so the _handle_plugins function itself can't + run the update. + So we simply return `get_update_command()` (the command to run the + update, or an empty list) and let the caller decide how to use it. """ - - shell = ["sh", "-c"] - update_cmd = self._zap_cli_list_to_str_for_sh(self.get_update_command()) - if update_cmd: - update_cmd += "; " - update_cmd += self._zap_cli_list_to_str_for_sh(self.zap_cli) - shell.append(update_cmd) - - logging.debug(f"Update command: {shell}") - return shell + return self.get_update_command() def _setup_podman_cli(self): """Prepare the podman command.
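To illustrate, the single-wrapper assembly in zap_podman.run() can be sketched as follows (merge_for_sh is a hypothetical helper; the real quoting lives in _zap_cli_list_to_str_for_sh and may differ):

    import shlex

    def merge_for_sh(update_cmd: list, scan_cmd: list) -> list:
        # no update requested: run the scan command directly
        if not update_cmd:
            return scan_cmd
        # quote each argument, then chain both commands under one `sh -c`
        joined = "; ".join(" ".join(shlex.quote(a) for a in cmd) for cmd in (update_cmd, scan_cmd))
        return ["sh", "-c", joined]

    # merge_for_sh(["zap.sh", "-addonupdate"], ["zap.sh", "-cmd"])
    # -> ['sh', '-c', 'zap.sh -addonupdate; zap.sh -cmd']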
diff --git a/tests/scanners/nessus/test_nessus.py b/tests/scanners/nessus/test_nessus.py new file mode 100644 index 00000000..69cbec30 --- /dev/null +++ b/tests/scanners/nessus/test_nessus.py @@ -0,0 +1,42 @@ +from unittest.mock import Mock +from unittest.mock import patch + +import pytest +import requests + +import configmodel +import rapidast +from scanners.nessus.nessus_none import Nessus + + +class TestNessus: + @patch("py_nessus_pro.PyNessusPro._authenticate") + @patch("requests.Session.request") + def test_setup_nessus(self, mock_get, auth): + # All this mocking is for PyNessusPro.__init__() which attempts to connect to Nessus + mock_get.return_value = Mock(spec=requests.Response) + mock_get.return_value.status_code = 200 + mock_get.return_value.text = '{"token": "foo", "folders": []}' + + config_data = rapidast.load_config("config/config-template-nessus.yaml") + config = configmodel.RapidastConfigModel(config_data) + test_nessus = Nessus(config=config) + assert test_nessus is not None + assert test_nessus.nessus_client is not None + + @patch("py_nessus_pro.PyNessusPro._authenticate") + @patch("requests.Session.request") + def test_setup_nessus_auth(self, mock_get, auth): + # All this mocking is for PyNessusPro.__init__() which attempts to connect to Nessus + mock_get.return_value = Mock(spec=requests.Response) + mock_get.return_value.status_code = 200 + mock_get.return_value.text = '{"token": "foo", "folders": []}' + + config_data = rapidast.load_config("config/config-template-nessus.yaml") + config = configmodel.RapidastConfigModel(config_data) + + authentication = {"type": "invalid", "parameters": {"name": "Authorizaiton", "value": "123"}} + config.set("scanners.nessus.authentication", authentication) + + with pytest.raises(RuntimeError, match="The authentication option is not supported"): + test_nessus = Nessus(config=config) diff --git a/tests/scanners/nessus/tools/test_convert_nessus_csv_to_sarif.py b/tests/scanners/nessus/tools/test_convert_nessus_csv_to_sarif.py new file mode 100644 index 00000000..92001831 --- /dev/null +++ b/tests/scanners/nessus/tools/test_convert_nessus_csv_to_sarif.py @@ -0,0 +1,77 @@ +import pytest + +from scanners.nessus.tools.convert_nessus_csv_to_sarif import convert_csv_to_sarif +from scanners.nessus.tools.convert_nessus_csv_to_sarif import is_file +from scanners.nessus.tools.convert_nessus_csv_to_sarif import map_level +from scanners.nessus.tools.convert_nessus_csv_to_sarif import nessus_info +from scanners.nessus.tools.convert_nessus_csv_to_sarif import uri + +TEST_DATA_DIR = "tests/scanners/nessus/tools/test_data_convert_nessus_csv_to_sarif/" + + +def test_map_level(): + """ + Tests map level function returns appropriate SARIF equivalent + """ + assert map_level("Critical") == "error" + assert map_level("High") == "error" + assert map_level("Medium") == "warning" + assert map_level("Low") == "note" + assert map_level("None") == "none" + assert map_level("foo") == "none" + + +def test_nessus_info(): + """ + Tests nessus_info function to extract information from plugin 19506 + """ + # Abbreviated output + plugin_output = ( + "Information about this scan : \n" + "\n" + "Nessus version : 10.8.3\n" + "Nessus build : 20010\n" + "Plugin feed version : 202410091249\n" + "Scanner edition used : Nessus\n" + "Scanner OS : LINUX\n" + ) + + assert nessus_info("Nessus version", plugin_output) == "10.8.3" + assert nessus_info("Scanner edition used", plugin_output) == "Nessus" + assert nessus_info("Does not exist", plugin_output) == "DNE" + + +def 
+    """
+    Tests the is_file function, used to determine whether a file or stdin should be used
+    """
+    assert is_file(None) is False
+    assert is_file("-") is False
+    assert is_file("filename") is True
+
+
+def test_uri():
+    """
+    Tests the uri function, which formats the string used in the artifactLocation.uri field
+    """
+    assert uri("localhost", "443") == "localhost:443"
+    assert uri("localhost", "0") == "localhost"
+
+
+def test_convert_csv_to_sarif_file():
+    """
+    Tests conversion using a CSV file
+    """
+    csv_file = TEST_DATA_DIR + "nessus_TEST.csv"
+    sarif_result = convert_csv_to_sarif(csv_file)
+
+    assert sarif_result["runs"][0]["tool"]["driver"]["name"] == "Nessus"
+    assert sarif_result["runs"][0]["tool"]["driver"]["fullName"] == "Nessus 10.8.3 py-test Policy"
+    assert sarif_result["runs"][0]["tool"]["driver"]["rules"][0]["id"] == "10180"
+    assert sarif_result["runs"][0]["tool"]["driver"]["rules"][1]["id"] == "19506"
+    assert len(sarif_result["runs"][0]["results"]) == 2
+    assert sarif_result["version"] == "2.1.0"
+    assert (
+        sarif_result["runs"][0]["results"][1]["locations"][0]["physicalLocation"]["artifactLocation"]["uri"]
+        == "127.0.0.1"
+    )
diff --git a/tests/scanners/nessus/tools/test_data_convert_nessus_csv_to_sarif/nessus_TEST.csv b/tests/scanners/nessus/tools/test_data_convert_nessus_csv_to_sarif/nessus_TEST.csv
new file mode 100644
index 00000000..8783f644
--- /dev/null
+++ b/tests/scanners/nessus/tools/test_data_convert_nessus_csv_to_sarif/nessus_TEST.csv
@@ -0,0 +1,68 @@
+Plugin ID,CVE,CVSS v2.0 Base Score,Risk,Host,Protocol,Port,Name,Synopsis,Description,Solution,See Also,Plugin Output,STIG Severity,CVSS v3.0 Base Score,CVSS v2.0 Temporal Score,CVSS v3.0 Temporal Score,VPR Score,Risk Factor,BID,XREF,MSKB,Plugin Publication Date,Plugin Modification Date,Metasploit,Core Impact,CANVAS
+10180,,,None,127.0.0.1,tcp,0,Ping the remote host,"It was possible to identify the status of the remote host (alive or
+dead).","Nessus was able to determine if the remote host is alive using one or
+more of the following ping types :
+
+  - An ARP ping, provided the host is on the local subnet
+    and Nessus is running over Ethernet.
+
+  - An ICMP ping.
+
+  - A TCP ping, in which the plugin sends to the remote host
+    a packet with the flag SYN, and the host will reply with
+    a RST or a SYN/ACK.
+
+  - A UDP ping (e.g., DNS, RPC, and NTP).",n/a,,"The remote host is up
+The host is the local scanner.",,,,,,None,,,,1999/06/24,2024/03/25,,,
+19506,,,None,127.0.0.1,tcp,0,Nessus Scan Information,This plugin displays information about the Nessus scan.,"This plugin displays, for each tested host, information about the
+scan itself :
+
+  - The version of the plugin set.
+  - The type of scanner (Nessus or Nessus Home).
+  - The version of the Nessus Engine.
+  - The port scanner(s) used.
+  - The port range scanned.
+  - The ping round trip time
+  - Whether credentialed or third-party patch management
+    checks are possible.
+  - Whether the display of superseded patches is enabled
+  - The date of the scan.
+  - The duration of the scan.
+  - The number of hosts scanned in parallel.
+  - The number of checks done in parallel.",n/a,,"Information about this scan : 
+
+Nessus version : 10.8.3
+Nessus build : 20010
+Plugin feed version : 202410091249
+Scanner edition used : Nessus
+Scanner OS : LINUX
+Scanner distribution : fc38-x86-64
+Scan type : Normal
+Scan name : py-nessus-test
+Scan policy used : py-test
+Scanner IP : 127.0.0.1
+Ping RTT : Unavailable
+Thorough tests : no
+Experimental tests : no
+Scan for Unpatched Vulnerabilities : no
+Plugin debugging enabled : no
+Paranoia level : 1
+Report verbosity : 1
+Safe checks : yes
+Optimize the test : no
+Credentialed checks : no
+Patch management checks : None
+Display superseded patches : yes (supersedence plugin did not launch)
+CGI scanning : disabled
+Web application tests : disabled
+Max hosts : 256
+Max checks : 5
+Recv timeout : 5
+Backports : None
+Allow post-scan editing : Yes
+Nessus Plugin Signature Checking : Enabled
+Audit File Signature Checking : Disabled
+Scan Start Date : 2024/10/24 18:36 UTC
+Scan duration : 1 sec
+Scan for malware : no
+",,,,,,None,,,,2005/08/26,2024/10/04,,,
diff --git a/tests/scanners/zap/test_copy_site_tree.py b/tests/scanners/zap/test_copy_site_tree.py
new file mode 100644
index 00000000..40ce41d4
--- /dev/null
+++ b/tests/scanners/zap/test_copy_site_tree.py
@@ -0,0 +1,48 @@
+import os
+from unittest.mock import MagicMock
+from unittest.mock import patch
+
+import pytest
+
+import configmodel
+from scanners.zap.zap_none import ZapNone
+
+
+@pytest.fixture(scope="function")
+def test_config():
+    return configmodel.RapidastConfigModel({"application": {"url": "http://example.com"}})
+
+
+@patch("os.path.exists")
+@patch("scanners.zap.zap.shutil.copy")
+@patch("scanners.zap.zap.shutil.copytree")
+@patch("scanners.zap.zap.tarfile")
+def test_zap_none_postprocess_copy_site_tree_path(mock_tarfile, mock_copytree, mock_copy, mock_exists, test_config):
+    mock_exists.return_value = True
+
+    test_zap = ZapNone(config=test_config)
+    with patch.object(test_zap, "_copy_site_tree") as mock_copy_site_tree:
+        test_zap.postprocess()
+        mock_copy_site_tree.assert_called_once()
+
+
+@patch("os.path.exists")
+@patch("shutil.copy")
+def test_copy_site_tree_success(mock_copy, mock_exists, test_config):
+    mock_exists.return_value = True
+    test_zap = ZapNone(config=test_config)
+    test_zap._copy_site_tree()
+
+    mock_copy.assert_called_once_with(
+        os.path.join(test_zap.host_work_dir, f"session_data/{ZapNone.SITE_TREE_FILENAME}"), test_zap.results_dir
+    )
+
+
+@patch("os.path.exists")
+@patch("shutil.copy")
+def test_copy_site_tree_file_not_found(mock_copy, mock_exists, test_config):
+    mock_exists.return_value = False
+    test_zap = ZapNone(config=test_config)
+    test_zap._copy_site_tree()
+
+    assert not mock_copy.called
diff --git a/tests/scanners/zap/test_setup.py b/tests/scanners/zap/test_setup.py
index b9ddde13..e98b007c 100644
--- a/tests/scanners/zap/test_setup.py
+++ b/tests/scanners/zap/test_setup.py
@@ -1,12 +1,15 @@
 import os
-import re
 from pathlib import Path
+from unittest.mock import MagicMock
+from unittest.mock import patch
 
 import pytest
 import requests
 
 import configmodel.converter
+import scanners
 from scanners.zap.zap import find_context
+from scanners.zap.zap import MODULE_DIR
 from scanners.zap.zap_none import ZapNone
 
 # from pytest_mock import mocker
@@ -62,6 +65,15 @@ def test_setup_no_api_config(test_config):
 
     test_zap.setup()
 
+    # openapi job is not added when no openapi config exists
+    test_config.delete("scanners.zap.apiScan")
+    test_zap = ZapNone(config=test_config)
+
+    test_zap.setup()
+
+    for item in test_zap.automation_config["jobs"]:
+        assert item["type"] != "openapi"
+
 
 ## Testing Authentication methods ##
 ### Handling Authentication is different depending on the container.type so it'd be better to have test cases separately
 
@@ -224,9 +236,9 @@ def test_setup_include_urls(test_config):
     assert "def" in find_context(test_zap.automation_config)["includePaths"]
 
 
-def test_setup_active_scan(test_config):
+@patch("scanners.zap.zap.validate_active_scan_policy")
+def test_setup_active_scan(mock_validate_active_scan_policy, test_config):
     test_config.set("scanners.zap.activeScan.maxRuleDurationInMins", 10)
-
     test_zap = ZapNone(config=test_config)
 
     test_zap.setup()
@@ -236,6 +248,10 @@ def test_setup_active_scan(test_config):
             assert item["parameters"]["maxRuleDurationInMins"] == 10
             assert item["parameters"]["context"] == "Default Context"
             assert item["parameters"]["user"] == ""
+            mock_validate_active_scan_policy.assert_called_once_with(
+                policy_path=Path(f"{MODULE_DIR}/policies/API-scan-minimal.policy")
+            )
+
             break
     else:
         assert False
@@ -404,3 +420,36 @@ def test_get_update_command(test_config):
     assert "-addonupdate" in test_zap.get_update_command()
     assert "pluginA" in test_zap.get_update_command()
     assert "pluginB" in test_zap.get_update_command()
+
+
+# Export Site Tree
+
+
+def test_setup_export_site_tree(test_config, pytestconfig):
+    test_zap = ZapNone(config=test_config)
+    test_zap.setup()
+
+    add_script = None
+    run_script = None
+    add_variable_script = None
+    run_variable_script = None
+
+    for item in test_zap.automation_config["jobs"]:
+        if item["name"] == "export-site-tree-add":
+            add_script = item
+        if item["name"] == "export-site-tree-run":
+            run_script = item
+        if item["name"] == "export-site-tree-filename-global-var-add":
+            add_variable_script = item
+        if item["name"] == "export-site-tree-filename-global-var-run":
+            run_variable_script = item
+
+    assert add_script and run_script and add_variable_script and run_variable_script
+
+    assert add_script["parameters"]["name"] == run_script["parameters"]["name"]
+    assert add_script["parameters"]["file"] == f"{pytestconfig.rootpath}/scanners/zap/scripts/export-site-tree.js"
+    assert add_script["parameters"]["engine"] == "ECMAScript : Graal.js"
+
+    assert add_variable_script["parameters"]["name"] == run_variable_script["parameters"]["name"]
+    assert add_variable_script["parameters"]["inline"]
+    assert add_variable_script["parameters"]["engine"] == "ECMAScript : Graal.js"
diff --git a/tests/scanners/zap/test_setup_activescan_policy_validation.py b/tests/scanners/zap/test_setup_activescan_policy_validation.py
new file mode 100644
index 00000000..115fcd04
--- /dev/null
+++ b/tests/scanners/zap/test_setup_activescan_policy_validation.py
@@ -0,0 +1,85 @@
+from pathlib import Path
+
+import pytest
+
+from scanners.zap.zap import InvalidXMLFileError
+from scanners.zap.zap import MismatchedPolicyNameError
+from scanners.zap.zap import MissingConfigurationNodeError
+from scanners.zap.zap import MissingPolicyNodeError
+from scanners.zap.zap import PolicyFileNotFoundError
+from scanners.zap.zap import validate_active_scan_policy
+
+
+@pytest.fixture
+def valid_policy_file(tmp_path):
+    policy_name = "policy1"
+    valid_xml_content = """<configuration>
+        <policy>policy1</policy>
+        <threshold>100</threshold>
+    </configuration>"""
+    file_path = tmp_path / f"{policy_name}.policy"
+    file_path.write_text(valid_xml_content)
+    return file_path
+
+
+@pytest.fixture
+def invalid_xml_file(tmp_path):
+    policy_name = "policy1"
+    invalid_xml_content = """<configuration>
+        <policy>policy2</policy>
+    </configuration>"""  # Mismatched policy name
+    file_path = tmp_path / f"{policy_name}.policy"
f"{policy_name}.policy" + file_path.write_text(invalid_xml_content) + return file_path + + +@pytest.fixture +def missing_policy_file(): + return Path("/non/existent/path/policy1.policy") + + +def test_valid_policy(valid_policy_file): + validate_active_scan_policy(valid_policy_file) + + +def test_missing_policy_file(missing_policy_file): + with pytest.raises(PolicyFileNotFoundError): + validate_active_scan_policy(missing_policy_file) + + +def test_invalid_xml_file(invalid_xml_file): + with pytest.raises(MismatchedPolicyNameError): + validate_active_scan_policy(invalid_xml_file) + + +def test_invalid_xml_parse(invalid_xml_file): + invalid_xml_content = """ + policy1 + + policy1 + """ + file_path = invalid_xml_file + file_path.write_text(invalid_xml_content) + + with pytest.raises(MissingConfigurationNodeError): + validate_active_scan_policy(file_path) + + +def test_missing_policy_node(invalid_xml_file): + invalid_xml_content = """ + policy1 + """ + file_path = invalid_xml_file + file_path.write_text(invalid_xml_content) + + with pytest.raises(MissingPolicyNodeError): + validate_active_scan_policy(file_path) diff --git a/tests/scanners/zap/test_setup_podman.py b/tests/scanners/zap/test_setup_podman.py index 550458ac..7aefc953 100644 --- a/tests/scanners/zap/test_setup_podman.py +++ b/tests/scanners/zap/test_setup_podman.py @@ -185,10 +185,17 @@ def test_podman_handling_plugins(test_config): assert "pluginB" in test_zap.get_update_command() shell = test_zap._handle_plugins() - assert len(shell) == 3 - assert shell[0] == "sh" - assert shell[1] == "-c" - assert re.search( - "^zap.sh .* -cmd -addonupdate -addoninstall pluginA -addoninstall pluginB; .*", - shell[2], - ) + assert_shell = [ + "zap.sh", + "-config", + "network.connection.httpProxy.enabled=false", + "-config", + "network.localServers.mainProxy.port=47691", + "-cmd", + "-addonupdate", + "-addoninstall", + "pluginA", + "-addoninstall", + "pluginB", + ] + assert shell == assert_shell diff --git a/tests/test_dump_redacted_config.py b/tests/test_dump_redacted_config.py new file mode 100644 index 00000000..0dc9c45d --- /dev/null +++ b/tests/test_dump_redacted_config.py @@ -0,0 +1,68 @@ +from unittest.mock import mock_open +from unittest.mock import patch + +import pytest +import yaml + +from rapidast import DEFAULT_CONFIG_FILE +from rapidast import dump_rapidast_redacted_configs +from rapidast import dump_redacted_config + + +@pytest.fixture +def mock_yaml_data() -> dict: + return { + "service1": {"authentication": {"parameters": {"username": "admin", "password": "secret"}}}, + "service2": {"authentication": {"parameters": {"api_key": "123456"}}}, + } + + +@patch("yaml.safe_load") +@patch("yaml.dump") +@patch("builtins.open", new_callable=mock_open) +@patch("rapidast.load_config_file") +def test_dump_redacted_config_success( + mock_load_config_file, mock_open_func, mock_yaml_dump, mock_yaml_load, mock_yaml_data: dict +) -> None: + expected_redacted_data = { + "service1": {"authentication": {"parameters": {"username": "*****", "password": "*****"}}}, + "service2": {"authentication": {"parameters": {"api_key": "*****"}}}, + } + mock_yaml_load.return_value = mock_yaml_data + success = dump_redacted_config("config.yaml", "destination_dir") + + assert success + + mock_open_func.assert_called_once_with("destination_dir/config.yaml", "w", encoding="utf-8") + mock_yaml_dump.assert_called_once_with(expected_redacted_data, mock_open_func()) + + +@patch("rapidast.load_config_file") +def test_dump_redacted_exceptions(mock_load_config_file) -> None: + 
+    for e in (FileNotFoundError, yaml.YAMLError, IOError):
+        mock_load_config_file.side_effect = e
+        success = dump_redacted_config("invalid_config.yaml", "destination_dir")
+        assert not success
+
+
+@patch("os.makedirs")
+@patch("os.path.exists")
+@patch("rapidast.load_config_file")
+def test_dump_redacted_config_creates_destination_dir(mock_load_config_file, mock_exists, mock_os_makedirs) -> None:
+    # Raising a FileNotFoundError to simulate the absence of the configuration file and stop the process
+    mock_load_config_file.side_effect = FileNotFoundError
+    mock_exists.return_value = False
+    _ = dump_redacted_config("config.yaml", "destination_dir")
+
+    mock_os_makedirs.assert_called_with("destination_dir")
+
+
+@patch("os.path.exists")
+@patch("rapidast.dump_redacted_config")
+def test_dump_rapidast_redacted_configs(mock_dump_redacted_config, mock_exists):
+    mock_exists.return_value = True
+    dump_rapidast_redacted_configs("config.yaml", "destination_dir")
+
+    mock_exists.assert_called_once_with(DEFAULT_CONFIG_FILE)
+    mock_dump_redacted_config.assert_any_call(DEFAULT_CONFIG_FILE, "destination_dir")
+    mock_dump_redacted_config.assert_any_call("config.yaml", "destination_dir")
diff --git a/tests/test_rapidast_run_scanner.py b/tests/test_rapidast_run_scanner.py
new file mode 100644
index 00000000..cf05a370
--- /dev/null
+++ b/tests/test_rapidast_run_scanner.py
@@ -0,0 +1,61 @@
+from unittest.mock import MagicMock
+from unittest.mock import patch
+
+import rapidast
+from rapidast import scanners
+
+
+@patch("rapidast.scanners.str_to_scanner")
+def test_run_scanner_setup_failure(mock_str_to_scanner):
+    """
+    Test that if an exception occurs during `scanner.setup`, the `run_scanner` method
+    catches the exception, returns 1, and updates the scanner's state to 'ERROR'
+    """
+
+    mock_config = MagicMock()
+    mock_args = MagicMock()
+    mock_scan_exporter = MagicMock()
+
+    mock_scanner = MagicMock()
+    mock_str_to_scanner.return_value = lambda config, name: mock_scanner
+
+    mock_scanner.setup.side_effect = Exception("Setup failed")
+
+    result = rapidast.run_scanner("mock_name", mock_config, mock_args, mock_scan_exporter)
+
+    assert result == 1
+    mock_scanner.setup.assert_called_once()
+    assert mock_scanner.state == scanners.State.ERROR
+
+
+@patch("rapidast.scanners.str_to_scanner")
+def test_run_scanner_setup_success(mock_str_to_scanner):
+    """
+    Test that if `scanner.setup` is successful, `run_scanner` continues as expected.
+    Subsequent actions are mocked to focus on ensuring `run_scanner` returns a successful
+    result (0)
+    """
+
+    def update_state(state):
+        mock_scanner.state = state
+
+    def update_state_ready():
+        update_state(scanners.State.READY)
+
+    def update_state_processed():
+        update_state(scanners.State.PROCESSED)
+
+    mock_config = MagicMock()
+    mock_args = MagicMock()
+    mock_scan_exporter = MagicMock()
+
+    mock_scanner = MagicMock()
+    mock_str_to_scanner.return_value = lambda config, name: mock_scanner
+
+    mock_scanner.setup.side_effect = update_state_ready
+    mock_scanner.postprocess.side_effect = update_state_processed
+
+    result = rapidast.run_scanner("mock_name", mock_config, mock_args, mock_scan_exporter)
+
+    assert result == 0
+    mock_scanner.setup.assert_called_once()
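The two run_scanner tests above drive the mock's state through side_effect callbacks rather than through real scanner code. A minimal standalone sketch of that pattern, with illustrative names (not RapiDAST code):

    from unittest.mock import MagicMock

    mock_scanner = MagicMock()

    def mark_ready():
        # runs whenever the mocked setup() is called, simulating the
        # state transition the real method would perform
        mock_scanner.state = "READY"

    mock_scanner.setup.side_effect = mark_ready
    mock_scanner.setup()
    assert mock_scanner.state == "READY"

Because run_scanner checks the scanner's state between phases, updating the attribute from a side_effect is enough to steer the test down the success path without executing any real scanner logic.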