From 64cb1db5db435cf87cfefd69ee7e3a7347c6e75d Mon Sep 17 00:00:00 2001
From: Sam
Date: Fri, 15 Nov 2024 15:35:12 +1000
Subject: [PATCH] wip: add nessus e2e test

---
 .tekton/integration-test.yaml                |  76 +++++++-
 e2e-tests/conftest.py                        | 160 ++++++++++++++++
 e2e-tests/manifests/nessus-deployment.yaml   |  47 +++++
 e2e-tests/manifests/nessus-service.yaml      |  19 ++
 .../manifests/rapidast-nessus-configmap.yaml |  51 ++++++
 e2e-tests/manifests/rapidast-nessus-pod.yaml |  37 ++++
 e2e-tests/test_integration.py                | 172 ++----------------
 e2e-tests/test_nessus.py                     |  30 +++
 pyproject.toml                               |   2 +
 9 files changed, 436 insertions(+), 158 deletions(-)
 create mode 100644 e2e-tests/conftest.py
 create mode 100644 e2e-tests/manifests/nessus-deployment.yaml
 create mode 100644 e2e-tests/manifests/nessus-service.yaml
 create mode 100644 e2e-tests/manifests/rapidast-nessus-configmap.yaml
 create mode 100644 e2e-tests/manifests/rapidast-nessus-pod.yaml
 create mode 100644 e2e-tests/test_nessus.py
 create mode 100644 pyproject.toml

diff --git a/.tekton/integration-test.yaml b/.tekton/integration-test.yaml
index 1fcd1d05..da51e1c1 100644
--- a/.tekton/integration-test.yaml
+++ b/.tekton/integration-test.yaml
@@ -43,6 +43,26 @@ spec:
         - name: SNAPSHOT
           value: $(params.SNAPSHOT)
 
+    - name: provision-eaas-space-nessus
+      runAfter:
+        - parse-metadata
+      taskRef:
+        resolver: git
+        params:
+          - name: url
+            value: https://github.com/konflux-ci/build-definitions.git
+          - name: revision
+            value: main
+          - name: pathInRepo
+            value: task/provision-env-with-ephemeral-namespace/0.1/provision-env-with-ephemeral-namespace.yaml
+      params:
+        - name: KONFLUXNAMESPACE
+          value: $(context.pipelineRun.namespace)
+        - name: PIPELINERUN_NAME
+          value: $(context.pipelineRun.name)
+        - name: PIPELINERUN_UID
+          value: $(context.pipelineRun.uid)
+
     - name: provision-eaas-space
       runAfter:
         - parse-metadata
@@ -131,5 +151,59 @@ spec:
             yum install -y python3.12
             python3.12 -m ensurepip
             pip3 install -r requirements.txt -r requirements-dev.txt
-            pytest -s e2e-tests --json-report --json-report-summary --json-report-file $(results.TEST_RESULTS.path)
+            pytest -s e2e-tests/test_integration.py --json-report --json-report-summary --json-report-file $(results.TEST_RESULTS.path)
             cat $(results.TEST_RESULTS.path)
+
+    - name: run-e2e-tests
+      runAfter:
+        - provision-eaas-space-nessus
+      taskSpec:
+        volumes:
+          - name: credentials
+            emptyDir: {}
+        results:
+          - name: TEST_RESULTS
+            description: e2e test results
+        steps:
+
+          # XXX using workspaces in integration tests is not supported:
+          # * https://issues.redhat.com/browse/STONEINTG-895
+          - name: clone-repository
+            image: quay.io/konflux-ci/git-clone:latest
+            script: |
+              git config --global --add safe.directory /workspace
+              git clone "$(tasks.parse-metadata.results.source-git-url)" /workspace
+              pushd /workspace
+              git checkout "$(tasks.parse-metadata.results.source-git-revision)"
+
+          - name: test
+            image: registry.redhat.io/openshift4/ose-cli:latest
+            env:
+              - name: KUBECONFIG
+                value: /tmp/kubeconfig
+              - name: KUBECONFIG_VALUE
+                valueFrom:
+                  secretKeyRef:
+                    name: $(tasks.provision-eaas-space-nessus.results.secretRef)
+                    key: kubeconfig
+              - name: RAPIDAST_CLEANUP
+                value: "false" # namespace will be cleaned up automatically
+              - name: RAPIDAST_IMAGE
+                value: $(tasks.parse-metadata.results.component-container-image)
+              - name: RAPIDAST_SERVICEACCOUNT
+                value: namespace-manager # created by provision-env-with-ephemeral-namespace
+            workingDir: /workspace
+            volumeMounts:
+              - name: credentials
+                mountPath: /credentials
+            script: |
+              #!/bin/bash -ex
+
+              echo "$KUBECONFIG_VALUE" > "$KUBECONFIG"
+              oc whoami
+
+              yum install -y python3.12
+              python3.12 -m ensurepip
+              pip3 install -r requirements.txt -r requirements-dev.txt
+              pytest -s e2e-tests/test_nessus.py --json-report --json-report-summary --json-report-file $(results.TEST_RESULTS.path)
+              cat $(results.TEST_RESULTS.path)
diff --git a/e2e-tests/conftest.py b/e2e-tests/conftest.py
new file mode 100644
index 00000000..551fe985
--- /dev/null
+++ b/e2e-tests/conftest.py
@@ -0,0 +1,160 @@
+import os
+import shutil
+import tempfile
+import time
+
+import certifi
+from kubernetes import client, config, utils, watch
+from kubernetes.client.rest import ApiException
+
+NAMESPACE = os.getenv("RAPIDAST_NAMESPACE", "")  # e.g. rapidast--pipeline
+SERVICEACCOUNT = os.getenv("RAPIDAST_SERVICEACCOUNT", "pipeline")  # name of ServiceAccount used in rapidast pod
+RAPIDAST_IMAGE = os.getenv("RAPIDAST_IMAGE", "quay.io/redhatproductsecurity/rapidast:development")
+# delete resources created by tests
+RAPIDAST_CLEANUP = os.getenv("RAPIDAST_CLEANUP", "True").lower() in ("true", "1", "t", "y", "yes")
+
+MANIFESTS = "e2e-tests/manifests"
+
+
+# monkeypatch certifi so that internal CAs are trusted
+def where():
+    return os.getenv("REQUESTS_CA_BUNDLE", "/etc/pki/tls/certs/ca-bundle.crt")
+
+
+certifi.where = where
+
+
+def wait_until_ready(**kwargs):
+    corev1 = client.CoreV1Api()
+    w = watch.Watch()
+    timeout = kwargs.pop("timeout", 120)
+
+    start_time = time.time()
+
+    while time.time() - start_time < timeout:
+        try:
+            pods = corev1.list_namespaced_pod(namespace=NAMESPACE, **kwargs)
+            if len(pods.items) != 1:
+                raise RuntimeError(f"Unexpected number of pods {len(pods.items)} matching: {kwargs}")
+            pod = pods.items[0]
+
+            # Check if pod is ready by looking at conditions
+            if pod.status.conditions:
+                for condition in pod.status.conditions:
+                    if condition.type == "Ready":
+                        print(f"{pod.metadata.name} Ready={condition.status}")
+                        if condition.status == "True":
+                            return True
+
+            time.sleep(2)
+        except client.ApiException as e:
+            print(f"Error checking pod status: {e}")
+            return False
+
+
+# simulates: $ oc logs -f | tee
+def tee_log(pod_name: str, filename: str):
+    corev1 = client.CoreV1Api()
+    w = watch.Watch()
+    with open(filename, "w", encoding="utf-8") as f:
+        for e in w.stream(corev1.read_namespaced_pod_log, name=pod_name, namespace=NAMESPACE):
+            if not isinstance(e, str):
+                continue  # Watch.stream() can yield non-string types
+            f.write(e + "\n")
+            print(e)
+
+
+def render_manifests(input_dir, output_dir):
+    shutil.copytree(input_dir, output_dir, dirs_exist_ok=True)
+    print(f"rendering manifests in {output_dir}")
+    print(f"using serviceaccount {SERVICEACCOUNT}")
+    # XXX should probably replace this with something like kustomize
+    for filepath in os.scandir(output_dir):
+        with open(filepath, "r", encoding="utf-8") as f:
+            contents = f.read()
+            contents = contents.replace("${IMAGE}", RAPIDAST_IMAGE)
+            contents = contents.replace("${SERVICEACCOUNT}", SERVICEACCOUNT)
+        with open(filepath, "w", encoding="utf-8") as f:
+            f.write(contents)
+
+
+def setup_namespace():
+    global NAMESPACE  # pylint: disable=W0603
+    # only try to create a namespace if env is set
+    if NAMESPACE == "":
+        NAMESPACE = get_current_namespace()
+    else:
+        create_namespace(NAMESPACE)
+    print(f"using namespace '{NAMESPACE}'")
+
+
+def get_current_namespace() -> str:
+    try:
+        # Load the kubeconfig
+        config.load_config()
+
+        # Get the kube config object
+        _, active_context = config.list_kube_config_contexts()
+
+        # Return the namespace from current context
+        if active_context and "namespace" in active_context["context"]:
+            return active_context["context"]["namespace"]
+        return "default"
+
+    except config.config_exception.ConfigException:
+        # If running inside a pod
+        try:
+            with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r", encoding="utf-8") as f:
+                return f.read().strip()
+        except FileNotFoundError:
+            return "default"
+
+
+def create_namespace(namespace_name: str):
+    config.load_config()
+    corev1 = client.CoreV1Api()
+    try:
+        corev1.read_namespace(namespace_name)
+        print(f"namespace {namespace_name} already exists")
+    except ApiException as e:
+        if e.status == 404:
+            print(f"creating namespace {namespace_name}")
+            namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace_name))
+            corev1.create_namespace(namespace)
+        else:
+            raise e
+    except Exception as e:  # pylint: disable=W0718
+        print(f"error reading namespace {namespace_name}: {e}")
+
+
+def cleanup():
+    if RAPIDAST_CLEANUP:
+        os.system(f"kubectl delete -f {MANIFESTS}/")
+        # XXX oobtukbe does not clean up after itself
+        os.system("kubectl delete Task/vulnerable")
+
+
+def new_kclient():
+    config.load_config()
+    return client.ApiClient()
+
+
+class TestBase:
+    @classmethod
+    def setup_class(cls):
+        cls.tempdir = tempfile.mkdtemp()
+        cls.kclient = new_kclient()
+        render_manifests(MANIFESTS, cls.tempdir)
+        print(f"testing with image: {RAPIDAST_IMAGE}")
+        setup_namespace()
+        cleanup()
+
+    @classmethod
+    def teardown_class(cls):
+        # TODO teardown should really occur after each test, so that the
+        # resource count does not grow until the quota is reached
+        cleanup()
+
+    def create_from_yaml(self, path: str):
+        # simple wrapper to reduce repetition
+        utils.create_from_yaml(self.kclient, path, namespace=NAMESPACE, verbose=True)
diff --git a/e2e-tests/manifests/nessus-deployment.yaml b/e2e-tests/manifests/nessus-deployment.yaml
new file mode 100644
index 00000000..a7a06835
--- /dev/null
+++ b/e2e-tests/manifests/nessus-deployment.yaml
@@ -0,0 +1,47 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nessus
+  labels:
+    app: nessus
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nessus
+  template:
+    metadata:
+      labels:
+        app: nessus
+    spec:
+      imagePullSecrets:
+        - name: sfowler-nessus-pull-secret
+      containers:
+        - name: nessus
+          command:
+            - /opt/nessus/sbin/nessus-service
+            - --no-root
+          env:
+            - name: AUTO_UPDATE
+              value: "no"
+          image: quay.io/sfowler/nessus@sha256:5881d6928e52d6c536634aeba0bbb7d5aac2b53e77c17f725e4e5aff0054f772
+          imagePullPolicy: IfNotPresent
+          ports:
+            - containerPort: 8834
+          readinessProbe:
+            exec:
+              command:
+                - /bin/bash
+                - -c
+                - |
+                  #!/bin/bash
+
+                  # curl -ks https://0.0.0.0:8834/server/status | python3 -c 'import sys, json; json.load(sys.stdin)["code"] == 200 or sys.exit(1)'
+                  curl -ks https://0.0.0.0:8834/server/status | python3 -c 'import sys, json; json.load(sys.stdin)["detailed_status"]["login_status"] == "allow" or sys.exit(1)'
+            initialDelaySeconds: 20
+            periodSeconds: 10
+            failureThreshold: 32
+          resources:
+            limits:
+              cpu: 2
+              memory: 4Gi
diff --git a/e2e-tests/manifests/nessus-service.yaml b/e2e-tests/manifests/nessus-service.yaml
new file mode 100644
index 00000000..36116f21
--- /dev/null
+++ b/e2e-tests/manifests/nessus-service.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: nessus
+  name: nessus
+spec:
+  internalTrafficPolicy: Cluster
+  ipFamilies:
+    - IPv4
+  ipFamilyPolicy: SingleStack
+  ports:
+    - port: 8834
+      protocol: TCP
+      targetPort: 8834
+  selector:
+    app: nessus
+  sessionAffinity: None
+  type: ClusterIP
diff --git a/e2e-tests/manifests/rapidast-nessus-configmap.yaml b/e2e-tests/manifests/rapidast-nessus-configmap.yaml
new file mode 100644
index 00000000..fc4d3a8b
--- /dev/null
+++ b/e2e-tests/manifests/rapidast-nessus-configmap.yaml
@@ -0,0 +1,51 @@
+apiVersion: v1
+data:
+  config.yaml: |+
+    config:
+      # WARNING: `configVersion` indicates the schema version of the config file.
+      # This value tells RapiDAST what schema should be used to read this configuration.
+      # Therefore you should only change it if you update the configuration to a newer schema
+      configVersion: 5
+
+      # all the results of all scanners will be stored under that location
+      # base_results_dir: "./results"
+
+    # `application` contains data related to the application, not to the scans.
+    application:
+      shortName: "nessus-test-1.0"
+      # url: "" # XXX unused for nessus
+
+    # `general` is a section that will be applied to all scanners.
+    # Any scanner can override a value by creating an entry of the same name in their own configuration
+    general:
+      # container:
+      #   type: podman
+
+      # remove `authentication` entirely for unauthenticated connection
+      authentication:
+        type: "oauth2_rtoken"
+        parameters:
+          client_id: "cloud-services"
+          token_endpoint: ""
+          # rtoken_from_var: "RTOKEN" # referring to a env defined in general.environ.envFile
+          #preauth: false # set to true to pregenerate a token, and stick to it (no refresh)
+
+    # `scanners' is a section that configures scanning options
+    scanners:
+      nessus_foobar:
+        server:
+          # url: https://10.0.108.143:8834/ # URL of Nessus instance
+          url: https://nessus:8834/ # URL of Nessus instance
+          username_from_var: NESSUS_USER # Nessus credentials
+          password_from_var: NESSUS_PASSWORD
+        scan:
+          name: nessus-test # name of new scan to create
+          folder: nessus-tests # name of folder to contain the scan
+          policy: "discovery" # policy used for scan
+          timeout: 600 # timeout limit in seconds to complete scan
+          targets:
+            - 127.0.0.1
+
+kind: ConfigMap
+metadata:
+  name: rapidast-nessus
diff --git a/e2e-tests/manifests/rapidast-nessus-pod.yaml b/e2e-tests/manifests/rapidast-nessus-pod.yaml
new file mode 100644
index 00000000..5c928b1d
--- /dev/null
+++ b/e2e-tests/manifests/rapidast-nessus-pod.yaml
@@ -0,0 +1,37 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: rapidast-nessus
+spec:
+  containers:
+    - command:
+        - bash
+        - -c
+        - ./rapidast.py
+      env:
+        - name: HOME
+          value: /opt/rapidast
+        - name: NESSUS_USER
+          value: admin
+        - name: NESSUS_PASSWORD
+          value: foobar
+      image: ${IMAGE} # quay.io/redhatproductsecurity/rapidast:latest
+      imagePullPolicy: Always
+      name: rapidast-nessus
+      resources:
+        limits:
+          cpu: 500m
+          memory: 1Gi
+        requests:
+          cpu: 125m
+          memory: 256Mi
+      volumeMounts:
+        - name: config-volume
+          mountPath: /opt/rapidast/config
+  securityContext:
+    supplementalGroups: [1000] # "dast" group, necessary to write to /opt/rapidast/results if no PVC mounted
+  volumes:
+    - name: config-volume
+      configMap:
+        name: rapidast-nessus
+  restartPolicy: Never
diff --git a/e2e-tests/test_integration.py b/e2e-tests/test_integration.py
index f87baa94..c9047934 100644
--- a/e2e-tests/test_integration.py
+++ b/e2e-tests/test_integration.py
@@ -1,161 +1,19 @@
 import json
 import os
 import re
-import shutil
-import tempfile
 
-import certifi
-import pytest
-from kubernetes import client
-from kubernetes import config
-from kubernetes import utils
-from kubernetes import watch
-from kubernetes.client.rest import ApiException
+from conftest import TestBase, tee_log, wait_until_ready
 
-NAMESPACE = os.getenv("RAPIDAST_NAMESPACE", "")  # e.g. rapidast--pipeline
-SERVICEACCOUNT = os.getenv("RAPIDAST_SERVICEACCOUNT", "pipeline")  # name of ServiceAccount used in rapidast pod
-RAPIDAST_IMAGE = os.getenv("RAPIDAST_IMAGE", "quay.io/redhatproductsecurity/rapidast:latest")
-# delete resources created by tests
-RAPIDAST_CLEANUP = os.getenv("RAPIDAST_CLEANUP", "True").lower() in ("true", "1", "t", "y", "yes")
-
-MANIFESTS = "e2e-tests/manifests"
-
-
-# monkeypatch certifi so that internal CAs are trusted
-def where():
-    return os.getenv("REQUESTS_CA_BUNDLE", "/etc/pki/tls/certs/ca-bundle.crt")
-
-
-certifi.where = where
-
-
-@pytest.fixture(name="kclient")
-def fixture_kclient():
-    config.load_config()
-    yield client.ApiClient()
-
-
-def wait_until_ready(**kwargs):
-    corev1 = client.CoreV1Api()
-    w = watch.Watch()
-    for event in w.stream(func=corev1.list_namespaced_pod, namespace=NAMESPACE, timeout_seconds=120, **kwargs):
-        if not isinstance(event, dict):  # Watch.stream() can yield non-dict types
-            continue
-        print(event["object"].metadata.name, event["object"].status.phase)
-        if event["object"].status.phase == "Running":
-            return
-    raise RuntimeError(f"Timeout out waiting for pod matching: {kwargs}")
-
-
-# simulates: $ oc logs -f | tee
-def tee_log(pod_name: str, filename: str):
-    corev1 = client.CoreV1Api()
-    w = watch.Watch()
-    with open(filename, "w", encoding="utf-8") as f:
-        for e in w.stream(corev1.read_namespaced_pod_log, name=pod_name, namespace=NAMESPACE):
-            if not isinstance(e, str):
-                continue  # Watch.stream() can yield non-string types
-            f.write(e + "\n")
-            print(e)
-
-
-def render_manifests(input_dir, output_dir):
-    shutil.copytree(input_dir, output_dir, dirs_exist_ok=True)
-    print(f"rendering manifests in {output_dir}")
-    print(f"using serviceaccount {SERVICEACCOUNT}")
-    # XXX should probably replace this with something like kustomize
-    for filepath in os.scandir(output_dir):
-        with open(filepath, "r", encoding="utf-8") as f:
-            contents = f.read()
-            contents = contents.replace("${IMAGE}", RAPIDAST_IMAGE)
-            contents = contents.replace("${SERVICEACCOUNT}", SERVICEACCOUNT)
-        with open(filepath, "w", encoding="utf-8") as f:
-            f.write(contents)
-
-
-def setup_namespace():
-    global NAMESPACE  # pylint: disable=W0603
-    # only try to create a namespace if env is set
-    if NAMESPACE == "":
-        NAMESPACE = get_current_namespace()
-    else:
-        create_namespace(NAMESPACE)
-    print(f"using namespace '{NAMESPACE}'")
-
-
-def get_current_namespace() -> str:
-    try:
-        # Load the kubeconfig
-        config.load_config()
-
-        # Get the kube config object
-        _, active_context = config.list_kube_config_contexts()
-
-        # Return the namespace from current context
-        if active_context and "namespace" in active_context["context"]:
-            return active_context["context"]["namespace"]
-        return "default"
-
-    except config.config_exception.ConfigException:
-        # If running inside a pod
-        try:
-            with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r", encoding="utf-8") as f:
-                return f.read().strip()
-        except FileNotFoundError:
-            return "default"
-
-
-def create_namespace(namespace_name: str):
-    config.load_config()
-    corev1 = client.CoreV1Api()
-    try:
-        corev1.read_namespace(namespace_name)
-        print(f"namespace {namespace_name} already exists")
-    except ApiException as e:
-        if e.status == 404:
-            print(f"creating namespace {namespace_name}")
-            namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace_name))
-            corev1.create_namespace(namespace)
-        else:
-            raise e
-    except Exception as e:  # pylint: disable=W0718
-        print(f"error reading namespace {namespace_name}: {e}")
-
-
-def cleanup():
-    if RAPIDAST_CLEANUP:
-        os.system(f"kubectl delete -f {MANIFESTS}/")
-        # XXX oobtukbe does not clean up after itself
-        os.system("kubectl delete Task/vulnerable")
-
-
-class TestRapiDAST:
-    @classmethod
-    def setup_class(cls):
-        cls.tempdir = tempfile.mkdtemp()
-        render_manifests(MANIFESTS, cls.tempdir)
-        print(f"testing with image: {RAPIDAST_IMAGE}")
-        setup_namespace()
-        cleanup()
-
-    @classmethod
-    def teardown_class(cls):
-        # TODO teardown should really occur after each test, so the the
-        # resource count does not grown until quota reached
-        cleanup()
-
-    def create_from_yaml(self, kclient, path: str):
-        # simple wrapper to reduce repetition
-        utils.create_from_yaml(kclient, path, namespace=NAMESPACE, verbose=True)
-
-    def test_vapi(self, kclient):
+
+class TestRapiDAST(TestBase):
+    def test_vapi(self):
         """Test rapidast find expected number of findings in VAPI"""
-        self.create_from_yaml(kclient, f"{self.tempdir}/vapi-deployment.yaml")
-        self.create_from_yaml(kclient, f"{self.tempdir}/vapi-service.yaml")
+        self.create_from_yaml(f"{self.tempdir}/vapi-deployment.yaml")
+        self.create_from_yaml(f"{self.tempdir}/vapi-service.yaml")
         wait_until_ready(label_selector="app=vapi")
 
-        self.create_from_yaml(kclient, f"{self.tempdir}/rapidast-vapi-configmap.yaml")
-        self.create_from_yaml(kclient, f"{self.tempdir}/rapidast-vapi-pod.yaml")
+        self.create_from_yaml(f"{self.tempdir}/rapidast-vapi-configmap.yaml")
+        self.create_from_yaml(f"{self.tempdir}/rapidast-vapi-pod.yaml")
         wait_until_ready(field_selector="metadata.name=rapidast-vapi")
 
         logfile = os.path.join(self.tempdir, "rapidast-vapi.log")
@@ -171,9 +29,9 @@ def test_vapi(self, kclient):
 
         assert len(results["site"][0]["alerts"]) == 3
 
-    def test_trivy(self, kclient):
-        self.create_from_yaml(kclient, f"{self.tempdir}/rapidast-trivy-configmap.yaml")
-        self.create_from_yaml(kclient, f"{self.tempdir}/rapidast-trivy-pod.yaml")
+    def test_trivy(self):
+        self.create_from_yaml(f"{self.tempdir}/rapidast-trivy-configmap.yaml")
+        self.create_from_yaml(f"{self.tempdir}/rapidast-trivy-pod.yaml")
         wait_until_ready(field_selector="metadata.name=rapidast-trivy")
 
         logfile = os.path.join(self.tempdir, "rapidast-trivy.log")
@@ -184,12 +42,12 @@ def test_vapi(self, kclient):
             logs = f.read()
         assert expected_line in logs, f"{logfile} does not contain expected line: {expected_line}"
 
-    def test_oobtkube(self, kclient):
-        self.create_from_yaml(kclient, f"{self.tempdir}/task-controller-deployment.yaml")
+    def test_oobtkube(self):
+        self.create_from_yaml(f"{self.tempdir}/task-controller-deployment.yaml")
 
-        self.create_from_yaml(kclient, f"{self.tempdir}/rapidast-oobtkube-configmap.yaml")
-        self.create_from_yaml(kclient, f"{self.tempdir}/rapidast-oobtkube-service.yaml")
-        self.create_from_yaml(kclient, f"{self.tempdir}/rapidast-oobtkube-pod.yaml")
+        self.create_from_yaml(f"{self.tempdir}/rapidast-oobtkube-configmap.yaml")
+        self.create_from_yaml(f"{self.tempdir}/rapidast-oobtkube-service.yaml")
+        self.create_from_yaml(f"{self.tempdir}/rapidast-oobtkube-pod.yaml")
         wait_until_ready(field_selector="metadata.name=rapidast-oobtkube")
 
         logfile = os.path.join(self.tempdir, "rapidast-oobtkube.log")
diff --git a/e2e-tests/test_nessus.py b/e2e-tests/test_nessus.py
new file mode 100644
index 00000000..6e56abd3
--- /dev/null
+++ b/e2e-tests/test_nessus.py
@@ -0,0 +1,30 @@
+import json
+import os
+import re
+
+from conftest import TestBase, tee_log, wait_until_ready
+
+
+class TestNessus(TestBase):
+    def test_nessus(self):
+        """Test that rapidast completes a Nessus scan against a Nessus instance deployed in-cluster"""
+        self.create_from_yaml(f"{self.tempdir}/nessus-deployment.yaml")
+        self.create_from_yaml(f"{self.tempdir}/nessus-service.yaml")
+        wait_until_ready(label_selector="app=nessus", timeout=300)  # nessus is slow to pull and start
+
+        self.create_from_yaml(f"{self.tempdir}/rapidast-nessus-configmap.yaml")
+        self.create_from_yaml(f"{self.tempdir}/rapidast-nessus-pod.yaml")
+        wait_until_ready(field_selector="metadata.name=rapidast-nessus")
+
+        logfile = os.path.join(self.tempdir, "rapidast-nessus.log")
+        tee_log("rapidast-nessus", logfile)
+
+        # # XXX relies on rapidast-nessus pod cat-ing the result json file after execution
+        # with open(logfile, "r", encoding="utf-8") as f:
+        #     logs = f.read()
+        #     pattern = r"^{\s*$.*$"
+        #     matches = re.findall(pattern, logs, re.MULTILINE | re.DOTALL)
+        #     assert matches, f"{logfile} did not contain expected json results"
+        #     results = json.loads(matches[0])
+
+        # assert len(results["site"][0]["alerts"]) == 3
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..6dbd43fb
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,2 @@
+[tool.ruff]
+line-length = 120