diff --git a/docker/celery/entrypoint.sh b/docker/celery/entrypoint.sh index 2aad31b78..68cc60f71 100755 --- a/docker/celery/entrypoint.sh +++ b/docker/celery/entrypoint.sh @@ -43,7 +43,7 @@ watchmedo auto-restart --recursive --pattern="*.py" --directory="/home/rengine/r watchmedo auto-restart --recursive --pattern="*.py" --directory="/home/rengine/rengine/" -- poetry run -C $HOME/ celery -A reNgine.tasks worker --pool=gevent --concurrency=50 --loglevel=$CELERY_LOGLEVEL -Q run_command_queue -n run_command_worker & watchmedo auto-restart --recursive --pattern="*.py" --directory="/home/rengine/rengine/" -- poetry run -C $HOME/ celery -A reNgine.tasks worker --pool=gevent --concurrency=10 --loglevel=$CELERY_LOGLEVEL -Q query_reverse_whois_queue -n query_reverse_whois_worker & watchmedo auto-restart --recursive --pattern="*.py" --directory="/home/rengine/rengine/" -- poetry run -C $HOME/ celery -A reNgine.tasks worker --pool=gevent --concurrency=10 --loglevel=$CELERY_LOGLEVEL -Q query_ip_history_queue -n query_ip_history_worker & -watchmedo auto-restart --recursive --pattern="*.py" --directory="/home/rengine/rengine/" -- poetry run -C $HOME/ celery -A reNgine.tasks worker --pool=gevent --concurrency=30 --loglevel=$CELERY_LOGLEVEL -Q gpt_queue -n gpt_worker & +watchmedo auto-restart --recursive --pattern="*.py" --directory="/home/rengine/rengine/" -- poetry run -C $HOME/ celery -A reNgine.tasks worker --pool=gevent --concurrency=30 --loglevel=$CELERY_LOGLEVEL -Q llm_queue -n llm_worker & watchmedo auto-restart --recursive --pattern="*.py" --directory="/home/rengine/rengine/" -- poetry run -C $HOME/ celery -A reNgine.tasks worker --pool=gevent --concurrency=10 --loglevel=$CELERY_LOGLEVEL -Q dorking_queue -n dorking_worker & watchmedo auto-restart --recursive --pattern="*.py" --directory="/home/rengine/rengine/" -- poetry run -C $HOME/ celery -A reNgine.tasks worker --pool=gevent --concurrency=10 --loglevel=$CELERY_LOGLEVEL -Q osint_discovery_queue -n osint_discovery_worker & watchmedo auto-restart --recursive --pattern="*.py" --directory="/home/rengine/rengine/" -- poetry run -C $HOME/ celery -A reNgine.tasks worker --pool=gevent --concurrency=10 --loglevel=$CELERY_LOGLEVEL -Q h8mail_queue -n h8mail_worker & diff --git a/docker/celery/pyproject.toml b/docker/celery/pyproject.toml index 1c3b78066..8ccc56cdd 100644 --- a/docker/celery/pyproject.toml +++ b/docker/celery/pyproject.toml @@ -11,6 +11,8 @@ aiodns = "3.0.0" argh = "0.26.2" beautifulsoup4 = "4.9.3" celery = "5.4.0" +channels = "3.0.5" +channels-redis = "3.4.1" debugpy = "1.8.5" discord-webhook = "1.3.0" django = "3.2.25" diff --git a/docker/proxy/config/rengine.conf b/docker/proxy/config/rengine.conf index 263c0565a..4a29f386f 100644 --- a/docker/proxy/config/rengine.conf +++ b/docker/proxy/config/rengine.conf @@ -33,6 +33,29 @@ server { alias /home/rengine/rengine/staticfiles/; } + + # WebSocket support + location /ws/ { + proxy_pass http://web:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_read_timeout 86400; # 24 hours + } + + location /api/tool/ollama/ { + proxy_pass http://web:8000; + proxy_http_version 1.1; + proxy_set_header Connection ''; + proxy_buffering off; + proxy_cache off; + proxy_read_timeout 600s; # 10 minutes timeout + } + location /media/ { alias 
/home/rengine/scan_results/; } diff --git a/docker/web/entrypoint.sh b/docker/web/entrypoint.sh index 8721143fc..0d196fe7a 100755 --- a/docker/web/entrypoint.sh +++ b/docker/web/entrypoint.sh @@ -1,9 +1,29 @@ #!/bin/bash -# Collect static files +print_msg() { + printf "\r\n" + printf "========================================\r\n" + printf "$1\r\n" + printf "========================================\r\n\r\n" +} + +print_msg "Generate Django migrations files" +poetry run -C $HOME/ python3 manage.py makemigrations + +print_msg "Migrate database" +poetry run -C $HOME/ python3 manage.py migrate + +print_msg "Collect static files" poetry run -C $HOME/ python3 manage.py collectstatic --noinput -# Run production server -poetry run -C $HOME/ gunicorn reNgine.wsgi:application -w 8 --bind 0.0.0.0:8000 --limit-request-line 0 +print_msg "Starting ASGI server with Uvicorn" +poetry run -C $HOME/ uvicorn reNgine.asgi:application \ + --host 0.0.0.0 \ + --port 8000 \ + --workers 4 \ + --log-level info \ + --ws-ping-interval 20 \ + --ws-ping-timeout 30 \ + --timeout-keep-alive 65 exec "$@" \ No newline at end of file diff --git a/docker/web/pyproject.toml b/docker/web/pyproject.toml index 4b78c33c3..0ead65222 100644 --- a/docker/web/pyproject.toml +++ b/docker/web/pyproject.toml @@ -11,6 +11,8 @@ aiodns = "3.0.0" argh = "0.26.2" beautifulsoup4 = "4.9.3" celery = "5.4.0" +channels = "3.0.5" +channels-redis = "3.4.1" debugpy = "1.8.5" discord-webhook = "1.3.0" django = "3.2.25" @@ -45,6 +47,7 @@ requests = "2.32.2" scapy = "2.4.3" tldextract = "3.5.0" uro = "1.0.0" +uvicorn = { extras = ["standard"], version = "^0.27.1" } validators = "0.18.2" watchdog = "4.0.0" weasyprint = "53.3" diff --git a/web/api/consumers.py b/web/api/consumers.py new file mode 100644 index 000000000..e6ffd1cc9 --- /dev/null +++ b/web/api/consumers.py @@ -0,0 +1,69 @@ +from channels.generic.websocket import WebsocketConsumer +from asgiref.sync import async_to_sync +import json +import re +import logging + +logger = logging.getLogger(__name__) + +class OllamaDownloadConsumer(WebsocketConsumer): + def clean_channel_name(self, name): + """Clean channel name to only contain valid characters""" + return re.sub(r'[^a-zA-Z0-9\-\.]', '-', name) + + def connect(self): + try: + logger.info(f"WebSocket connection attempt with scope: {self.scope}") + self.model_name = self.scope['url_route']['kwargs']['model_name'] + self.room_group_name = f"ollama-download-{self.clean_channel_name(self.model_name)}" + + logger.info(f"Joining group: {self.room_group_name}") + + # Join room group + async_to_sync(self.channel_layer.group_add)( + self.room_group_name, + self.channel_name + ) + + logger.info("WebSocket connection accepted") + self.accept() + + except Exception as e: + logger.error(f"Error in WebSocket connect: {e}") + raise + + def disconnect(self, close_code): + try: + logger.info(f"WebSocket disconnecting with code: {close_code}") + # Leave room group + async_to_sync(self.channel_layer.group_discard)( + self.room_group_name, + self.channel_name + ) + except Exception as e: + logger.error(f"Error in WebSocket disconnect: {e}") + + def receive(self, text_data): + try: + logger.info(f"WebSocket received data: {text_data}") + text_data_json = json.loads(text_data) + message = text_data_json['message'] + + # Send message to room group + async_to_sync(self.channel_layer.group_send)( + self.room_group_name, + { + 'type': 'download_progress', + 'message': message + } + ) + except Exception as e: + logger.error(f"Error in WebSocket receive: {e}") + + def 
download_progress(self, event): + try: + message = event['message'] + # Send message to WebSocket + self.send(text_data=json.dumps(message)) + except Exception as e: + logger.error(f"Error in download_progress: {e}") \ No newline at end of file diff --git a/web/api/tests/test_project.py b/web/api/tests/test_project.py index 3522fcadb..276a022dc 100644 --- a/web/api/tests/test_project.py +++ b/web/api/tests/test_project.py @@ -2,7 +2,6 @@ This file contains the test cases for the API views. """ -from unittest.mock import patch from django.utils import timezone from django.urls import reverse from rest_framework import status @@ -12,7 +11,6 @@ 'TestCreateProjectApi', 'TestAddReconNote', 'TestListTodoNotes', - 'TestGPTAttackSuggestion' ] class TestCreateProjectApi(BaseTestCase): @@ -108,24 +106,3 @@ def test_list_todo_notes(self): self.data_generator.todo_note.scan_history.id, ) -class TestGPTAttackSuggestion(BaseTestCase): - """Tests for the GPT Attack Suggestion API.""" - - def setUp(self): - super().setUp() - self.data_generator.create_project_base() - - @patch("reNgine.gpt.GPTAttackSuggestionGenerator.get_attack_suggestion") - def test_get_attack_suggestion(self, mock_get_suggestion): - """Test getting an attack suggestion for a subdomain.""" - mock_get_suggestion.return_value = { - "status": True, - "description": "Test attack suggestion", - } - api_url = reverse("api:gpt_get_possible_attacks") - response = self.client.get( - api_url, {"subdomain_id": self.data_generator.subdomain.id} - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response.data["status"]) - self.assertEqual(response.data["description"], "Test attack suggestion") diff --git a/web/api/tests/test_tools.py b/web/api/tests/test_tools.py index 60bef6a10..5c688e99a 100644 --- a/web/api/tests/test_tools.py +++ b/web/api/tests/test_tools.py @@ -7,6 +7,8 @@ from rest_framework import status from startScan.models import SubScan from utils.test_base import BaseTestCase +from reNgine.llm import config +from dashboard.models import OllamaSettings __all__ = [ 'TestOllamaManager', @@ -25,6 +27,15 @@ class TestOllamaManager(BaseTestCase): """Tests for the OllamaManager API endpoints.""" + def setUp(self): + """Set up test environment.""" + super().setUp() + self.ollama_settings = OllamaSettings.objects.create( + id=1, + selected_model="llama2", + use_ollama=True + ) + @patch("requests.post") def test_get_download_model(self, mock_post): """Test downloading an Ollama model.""" @@ -34,35 +45,44 @@ def test_get_download_model(self, mock_post): self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(response.data["status"]) - @patch("requests.post") - def test_get_download_model_failure(self, mock_post): - """Test failed downloading of an Ollama model.""" - mock_post.return_value.json.return_value = {"error": "pull model manifest: file does not exist"} - api_url = reverse("api:ollama_manager") - response = self.client.get(api_url, data={"model": "invalid-model"}) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual(response.data["message"], "pull model manifest: file does not exist") - self.assertFalse(response.data["status"]) - @patch("requests.delete") - def test_delete_model(self, mock_delete): + @patch("requests.get") + def test_delete_model(self, mock_get, mock_delete): """Test deleting an Ollama model.""" - mock_delete.return_value.json.return_value = {"status": "success"} - api_url = reverse("api:ollama_manager") - response = self.client.delete( 
- api_url, data={"model": "gpt-4"}, content_type="application/json" - ) + mock_get.return_value.json.return_value = { + "models": [{"name": "llama2"}] + } + mock_delete.return_value.status_code = 200 + + model_name = "llama2" + api_url = reverse("api:ollama_detail_manager", kwargs={"model_name": model_name}) + + response = self.client.delete(api_url) + self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(response.data["status"]) + mock_delete.assert_called_once_with( + f"{config.OLLAMA_INSTANCE}/api/delete", + json={"name": model_name} + ) - def test_put_update_model(self): + @patch("requests.get") + def test_put_update_model(self, mock_get): """Test updating the selected Ollama model.""" - api_url = reverse("api:ollama_manager") - response = self.client.put( - api_url, data={"model": "gpt-4"}, content_type="application/json" - ) + mock_get.return_value.json.return_value = { + "models": [{"name": "gpt-4"}] + } + + model_name = "gpt-4" + api_url = reverse("api:ollama_detail_manager", kwargs={"model_name": model_name}) + + response = self.client.put(api_url) + self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(response.data["status"]) + + updated_settings = OllamaSettings.objects.get(id=1) + self.assertEqual(updated_settings.selected_model, model_name) class TestWafDetector(BaseTestCase): """Tests for the WAF Detector API.""" diff --git a/web/api/tests/test_vulnerability.py b/web/api/tests/test_vulnerability.py index c3cf416a9..52acd348b 100644 --- a/web/api/tests/test_vulnerability.py +++ b/web/api/tests/test_vulnerability.py @@ -10,7 +10,7 @@ __all__ = [ 'TestVulnerabilityViewSet', - 'TestGPTVulnerabilityReportGenerator', + 'TestLLMVulnerabilityReportGenerator', 'TestDeleteVulnerability', 'TestVulnerabilityReport', 'TestFetchMostCommonVulnerability', @@ -79,8 +79,8 @@ def test_list_vulnerabilities_by_severity(self): self.data_generator.vulnerabilities[0].name, ) -class TestGPTVulnerabilityReportGenerator(BaseTestCase): - """Tests for the GPT Vulnerability Report Generator API.""" +class TestLLMVulnerabilityReportGenerator(BaseTestCase): + """Tests for the LLM Vulnerability Report Generator API.""" def setUp(self): super().setUp() @@ -88,7 +88,7 @@ def setUp(self): self.data_generator.create_endpoint() self.data_generator.create_vulnerability() - @patch("reNgine.tasks.gpt_vulnerability_description.apply_async") + @patch("reNgine.tasks.llm_vulnerability_report.apply_async") def test_get_vulnerability_report(self, mock_apply_async): """Test generating a vulnerability report.""" mock_task = MagicMock() @@ -97,7 +97,7 @@ def test_get_vulnerability_report(self, mock_apply_async): "description": "Test vulnerability report", } mock_apply_async.return_value = mock_task - api_url = reverse("api:gpt_vulnerability_report_generator") + api_url = reverse("api:llm_vulnerability_report_generator") response = self.client.get( api_url, {"id": self.data_generator.vulnerabilities[0].id} ) diff --git a/web/api/urls.py b/web/api/urls.py index ec9e1e108..d5ecb164b 100644 --- a/web/api/urls.py +++ b/web/api/urls.py @@ -159,13 +159,21 @@ GfList.as_view(), name='gf_list'), path( - 'tools/gpt_vulnerability_report/', - GPTVulnerabilityReportGenerator.as_view(), - name='gpt_vulnerability_report_generator'), + 'tools/llm_vulnerability_report/', + LLMVulnerabilityReportGenerator.as_view(), + name='llm_vulnerability_report_generator'), path( - 'tools/gpt_get_possible_attacks/', - GPTAttackSuggestion.as_view(), - name='gpt_get_possible_attacks'), + 
'tools/llm_get_possible_attacks/', + LLMAttackSuggestion.as_view(), + name='llm_get_possible_attacks'), + path( + 'tools/llm_models/', + LLMModelsManager.as_view(), + name='llm_models_manager'), + path( + 'tools/available_ollama_models/', + AvailableOllamaModels.as_view(), + name='available_ollama_models'), path( 'github/tool/get_latest_releases/', GithubToolCheckGetLatestRelease.as_view(), @@ -186,6 +194,10 @@ 'tool/ollama/', OllamaManager.as_view(), name='ollama_manager'), + path( + 'tool/ollama/<str:model_name>/', + OllamaDetailManager.as_view(), + name='ollama_detail_manager'), path( 'rengine/update/', RengineUpdateCheck.as_view(), diff --git a/web/api/views.py b/web/api/views.py index 66fe3a8f8..cbd0a41e0 100644 --- a/web/api/views.py +++ b/web/api/views.py @@ -5,10 +5,13 @@ import socket from ipaddress import IPv4Network from collections import defaultdict +from datetime import datetime +import json import requests import validators from django.urls import reverse +from django.core.cache import cache from dashboard.models import OllamaSettings, Project, SearchHistory from django.db.models import CharField, Count, F, Q, Value from django.shortcuts import get_object_or_404 @@ -22,223 +25,486 @@ from rest_framework.views import APIView from rest_framework.status import HTTP_400_BAD_REQUEST from rest_framework.parsers import JSONParser +from rest_framework.decorators import api_view from recon_note.models import TodoNote from reNgine.celery import app from reNgine.common_func import ( - get_data_from_post_request, - get_interesting_endpoints, - get_interesting_subdomains, - get_lookup_keywords, - safe_int_cast + get_data_from_post_request, + get_interesting_endpoints, + get_interesting_subdomains, + get_lookup_keywords, + safe_int_cast, + get_open_ai_key, ) from reNgine.definitions import ( - ABORTED_TASK, - OLLAMA_INSTANCE, - NUCLEI_SEVERITY_MAP, - DEFAULT_GPT_MODELS, - RUNNING_TASK, - SUCCESS_TASK + ABORTED_TASK, + NUCLEI_SEVERITY_MAP, + RUNNING_TASK, + SUCCESS_TASK +) +from reNgine.llm.config import ( + OLLAMA_INSTANCE, + DEFAULT_GPT_MODELS, + MODEL_REQUIREMENTS, + RECOMMENDED_MODELS ) from reNgine.settings import ( - RENGINE_CURRENT_VERSION, - RENGINE_TOOL_GITHUB_PATH + RENGINE_CURRENT_VERSION, + RENGINE_TOOL_GITHUB_PATH ) from reNgine.tasks import ( - create_scan_activity, - gpt_vulnerability_description, - initiate_subscan, - query_ip_history, - query_reverse_whois, - query_whois, - run_cmseek, - run_command, - run_gf_list, - run_wafw00f, - send_hackerone_report + create_scan_activity, + llm_vulnerability_report, + initiate_subscan, + query_ip_history, + query_reverse_whois, + query_whois, + run_cmseek, + run_command, + run_gf_list, + run_wafw00f, + send_hackerone_report ) -from reNgine.gpt import GPTAttackSuggestionGenerator +from reNgine.llm.llm import LLMAttackSuggestionGenerator +from reNgine.llm.utils import convert_markdown_to_html from reNgine.utilities import is_safe_path, remove_lead_and_trail_slash from scanEngine.models import EngineType, InstalledExternalTool from startScan.models import ( - Command, - DirectoryFile, - DirectoryScan, - Dork, - Email, - Employee, - EndPoint, - IpAddress, - MetaFinderDocument, - Port, - ScanActivity, - ScanHistory, - Subdomain, - SubScan, - Technology, - Vulnerability, + Command, + DirectoryFile, + DirectoryScan, + Dork, + Email, + Employee, + EndPoint, + IpAddress, + MetaFinderDocument, + Port, + ScanActivity, + ScanHistory, + Subdomain, + SubScan, + Technology, + Vulnerability, ) from targetApp.models import Domain, Organization from .serializers
import ( - CommandSerializer, - DirectoryFileSerializer, - DirectoryScanSerializer, - DomainSerializer, - DorkCountSerializer, - DorkSerializer, - EmailSerializer, - EmployeeSerializer, - EndpointOnlyURLsSerializer, - EndpointSerializer, - EndPointChangesSerializer, - EngineSerializer, - InterestingEndPointSerializer, - InterestingSubdomainSerializer, - IpSerializer, - IpSubdomainSerializer, - MetafinderDocumentSerializer, - MetafinderUserSerializer, - OnlySubdomainNameSerializer, - OrganizationSerializer, - OrganizationTargetsSerializer, - PortSerializer, - ProjectSerializer, - ReconNoteSerializer, - ScanHistorySerializer, - SearchHistorySerializer, - SubdomainChangesSerializer, - SubdomainSerializer, - SubScanResultSerializer, - SubScanSerializer, - TechnologyCountSerializer, - VisualiseDataSerializer, - VulnerabilitySerializer + CommandSerializer, + DirectoryFileSerializer, + DirectoryScanSerializer, + DomainSerializer, + DorkCountSerializer, + DorkSerializer, + EmailSerializer, + EmployeeSerializer, + EndpointOnlyURLsSerializer, + EndpointSerializer, + EndPointChangesSerializer, + EngineSerializer, + InterestingEndPointSerializer, + InterestingSubdomainSerializer, + IpSerializer, + IpSubdomainSerializer, + MetafinderDocumentSerializer, + MetafinderUserSerializer, + OnlySubdomainNameSerializer, + OrganizationSerializer, + OrganizationTargetsSerializer, + PortSerializer, + ProjectSerializer, + ReconNoteSerializer, + ScanHistorySerializer, + SearchHistorySerializer, + SubdomainChangesSerializer, + SubdomainSerializer, + SubScanResultSerializer, + SubScanSerializer, + TechnologyCountSerializer, + VisualiseDataSerializer, + VulnerabilitySerializer ) +from channels.layers import get_channel_layer +from asgiref.sync import async_to_sync +import threading + logger = logging.getLogger(__name__) class OllamaManager(APIView): + def clean_channel_name(self, name): + """Clean channel name to only contain valid characters""" + # Replace any non-alphanumeric characters with hyphens + clean_name = re.sub(r'[^a-zA-Z0-9\-\.]', '-', name) + return clean_name + def get(self, request): model_name = request.query_params.get('model') if not model_name: - return Response({'status': False, 'message': 'Model name is required'}, status=400) + return Response({'status': False, 'message': 'Model name is required'}) try: - pull_model_api = f'{OLLAMA_INSTANCE}/api/pull' - _response = requests.post( - pull_model_api, - json={'name': model_name, 'stream': False} - ).json() - if _response.get('error'): - return Response({'status': False, 'message': _response.get('error')}, status=400) - return Response({'status': True}) + # Create safe channel name + channel_name = f"ollama-download-{self.clean_channel_name(model_name)}" + channel_layer = get_channel_layer() + + def download_task(): + response = None + session = None + try: + session = requests.Session() + + # Send initial progress + async_to_sync(channel_layer.group_send)( + channel_name, + { + 'type': 'download_progress', + 'message': { + 'status': 'downloading', + 'progress': 0, + 'total': 100, + 'message': 'Starting download...' 
+ } + } + ) + + response = session.post( + f'{OLLAMA_INSTANCE}/api/pull', + json={'name': model_name, 'stream': True}, + stream=True + ) + + for line in response.iter_lines(): + if line: + try: + data = json.loads(line.decode('utf-8')) + logger.debug(f"Ollama response: {data}") + + if 'error' in data: + async_to_sync(channel_layer.group_send)( + channel_name, + { + 'type': 'download_progress', + 'message': { + 'status': 'error', + 'error': data['error'] + } + } + ) + break + + status_data = { + 'status': 'downloading', + 'progress': data.get('completed', 0), + 'total': data.get('total', 100), + 'message': data.get('status', 'Downloading...') + } + + async_to_sync(channel_layer.group_send)( + channel_name, + { + 'type': 'download_progress', + 'message': status_data + } + ) + + if data.get('status') == 'success': + async_to_sync(channel_layer.group_send)( + channel_name, + { + 'type': 'download_progress', + 'message': { + 'status': 'complete', + 'message': 'Download complete!' + } + } + ) + break + + except json.JSONDecodeError as e: + logger.error(f"JSON decode error: {e}") + async_to_sync(channel_layer.group_send)( + channel_name, + { + 'type': 'download_progress', + 'message': { + 'status': 'error', + 'error': 'Invalid response format' + } + } + ) + break + + except Exception as e: + logger.error(f"Download error: {e}") + try: + async_to_sync(channel_layer.group_send)( + channel_name, + { + 'type': 'download_progress', + 'message': { + 'status': 'error', + 'error': str(e) + } + } + ) + except Exception as e2: + logger.error(f"Error sending error message: {e2}") + finally: + if response: + response.close() + if session: + session.close() + + thread = threading.Thread(target=download_task) + thread.daemon = True + thread.start() + + return Response({ + 'status': True, + 'channel': channel_name, + 'message': 'Download started' + }) + except Exception as e: - logger.error(f"Error in OllamaManager GET: {str(e)}") - return Response({'status': False, 'message': 'An error occurred while pulling the model.'}, status=500) - - def delete(self, request): - model_name = get_data_from_post_request(request, 'model') + logger.error(f"Error in OllamaManager: {e}") + return Response({ + 'status': False, + 'error': str(e) + }, status=500) + +class OllamaDetailManager(APIView): + def delete(self, request, model_name): if not model_name: return Response({'status': False, 'message': 'Model name is required'}, status=400) try: delete_model_api = f'{OLLAMA_INSTANCE}/api/delete' - _response = requests.delete( + response = requests.delete( delete_model_api, json={'name': model_name} - ).json() - if _response.get('error'): - return Response({'status': False, 'message': _response.get('error')}, status=400) - return Response({'status': True}) + ) + + # Ollama sends a 200 status code on success + if response.status_code == 200: + return Response({'status': True}) + + # Try to parse the JSON response if it exists + try: + error_data = response.json() + error_message = error_data.get('error', 'Unknown error occurred') + except ValueError: + error_message = response.text or 'Unknown error occurred' + + return Response( + {'status': False, 'message': error_message}, + status=response.status_code + ) + except Exception as e: - logger.error(f"Error in OllamaManager DELETE: {str(e)}") - return Response({'status': False, 'message': 'An error occurred while deleting the model.'}, status=500) + logger.error(f"Error in OllamaDetailManager DELETE: {str(e)}") + return Response( + {'status': False, 'message': 'An error occurred 
while deleting the model.'}, + status=500 + ) - def put(self, request): - model_name = request.data.get('model') + def put(self, request, model_name): if not model_name: return Response({'status': False, 'message': 'Model name is required'}, status=400) - use_ollama = all(model['name'] != model_name for model in DEFAULT_GPT_MODELS) - try: + use_ollama = all(model['name'] != model_name for model in DEFAULT_GPT_MODELS) + OllamaSettings.objects.update_or_create( id=1, defaults={ 'selected_model': model_name, - 'use_ollama': use_ollama, - 'selected': True + 'use_ollama': use_ollama } ) - return Response({'status': True}) + return Response({ + 'status': True, + 'message': 'Model selected successfully' + }) + except Exception as e: + logger.error(f"Error in OllamaDetailManager PUT: {str(e)}") + return Response({ + 'status': False, + 'message': 'An error occurred while updating the model selection.' + }, status=500) + +class AvailableOllamaModels(APIView): + def get(self, request): + try: + cache_key = 'ollama_available_models' + cached_data = cache.get(cache_key) + + if cached_data: + return Response(cached_data) + + # Use recommended models from config + recommended_models = list(RECOMMENDED_MODELS.values()) + + # Check installed models + try: + response = requests.get(f'{OLLAMA_INSTANCE}/api/tags', timeout=5) + if response.status_code == 200: + installed_models = { + model['name']: model + for model in response.json().get('models', []) + } + + # Mark installed models and add their details + for model in recommended_models: + base_name = model['name'] + model['installed_versions'] = [ + name.replace(f"{base_name}:", "") + for name in installed_models.keys() + if name.startswith(base_name) + ] + model['installed'] = len(model['installed_versions']) > 0 + + # Add capabilities from MODEL_REQUIREMENTS if available + if base_name in MODEL_REQUIREMENTS: + model['capabilities'] = MODEL_REQUIREMENTS[base_name] + else: + logger.warning(f"Ollama API returned status {response.status_code}") + for model in recommended_models: + model['installed'] = False + model['installed_versions'] = [] + except requests.exceptions.RequestException as e: + logger.error(f"Error connecting to Ollama API: {str(e)}") + for model in recommended_models: + model['installed'] = False + model['installed_versions'] = [] + + response_data = { + 'status': True, + 'models': recommended_models + } + + cache.set(cache_key, response_data, 300) + return Response(response_data) + + except Exception as e: + logger.error(f"Error in AvailableOllamaModels: {str(e)}") + return Response({ + 'status': False, + 'error': str(e) + }, status=500) + +class LLMAttackSuggestion(APIView): + def get(self, request): + req = request + subdomain_id = safe_int_cast(req.query_params.get('subdomain_id')) + force_regenerate = req.query_params.get('force_regenerate') == 'true' + check_only = req.query_params.get('check_only') == 'true' + selected_model = req.query_params.get('llm_model') # Get selected model from request + + if not subdomain_id: + return Response({ + 'status': False, + 'error': 'Missing GET param Subdomain `subdomain_id`' + }) + + try: + subdomain = Subdomain.objects.get(id=subdomain_id) + except Subdomain.DoesNotExist: + return Response({ + 'status': False, + 'error': f'Subdomain not found with id {subdomain_id}' + }) + + # Return cached result only if not forcing regeneration + if subdomain.attack_surface and not force_regenerate: + sanitized_html = subdomain.formatted_attack_surface + return Response({ + 'status': True, + 'subdomain_name': 
subdomain.name, + 'description': sanitized_html, + 'cached': True + }) + + # If check_only, return without generating new analysis + if check_only: + return Response({ + 'status': True, + 'subdomain_name': subdomain.name, + 'description': None + }) + + # Generate new analysis + ip_addrs = subdomain.ip_addresses.all() + open_ports = ', '.join(f'{port.number}/{port.service_name}' for ip in ip_addrs for port in ip.ports.all()) + tech_used = ', '.join(tech.name for tech in subdomain.technologies.all()) + + input_data = f''' + Subdomain Name: {subdomain.name} + Subdomain Page Title: {subdomain.page_title} + Open Ports: {open_ports} + HTTP Status: {subdomain.http_status} + Technologies Used: {tech_used} + Content type: {subdomain.content_type} + Web Server: {subdomain.webserver} + Page Content Length: {subdomain.content_length} + ''' + + llm = LLMAttackSuggestionGenerator() + response = llm.get_attack_suggestion(input_data, selected_model) # Pass selected model to generator + response['subdomain_name'] = subdomain.name + + if response.get('status'): + # Use the actual selected model name + markdown_content = f'[LLM:{selected_model}]\n{response.get("description")}' + subdomain.attack_surface = markdown_content + subdomain.save() + + response['description'] = convert_markdown_to_html(markdown_content) + + return Response(response) + + def delete(self, request): + subdomain_id = request.query_params.get('subdomain_id') + if not subdomain_id: + return Response({ + 'status': False, + 'error': 'Missing subdomain_id parameter' + }, status=400) + + try: + subdomain = Subdomain.objects.get(id=subdomain_id) + subdomain.attack_surface = None + subdomain.save() + return Response({ + 'status': True, + 'message': 'Attack surface analysis deleted successfully' + }) + except Subdomain.DoesNotExist: + return Response({ + 'status': False, + 'error': f'Subdomain not found with id {subdomain_id}' + }, status=404) except Exception as e: - logger.error(f"Error in OllamaManager PUT: {str(e)}") - return Response({'status': False, 'message': 'An error occurred while updating Ollama settings.'}, status=500) - -class GPTAttackSuggestion(APIView): - def get(self, request): - req = self.request - subdomain_id = safe_int_cast(req.query_params.get('subdomain_id')) - if not subdomain_id: - return Response({ - 'status': False, - 'error': 'Missing GET param Subdomain `subdomain_id`' - }) - try: - subdomain = Subdomain.objects.get(id=subdomain_id) - except Subdomain.DoesNotExist: - return Response({ - 'status': False, - 'error': f'Subdomain not found with id {subdomain_id}' - }) - - if subdomain.attack_surface: - return Response({ - 'status': True, - 'subdomain_name': subdomain.name, - 'description': subdomain.attack_surface - }) - - ip_addrs = subdomain.ip_addresses.all() - open_ports = ', '.join(f'{port.number}/{port.service_name}' for ip in ip_addrs for port in ip.ports.all()) - tech_used = ', '.join(tech.name for tech in subdomain.technologies.all()) - - input_data = f''' - Subdomain Name: {subdomain.name} - Subdomain Page Title: {subdomain.page_title} - Open Ports: {open_ports} - HTTP Status: {subdomain.http_status} - Technologies Used: {tech_used} - Content type: {subdomain.content_type} - Web Server: {subdomain.webserver} - Page Content Length: {subdomain.content_length} - ''' - - gpt = GPTAttackSuggestionGenerator() - response = gpt.get_attack_suggestion(input_data) - response['subdomain_name'] = subdomain.name - - if response.get('status'): - subdomain.attack_surface = response.get('description') - subdomain.save() - - 
return Response(response) - - -class GPTVulnerabilityReportGenerator(APIView): - def get(self, request): - req = self.request - vulnerability_id = safe_int_cast(req.query_params.get('id')) - if not vulnerability_id: - return Response({ - 'status': False, - 'error': 'Missing GET param Vulnerability `id`' - }) - task = gpt_vulnerability_description.apply_async(args=(vulnerability_id,)) - response = task.wait() - return Response(response) + logger.error(f"Error deleting attack surface analysis: {str(e)}") + return Response({ + 'status': False, + 'error': 'An error occurred while deleting the analysis' + }, status=500) + +class LLMVulnerabilityReportGenerator(APIView): + def get(self, request): + req = self.request + vulnerability_id = safe_int_cast(req.query_params.get('id')) + if not vulnerability_id: + return Response({ + 'status': False, + 'error': 'Missing GET param Vulnerability `id`' + }) + task = llm_vulnerability_report.apply_async(args=(vulnerability_id,)) + response = task.wait() + return Response(response) class CreateProjectApi(APIView): @@ -259,164 +525,164 @@ def get(self, request): return Response({'status': False, 'message': 'Failed to create project.'}, status=HTTP_400_BAD_REQUEST) class QueryInterestingSubdomains(APIView): - def get(self, request): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - domain_id = safe_int_cast(req.query_params.get('target_id')) + def get(self, request): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + domain_id = safe_int_cast(req.query_params.get('target_id')) - if scan_id: - queryset = get_interesting_subdomains(scan_history=scan_id) - elif domain_id: - queryset = get_interesting_subdomains(domain_id=domain_id) - else: - queryset = get_interesting_subdomains() + if scan_id: + queryset = get_interesting_subdomains(scan_history=scan_id) + elif domain_id: + queryset = get_interesting_subdomains(domain_id=domain_id) + else: + queryset = get_interesting_subdomains() - queryset = queryset.distinct('name') + queryset = queryset.distinct('name') - return Response(InterestingSubdomainSerializer(queryset, many=True).data) + return Response(InterestingSubdomainSerializer(queryset, many=True).data) class ListTargetsDatatableViewSet(viewsets.ModelViewSet): - queryset = Domain.objects.all() - serializer_class = DomainSerializer - - def get_queryset(self): - slug = self.request.GET.get('slug', None) - if slug: - self.queryset = self.queryset.filter(project__slug=slug) - return self.queryset - - def filter_queryset(self, qs): - qs = self.queryset.filter() - search_value = self.request.GET.get(u'search[value]', None) - _order_col = self.request.GET.get(u'order[0][column]', None) - _order_direction = self.request.GET.get(u'order[0][dir]', None) - if search_value or _order_col or _order_direction: - order_col = 'id' - if _order_col == '2': - order_col = 'name' - elif _order_col == '4': - order_col = 'insert_date' - elif _order_col == '5': - order_col = 'start_scan_date' - if _order_direction == 'desc': - return qs.order_by(F('start_scan_date').desc(nulls_last=True)) - return qs.order_by(F('start_scan_date').asc(nulls_last=True)) - - - if _order_direction == 'desc': - order_col = f'-{order_col}' - - qs = self.queryset.filter( - Q(name__icontains=search_value) | - Q(description__icontains=search_value) | - Q(domains__name__icontains=search_value) - ) - return qs.order_by(order_col) - - return qs.order_by('-id') + queryset = Domain.objects.all() + serializer_class = DomainSerializer + def 
get_queryset(self): + slug = self.request.GET.get('slug', None) + if slug: + self.queryset = self.queryset.filter(project__slug=slug) + return self.queryset + + def filter_queryset(self, qs): + qs = self.queryset.filter() + search_value = self.request.GET.get(u'search[value]', None) + _order_col = self.request.GET.get(u'order[0][column]', None) + _order_direction = self.request.GET.get(u'order[0][dir]', None) + if search_value or _order_col or _order_direction: + order_col = 'id' + if _order_col == '2': + order_col = 'name' + elif _order_col == '4': + order_col = 'insert_date' + elif _order_col == '5': + order_col = 'start_scan_date' + if _order_direction == 'desc': + return qs.order_by(F('start_scan_date').desc(nulls_last=True)) + return qs.order_by(F('start_scan_date').asc(nulls_last=True)) + + + if _order_direction == 'desc': + order_col = f'-{order_col}' + + qs = self.queryset.filter( + Q(name__icontains=search_value) | + Q(description__icontains=search_value) | + Q(domains__name__icontains=search_value) + ) + return qs.order_by(order_col) + return qs.order_by('-id') -class WafDetector(APIView): - def get(self, request): - req = self.request - url = req.query_params.get('url') - response = { - 'status': False, - 'message': '', - 'results': None - } - - if not url: - response['message'] = 'URL parameter is missing' - return Response(response) - - try: - logger.debug(f"Initiating WAF detection for URL: {url}") - result = run_wafw00f.delay(url).get(timeout=30) - - if result.startswith("Unexpected error"): - response['message'] = result - elif result != "No WAF detected": - response['status'] = True - response['results'] = result - else: - response['message'] = 'Could not detect any WAF!' - - logger.debug(f"WAF detection result: {response}") - except Exception as e: - logger.error(f"Error during WAF detection: {str(e)}") - response['message'] = "An unexpected error occurred. Please try again later." - - return Response(response) - -class SearchHistoryView(APIView): - def get(self, request): - req = self.request - - response = {} - response['status'] = False - scan_history = SearchHistory.objects.all().order_by('-id')[:5] - if scan_history: - response['status'] = True - response['results'] = SearchHistorySerializer(scan_history, many=True).data +class WafDetector(APIView): + def get(self, request): + req = self.request + url = req.query_params.get('url') + response = { + 'status': False, + 'message': '', + 'results': None + } - return Response(response) + if not url: + response['message'] = 'URL parameter is missing' + return Response(response) + try: + logger.debug(f"Initiating WAF detection for URL: {url}") + result = run_wafw00f.delay(url).get(timeout=30) -class UniversalSearch(APIView): - def get(self, request): - req = self.request - query = req.query_params.get('query') + if result.startswith("Unexpected error"): + response['message'] = result + elif result != "No WAF detected": + response['status'] = True + response['results'] = result + else: + response['message'] = 'Could not detect any WAF!' - response = {} - response['status'] = False + logger.debug(f"WAF detection result: {response}") + except Exception as e: + logger.error(f"Error during WAF detection: {str(e)}") + response['message'] = "An unexpected error occurred. Please try again later." - if not query: - response['message'] = 'No query parameter provided!' 
- return Response(response) + return Response(response) - response['results'] = {} +class SearchHistoryView(APIView): + def get(self, request): + req = self.request - # search history to be saved - SearchHistory.objects.get_or_create( - query=query - ) + response = {} + response['status'] = False - # lookup query in subdomain - subdomain = Subdomain.objects.filter( - Q(name__icontains=query) | - Q(cname__icontains=query) | - Q(page_title__icontains=query) | - Q(http_url__icontains=query) - ).distinct('name') - subdomain_data = SubdomainSerializer(subdomain, many=True).data - response['results']['subdomains'] = subdomain_data + scan_history = SearchHistory.objects.all().order_by('-id')[:5] - endpoint = EndPoint.objects.filter( - Q(http_url__icontains=query) | - Q(page_title__icontains=query) - ).distinct('http_url') - endpoint_data = EndpointSerializer(endpoint, many=True).data - response['results']['endpoints'] = endpoint_data + if scan_history: + response['status'] = True + response['results'] = SearchHistorySerializer(scan_history, many=True).data - vulnerability = Vulnerability.objects.filter( - Q(http_url__icontains=query) | - Q(name__icontains=query) | - Q(description__icontains=query) - ).distinct() - vulnerability_data = VulnerabilitySerializer(vulnerability, many=True).data - response['results']['vulnerabilities'] = vulnerability_data + return Response(response) - response['results']['others'] = {} - if subdomain_data or endpoint_data or vulnerability_data: - response['status'] = True +class UniversalSearch(APIView): + def get(self, request): + req = self.request + query = req.query_params.get('query') + + response = {} + response['status'] = False + + if not query: + response['message'] = 'No query parameter provided!' + return Response(response) + + response['results'] = {} + + # search history to be saved + SearchHistory.objects.get_or_create( + query=query + ) + + # lookup query in subdomain + subdomain = Subdomain.objects.filter( + Q(name__icontains=query) | + Q(cname__icontains=query) | + Q(page_title__icontains=query) | + Q(http_url__icontains=query) + ).distinct('name') + subdomain_data = SubdomainSerializer(subdomain, many=True).data + response['results']['subdomains'] = subdomain_data + + endpoint = EndPoint.objects.filter( + Q(http_url__icontains=query) | + Q(page_title__icontains=query) + ).distinct('http_url') + endpoint_data = EndpointSerializer(endpoint, many=True).data + response['results']['endpoints'] = endpoint_data + + vulnerability = Vulnerability.objects.filter( + Q(http_url__icontains=query) | + Q(name__icontains=query) | + Q(description__icontains=query) + ).distinct() + vulnerability_data = VulnerabilitySerializer(vulnerability, many=True).data + response['results']['vulnerabilities'] = vulnerability_data + + response['results']['others'] = {} + + if subdomain_data or endpoint_data or vulnerability_data: + response['status'] = True - return Response(response) + return Response(response) class FetchMostCommonVulnerability(APIView): @@ -468,2101 +734,2101 @@ def post(self, request): return Response(response) class FetchMostVulnerable(APIView): - def post(self, request): - req = self.request - data = req.data - - project_slug = data.get('slug') - scan_history_id = safe_int_cast(data.get('scan_history_id')) - target_id = safe_int_cast(data.get('target_id')) - limit = safe_int_cast(data.get('limit', 20)) - is_ignore_info = data.get('ignore_info', False) - - response = {} - response['status'] = False - - if project_slug: - project = 
Project.objects.get(slug=project_slug) - subdomains = Subdomain.objects.filter(target_domain__project=project) - domains = Domain.objects.filter(project=project) - else: - subdomains = Subdomain.objects.all() - domains = Domain.objects.all() - - if scan_history_id: - subdomain_query = subdomains.filter(scan_history__id=scan_history_id) - if is_ignore_info: - most_vulnerable_subdomains = ( - subdomain_query - .annotate( - vuln_count=Count('vulnerability__name', filter=~Q(vulnerability__severity=0)) - ) - .order_by('-vuln_count') - .exclude(vuln_count=0)[:limit] - ) - else: - most_vulnerable_subdomains = ( - subdomain_query - .annotate(vuln_count=Count('vulnerability__name')) - .order_by('-vuln_count') - .exclude(vuln_count=0)[:limit] - ) - - if most_vulnerable_subdomains: - response['status'] = True - response['result'] = ( - SubdomainSerializer( - most_vulnerable_subdomains, - many=True) - .data - ) - - elif target_id: - subdomain_query = subdomains.filter(target_domain__id=target_id) - if is_ignore_info: - most_vulnerable_subdomains = ( - subdomain_query - .annotate(vuln_count=Count('vulnerability__name', filter=~Q(vulnerability__severity=0))) - .order_by('-vuln_count') - .exclude(vuln_count=0)[:limit] - ) - else: - most_vulnerable_subdomains = ( - subdomain_query - .annotate(vuln_count=Count('vulnerability__name')) - .order_by('-vuln_count') - .exclude(vuln_count=0)[:limit] - ) - - if most_vulnerable_subdomains: - response['status'] = True - response['result'] = ( - SubdomainSerializer( - most_vulnerable_subdomains, - many=True) - .data - ) - else: - if is_ignore_info: - most_vulnerable_targets = ( - domains - .annotate(vuln_count=Count('subdomain__vulnerability__name', filter=~Q(subdomain__vulnerability__severity=0))) - .order_by('-vuln_count') - .exclude(vuln_count=0)[:limit] - ) - else: - most_vulnerable_targets = ( - domains - .annotate(vuln_count=Count('subdomain__vulnerability__name')) - .order_by('-vuln_count') - .exclude(vuln_count=0)[:limit] - ) - - if most_vulnerable_targets: - response['status'] = True - response['result'] = ( - DomainSerializer( - most_vulnerable_targets, - many=True) - .data - ) - - return Response(response) + def post(self, request): + req = self.request + data = req.data + + project_slug = data.get('slug') + scan_history_id = safe_int_cast(data.get('scan_history_id')) + target_id = safe_int_cast(data.get('target_id')) + limit = safe_int_cast(data.get('limit', 20)) + is_ignore_info = data.get('ignore_info', False) + + response = {} + response['status'] = False + + if project_slug: + project = Project.objects.get(slug=project_slug) + subdomains = Subdomain.objects.filter(target_domain__project=project) + domains = Domain.objects.filter(project=project) + else: + subdomains = Subdomain.objects.all() + domains = Domain.objects.all() + + if scan_history_id: + subdomain_query = subdomains.filter(scan_history__id=scan_history_id) + if is_ignore_info: + most_vulnerable_subdomains = ( + subdomain_query + .annotate( + vuln_count=Count('vulnerability__name', filter=~Q(vulnerability__severity=0)) + ) + .order_by('-vuln_count') + .exclude(vuln_count=0)[:limit] + ) + else: + most_vulnerable_subdomains = ( + subdomain_query + .annotate(vuln_count=Count('vulnerability__name')) + .order_by('-vuln_count') + .exclude(vuln_count=0)[:limit] + ) + + if most_vulnerable_subdomains: + response['status'] = True + response['result'] = ( + SubdomainSerializer( + most_vulnerable_subdomains, + many=True) + .data + ) + + elif target_id: + subdomain_query = 
subdomains.filter(target_domain__id=target_id) + if is_ignore_info: + most_vulnerable_subdomains = ( + subdomain_query + .annotate(vuln_count=Count('vulnerability__name', filter=~Q(vulnerability__severity=0))) + .order_by('-vuln_count') + .exclude(vuln_count=0)[:limit] + ) + else: + most_vulnerable_subdomains = ( + subdomain_query + .annotate(vuln_count=Count('vulnerability__name')) + .order_by('-vuln_count') + .exclude(vuln_count=0)[:limit] + ) + + if most_vulnerable_subdomains: + response['status'] = True + response['result'] = ( + SubdomainSerializer( + most_vulnerable_subdomains, + many=True) + .data + ) + else: + if is_ignore_info: + most_vulnerable_targets = ( + domains + .annotate(vuln_count=Count('subdomain__vulnerability__name', filter=~Q(subdomain__vulnerability__severity=0))) + .order_by('-vuln_count') + .exclude(vuln_count=0)[:limit] + ) + else: + most_vulnerable_targets = ( + domains + .annotate(vuln_count=Count('subdomain__vulnerability__name')) + .order_by('-vuln_count') + .exclude(vuln_count=0)[:limit] + ) + + if most_vulnerable_targets: + response['status'] = True + response['result'] = ( + DomainSerializer( + most_vulnerable_targets, + many=True) + .data + ) + + return Response(response) class CVEDetails(APIView): - def get(self, request): - req = self.request + def get(self, request): + req = self.request - cve_id = req.query_params.get('cve_id') + cve_id = req.query_params.get('cve_id') - if not cve_id: - return Response({'status': False, 'message': 'CVE ID not provided'}) + if not cve_id: + return Response({'status': False, 'message': 'CVE ID not provided'}) - response = requests.get('https://cve.circl.lu/api/cve/' + cve_id) + response = requests.get('https://cve.circl.lu/api/cve/' + cve_id) - if response.status_code != 200: - return Response({'status': False, 'message': 'Unknown Error Occured!'}) + if response.status_code != 200: + return Response({'status': False, 'message': 'Unknown Error Occured!'}) - if not response.json(): - return Response({'status': False, 'message': 'CVE ID does not exists.'}) + if not response.json(): + return Response({'status': False, 'message': 'CVE ID does not exists.'}) - return Response({'status': True, 'result': response.json()}) + return Response({'status': True, 'result': response.json()}) class AddReconNote(APIView): - def post(self, request): - req = self.request - data = req.data - - subdomain_id = safe_int_cast(data.get('subdomain_id')) - scan_history_id = safe_int_cast(data.get('scan_history_id')) - title = data.get('title') - description = data.get('description') - project = data.get('project') + def post(self, request): + req = self.request + data = req.data + + subdomain_id = safe_int_cast(data.get('subdomain_id')) + scan_history_id = safe_int_cast(data.get('scan_history_id')) + title = data.get('title') + description = data.get('description') + project = data.get('project') - if not title: - return Response({"status": False, "error": "Title is required."}, status=400) - if not project: - return Response({"status": False, "error": "Project is required."}, status=400) - - - try: - project = Project.objects.get(slug=project) - note = TodoNote() - note.title = title - note.description = description - - if scan_history_id: - scan_history = ScanHistory.objects.get(id=scan_history_id) - note.scan_history = scan_history - - # get scan history for subdomain_id - if subdomain_id: - subdomain = Subdomain.objects.get(id=subdomain_id) - note.subdomain = subdomain - - # also get scan history - scan_history_id = subdomain.scan_history.id - 
scan_history = ScanHistory.objects.get(id=scan_history_id) - note.scan_history = scan_history - - note.project = project - note.save() - return Response({"status": True, "error": False, "id": note.id}, status=200) - except Exception as e: - logger.error(e) - return Response({"status": False, "error": "An error occurred."}, status=400) + if not title: + return Response({"status": False, "error": "Title is required."}, status=400) + if not project: + return Response({"status": False, "error": "Project is required."}, status=400) + + + try: + project = Project.objects.get(slug=project) + note = TodoNote() + note.title = title + note.description = description + + if scan_history_id: + scan_history = ScanHistory.objects.get(id=scan_history_id) + note.scan_history = scan_history + + # get scan history for subdomain_id + if subdomain_id: + subdomain = Subdomain.objects.get(id=subdomain_id) + note.subdomain = subdomain + + # also get scan history + scan_history_id = subdomain.scan_history.id + scan_history = ScanHistory.objects.get(id=scan_history_id) + note.scan_history = scan_history + + note.project = project + note.save() + return Response({"status": True, "error": False, "id": note.id}, status=200) + except Exception as e: + logger.error(e) + return Response({"status": False, "error": "An error occurred."}, status=400) class ToggleSubdomainImportantStatus(APIView): - def post(self, request): - req = self.request - data = req.data + def post(self, request): + req = self.request + data = req.data - subdomain_id = safe_int_cast(data.get('subdomain_id')) + subdomain_id = safe_int_cast(data.get('subdomain_id')) - response = {'status': False, 'message': 'No subdomain_id provided'} + response = {'status': False, 'message': 'No subdomain_id provided'} - name = Subdomain.objects.get(id=subdomain_id) - name.is_important = not name.is_important - name.save() + name = Subdomain.objects.get(id=subdomain_id) + name.is_important = not name.is_important + name.save() - response = {'status': True} + response = {'status': True} - return Response(response) + return Response(response) class AddTarget(APIView): - def post(self, request): - req = self.request - data = req.data - h1_team_handle = data.get('h1_team_handle') - description = data.get('description') - domain_name = data.get('domain_name') - organization_name = data.get('organization') - slug = data.get('slug') - - # Validate domain name - if not validators.domain(domain_name): - return Response({'status': False, 'message': 'Invalid domain or IP'}, status=400) - - project = Project.objects.get(slug=slug) - - # Check if the domain already exists - if Domain.objects.filter(name=domain_name, project=project).exists(): - return Response({'status': False, 'message': 'Domain already exists as a target!'}, status=400) - - # Create domain object in DB - domain, _ = Domain.objects.get_or_create(name=domain_name) - domain.project = project - domain.h1_team_handle = h1_team_handle - domain.description = description - if not domain.insert_date: - domain.insert_date = timezone.now() - domain.save() - - # Create org object in DB - if organization_name: - organization_obj = None - organization_query = Organization.objects.filter(name=organization_name) - if organization_query.exists(): - organization_obj = organization_query[0] - else: - organization_obj = Organization.objects.create( - name=organization_name, - project=project, - insert_date=timezone.now()) - organization_obj.domains.add(domain) - - return Response({ - 'status': True, - 'message': 'Domain 
successfully added as target!', - 'domain_name': domain_name, - 'domain_id': domain.id, - 'initiate_scan_url': reverse('start_scan', kwargs={'slug': slug, 'domain_id': domain.id}) - }) + def post(self, request): + req = self.request + data = req.data + h1_team_handle = data.get('h1_team_handle') + description = data.get('description') + domain_name = data.get('domain_name') + organization_name = data.get('organization') + slug = data.get('slug') + + # Validate domain name + if not validators.domain(domain_name): + return Response({'status': False, 'message': 'Invalid domain or IP'}, status=400) + + project = Project.objects.get(slug=slug) + + # Check if the domain already exists + if Domain.objects.filter(name=domain_name, project=project).exists(): + return Response({'status': False, 'message': 'Domain already exists as a target!'}, status=400) + + # Create domain object in DB + domain, _ = Domain.objects.get_or_create(name=domain_name) + domain.project = project + domain.h1_team_handle = h1_team_handle + domain.description = description + if not domain.insert_date: + domain.insert_date = timezone.now() + domain.save() + + # Create org object in DB + if organization_name: + organization_obj = None + organization_query = Organization.objects.filter(name=organization_name) + if organization_query.exists(): + organization_obj = organization_query[0] + else: + organization_obj = Organization.objects.create( + name=organization_name, + project=project, + insert_date=timezone.now()) + organization_obj.domains.add(domain) + + return Response({ + 'status': True, + 'message': 'Domain successfully added as target!', + 'domain_name': domain_name, + 'domain_id': domain.id, + 'initiate_scan_url': reverse('start_scan', kwargs={'slug': slug, 'domain_id': domain.id}) + }) class FetchSubscanResults(APIView): - def get(self, request): - req = self.request - # data = req.data - subscan_id = safe_int_cast(req.query_params.get('subscan_id')) - subscan = SubScan.objects.filter(id=subscan_id) - if not subscan.exists(): - return Response({ - 'status': False, - 'error': f'Subscan {subscan_id} does not exist' - }) + def get(self, request): + req = self.request + # data = req.data + subscan_id = safe_int_cast(req.query_params.get('subscan_id')) + subscan = SubScan.objects.filter(id=subscan_id) + if not subscan.exists(): + return Response({ + 'status': False, + 'error': f'Subscan {subscan_id} does not exist' + }) - subscan_data = SubScanResultSerializer(subscan.first(), many=False).data - task_name = subscan_data['type'] - subscan_results = [] + subscan_data = SubScanResultSerializer(subscan.first(), many=False).data + task_name = subscan_data['type'] + subscan_results = [] - if task_name == 'port_scan': - ips_in_subscan = IpAddress.objects.filter(ip_subscan_ids__in=subscan) - subscan_results = IpSerializer(ips_in_subscan, many=True).data + if task_name == 'port_scan': + ips_in_subscan = IpAddress.objects.filter(ip_subscan_ids__in=subscan) + subscan_results = IpSerializer(ips_in_subscan, many=True).data - elif task_name == 'vulnerability_scan': - vulns_in_subscan = Vulnerability.objects.filter(vuln_subscan_ids__in=subscan) - subscan_results = VulnerabilitySerializer(vulns_in_subscan, many=True).data + elif task_name == 'vulnerability_scan': + vulns_in_subscan = Vulnerability.objects.filter(vuln_subscan_ids__in=subscan) + subscan_results = VulnerabilitySerializer(vulns_in_subscan, many=True).data - elif task_name == 'fetch_url': - endpoints_in_subscan = EndPoint.objects.filter(endpoint_subscan_ids__in=subscan) - 
subscan_results = EndpointSerializer(endpoints_in_subscan, many=True).data + elif task_name == 'fetch_url': + endpoints_in_subscan = EndPoint.objects.filter(endpoint_subscan_ids__in=subscan) + subscan_results = EndpointSerializer(endpoints_in_subscan, many=True).data - elif task_name == 'dir_file_fuzz': - dirs_in_subscan = DirectoryScan.objects.filter(dir_subscan_ids__in=subscan) - subscan_results = DirectoryScanSerializer(dirs_in_subscan, many=True).data + elif task_name == 'dir_file_fuzz': + dirs_in_subscan = DirectoryScan.objects.filter(dir_subscan_ids__in=subscan) + subscan_results = DirectoryScanSerializer(dirs_in_subscan, many=True).data - elif task_name == 'subdomain_discovery': - subdomains_in_subscan = Subdomain.objects.filter(subdomain_subscan_ids__in=subscan) - subscan_results = SubdomainSerializer(subdomains_in_subscan, many=True).data + elif task_name == 'subdomain_discovery': + subdomains_in_subscan = Subdomain.objects.filter(subdomain_subscan_ids__in=subscan) + subscan_results = SubdomainSerializer(subdomains_in_subscan, many=True).data - elif task_name == 'screenshot': - subdomains_in_subscan = Subdomain.objects.filter(subdomain_subscan_ids__in=subscan, screenshot_path__isnull=False) - subscan_results = SubdomainSerializer(subdomains_in_subscan, many=True).data + elif task_name == 'screenshot': + subdomains_in_subscan = Subdomain.objects.filter(subdomain_subscan_ids__in=subscan, screenshot_path__isnull=False) + subscan_results = SubdomainSerializer(subdomains_in_subscan, many=True).data - logger.info(subscan_data) - logger.info(subscan_results) + logger.info(subscan_data) + logger.info(subscan_results) - return Response({'subscan': subscan_data, 'result': subscan_results, 'endpoint_url': reverse('api:endpoints-list'), 'vulnerability_url': reverse('api:vulnerabilities-list')}) + return Response({'subscan': subscan_data, 'result': subscan_results, 'endpoint_url': reverse('api:endpoints-list'), 'vulnerability_url': reverse('api:vulnerabilities-list')}) class ListSubScans(APIView): - def post(self, request): - req = self.request - data = req.data - subdomain_id = safe_int_cast(data.get('subdomain_id', None)) - scan_history = safe_int_cast(data.get('scan_history_id', None)) - domain_id = safe_int_cast(data.get('domain_id', None)) - response = {} - response['status'] = False - - if subdomain_id: - subscans = ( - SubScan.objects - .filter(subdomain__id=subdomain_id) - .order_by('-stop_scan_date') - ) - results = SubScanSerializer(subscans, many=True).data - if subscans: - response['status'] = True - response['results'] = results - - elif scan_history: - subscans = ( - SubScan.objects - .filter(scan_history__id=scan_history) - .order_by('-stop_scan_date') - ) - results = SubScanSerializer(subscans, many=True).data - if subscans: - response['status'] = True - response['results'] = results - - elif domain_id: - scan_history = ScanHistory.objects.filter(domain__id=domain_id) - subscans = ( - SubScan.objects - .filter(scan_history__in=scan_history) - .order_by('-stop_scan_date') - ) - results = SubScanSerializer(subscans, many=True).data - if subscans: - response['status'] = True - response['results'] = results - - return Response(response) + def post(self, request): + req = self.request + data = req.data + subdomain_id = safe_int_cast(data.get('subdomain_id', None)) + scan_history = safe_int_cast(data.get('scan_history_id', None)) + domain_id = safe_int_cast(data.get('domain_id', None)) + response = {} + response['status'] = False + + if subdomain_id: + subscans = ( + SubScan.objects 
+ .filter(subdomain__id=subdomain_id) + .order_by('-stop_scan_date') + ) + results = SubScanSerializer(subscans, many=True).data + if subscans: + response['status'] = True + response['results'] = results + + elif scan_history: + subscans = ( + SubScan.objects + .filter(scan_history__id=scan_history) + .order_by('-stop_scan_date') + ) + results = SubScanSerializer(subscans, many=True).data + if subscans: + response['status'] = True + response['results'] = results + + elif domain_id: + scan_history = ScanHistory.objects.filter(domain__id=domain_id) + subscans = ( + SubScan.objects + .filter(scan_history__in=scan_history) + .order_by('-stop_scan_date') + ) + results = SubScanSerializer(subscans, many=True).data + if subscans: + response['status'] = True + response['results'] = results + + return Response(response) class DeleteMultipleRows(APIView): - def post(self, request): - req = self.request - data = req.data - subscan_ids = get_data_from_post_request(request, 'rows') - try: - if data['type'] == 'subscan': - subscan_ids = [int(id) for id in subscan_ids] - SubScan.objects.filter(id__in=subscan_ids).delete() - return Response({'status': True}) - except ValueError: - return Response({'status': False, 'message': 'Invalid subscan ID provided'}, status=400) - except Exception as e: - return Response({'status': False, 'message': logger.debug(e)}, status=500) + def post(self, request): + req = self.request + data = req.data + subscan_ids = get_data_from_post_request(request, 'rows') + try: + if data['type'] == 'subscan': + subscan_ids = [int(id) for id in subscan_ids] + SubScan.objects.filter(id__in=subscan_ids).delete() + return Response({'status': True}) + except ValueError: + return Response({'status': False, 'message': 'Invalid subscan ID provided'}, status=400) + except Exception as e: + return Response({'status': False, 'message': logger.debug(e)}, status=500) class StopScan(APIView): - def post(self, request): - req = self.request - data = req.data - scan_id = safe_int_cast(data.get('scan_id')) - subscan_id = safe_int_cast(data.get('subscan_id')) - response = {} - task_ids = [] - scan = None - subscan = None - if subscan_id: - try: - subscan = get_object_or_404(SubScan, id=subscan_id) - scan = subscan.scan_history - task_ids = subscan.celery_ids - subscan.status = ABORTED_TASK - subscan.stop_scan_date = timezone.now() - subscan.save() - create_scan_activity( - subscan.scan_history.id, - f'Subscan {subscan_id} aborted', - SUCCESS_TASK) - response['status'] = True - except Exception as e: - logging.error(e) - response = {'status': False, 'message': str(e)} - elif scan_id: - try: - scan = get_object_or_404(ScanHistory, id=scan_id) - task_ids = scan.celery_ids - scan.scan_status = ABORTED_TASK - scan.stop_scan_date = timezone.now() - scan.aborted_by = request.user - scan.save() - create_scan_activity( - scan.id, - "Scan aborted", - SUCCESS_TASK) - response['status'] = True - except Exception as e: - logging.error(e) - response = {'status': False, 'message': str(e)} - - logger.warning(f'Revoking tasks {task_ids}') - for task_id in task_ids: - app.control.revoke(task_id, terminate=True, signal='SIGKILL') - - # Abort running tasks - tasks = ( - ScanActivity.objects - .filter(scan_of=scan) - .filter(status=RUNNING_TASK) - .order_by('-pk') - ) - if tasks.exists(): - for task in tasks: - if subscan_id and task.id not in subscan.celery_ids: - continue - task.status = ABORTED_TASK - task.time = timezone.now() - task.save() - - return Response(response) + def post(self, request): + req = self.request + 
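# A minimal sketch of the task-revocation pattern StopScan relies on above:
# every Celery id recorded on the scan or subscan is revoked with
# terminate=True and SIGKILL, so running tool invocations are killed rather
# than merely flagged as aborted. `app` here is a stand-in for the Celery
# application object that this module imports as `app`; the ids are illustrative.
from celery import Celery

app = Celery('reNgine')

def revoke_tasks(task_ids):
    """Revoke every Celery task id in the list, killing any running worker process."""
    for task_id in task_ids:
        app.control.revoke(task_id, terminate=True, signal='SIGKILL')

# e.g. (hypothetical ids): revoke_tasks(['a1b2c3', 'd4e5f6'])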
data = req.data + scan_id = safe_int_cast(data.get('scan_id')) + subscan_id = safe_int_cast(data.get('subscan_id')) + response = {} + task_ids = [] + scan = None + subscan = None + if subscan_id: + try: + subscan = get_object_or_404(SubScan, id=subscan_id) + scan = subscan.scan_history + task_ids = subscan.celery_ids + subscan.status = ABORTED_TASK + subscan.stop_scan_date = timezone.now() + subscan.save() + create_scan_activity( + subscan.scan_history.id, + f'Subscan {subscan_id} aborted', + SUCCESS_TASK) + response['status'] = True + except Exception as e: + logging.error(e) + response = {'status': False, 'message': str(e)} + elif scan_id: + try: + scan = get_object_or_404(ScanHistory, id=scan_id) + task_ids = scan.celery_ids + scan.scan_status = ABORTED_TASK + scan.stop_scan_date = timezone.now() + scan.aborted_by = request.user + scan.save() + create_scan_activity( + scan.id, + "Scan aborted", + SUCCESS_TASK) + response['status'] = True + except Exception as e: + logging.error(e) + response = {'status': False, 'message': str(e)} + + logger.warning(f'Revoking tasks {task_ids}') + for task_id in task_ids: + app.control.revoke(task_id, terminate=True, signal='SIGKILL') + + # Abort running tasks + tasks = ( + ScanActivity.objects + .filter(scan_of=scan) + .filter(status=RUNNING_TASK) + .order_by('-pk') + ) + if tasks.exists(): + for task in tasks: + if subscan_id and task.id not in subscan.celery_ids: + continue + task.status = ABORTED_TASK + task.time = timezone.now() + task.save() + + return Response(response) class InitiateSubTask(APIView): - parser_classes = [JSONParser] - - def post(self, request): - data = request.data - engine_id = safe_int_cast(data.get('engine_id')) - scan_types = data.get('tasks', []) - subdomain_ids = safe_int_cast(data.get('subdomain_ids', [])) - - if not scan_types or not subdomain_ids: - return Response({'status': False, 'error': 'Missing tasks or subdomain_ids'}, status=400) - - if isinstance(subdomain_ids, int): - subdomain_ids = [subdomain_ids] - - for subdomain_id in subdomain_ids: - logger.info(f'Running subscans {scan_types} on subdomain "{subdomain_id}" ...') - for stype in scan_types: - ctx = { - 'scan_history_id': None, - 'subdomain_id': subdomain_id, - 'scan_type': stype, - 'engine_id': engine_id - } - initiate_subscan.apply_async(kwargs=ctx) - return Response({'status': True}) + parser_classes = [JSONParser] + + def post(self, request): + data = request.data + engine_id = safe_int_cast(data.get('engine_id')) + scan_types = data.get('tasks', []) + subdomain_ids = safe_int_cast(data.get('subdomain_ids', [])) + + if not scan_types or not subdomain_ids: + return Response({'status': False, 'error': 'Missing tasks or subdomain_ids'}, status=400) + + if isinstance(subdomain_ids, int): + subdomain_ids = [subdomain_ids] + + for subdomain_id in subdomain_ids: + logger.info(f'Running subscans {scan_types} on subdomain "{subdomain_id}" ...') + for stype in scan_types: + ctx = { + 'scan_history_id': None, + 'subdomain_id': subdomain_id, + 'scan_type': stype, + 'engine_id': engine_id + } + initiate_subscan.apply_async(kwargs=ctx) + return Response({'status': True}) class DeleteSubdomain(APIView): - def post(self, request): - subdomain_ids = get_data_from_post_request(request, 'subdomain_ids') - try: - subdomain_ids = [int(id) for id in subdomain_ids] - Subdomain.objects.filter(id__in=subdomain_ids).delete() - return Response({'status': True}) - except ValueError: - return Response({'status': False, 'message': 'Invalid subdomain ID provided'}, status=400) - 
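# A client-side sketch of the JSON body InitiateSubTask.post expects: 'tasks'
# is a list of subscan types and 'subdomain_ids' may be a single id or a list;
# one initiate_subscan task is queued per (subdomain, scan type) pair. The URL
# and token below are hypothetical -- the actual route lives in the API urls.py,
# which is not part of this diff.
import requests

payload = {
    'engine_id': 1,                                 # illustrative engine id
    'tasks': ['dir_file_fuzz', 'screenshot'],       # subscan types, as handled above
    'subdomain_ids': [17, 42],                      # a bare int is also accepted
}
resp = requests.post(
    'https://rengine.example.com/api/subscan/initiate/',   # hypothetical path
    json=payload,
    headers={'Authorization': 'Token <api-token>'},         # placeholder auth
)
print(resp.json())  # {'status': True} on success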
except Exception as e: - return Response({'status': False, 'message': logger.debug(e)}, status=500) + def post(self, request): + subdomain_ids = get_data_from_post_request(request, 'subdomain_ids') + try: + subdomain_ids = [int(id) for id in subdomain_ids] + Subdomain.objects.filter(id__in=subdomain_ids).delete() + return Response({'status': True}) + except ValueError: + return Response({'status': False, 'message': 'Invalid subdomain ID provided'}, status=400) + except Exception as e: + return Response({'status': False, 'message': logger.debug(e)}, status=500) class DeleteVulnerability(APIView): - def post(self, request): - vulnerability_ids = get_data_from_post_request(request, 'vulnerability_ids') - - # Check if vulnerability_ids is iterable - if not isinstance(vulnerability_ids, (list, tuple)): - return Response({'status': False, 'message': 'vulnerability_ids must be a list or tuple'}, status=400) - - try: - # Convert to integers - vulnerability_ids = [int(id) for id in vulnerability_ids] - # Delete vulnerabilities - Vulnerability.objects.filter(id__in=vulnerability_ids).delete() - return Response({'status': True}) - except ValueError: - return Response({'status': False, 'message': 'Invalid vulnerability ID provided'}, status=400) + def post(self, request): + vulnerability_ids = get_data_from_post_request(request, 'vulnerability_ids') + + # Check if vulnerability_ids is iterable + if not isinstance(vulnerability_ids, (list, tuple)): + return Response({'status': False, 'message': 'vulnerability_ids must be a list or tuple'}, status=400) + + try: + # Convert to integers + vulnerability_ids = [int(id) for id in vulnerability_ids] + # Delete vulnerabilities + Vulnerability.objects.filter(id__in=vulnerability_ids).delete() + return Response({'status': True}) + except ValueError: + return Response({'status': False, 'message': 'Invalid vulnerability ID provided'}, status=400) class ListInterestingKeywords(APIView): - def get(self, request, format=None): - req = self.request - keywords = get_lookup_keywords() - return Response(keywords) + def get(self, request, format=None): + req = self.request + keywords = get_lookup_keywords() + return Response(keywords) class RengineUpdateCheck(APIView): - def get(self, request): - req = self.request - github_api = \ - 'https://api.github.com/repos/Security-Tools-Alliance/rengine-ng/releases' - response = requests.get(github_api).json() - if 'message' in response: - return Response({'status': False, 'message': 'RateLimited'}) - - return_response = {} - - # get current version_number - # remove quotes from current_version - current_version = (RENGINE_CURRENT_VERSION[1:] if RENGINE_CURRENT_VERSION[0] == 'v' else RENGINE_CURRENT_VERSION).replace("'", "") - - # for consistency remove v from both if exists - latest_version = re.search(r'v(\d+\.)?(\d+\.)?(\*|\d+)', - ((response[0]['name' - ])[1:] if response[0]['name'][0] == 'v' - else response[0]['name'])) - - latest_version = latest_version.group(0) if latest_version else None - - if not latest_version: - latest_version = re.search(r'(\d+\.)?(\d+\.)?(\*|\d+)', - ((response[0]['name' - ])[1:] if response[0]['name'][0] - == 'v' else response[0]['name'])) - if latest_version: - latest_version = latest_version.group(0) - - return_response['status'] = True - return_response['latest_version'] = latest_version - return_response['current_version'] = current_version - return_response['update_available'] = version.parse(current_version) < version.parse(latest_version) - if version.parse(current_version) < 
version.parse(latest_version): - return_response['changelog'] = response[0]['body'] - - return Response(return_response) + def get(self, request): + req = self.request + github_api = \ + 'https://api.github.com/repos/Security-Tools-Alliance/rengine-ng/releases' + response = requests.get(github_api).json() + if 'message' in response: + return Response({'status': False, 'message': 'RateLimited'}) + + return_response = {} + + # get current version_number + # remove quotes from current_version + current_version = (RENGINE_CURRENT_VERSION[1:] if RENGINE_CURRENT_VERSION[0] == 'v' else RENGINE_CURRENT_VERSION).replace("'", "") + + # for consistency remove v from both if exists + latest_version = re.search(r'v(\d+\.)?(\d+\.)?(\*|\d+)', + ((response[0]['name' + ])[1:] if response[0]['name'][0] == 'v' + else response[0]['name'])) + + latest_version = latest_version.group(0) if latest_version else None + + if not latest_version: + latest_version = re.search(r'(\d+\.)?(\d+\.)?(\*|\d+)', + ((response[0]['name' + ])[1:] if response[0]['name'][0] + == 'v' else response[0]['name'])) + if latest_version: + latest_version = latest_version.group(0) + + return_response['status'] = True + return_response['latest_version'] = latest_version + return_response['current_version'] = current_version + return_response['update_available'] = version.parse(current_version) < version.parse(latest_version) + if version.parse(current_version) < version.parse(latest_version): + return_response['changelog'] = response[0]['body'] + + return Response(return_response) class UninstallTool(APIView): - def get(self, request): - req = self.request - tool_id = safe_int_cast(req.query_params.get('tool_id')) - tool_name = req.query_params.get('name') + def get(self, request): + req = self.request + tool_id = safe_int_cast(req.query_params.get('tool_id')) + tool_name = req.query_params.get('name') - if tool_id: - tool = InstalledExternalTool.objects.get(id=tool_id) - elif tool_name: - tool = InstalledExternalTool.objects.get(name=tool_name) + if tool_id: + tool = InstalledExternalTool.objects.get(id=tool_id) + elif tool_name: + tool = InstalledExternalTool.objects.get(name=tool_name) - if tool.is_default: - return Response({'status': False, 'message': 'Default tools can not be uninstalled'}) + if tool.is_default: + return Response({'status': False, 'message': 'Default tools can not be uninstalled'}) - # check install instructions, if it is installed using go, then remove from go bin path, - # else try to remove from github clone path + # check install instructions, if it is installed using go, then remove from go bin path, + # else try to remove from github clone path - # getting tool name is tricky! + # getting tool name is tricky! 
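# Worked example of the "getting tool name is tricky!" parsing that follows:
# for go-installed tools the binary name is the last path segment before the
# @version pin, and for git-cloned tools it is the last segment of the clone
# URL once any trailing slash is stripped. A stand-alone sketch of that logic
# (the commands are illustrative):
def tool_name_from_install_command(install_command):
    if 'go install' in install_command:
        return install_command.split('/')[-1].split('@')[0]
    if 'git clone' in install_command:
        cmd = install_command[:-1] if install_command.endswith('/') else install_command
        return cmd.split('/')[-1]
    return None

assert tool_name_from_install_command(
    'go install -v github.com/projectdiscovery/httpx/cmd/httpx@latest') == 'httpx'
assert tool_name_from_install_command(
    'git clone https://github.com/s0md3v/Corsy/') == 'Corsy'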
- if 'go install' in tool.install_command: - tool_name = tool.install_command.split('/')[-1].split('@')[0] - uninstall_command = 'rm /go/bin/' + tool_name - elif 'git clone' in tool.install_command: - tool_name = tool.install_command[:-1] if tool.install_command[-1] == '/' else tool.install_command - tool_name = tool_name.split('/')[-1] - uninstall_command = 'rm -rf ' + tool.github_clone_path - else: - return Response({'status': False, 'message': 'Cannot uninstall tool!'}) + if 'go install' in tool.install_command: + tool_name = tool.install_command.split('/')[-1].split('@')[0] + uninstall_command = 'rm /go/bin/' + tool_name + elif 'git clone' in tool.install_command: + tool_name = tool.install_command[:-1] if tool.install_command[-1] == '/' else tool.install_command + tool_name = tool_name.split('/')[-1] + uninstall_command = 'rm -rf ' + tool.github_clone_path + else: + return Response({'status': False, 'message': 'Cannot uninstall tool!'}) - run_command(uninstall_command) - run_command.apply_async(args=(uninstall_command,)) + run_command(uninstall_command) + run_command.apply_async(args=(uninstall_command,)) - tool.delete() + tool.delete() - return Response({'status': True, 'message': 'Uninstall Tool Success'}) + return Response({'status': True, 'message': 'Uninstall Tool Success'}) class UpdateTool(APIView): - def get(self, request): - req = self.request - tool_id = safe_int_cast(req.query_params.get('tool_id')) - tool_name = req.query_params.get('name') + def get(self, request): + req = self.request + tool_id = safe_int_cast(req.query_params.get('tool_id')) + tool_name = req.query_params.get('name') - if tool_id: - tool = InstalledExternalTool.objects.get(id=tool_id) - elif tool_name: - tool = InstalledExternalTool.objects.get(name=tool_name) + if tool_id: + tool = InstalledExternalTool.objects.get(id=tool_id) + elif tool_name: + tool = InstalledExternalTool.objects.get(name=tool_name) - # if git clone was used for installation, then we must use git pull inside project directory, - # otherwise use the same command as given + # if git clone was used for installation, then we must use git pull inside project directory, + # otherwise use the same command as given - update_command = tool.update_command.lower() + update_command = tool.update_command.lower() - if not update_command: - return Response({'status': False, 'message': tool.name + 'has missing update command! Cannot update the tool.'}) - elif update_command == 'git pull': - tool_name = tool.install_command[:-1] if tool.install_command[-1] == '/' else tool.install_command - tool_name = tool_name.split('/')[-1] - update_command = 'cd ' + str(Path(RENGINE_TOOL_GITHUB_PATH) / tool_name) + ' && git pull && cd -' + if not update_command: + return Response({'status': False, 'message': tool.name + 'has missing update command! 
Cannot update the tool.'}) + elif update_command == 'git pull': + tool_name = tool.install_command[:-1] if tool.install_command[-1] == '/' else tool.install_command + tool_name = tool_name.split('/')[-1] + update_command = 'cd ' + str(Path(RENGINE_TOOL_GITHUB_PATH) / tool_name) + ' && git pull && cd -' - run_command(update_command) - run_command.apply_async(args=(update_command,)) - return Response({'status': True, 'message': tool.name + ' updated successfully.'}) + run_command(update_command) + run_command.apply_async(args=(update_command,)) + return Response({'status': True, 'message': tool.name + ' updated successfully.'}) class GetExternalToolCurrentVersion(APIView): - def get(self, request): - req = self.request - # toolname is also the command - tool_id = safe_int_cast(req.query_params.get('tool_id')) - tool_name = req.query_params.get('name') - # can supply either tool id or tool_name + def get(self, request): + req = self.request + # toolname is also the command + tool_id = safe_int_cast(req.query_params.get('tool_id')) + tool_name = req.query_params.get('name') + # can supply either tool id or tool_name - tool = None + tool = None - if tool_id: - if not InstalledExternalTool.objects.filter(id=tool_id).exists(): - return Response({'status': False, 'message': 'Tool Not found'}) - tool = InstalledExternalTool.objects.get(id=tool_id) - elif tool_name: - if not InstalledExternalTool.objects.filter(name=tool_name).exists(): - return Response({'status': False, 'message': 'Tool Not found'}) - tool = InstalledExternalTool.objects.get(name=tool_name) + if tool_id: + if not InstalledExternalTool.objects.filter(id=tool_id).exists(): + return Response({'status': False, 'message': 'Tool Not found'}) + tool = InstalledExternalTool.objects.get(id=tool_id) + elif tool_name: + if not InstalledExternalTool.objects.filter(name=tool_name).exists(): + return Response({'status': False, 'message': 'Tool Not found'}) + tool = InstalledExternalTool.objects.get(name=tool_name) - if not tool.version_lookup_command: - return Response({'status': False, 'message': 'Version Lookup command not provided.'}) + if not tool.version_lookup_command: + return Response({'status': False, 'message': 'Version Lookup command not provided.'}) - version_number = None - _, stdout = run_command(tool.version_lookup_command) - version_number = re.search(re.compile(tool.version_match_regex), str(stdout)) - if not version_number: - return Response({'status': False, 'message': 'Invalid version lookup command.'}) + version_number = None + _, stdout = run_command(tool.version_lookup_command) + version_number = re.search(re.compile(tool.version_match_regex), str(stdout)) + if not version_number: + return Response({'status': False, 'message': 'Invalid version lookup command.'}) - return Response({'status': True, 'version_number': version_number.group(0), 'tool_name': tool.name}) + return Response({'status': True, 'version_number': version_number.group(0), 'tool_name': tool.name}) class GithubToolCheckGetLatestRelease(APIView): - def get(self, request): - req = self.request - - tool_id = safe_int_cast(req.query_params.get('tool_id')) - tool_name = req.query_params.get('name') - - if not InstalledExternalTool.objects.filter(id=tool_id).exists(): - return Response({'status': False, 'message': 'Tool Not found'}) - - if tool_id: - tool = InstalledExternalTool.objects.get(id=tool_id) - elif tool_name: - tool = InstalledExternalTool.objects.get(name=tool_name) - - if not tool.github_url: - return Response({'status': False, 'message': 'Github 
URL is not provided, Cannot check updates'}) - - # if tool_github_url has https://github.com/ remove and also remove trailing / - tool_github_url = tool.github_url.replace('http://github.com/', '').replace('https://github.com/', '') - tool_github_url = remove_lead_and_trail_slash(tool_github_url) - github_api = f'https://api.github.com/repos/{tool_github_url}/releases' - response = requests.get(github_api).json() - # check if api rate limit exceeded - if 'message' in response and response['message'] == 'RateLimited': - return Response({'status': False, 'message': 'RateLimited'}) - elif 'message' in response and response['message'] == 'Not Found': - return Response({'status': False, 'message': 'Not Found'}) - elif not response: - return Response({'status': False, 'message': 'Not Found'}) - - # only send latest release - response = response[0] - - api_response = { - 'status': True, - 'url': response['url'], - 'id': response['id'], - 'name': response['name'], - 'changelog': response['body'], - } - return Response(api_response) + def get(self, request): + req = self.request + + tool_id = safe_int_cast(req.query_params.get('tool_id')) + tool_name = req.query_params.get('name') + + if not InstalledExternalTool.objects.filter(id=tool_id).exists(): + return Response({'status': False, 'message': 'Tool Not found'}) + + if tool_id: + tool = InstalledExternalTool.objects.get(id=tool_id) + elif tool_name: + tool = InstalledExternalTool.objects.get(name=tool_name) + + if not tool.github_url: + return Response({'status': False, 'message': 'Github URL is not provided, Cannot check updates'}) + + # if tool_github_url has https://github.com/ remove and also remove trailing / + tool_github_url = tool.github_url.replace('http://github.com/', '').replace('https://github.com/', '') + tool_github_url = remove_lead_and_trail_slash(tool_github_url) + github_api = f'https://api.github.com/repos/{tool_github_url}/releases' + response = requests.get(github_api).json() + # check if api rate limit exceeded + if 'message' in response and response['message'] == 'RateLimited': + return Response({'status': False, 'message': 'RateLimited'}) + elif 'message' in response and response['message'] == 'Not Found': + return Response({'status': False, 'message': 'Not Found'}) + elif not response: + return Response({'status': False, 'message': 'Not Found'}) + + # only send latest release + response = response[0] + + api_response = { + 'status': True, + 'url': response['url'], + 'id': response['id'], + 'name': response['name'], + 'changelog': response['body'], + } + return Response(api_response) class ScanStatus(APIView): - def get(self, request): - req = self.request - slug = self.request.GET.get('project', None) - # main tasks - recently_completed_scans = ( - ScanHistory.objects - .filter(domain__project__slug=slug) - .order_by('-start_scan_date') - .filter(Q(scan_status=0) | Q(scan_status=2) | Q(scan_status=3))[:10] - ) - current_scans = ( - ScanHistory.objects - .filter(domain__project__slug=slug) - .order_by('-start_scan_date') - .filter(scan_status=1) - ) - pending_scans = ( - ScanHistory.objects - .filter(domain__project__slug=slug) - .filter(scan_status=-1) - ) - - # subtasks - recently_completed_tasks = ( - SubScan.objects - .filter(scan_history__domain__project__slug=slug) - .order_by('-start_scan_date') - .filter(Q(status=0) | Q(status=2) | Q(status=3))[:15] - ) - current_tasks = ( - SubScan.objects - .filter(scan_history__domain__project__slug=slug) - .order_by('-start_scan_date') - .filter(status=1) - ) - pending_tasks = 
( - SubScan.objects - .filter(scan_history__domain__project__slug=slug) - .filter(status=-1) - ) - response = { - 'scans': { - 'pending': ScanHistorySerializer(pending_scans, many=True).data, - 'scanning': ScanHistorySerializer(current_scans, many=True).data, - 'completed': ScanHistorySerializer(recently_completed_scans, many=True).data - }, - 'tasks': { - 'pending': SubScanSerializer(pending_tasks, many=True).data, - 'running': SubScanSerializer(current_tasks, many=True).data, - 'completed': SubScanSerializer(recently_completed_tasks, many=True).data - } - } - return Response(response) + def get(self, request): + req = self.request + slug = self.request.GET.get('project', None) + # main tasks + recently_completed_scans = ( + ScanHistory.objects + .filter(domain__project__slug=slug) + .order_by('-start_scan_date') + .filter(Q(scan_status=0) | Q(scan_status=2) | Q(scan_status=3))[:10] + ) + current_scans = ( + ScanHistory.objects + .filter(domain__project__slug=slug) + .order_by('-start_scan_date') + .filter(scan_status=1) + ) + pending_scans = ( + ScanHistory.objects + .filter(domain__project__slug=slug) + .filter(scan_status=-1) + ) + + # subtasks + recently_completed_tasks = ( + SubScan.objects + .filter(scan_history__domain__project__slug=slug) + .order_by('-start_scan_date') + .filter(Q(status=0) | Q(status=2) | Q(status=3))[:15] + ) + current_tasks = ( + SubScan.objects + .filter(scan_history__domain__project__slug=slug) + .order_by('-start_scan_date') + .filter(status=1) + ) + pending_tasks = ( + SubScan.objects + .filter(scan_history__domain__project__slug=slug) + .filter(status=-1) + ) + response = { + 'scans': { + 'pending': ScanHistorySerializer(pending_scans, many=True).data, + 'scanning': ScanHistorySerializer(current_scans, many=True).data, + 'completed': ScanHistorySerializer(recently_completed_scans, many=True).data + }, + 'tasks': { + 'pending': SubScanSerializer(pending_tasks, many=True).data, + 'running': SubScanSerializer(current_tasks, many=True).data, + 'completed': SubScanSerializer(recently_completed_tasks, many=True).data + } + } + return Response(response) class Whois(APIView): - def get(self, request): - req = self.request - ip_domain = req.query_params.get('ip_domain') - if not (validators.domain(ip_domain) or validators.ipv4(ip_domain) or validators.ipv6(ip_domain)): - print(f'Ip address or domain "{ip_domain}" did not pass validator.') - return Response({'status': False, 'message': 'Invalid domain or IP'}) - is_force_update = req.query_params.get('is_reload') - is_force_update = True if is_force_update and 'true' == is_force_update.lower() else False - task = query_whois.apply_async(args=(ip_domain,is_force_update)) - response = task.wait() - return Response(response) + def get(self, request): + req = self.request + ip_domain = req.query_params.get('ip_domain') + if not (validators.domain(ip_domain) or validators.ipv4(ip_domain) or validators.ipv6(ip_domain)): + print(f'Ip address or domain "{ip_domain}" did not pass validator.') + return Response({'status': False, 'message': 'Invalid domain or IP'}) + is_force_update = req.query_params.get('is_reload') + is_force_update = True if is_force_update and 'true' == is_force_update.lower() else False + task = query_whois.apply_async(args=(ip_domain,is_force_update)) + response = task.wait() + return Response(response) class ReverseWhois(APIView): - def get(self, request): - req = self.request - lookup_keyword = req.query_params.get('lookup_keyword') - task = query_reverse_whois.apply_async(args=(lookup_keyword,)) - 
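# The Whois view above only dispatches the lookup when the query string passes
# the `validators` checks; anything that is neither a domain nor an IPv4/IPv6
# address is rejected before a Celery task is queued. Quick illustration of
# those checks (validators returns True on success and a falsy failure object
# otherwise):
import validators

def is_valid_whois_target(value):
    return bool(
        validators.domain(value)
        or validators.ipv4(value)
        or validators.ipv6(value)
    )

assert is_valid_whois_target('example.com')
assert is_valid_whois_target('192.0.2.10')
assert not is_valid_whois_target('not a domain !')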
response = task.wait() - return Response(response) + def get(self, request): + req = self.request + lookup_keyword = req.query_params.get('lookup_keyword') + task = query_reverse_whois.apply_async(args=(lookup_keyword,)) + response = task.wait() + return Response(response) class DomainIPHistory(APIView): - def get(self, request): - req = self.request - domain = req.query_params.get('domain') - task = query_ip_history.apply_async(args=(domain,)) - response = task.wait() - return Response(response) + def get(self, request): + req = self.request + domain = req.query_params.get('domain') + task = query_ip_history.apply_async(args=(domain,)) + response = task.wait() + return Response(response) class CMSDetector(APIView): - def get(self, request): - url = request.query_params.get('url') - if not url: - return Response({'status': False, 'message': 'URL parameter is missing'}) - - try: - task = run_cmseek.delay(url) - result = task.get(timeout=300) # 5 minutes timeout - - if result['status']: - return Response(result) - else: - return Response({'status': False, 'message': 'Could not detect CMS!'}) - except Exception as e: - logger.error(f"Error in CMSDetector: {str(e)}") - return Response({'status': False, 'message': 'An unexpected error occurred.'}, status=500) + def get(self, request): + url = request.query_params.get('url') + if not url: + return Response({'status': False, 'message': 'URL parameter is missing'}) + + try: + task = run_cmseek.delay(url) + result = task.get(timeout=300) # 5 minutes timeout + + if result['status']: + return Response(result) + else: + return Response({'status': False, 'message': 'Could not detect CMS!'}) + except Exception as e: + logger.error(f"Error in CMSDetector: {str(e)}") + return Response({'status': False, 'message': 'An unexpected error occurred.'}, status=500) class IPToDomain(APIView): - def get(self, request): - req = self.request - ip_address = req.query_params.get('ip_address') - response = {} - if not ip_address: - return Response({ - 'status': False, - 'message': 'IP Address Required' - }) - try: - logger.info(f'Resolving IP address {ip_address} ...') - resolved_ips = [] - for ip in IPv4Network(ip_address, False): - domains = [] - ips = [] - try: - (domain, domains, ips) = socket.gethostbyaddr(str(ip)) - except socket.herror: - logger.info(f'No PTR record for {ip_address}') - domain = str(ip) - if domain not in domains: - domains.append(domain) - resolved_ips.append({'ip': str(ip),'domain': domain, 'domains': domains, 'ips': ips}) - response = { - 'status': True, - 'orig': ip_address, - 'ip_address': resolved_ips, - } - except Exception as e: - logger.exception(e) - response = { - 'status': False, - 'ip_address': ip_address, - 'message': f'Exception {e}' - } - finally: - return Response(response) + def get(self, request): + req = self.request + ip_address = req.query_params.get('ip_address') + response = {} + if not ip_address: + return Response({ + 'status': False, + 'message': 'IP Address Required' + }) + try: + logger.info(f'Resolving IP address {ip_address} ...') + resolved_ips = [] + for ip in IPv4Network(ip_address, False): + domains = [] + ips = [] + try: + (domain, domains, ips) = socket.gethostbyaddr(str(ip)) + except socket.herror: + logger.info(f'No PTR record for {ip_address}') + domain = str(ip) + if domain not in domains: + domains.append(domain) + resolved_ips.append({'ip': str(ip),'domain': domain, 'domains': domains, 'ips': ips}) + response = { + 'status': True, + 'orig': ip_address, + 'ip_address': resolved_ips, + } + except 
Exception as e: + logger.exception(e) + response = { + 'status': False, + 'ip_address': ip_address, + 'message': f'Exception {e}' + } + finally: + return Response(response) class VulnerabilityReport(APIView): - def get(self, request): - req = self.request - vulnerability_id = safe_int_cast(req.query_params.get('vulnerability_id')) - return Response({"status": send_hackerone_report(vulnerability_id)}) + def get(self, request): + req = self.request + vulnerability_id = safe_int_cast(req.query_params.get('vulnerability_id')) + return Response({"status": send_hackerone_report(vulnerability_id)}) class GetFileContents(APIView): - def get(self, request, format=None): - req = self.request - name = req.query_params.get('name') - - response = {} - response['status'] = False - - if 'nuclei_config' in req.query_params: - path = str(Path.home() / ".config" / "nuclei" / "config.yaml") - if not os.path.exists(path): - run_command(f'touch {path}') - response['message'] = 'File Created!' - with open(path, "r") as f: - response['status'] = True - response['content'] = f.read() - return Response(response) - - if 'subfinder_config' in req.query_params: - path = str(Path.home() / ".config" / "subfinder" / "config.yaml") - if not os.path.exists(path): - run_command(f'touch {path}') - response['message'] = 'File Created!' - with open(path, "r") as f: - response['status'] = True - response['content'] = f.read() - return Response(response) - - if 'naabu_config' in req.query_params: - path = str(Path.home() / ".config" / "naabu" / "config.yaml") - if not os.path.exists(path): - run_command(f'touch {path}') - response['message'] = 'File Created!' - with open(path, "r") as f: - response['status'] = True - response['content'] = f.read() - return Response(response) - - if 'theharvester_config' in req.query_params: - path = str(Path.home() / ".config" / 'theHarvester' / 'api-keys.yaml') - if not os.path.exists(path): - run_command(f'touch {path}') - response['message'] = 'File Created!' - with open(path, "r") as f: - response['status'] = True - response['content'] = f.read() - return Response(response) - - if 'amass_config' in req.query_params: - path = str(Path.home() / ".config" / "amass.ini") - if not os.path.exists(path): - run_command(f'touch {path}') - response['message'] = 'File Created!' - with open(path, "r") as f: - response['status'] = True - response['content'] = f.read() - return Response(response) - - if 'gf_pattern' in req.query_params: - basedir = str(Path.home() / '.gf') - path = str(Path.home() / '.gf' / f'{name}.json') - if is_safe_path(basedir, path) and os.path.exists(path): - with open(path, "r") as f: - content = f.read() - response['status'] = True - response['content'] = content - else: - response['message'] = "Invalid path!" - response['status'] = False - return Response(response) - - - if 'nuclei_template' in req.query_params: - safe_dir = str(Path.home() / 'nuclei-templates') - path = str(Path.home() / 'nuclei-templates' / f'{name}') - if is_safe_path(safe_dir, path) and os.path.exists(path): - with open(path.format(name), "r") as f: - content = f.read() - response['status'] = True - response['content'] = content - else: - response['message'] = 'Invalid Path!' - response['status'] = False - return Response(response) - - if 'gau_config' in req.query_params: - path = str(Path.home() / ".config" / '.gau.toml') - if not os.path.exists(path): - run_command(f'touch {path}') - response['message'] = 'File Created!' 
- with open(path, "r") as f: - response['status'] = True - response['content'] = f.read() - return Response(response) - - response['message'] = 'Invalid Query Params' - return Response(response) + def get(self, request, format=None): + req = self.request + name = req.query_params.get('name') + + response = {} + response['status'] = False + + if 'nuclei_config' in req.query_params: + path = str(Path.home() / ".config" / "nuclei" / "config.yaml") + if not os.path.exists(path): + run_command(f'touch {path}') + response['message'] = 'File Created!' + with open(path, "r") as f: + response['status'] = True + response['content'] = f.read() + return Response(response) + + if 'subfinder_config' in req.query_params: + path = str(Path.home() / ".config" / "subfinder" / "config.yaml") + if not os.path.exists(path): + run_command(f'touch {path}') + response['message'] = 'File Created!' + with open(path, "r") as f: + response['status'] = True + response['content'] = f.read() + return Response(response) + + if 'naabu_config' in req.query_params: + path = str(Path.home() / ".config" / "naabu" / "config.yaml") + if not os.path.exists(path): + run_command(f'touch {path}') + response['message'] = 'File Created!' + with open(path, "r") as f: + response['status'] = True + response['content'] = f.read() + return Response(response) + + if 'theharvester_config' in req.query_params: + path = str(Path.home() / ".config" / 'theHarvester' / 'api-keys.yaml') + if not os.path.exists(path): + run_command(f'touch {path}') + response['message'] = 'File Created!' + with open(path, "r") as f: + response['status'] = True + response['content'] = f.read() + return Response(response) + + if 'amass_config' in req.query_params: + path = str(Path.home() / ".config" / "amass.ini") + if not os.path.exists(path): + run_command(f'touch {path}') + response['message'] = 'File Created!' + with open(path, "r") as f: + response['status'] = True + response['content'] = f.read() + return Response(response) + + if 'gf_pattern' in req.query_params: + basedir = str(Path.home() / '.gf') + path = str(Path.home() / '.gf' / f'{name}.json') + if is_safe_path(basedir, path) and os.path.exists(path): + with open(path, "r") as f: + content = f.read() + response['status'] = True + response['content'] = content + else: + response['message'] = "Invalid path!" + response['status'] = False + return Response(response) + + + if 'nuclei_template' in req.query_params: + safe_dir = str(Path.home() / 'nuclei-templates') + path = str(Path.home() / 'nuclei-templates' / f'{name}') + if is_safe_path(safe_dir, path) and os.path.exists(path): + with open(path.format(name), "r") as f: + content = f.read() + response['status'] = True + response['content'] = content + else: + response['message'] = 'Invalid Path!' + response['status'] = False + return Response(response) + + if 'gau_config' in req.query_params: + path = str(Path.home() / ".config" / '.gau.toml') + if not os.path.exists(path): + run_command(f'touch {path}') + response['message'] = 'File Created!' 
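# GetFileContents guards the user-supplied gf pattern and nuclei template names
# with is_safe_path() before opening anything under the base directory. That
# helper is defined elsewhere in the codebase; the sketch below is only an
# assumption of how such a traversal guard is commonly written (resolve both
# paths, then require the target to remain inside the base directory).
import os

def is_safe_path_sketch(basedir, path):
    base = os.path.realpath(basedir)
    target = os.path.realpath(path)
    return os.path.commonpath([base, target]) == base

# e.g. a name like '../../etc/passwd' resolves outside ~/.gf and is rejected:
# is_safe_path_sketch('/home/rengine/.gf', '/home/rengine/.gf/../../etc/passwd') -> False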
+ with open(path, "r") as f: + response['status'] = True + response['content'] = f.read() + return Response(response) + + response['message'] = 'Invalid Query Params' + return Response(response) class GfList(APIView): - def get(self, request): - try: - task = run_gf_list.delay() - result = task.get(timeout=30) # 30 seconds timeout - - if result['status']: - return Response(result['output']) - else: - return Response({'error': result['message']}, status=500) - except Exception as e: - logger.error(f"Error in GfList: {str(e)}") # Log the exception for internal tracking - return Response({'error': 'An unexpected error occurred. Please try again later.'}, status=500) + def get(self, request): + try: + task = run_gf_list.delay() + result = task.get(timeout=30) # 30 seconds timeout + + if result['status']: + return Response(result['output']) + else: + return Response({'error': result['message']}, status=500) + except Exception as e: + logger.error(f"Error in GfList: {str(e)}") # Log the exception for internal tracking + return Response({'error': 'An unexpected error occurred. Please try again later.'}, status=500) class ListTodoNotes(APIView): - def get(self, request, format=None): - req = self.request - notes = TodoNote.objects.all().order_by('-id') - scan_id = safe_int_cast(req.query_params.get('scan_id')) - project = req.query_params.get('project') - if project: - notes = notes.filter(project__slug=project) - target_id = safe_int_cast(req.query_params.get('target_id')) - todo_id = req.query_params.get('todo_id') - subdomain_id = safe_int_cast(req.query_params.get('subdomain_id')) - if target_id: - notes = notes.filter(scan_history__in=ScanHistory.objects.filter(domain__id=target_id)) - elif scan_id: - notes = notes.filter(scan_history__id=scan_id) - if todo_id: - notes = notes.filter(id=todo_id) - if subdomain_id: - notes = notes.filter(subdomain__id=subdomain_id) - notes = ReconNoteSerializer(notes, many=True) - return Response({'notes': notes.data}) + def get(self, request, format=None): + req = self.request + notes = TodoNote.objects.all().order_by('-id') + scan_id = safe_int_cast(req.query_params.get('scan_id')) + project = req.query_params.get('project') + if project: + notes = notes.filter(project__slug=project) + target_id = safe_int_cast(req.query_params.get('target_id')) + todo_id = req.query_params.get('todo_id') + subdomain_id = safe_int_cast(req.query_params.get('subdomain_id')) + if target_id: + notes = notes.filter(scan_history__in=ScanHistory.objects.filter(domain__id=target_id)) + elif scan_id: + notes = notes.filter(scan_history__id=scan_id) + if todo_id: + notes = notes.filter(id=todo_id) + if subdomain_id: + notes = notes.filter(subdomain__id=subdomain_id) + notes = ReconNoteSerializer(notes, many=True) + return Response({'notes': notes.data}) class ListScanHistory(APIView): - def get(self, request, format=None): - req = self.request - scan_history = ScanHistory.objects.all().order_by('-start_scan_date') - project = req.query_params.get('project') - if project: - scan_history = scan_history.filter(domain__project__slug=project) - scan_history = ScanHistorySerializer(scan_history, many=True) - return Response(scan_history.data) + def get(self, request, format=None): + req = self.request + scan_history = ScanHistory.objects.all().order_by('-start_scan_date') + project = req.query_params.get('project') + if project: + scan_history = scan_history.filter(domain__project__slug=project) + scan_history = ScanHistorySerializer(scan_history, many=True) + return 
Response(scan_history.data) class ListEngines(APIView): - def get(self, request, format=None): - req = self.request - engines = EngineType.objects.order_by('engine_name').all() - engine_serializer = EngineSerializer(engines, many=True) - return Response({'engines': engine_serializer.data}) + def get(self, request, format=None): + req = self.request + engines = EngineType.objects.order_by('engine_name').all() + engine_serializer = EngineSerializer(engines, many=True) + return Response({'engines': engine_serializer.data}) class ListOrganizations(APIView): - def get(self, request, format=None): - organizations = Organization.objects.all() - organization_serializer = OrganizationSerializer(organizations, many=True) - return Response({'organizations': organization_serializer.data}) + def get(self, request, format=None): + organizations = Organization.objects.all() + organization_serializer = OrganizationSerializer(organizations, many=True) + return Response({'organizations': organization_serializer.data}) class ListTargetsInOrganization(APIView): - def get(self, request, format=None): - req = self.request - organization_id = safe_int_cast(req.query_params.get('organization_id')) - organization = Organization.objects.filter(id=organization_id) - targets = Domain.objects.filter(domains__in=organization) - organization_serializer = OrganizationSerializer(organization, many=True) - targets_serializer = OrganizationTargetsSerializer(targets, many=True) - return Response({'organization': organization_serializer.data, 'domains': targets_serializer.data}) + def get(self, request, format=None): + req = self.request + organization_id = safe_int_cast(req.query_params.get('organization_id')) + organization = Organization.objects.filter(id=organization_id) + targets = Domain.objects.filter(domains__in=organization) + organization_serializer = OrganizationSerializer(organization, many=True) + targets_serializer = OrganizationTargetsSerializer(targets, many=True) + return Response({'organization': organization_serializer.data, 'domains': targets_serializer.data}) class ListTargetsWithoutOrganization(APIView): - def get(self, request, format=None): - req = self.request - targets = Domain.objects.exclude(domains__in=Organization.objects.all()) - targets_serializer = OrganizationTargetsSerializer(targets, many=True) - return Response({'domains': targets_serializer.data}) + def get(self, request, format=None): + req = self.request + targets = Domain.objects.exclude(domains__in=Organization.objects.all()) + targets_serializer = OrganizationTargetsSerializer(targets, many=True) + return Response({'domains': targets_serializer.data}) class VisualiseData(APIView): - def get(self, request, format=None): - req = self.request - if scan_id := safe_int_cast(req.query_params.get('scan_id')): - mitch_data = ScanHistory.objects.filter(id=scan_id) - serializer = VisualiseDataSerializer(mitch_data, many=True) - - # Data processing to remove duplicates - processed_data = self.process_visualisation_data(serializer.data) - - return Response(processed_data) - else: - return Response() - - def process_visualisation_data(self, data): - if not data: - return [] - - processed_data = data[0] # Assuming there's only one element in data - subdomains = processed_data.get('subdomains', []) - - # Use a dictionary to group vulnerabilities by subdomain - vuln_by_subdomain = defaultdict(list) - - for subdomain in subdomains: - subdomain_name = subdomain['name'] - vulnerabilities = subdomain.get('vulnerabilities', []) - - # Group unique 
vulnerabilities - unique_vulns = {} - for vuln in vulnerabilities: - vuln_key = (vuln['name'], vuln['severity']) - if vuln_key not in unique_vulns: - unique_vulns[vuln_key] = vuln - - vuln_by_subdomain[subdomain_name].extend(unique_vulns.values()) - - # Update subdomains with unique vulnerabilities - for subdomain in subdomains: - subdomain['vulnerabilities'] = vuln_by_subdomain[subdomain['name']] - - return processed_data + def get(self, request, format=None): + req = self.request + if scan_id := safe_int_cast(req.query_params.get('scan_id')): + mitch_data = ScanHistory.objects.filter(id=scan_id) + serializer = VisualiseDataSerializer(mitch_data, many=True) + + # Data processing to remove duplicates + processed_data = self.process_visualisation_data(serializer.data) + + return Response(processed_data) + else: + return Response() + + def process_visualisation_data(self, data): + if not data: + return [] + + processed_data = data[0] # Assuming there's only one element in data + subdomains = processed_data.get('subdomains', []) + + # Use a dictionary to group vulnerabilities by subdomain + vuln_by_subdomain = defaultdict(list) + + for subdomain in subdomains: + subdomain_name = subdomain['name'] + vulnerabilities = subdomain.get('vulnerabilities', []) + + # Group unique vulnerabilities + unique_vulns = {} + for vuln in vulnerabilities: + vuln_key = (vuln['name'], vuln['severity']) + if vuln_key not in unique_vulns: + unique_vulns[vuln_key] = vuln + + vuln_by_subdomain[subdomain_name].extend(unique_vulns.values()) + + # Update subdomains with unique vulnerabilities + for subdomain in subdomains: + subdomain['vulnerabilities'] = vuln_by_subdomain[subdomain['name']] + + return processed_data class ListTechnology(APIView): - def get(self, request, format=None): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) + def get(self, request, format=None): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) - # Determine the queryset based on the presence of target_id or scan_id - if target_id := safe_int_cast(req.query_params.get('target_id')): - subdomain_filter = Subdomain.objects.filter(target_domain__id=target_id) - elif scan_id: - subdomain_filter = Subdomain.objects.filter(scan_history__id=scan_id) - else: - subdomain_filter = Subdomain.objects.all() + # Determine the queryset based on the presence of target_id or scan_id + if target_id := safe_int_cast(req.query_params.get('target_id')): + subdomain_filter = Subdomain.objects.filter(target_domain__id=target_id) + elif scan_id: + subdomain_filter = Subdomain.objects.filter(scan_history__id=scan_id) + else: + subdomain_filter = Subdomain.objects.all() - # Fetch technologies and serialize the results - tech = Technology.objects.filter(technologies__in=subdomain_filter).annotate( - count=Count('name')).order_by('-count') - serializer = TechnologyCountSerializer(tech, many=True) + # Fetch technologies and serialize the results + tech = Technology.objects.filter(technologies__in=subdomain_filter).annotate( + count=Count('name')).order_by('-count') + serializer = TechnologyCountSerializer(tech, many=True) - return Response({"technologies": serializer.data}) + return Response({"technologies": serializer.data}) class ListDorkTypes(APIView): - def get(self, request, format=None): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - if scan_id: - dork = Dork.objects.filter( - dorks__in=ScanHistory.objects.filter(id=scan_id) - 
).values('type').annotate(count=Count('type')).order_by('-count') - serializer = DorkCountSerializer(dork, many=True) - return Response({"dorks": serializer.data}) - else: - dork = Dork.objects.filter( - dorks__in=ScanHistory.objects.all() - ).values('type').annotate(count=Count('type')).order_by('-count') - serializer = DorkCountSerializer(dork, many=True) - return Response({"dorks": serializer.data}) + def get(self, request, format=None): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + if scan_id: + dork = Dork.objects.filter( + dorks__in=ScanHistory.objects.filter(id=scan_id) + ).values('type').annotate(count=Count('type')).order_by('-count') + serializer = DorkCountSerializer(dork, many=True) + return Response({"dorks": serializer.data}) + else: + dork = Dork.objects.filter( + dorks__in=ScanHistory.objects.all() + ).values('type').annotate(count=Count('type')).order_by('-count') + serializer = DorkCountSerializer(dork, many=True) + return Response({"dorks": serializer.data}) class ListEmails(APIView): - def get(self, request, format=None): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - if scan_id: - email = Email.objects.filter( - emails__in=ScanHistory.objects.filter(id=scan_id)).order_by('password') - serializer = EmailSerializer(email, many=True) - return Response({"emails": serializer.data}) + def get(self, request, format=None): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + if scan_id: + email = Email.objects.filter( + emails__in=ScanHistory.objects.filter(id=scan_id)).order_by('password') + serializer = EmailSerializer(email, many=True) + return Response({"emails": serializer.data}) class ListDorks(APIView): - def get(self, request, format=None): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - type = req.query_params.get('type') - if scan_id: - dork = Dork.objects.filter( - dorks__in=ScanHistory.objects.filter(id=scan_id)) - else: - dork = Dork.objects.filter( - dorks__in=ScanHistory.objects.all()) - if scan_id and type: - dork = dork.filter(type=type) - serializer = DorkSerializer(dork, many=True) - grouped_res = {} - for item in serializer.data: - item_type = item['type'] - if item_type not in grouped_res: - grouped_res[item_type] = [] - grouped_res[item_type].append(item) - return Response({"dorks": grouped_res}) + def get(self, request, format=None): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + type = req.query_params.get('type') + if scan_id: + dork = Dork.objects.filter( + dorks__in=ScanHistory.objects.filter(id=scan_id)) + else: + dork = Dork.objects.filter( + dorks__in=ScanHistory.objects.all()) + if scan_id and type: + dork = dork.filter(type=type) + serializer = DorkSerializer(dork, many=True) + grouped_res = {} + for item in serializer.data: + item_type = item['type'] + if item_type not in grouped_res: + grouped_res[item_type] = [] + grouped_res[item_type].append(item) + return Response({"dorks": grouped_res}) class ListEmployees(APIView): - def get(self, request, format=None): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - if scan_id: - employee = Employee.objects.filter( - employees__in=ScanHistory.objects.filter(id=scan_id)) - serializer = EmployeeSerializer(employee, many=True) - return Response({"employees": serializer.data}) + def get(self, request, format=None): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + if scan_id: 
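# ListDorks above folds the serialized results into a dict keyed by dork type;
# the same grouping in isolation, written with setdefault and illustrative data:
def group_by_type(items):
    grouped = {}
    for item in items:
        grouped.setdefault(item['type'], []).append(item)
    return grouped

dorks = [
    {'type': 'login_pages', 'url': 'https://example.com/admin'},
    {'type': 'config_files', 'url': 'https://example.com/.env'},
    {'type': 'login_pages', 'url': 'https://example.com/wp-login.php'},
]
assert group_by_type(dorks) == {
    'login_pages': [dorks[0], dorks[2]],
    'config_files': [dorks[1]],
}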
+ employee = Employee.objects.filter( + employees__in=ScanHistory.objects.filter(id=scan_id)) + serializer = EmployeeSerializer(employee, many=True) + return Response({"employees": serializer.data}) class ListPorts(APIView): - def get(self, request, format=None): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - target_id = safe_int_cast(req.query_params.get('target_id')) - ip_address = req.query_params.get('ip_address') - - if target_id: - port = Port.objects.filter( - ports__in=IpAddress.objects.filter( - ip_addresses__in=Subdomain.objects.filter( - target_domain__id=target_id))).distinct() - elif scan_id: - port = Port.objects.filter( - ports__in=IpAddress.objects.filter( - ip_addresses__in=Subdomain.objects.filter( - scan_history__id=scan_id))).distinct() - else: - port = Port.objects.filter( - ports__in=IpAddress.objects.filter( - ip_addresses__in=Subdomain.objects.all())).distinct() - - if ip_address: - port = port.filter(ports__address=ip_address).distinct() - - serializer = PortSerializer(port, many=True) - return Response({"ports": serializer.data}) + def get(self, request, format=None): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + target_id = safe_int_cast(req.query_params.get('target_id')) + ip_address = req.query_params.get('ip_address') + + if target_id: + port = Port.objects.filter( + ports__in=IpAddress.objects.filter( + ip_addresses__in=Subdomain.objects.filter( + target_domain__id=target_id))).distinct() + elif scan_id: + port = Port.objects.filter( + ports__in=IpAddress.objects.filter( + ip_addresses__in=Subdomain.objects.filter( + scan_history__id=scan_id))).distinct() + else: + port = Port.objects.filter( + ports__in=IpAddress.objects.filter( + ip_addresses__in=Subdomain.objects.all())).distinct() + + if ip_address: + port = port.filter(ports__address=ip_address).distinct() + + serializer = PortSerializer(port, many=True) + return Response({"ports": serializer.data}) class ListSubdomains(APIView): - def get(self, request, format=None): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - project = req.query_params.get('project') - target_id = safe_int_cast(req.query_params.get('target_id')) - ip_address = req.query_params.get('ip_address') - port = req.query_params.get('port') - tech = req.query_params.get('tech') + def get(self, request, format=None): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + project = req.query_params.get('project') + target_id = safe_int_cast(req.query_params.get('target_id')) + ip_address = req.query_params.get('ip_address') + port = req.query_params.get('port') + tech = req.query_params.get('tech') - subdomains = Subdomain.objects.filter(target_domain__project__slug=project) if project else Subdomain.objects.all() + subdomains = Subdomain.objects.filter(target_domain__project__slug=project) if project else Subdomain.objects.all() - if scan_id: - subdomain_query = subdomains.filter(scan_history__id=scan_id).distinct('name') - elif target_id: - subdomain_query = subdomains.filter(target_domain__id=target_id).distinct('name') - else: - subdomain_query = subdomains.all().distinct('name') + if scan_id: + subdomain_query = subdomains.filter(scan_history__id=scan_id).distinct('name') + elif target_id: + subdomain_query = subdomains.filter(target_domain__id=target_id).distinct('name') + else: + subdomain_query = subdomains.all().distinct('name') - if ip_address: - subdomain_query = 
subdomain_query.filter(ip_addresses__address=ip_address) + if ip_address: + subdomain_query = subdomain_query.filter(ip_addresses__address=ip_address) - if tech: - subdomain_query = subdomain_query.filter(technologies__name=tech) + if tech: + subdomain_query = subdomain_query.filter(technologies__name=tech) - if port: - subdomain_query = subdomain_query.filter( - ip_addresses__in=IpAddress.objects.filter( - ports__in=Port.objects.filter( - number=port))) + if port: + subdomain_query = subdomain_query.filter( + ip_addresses__in=IpAddress.objects.filter( + ports__in=Port.objects.filter( + number=port))) - if 'only_important' in req.query_params: - subdomain_query = subdomain_query.filter(is_important=True) + if 'only_important' in req.query_params: + subdomain_query = subdomain_query.filter(is_important=True) - if 'no_lookup_interesting' in req.query_params: - serializer = OnlySubdomainNameSerializer(subdomain_query, many=True) - else: - serializer = SubdomainSerializer(subdomain_query, many=True) - return Response({"subdomains": serializer.data}) + if 'no_lookup_interesting' in req.query_params: + serializer = OnlySubdomainNameSerializer(subdomain_query, many=True) + else: + serializer = SubdomainSerializer(subdomain_query, many=True) + return Response({"subdomains": serializer.data}) - def post(self, req): - req = self.request - data = req.data + def post(self, req): + req = self.request + data = req.data - subdomain_ids = data.get('subdomain_ids') + subdomain_ids = data.get('subdomain_ids') - subdomain_names = [] + subdomain_names = [] - for id in subdomain_ids: - subdomain_names.append(Subdomain.objects.get(id=id).name) + for id in subdomain_ids: + subdomain_names.append(Subdomain.objects.get(id=id).name) - if subdomain_names: - return Response({'status': True, "results": subdomain_names}) + if subdomain_names: + return Response({'status': True, "results": subdomain_names}) - return Response({'status': False}) + return Response({'status': False}) class ListOsintUsers(APIView): - def get(self, request, format=None): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - if scan_id: - documents = MetaFinderDocument.objects.filter(scan_history__id=scan_id).exclude(author__isnull=True).values('author').distinct() - serializer = MetafinderUserSerializer(documents, many=True) - return Response({"users": serializer.data}) + def get(self, request, format=None): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + if scan_id: + documents = MetaFinderDocument.objects.filter(scan_history__id=scan_id).exclude(author__isnull=True).values('author').distinct() + serializer = MetafinderUserSerializer(documents, many=True) + return Response({"users": serializer.data}) class ListMetadata(APIView): - def get(self, request, format=None): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - if scan_id: - documents = MetaFinderDocument.objects.filter(scan_history__id=scan_id).distinct() - serializer = MetafinderDocumentSerializer(documents, many=True) - return Response({"metadata": serializer.data}) + def get(self, request, format=None): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + if scan_id: + documents = MetaFinderDocument.objects.filter(scan_history__id=scan_id).distinct() + serializer = MetafinderDocumentSerializer(documents, many=True) + return Response({"metadata": serializer.data}) class ListIPs(APIView): - def get(self, request, format=None): - req = self.request - scan_id = 
safe_int_cast(req.query_params.get('scan_id')) - target_id = safe_int_cast(req.query_params.get('target_id')) + def get(self, request, format=None): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + target_id = safe_int_cast(req.query_params.get('target_id')) - port = req.query_params.get('port') + port = req.query_params.get('port') - if target_id: - ips = IpAddress.objects.filter( - ip_addresses__in=Subdomain.objects.filter( - target_domain__id=target_id)).distinct() - elif scan_id: - ips = IpAddress.objects.filter( - ip_addresses__in=Subdomain.objects.filter( - scan_history__id=scan_id)).distinct() - else: - ips = IpAddress.objects.filter( - ip_addresses__in=Subdomain.objects.all()).distinct() + if target_id: + ips = IpAddress.objects.filter( + ip_addresses__in=Subdomain.objects.filter( + target_domain__id=target_id)).distinct() + elif scan_id: + ips = IpAddress.objects.filter( + ip_addresses__in=Subdomain.objects.filter( + scan_history__id=scan_id)).distinct() + else: + ips = IpAddress.objects.filter( + ip_addresses__in=Subdomain.objects.all()).distinct() - if port: - ips = ips.filter( - ports__in=Port.objects.filter( - number=port)).distinct() + if port: + ips = ips.filter( + ports__in=Port.objects.filter( + number=port)).distinct() - serializer = IpSerializer(ips, many=True) - return Response({"ips": serializer.data}) + serializer = IpSerializer(ips, many=True) + return Response({"ips": serializer.data}) class IpAddressViewSet(viewsets.ModelViewSet): - queryset = Subdomain.objects.none() - serializer_class = IpSubdomainSerializer - ordering = ('name',) - - def get_queryset(self): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - - if scan_id: - self.queryset = Subdomain.objects.filter( - scan_history__id=scan_id).exclude( - ip_addresses__isnull=True).distinct() - else: - self.serializer_class = IpSerializer - self.queryset = IpAddress.objects.all() - return self.queryset - - def paginate_queryset(self, queryset, view=None): - if 'no_page' in self.request.query_params: - return None - return self.paginator.paginate_queryset( - queryset.order_by(*self.ordering), self.request, view=self) + queryset = Subdomain.objects.none() + serializer_class = IpSubdomainSerializer + ordering = ('name',) + + def get_queryset(self): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + + if scan_id: + self.queryset = Subdomain.objects.filter( + scan_history__id=scan_id).exclude( + ip_addresses__isnull=True).distinct() + else: + self.serializer_class = IpSerializer + self.queryset = IpAddress.objects.all() + return self.queryset + + def paginate_queryset(self, queryset, view=None): + if 'no_page' in self.request.query_params: + return None + return self.paginator.paginate_queryset( + queryset.order_by(*self.ordering), self.request, view=self) class SubdomainsViewSet(viewsets.ModelViewSet): - queryset = Subdomain.objects.none() - serializer_class = SubdomainSerializer - ordering = ('name',) - - def get_queryset(self): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - if scan_id: - if 'only_screenshot' in self.request.query_params: - return ( - Subdomain.objects - .filter(scan_history__id=scan_id) - .exclude(screenshot_path__isnull=True)) - return Subdomain.objects.filter(scan_history=scan_id) - - def paginate_queryset(self, queryset, view=None): - if 'no_page' in self.request.query_params: - return None - return self.paginator.paginate_queryset( - queryset.order_by(*self.ordering), 
self.request, view=self) + queryset = Subdomain.objects.none() + serializer_class = SubdomainSerializer + ordering = ('name',) + + def get_queryset(self): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + if scan_id: + if 'only_screenshot' in self.request.query_params: + return ( + Subdomain.objects + .filter(scan_history__id=scan_id) + .exclude(screenshot_path__isnull=True)) + return Subdomain.objects.filter(scan_history=scan_id) + + def paginate_queryset(self, queryset, view=None): + if 'no_page' in self.request.query_params: + return None + return self.paginator.paginate_queryset( + queryset.order_by(*self.ordering), self.request, view=self) class SubdomainChangesViewSet(viewsets.ModelViewSet): - ''' - This viewset will return the Subdomain changes - To get the new subdomains, we will look for ScanHistory with - subdomain_discovery = True and the status of the last scan has to be - successful and calculate difference - ''' - queryset = Subdomain.objects.none() - serializer_class = SubdomainChangesSerializer - ordering = ('name',) - - def get_queryset(self): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - changes = req.query_params.get('changes') - domain_id = safe_int_cast(ScanHistory.objects.filter(id=safe_int_cast(scan_id)).first().domain.id) - scan_history_query = ( - ScanHistory.objects - .filter(domain=domain_id) - .filter(tasks__overlap=['subdomain_discovery']) - .filter(id__lte=scan_id) - .exclude(Q(scan_status=-1) | Q(scan_status=1)) - ) - if scan_history_query.count() > 1: - last_scan = scan_history_query.order_by('-start_scan_date')[1] - scanned_host_q1 = ( - Subdomain.objects - .filter(scan_history__id=scan_id) - .values('name') - ) - scanned_host_q2 = ( - Subdomain.objects - .filter(scan_history__id=last_scan.id) - .values('name') - ) - added_subdomain = scanned_host_q1.difference(scanned_host_q2) - removed_subdomains = scanned_host_q2.difference(scanned_host_q1) - if changes == 'added': - return ( - Subdomain.objects - .filter(scan_history=scan_id) - .filter(name__in=added_subdomain) - .annotate( - change=Value('added', output_field=CharField()) - ) - ) - elif changes == 'removed': - return ( - Subdomain.objects - .filter(scan_history=last_scan) - .filter(name__in=removed_subdomains) - .annotate( - change=Value('removed', output_field=CharField()) - ) - ) - else: - added_subdomain = ( - Subdomain.objects - .filter(scan_history=scan_id) - .filter(name__in=added_subdomain) - .annotate( - change=Value('added', output_field=CharField()) - ) - ) - removed_subdomains = ( - Subdomain.objects - .filter(scan_history=last_scan) - .filter(name__in=removed_subdomains) - .annotate( - change=Value('removed', output_field=CharField()) - ) - ) - changes = added_subdomain.union(removed_subdomains) - return changes - return self.queryset - - def paginate_queryset(self, queryset, view=None): - if 'no_page' in self.request.query_params: - return None - return self.paginator.paginate_queryset( - queryset.order_by(*self.ordering), self.request, view=self) + ''' + This viewset will return the Subdomain changes + To get the new subdomains, we will look for ScanHistory with + subdomain_discovery = True and the status of the last scan has to be + successful and calculate difference + ''' + queryset = Subdomain.objects.none() + serializer_class = SubdomainChangesSerializer + ordering = ('name',) + + def get_queryset(self): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + changes = 
req.query_params.get('changes') + domain_id = safe_int_cast(ScanHistory.objects.filter(id=safe_int_cast(scan_id)).first().domain.id) + scan_history_query = ( + ScanHistory.objects + .filter(domain=domain_id) + .filter(tasks__overlap=['subdomain_discovery']) + .filter(id__lte=scan_id) + .exclude(Q(scan_status=-1) | Q(scan_status=1)) + ) + if scan_history_query.count() > 1: + last_scan = scan_history_query.order_by('-start_scan_date')[1] + scanned_host_q1 = ( + Subdomain.objects + .filter(scan_history__id=scan_id) + .values('name') + ) + scanned_host_q2 = ( + Subdomain.objects + .filter(scan_history__id=last_scan.id) + .values('name') + ) + added_subdomain = scanned_host_q1.difference(scanned_host_q2) + removed_subdomains = scanned_host_q2.difference(scanned_host_q1) + if changes == 'added': + return ( + Subdomain.objects + .filter(scan_history=scan_id) + .filter(name__in=added_subdomain) + .annotate( + change=Value('added', output_field=CharField()) + ) + ) + elif changes == 'removed': + return ( + Subdomain.objects + .filter(scan_history=last_scan) + .filter(name__in=removed_subdomains) + .annotate( + change=Value('removed', output_field=CharField()) + ) + ) + else: + added_subdomain = ( + Subdomain.objects + .filter(scan_history=scan_id) + .filter(name__in=added_subdomain) + .annotate( + change=Value('added', output_field=CharField()) + ) + ) + removed_subdomains = ( + Subdomain.objects + .filter(scan_history=last_scan) + .filter(name__in=removed_subdomains) + .annotate( + change=Value('removed', output_field=CharField()) + ) + ) + changes = added_subdomain.union(removed_subdomains) + return changes + return self.queryset + + def paginate_queryset(self, queryset, view=None): + if 'no_page' in self.request.query_params: + return None + return self.paginator.paginate_queryset( + queryset.order_by(*self.ordering), self.request, view=self) class EndPointChangesViewSet(viewsets.ModelViewSet): - ''' - This viewset will return the EndPoint changes - ''' - queryset = EndPoint.objects.none() - serializer_class = EndPointChangesSerializer - ordering = ('http_url',) - - def get_queryset(self): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - changes = req.query_params.get('changes') - domain_id = safe_int_cast(ScanHistory.objects.filter(id=scan_id).first().domain.id) - scan_history = ( - ScanHistory.objects - .filter(domain=domain_id) - .filter(tasks__overlap=['fetch_url']) - .filter(id__lte=scan_id) - .filter(scan_status=2) - ) - if scan_history.count() > 1: - last_scan = scan_history.order_by('-start_scan_date')[1] - scanned_host_q1 = ( - EndPoint.objects - .filter(scan_history__id=scan_id) - .values('http_url') - ) - scanned_host_q2 = ( - EndPoint.objects - .filter(scan_history__id=last_scan.id) - .values('http_url') - ) - added_endpoints = scanned_host_q1.difference(scanned_host_q2) - removed_endpoints = scanned_host_q2.difference(scanned_host_q1) - if changes == 'added': - return ( - EndPoint.objects - .filter(scan_history=scan_id) - .filter(http_url__in=added_endpoints) - .annotate(change=Value('added', output_field=CharField())) - ) - elif changes == 'removed': - return ( - EndPoint.objects - .filter(scan_history=last_scan) - .filter(http_url__in=removed_endpoints) - .annotate(change=Value('removed', output_field=CharField())) - ) - else: - added_endpoints = ( - EndPoint.objects - .filter(scan_history=scan_id) - .filter(http_url__in=added_endpoints) - .annotate(change=Value('added', output_field=CharField())) - ) - removed_endpoints = ( - EndPoint.objects - 
.filter(scan_history=last_scan) - .filter(http_url__in=removed_endpoints) - .annotate(change=Value('removed', output_field=CharField())) - ) - changes = added_endpoints.union(removed_endpoints) - return changes - return self.queryset - - def paginate_queryset(self, queryset, view=None): - if 'no_page' in self.request.query_params: - return None - return self.paginator.paginate_queryset( - queryset.order_by(*self.ordering), self.request, view=self) + ''' + This viewset will return the EndPoint changes + ''' + queryset = EndPoint.objects.none() + serializer_class = EndPointChangesSerializer + ordering = ('http_url',) + + def get_queryset(self): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + changes = req.query_params.get('changes') + domain_id = safe_int_cast(ScanHistory.objects.filter(id=scan_id).first().domain.id) + scan_history = ( + ScanHistory.objects + .filter(domain=domain_id) + .filter(tasks__overlap=['fetch_url']) + .filter(id__lte=scan_id) + .filter(scan_status=2) + ) + if scan_history.count() > 1: + last_scan = scan_history.order_by('-start_scan_date')[1] + scanned_host_q1 = ( + EndPoint.objects + .filter(scan_history__id=scan_id) + .values('http_url') + ) + scanned_host_q2 = ( + EndPoint.objects + .filter(scan_history__id=last_scan.id) + .values('http_url') + ) + added_endpoints = scanned_host_q1.difference(scanned_host_q2) + removed_endpoints = scanned_host_q2.difference(scanned_host_q1) + if changes == 'added': + return ( + EndPoint.objects + .filter(scan_history=scan_id) + .filter(http_url__in=added_endpoints) + .annotate(change=Value('added', output_field=CharField())) + ) + elif changes == 'removed': + return ( + EndPoint.objects + .filter(scan_history=last_scan) + .filter(http_url__in=removed_endpoints) + .annotate(change=Value('removed', output_field=CharField())) + ) + else: + added_endpoints = ( + EndPoint.objects + .filter(scan_history=scan_id) + .filter(http_url__in=added_endpoints) + .annotate(change=Value('added', output_field=CharField())) + ) + removed_endpoints = ( + EndPoint.objects + .filter(scan_history=last_scan) + .filter(http_url__in=removed_endpoints) + .annotate(change=Value('removed', output_field=CharField())) + ) + changes = added_endpoints.union(removed_endpoints) + return changes + return self.queryset + + def paginate_queryset(self, queryset, view=None): + if 'no_page' in self.request.query_params: + return None + return self.paginator.paginate_queryset( + queryset.order_by(*self.ordering), self.request, view=self) class InterestingSubdomainViewSet(viewsets.ModelViewSet): - queryset = Subdomain.objects.none() - serializer_class = SubdomainSerializer - ordering = ('name',) - - def get_queryset(self): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - domain_id = safe_int_cast(req.query_params.get('target_id')) - - if 'only_subdomains' in self.request.query_params: - self.serializer_class = InterestingSubdomainSerializer - - if scan_id: - self.queryset = get_interesting_subdomains(scan_history=scan_id) - elif domain_id: - self.queryset = get_interesting_subdomains(domain_id=domain_id) - else: - self.queryset = get_interesting_subdomains() - - return self.queryset - - def filter_queryset(self, qs): - qs = self.queryset.filter() - search_value = self.request.GET.get(u'search[value]', None) - _order_col = self.request.GET.get(u'order[0][column]', None) - _order_direction = self.request.GET.get(u'order[0][dir]', None) - order_col = 'content_length' - if _order_col == '0': - order_col = 
'name' - elif _order_col == '1': - order_col = 'page_title' - elif _order_col == '2': - order_col = 'http_status' - elif _order_col == '3': - order_col = 'content_length' - - if _order_direction == 'desc': - order_col = f'-{order_col}' - - if search_value: - qs = self.queryset.filter( - Q(name__icontains=search_value) | - Q(page_title__icontains=search_value) | - Q(http_status__icontains=search_value) - ) - return qs.order_by(order_col) - - def paginate_queryset(self, queryset, view=None): - if 'no_page' in self.request.query_params: - return None - return self.paginator.paginate_queryset( - queryset.order_by(*self.ordering), self.request, view=self) + queryset = Subdomain.objects.none() + serializer_class = SubdomainSerializer + ordering = ('name',) + + def get_queryset(self): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + domain_id = safe_int_cast(req.query_params.get('target_id')) + + if 'only_subdomains' in self.request.query_params: + self.serializer_class = InterestingSubdomainSerializer + + if scan_id: + self.queryset = get_interesting_subdomains(scan_history=scan_id) + elif domain_id: + self.queryset = get_interesting_subdomains(domain_id=domain_id) + else: + self.queryset = get_interesting_subdomains() + + return self.queryset + + def filter_queryset(self, qs): + qs = self.queryset.filter() + search_value = self.request.GET.get(u'search[value]', None) + _order_col = self.request.GET.get(u'order[0][column]', None) + _order_direction = self.request.GET.get(u'order[0][dir]', None) + order_col = 'content_length' + if _order_col == '0': + order_col = 'name' + elif _order_col == '1': + order_col = 'page_title' + elif _order_col == '2': + order_col = 'http_status' + elif _order_col == '3': + order_col = 'content_length' + + if _order_direction == 'desc': + order_col = f'-{order_col}' + + if search_value: + qs = self.queryset.filter( + Q(name__icontains=search_value) | + Q(page_title__icontains=search_value) | + Q(http_status__icontains=search_value) + ) + return qs.order_by(order_col) + + def paginate_queryset(self, queryset, view=None): + if 'no_page' in self.request.query_params: + return None + return self.paginator.paginate_queryset( + queryset.order_by(*self.ordering), self.request, view=self) class InterestingEndpointViewSet(viewsets.ModelViewSet): - queryset = EndPoint.objects.none() - serializer_class = EndpointSerializer - ordering = ('http_url',) - - def get_queryset(self): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - target_id = safe_int_cast(req.query_params.get('target_id')) - - if 'only_endpoints' in self.request.query_params: - self.serializer_class = InterestingEndPointSerializer - if scan_id: - return get_interesting_endpoints(scan_history=scan_id) - elif target_id: - return get_interesting_endpoints(target=target_id) - else: - return get_interesting_endpoints() - - def paginate_queryset(self, queryset, view=None): - if 'no_page' in self.request.query_params: - return None - return self.paginator.paginate_queryset( - queryset.order_by(*self.ordering), self.request, view=self) + queryset = EndPoint.objects.none() + serializer_class = EndpointSerializer + ordering = ('http_url',) + + def get_queryset(self): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + target_id = safe_int_cast(req.query_params.get('target_id')) + + if 'only_endpoints' in self.request.query_params: + self.serializer_class = InterestingEndPointSerializer + if scan_id: + return 
get_interesting_endpoints(scan_history=scan_id) + elif target_id: + return get_interesting_endpoints(target=target_id) + else: + return get_interesting_endpoints() + + def paginate_queryset(self, queryset, view=None): + if 'no_page' in self.request.query_params: + return None + return self.paginator.paginate_queryset( + queryset.order_by(*self.ordering), self.request, view=self) class SubdomainDatatableViewSet(viewsets.ModelViewSet): - queryset = Subdomain.objects.none() - serializer_class = SubdomainSerializer - - def get_queryset(self): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - target_id = safe_int_cast(req.query_params.get('target_id')) - url_query = req.query_params.get('query_param') - ip_address = req.query_params.get('ip_address') - name = req.query_params.get('name') - project = req.query_params.get('project') - - subdomains = Subdomain.objects.filter(target_domain__project__slug=project) - - if 'is_important' in req.query_params: - subdomains = subdomains.filter(is_important=True) - - if target_id: - self.queryset = ( - subdomains - .filter(target_domain__id=target_id) - .distinct() - ) - elif url_query: - self.queryset = ( - subdomains - .filter(Q(target_domain__name=url_query)) - .distinct() - ) - elif scan_id: - self.queryset = ( - subdomains - .filter(scan_history__id=scan_id) - .distinct() - ) - else: - self.queryset = subdomains.distinct() - - if 'only_directory' in req.query_params: - self.queryset = self.queryset.exclude(directories__isnull=True) - - if ip_address: - self.queryset = self.queryset.filter(ip_addresses__address__icontains=ip_address) - - if name: - self.queryset = self.queryset.filter(name=name) - - return self.queryset - - def filter_queryset(self, qs): - qs = self.queryset.filter() - search_value = self.request.GET.get(u'search[value]', None) - _order_col = self.request.GET.get(u'order[0][column]', None) - _order_direction = self.request.GET.get(u'order[0][dir]', None) - order_col = 'content_length' - if _order_col == '0': - order_col = 'checked' - elif _order_col == '1': - order_col = 'name' - elif _order_col == '4': - order_col = 'http_status' - elif _order_col == '5': - order_col = 'page_title' - elif _order_col == '8': - order_col = 'content_length' - elif _order_col == '10': - order_col = 'response_time' - if _order_direction == 'desc': - order_col = f'-{order_col}' - # if the search query is separated by = means, it is a specific lookup - # divide the search query into two half and lookup - if search_value: - operators = ['=', '&', '|', '>', '<', '!'] - if any(x in search_value for x in operators): - if '&' in search_value: - complex_query = search_value.split('&') - for query in complex_query: - if query.strip(): - qs = qs & self.special_lookup(query.strip()) - elif '|' in search_value: - qs = Subdomain.objects.none() - complex_query = search_value.split('|') - for query in complex_query: - if query.strip(): - qs = self.special_lookup(query.strip()) | qs - else: - qs = self.special_lookup(search_value) - else: - qs = self.general_lookup(search_value) - return qs.order_by(order_col) - - def general_lookup(self, search_value): - qs = self.queryset.filter( - Q(name__icontains=search_value) | - Q(cname__icontains=search_value) | - Q(http_status__icontains=search_value) | - Q(page_title__icontains=search_value) | - Q(http_url__icontains=search_value) | - Q(technologies__name__icontains=search_value) | - Q(webserver__icontains=search_value) | - Q(ip_addresses__address__icontains=search_value) | - 
Q(ip_addresses__ports__number__icontains=search_value) | - Q(ip_addresses__ports__service_name__icontains=search_value) | - Q(ip_addresses__ports__description__icontains=search_value) - ) - - if 'only_directory' in self.request.query_params: - qs = qs | self.queryset.filter( - Q(directories__directory_files__name__icontains=search_value) - ) - - return qs - - def special_lookup(self, search_value): - qs = self.queryset.filter() - if '=' in search_value: - search_param = search_value.split("=") - title = search_param[0].lower().strip() - content = search_param[1].lower().strip() - if 'name' in title: - qs = self.queryset.filter(name__icontains=content) - elif 'page_title' in title: - qs = self.queryset.filter(page_title__icontains=content) - elif 'http_url' in title: - qs = self.queryset.filter(http_url__icontains=content) - elif 'content_type' in title: - qs = self.queryset.filter(content_type__icontains=content) - elif 'cname' in title: - qs = self.queryset.filter(cname__icontains=content) - elif 'webserver' in title: - qs = self.queryset.filter(webserver__icontains=content) - elif 'ip_addresses' in title: - qs = self.queryset.filter( - ip_addresses__address__icontains=content) - elif 'is_important' in title: - if 'true' in content.lower(): - qs = self.queryset.filter(is_important=True) - else: - qs = self.queryset.filter(is_important=False) - elif 'port' in title: - qs = ( - self.queryset - .filter(ip_addresses__ports__number__icontains=content) - | - self.queryset - .filter(ip_addresses__ports__service_name__icontains=content) - | - self.queryset - .filter(ip_addresses__ports__description__icontains=content) - ) - elif 'technology' in title: - qs = ( - self.queryset - .filter(technologies__name__icontains=content) - ) - elif 'http_status' in title: - try: - int_http_status = int(content) - qs = self.queryset.filter(http_status=int_http_status) - except Exception as e: - print(e) - elif 'content_length' in title: - try: - int_http_status = int(content) - qs = self.queryset.filter(content_length=int_http_status) - except Exception as e: - print(e) - - elif '>' in search_value: - search_param = search_value.split(">") - title = search_param[0].lower().strip() - content = search_param[1].lower().strip() - if 'http_status' in title: - try: - int_val = int(content) - qs = self.queryset.filter(http_status__gt=int_val) - except Exception as e: - print(e) - elif 'content_length' in title: - try: - int_val = int(content) - qs = self.queryset.filter(content_length__gt=int_val) - except Exception as e: - print(e) - - elif '<' in search_value: - search_param = search_value.split("<") - title = search_param[0].lower().strip() - content = search_param[1].lower().strip() - if 'http_status' in title: - try: - int_val = int(content) - qs = self.queryset.filter(http_status__lt=int_val) - except Exception as e: - print(e) - elif 'content_length' in title: - try: - int_val = int(content) - qs = self.queryset.filter(content_length__lt=int_val) - except Exception as e: - print(e) - - elif '!' 
in search_value: - search_param = search_value.split("!") - title = search_param[0].lower().strip() - content = search_param[1].lower().strip() - if 'name' in title: - qs = self.queryset.exclude(name__icontains=content) - elif 'page_title' in title: - qs = self.queryset.exclude(page_title__icontains=content) - elif 'http_url' in title: - qs = self.queryset.exclude(http_url__icontains=content) - elif 'content_type' in title: - qs = ( - self.queryset - .exclude(content_type__icontains=content) - ) - elif 'cname' in title: - qs = self.queryset.exclude(cname__icontains=content) - elif 'webserver' in title: - qs = self.queryset.exclude(webserver__icontains=content) - elif 'ip_addresses' in title: - qs = self.queryset.exclude( - ip_addresses__address__icontains=content) - elif 'port' in title: - qs = ( - self.queryset - .exclude(ip_addresses__ports__number__icontains=content) - | - self.queryset - .exclude(ip_addresses__ports__service_name__icontains=content) - | - self.queryset - .exclude(ip_addresses__ports__description__icontains=content) - ) - elif 'technology' in title: - qs = ( - self.queryset - .exclude(technologies__name__icontains=content) - ) - elif 'http_status' in title: - try: - int_http_status = int(content) - qs = self.queryset.exclude(http_status=int_http_status) - except Exception as e: - print(e) - elif 'content_length' in title: - try: - int_http_status = int(content) - qs = self.queryset.exclude(content_length=int_http_status) - except Exception as e: - print(e) - - return qs + queryset = Subdomain.objects.none() + serializer_class = SubdomainSerializer + + def get_queryset(self): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + target_id = safe_int_cast(req.query_params.get('target_id')) + url_query = req.query_params.get('query_param') + ip_address = req.query_params.get('ip_address') + name = req.query_params.get('name') + project = req.query_params.get('project') + + subdomains = Subdomain.objects.filter(target_domain__project__slug=project) + + if 'is_important' in req.query_params: + subdomains = subdomains.filter(is_important=True) + + if target_id: + self.queryset = ( + subdomains + .filter(target_domain__id=target_id) + .distinct() + ) + elif url_query: + self.queryset = ( + subdomains + .filter(Q(target_domain__name=url_query)) + .distinct() + ) + elif scan_id: + self.queryset = ( + subdomains + .filter(scan_history__id=scan_id) + .distinct() + ) + else: + self.queryset = subdomains.distinct() + + if 'only_directory' in req.query_params: + self.queryset = self.queryset.exclude(directories__isnull=True) + + if ip_address: + self.queryset = self.queryset.filter(ip_addresses__address__icontains=ip_address) + + if name: + self.queryset = self.queryset.filter(name=name) + + return self.queryset + + def filter_queryset(self, qs): + qs = self.queryset.filter() + search_value = self.request.GET.get(u'search[value]', None) + _order_col = self.request.GET.get(u'order[0][column]', None) + _order_direction = self.request.GET.get(u'order[0][dir]', None) + order_col = 'content_length' + if _order_col == '0': + order_col = 'checked' + elif _order_col == '1': + order_col = 'name' + elif _order_col == '4': + order_col = 'http_status' + elif _order_col == '5': + order_col = 'page_title' + elif _order_col == '8': + order_col = 'content_length' + elif _order_col == '10': + order_col = 'response_time' + if _order_direction == 'desc': + order_col = f'-{order_col}' + # if the search query is separated by = means, it is a specific lookup + # divide the 
search query into two half and lookup + if search_value: + operators = ['=', '&', '|', '>', '<', '!'] + if any(x in search_value for x in operators): + if '&' in search_value: + complex_query = search_value.split('&') + for query in complex_query: + if query.strip(): + qs = qs & self.special_lookup(query.strip()) + elif '|' in search_value: + qs = Subdomain.objects.none() + complex_query = search_value.split('|') + for query in complex_query: + if query.strip(): + qs = self.special_lookup(query.strip()) | qs + else: + qs = self.special_lookup(search_value) + else: + qs = self.general_lookup(search_value) + return qs.order_by(order_col) + + def general_lookup(self, search_value): + qs = self.queryset.filter( + Q(name__icontains=search_value) | + Q(cname__icontains=search_value) | + Q(http_status__icontains=search_value) | + Q(page_title__icontains=search_value) | + Q(http_url__icontains=search_value) | + Q(technologies__name__icontains=search_value) | + Q(webserver__icontains=search_value) | + Q(ip_addresses__address__icontains=search_value) | + Q(ip_addresses__ports__number__icontains=search_value) | + Q(ip_addresses__ports__service_name__icontains=search_value) | + Q(ip_addresses__ports__description__icontains=search_value) + ) + + if 'only_directory' in self.request.query_params: + qs = qs | self.queryset.filter( + Q(directories__directory_files__name__icontains=search_value) + ) + + return qs + + def special_lookup(self, search_value): + qs = self.queryset.filter() + if '=' in search_value: + search_param = search_value.split("=") + title = search_param[0].lower().strip() + content = search_param[1].lower().strip() + if 'name' in title: + qs = self.queryset.filter(name__icontains=content) + elif 'page_title' in title: + qs = self.queryset.filter(page_title__icontains=content) + elif 'http_url' in title: + qs = self.queryset.filter(http_url__icontains=content) + elif 'content_type' in title: + qs = self.queryset.filter(content_type__icontains=content) + elif 'cname' in title: + qs = self.queryset.filter(cname__icontains=content) + elif 'webserver' in title: + qs = self.queryset.filter(webserver__icontains=content) + elif 'ip_addresses' in title: + qs = self.queryset.filter( + ip_addresses__address__icontains=content) + elif 'is_important' in title: + if 'true' in content.lower(): + qs = self.queryset.filter(is_important=True) + else: + qs = self.queryset.filter(is_important=False) + elif 'port' in title: + qs = ( + self.queryset + .filter(ip_addresses__ports__number__icontains=content) + | + self.queryset + .filter(ip_addresses__ports__service_name__icontains=content) + | + self.queryset + .filter(ip_addresses__ports__description__icontains=content) + ) + elif 'technology' in title: + qs = ( + self.queryset + .filter(technologies__name__icontains=content) + ) + elif 'http_status' in title: + try: + int_http_status = int(content) + qs = self.queryset.filter(http_status=int_http_status) + except Exception as e: + print(e) + elif 'content_length' in title: + try: + int_http_status = int(content) + qs = self.queryset.filter(content_length=int_http_status) + except Exception as e: + print(e) + + elif '>' in search_value: + search_param = search_value.split(">") + title = search_param[0].lower().strip() + content = search_param[1].lower().strip() + if 'http_status' in title: + try: + int_val = int(content) + qs = self.queryset.filter(http_status__gt=int_val) + except Exception as e: + print(e) + elif 'content_length' in title: + try: + int_val = int(content) + qs = 
self.queryset.filter(content_length__gt=int_val) + except Exception as e: + print(e) + + elif '<' in search_value: + search_param = search_value.split("<") + title = search_param[0].lower().strip() + content = search_param[1].lower().strip() + if 'http_status' in title: + try: + int_val = int(content) + qs = self.queryset.filter(http_status__lt=int_val) + except Exception as e: + print(e) + elif 'content_length' in title: + try: + int_val = int(content) + qs = self.queryset.filter(content_length__lt=int_val) + except Exception as e: + print(e) + + elif '!' in search_value: + search_param = search_value.split("!") + title = search_param[0].lower().strip() + content = search_param[1].lower().strip() + if 'name' in title: + qs = self.queryset.exclude(name__icontains=content) + elif 'page_title' in title: + qs = self.queryset.exclude(page_title__icontains=content) + elif 'http_url' in title: + qs = self.queryset.exclude(http_url__icontains=content) + elif 'content_type' in title: + qs = ( + self.queryset + .exclude(content_type__icontains=content) + ) + elif 'cname' in title: + qs = self.queryset.exclude(cname__icontains=content) + elif 'webserver' in title: + qs = self.queryset.exclude(webserver__icontains=content) + elif 'ip_addresses' in title: + qs = self.queryset.exclude( + ip_addresses__address__icontains=content) + elif 'port' in title: + qs = ( + self.queryset + .exclude(ip_addresses__ports__number__icontains=content) + | + self.queryset + .exclude(ip_addresses__ports__service_name__icontains=content) + | + self.queryset + .exclude(ip_addresses__ports__description__icontains=content) + ) + elif 'technology' in title: + qs = ( + self.queryset + .exclude(technologies__name__icontains=content) + ) + elif 'http_status' in title: + try: + int_http_status = int(content) + qs = self.queryset.exclude(http_status=int_http_status) + except Exception as e: + print(e) + elif 'content_length' in title: + try: + int_http_status = int(content) + qs = self.queryset.exclude(content_length=int_http_status) + except Exception as e: + print(e) + + return qs class ListActivityLogsViewSet(viewsets.ModelViewSet): - serializer_class = CommandSerializer - queryset = Command.objects.none() - def get_queryset(self): - req = self.request - activity_id = safe_int_cast(req.query_params.get('activity_id')) - self.queryset = Command.objects.filter(activity__id=activity_id).order_by('id') - return self.queryset + serializer_class = CommandSerializer + queryset = Command.objects.none() + def get_queryset(self): + req = self.request + activity_id = safe_int_cast(req.query_params.get('activity_id')) + self.queryset = Command.objects.filter(activity__id=activity_id).order_by('id') + return self.queryset class ListScanLogsViewSet(viewsets.ModelViewSet): - serializer_class = CommandSerializer - queryset = Command.objects.none() - def get_queryset(self): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - self.queryset = Command.objects.filter(scan_history__id=scan_id).order_by('id') - return self.queryset + serializer_class = CommandSerializer + queryset = Command.objects.none() + def get_queryset(self): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_id')) + self.queryset = Command.objects.filter(scan_history__id=scan_id).order_by('id') + return self.queryset class ListEndpoints(APIView): - def get(self, request, format=None): - req = self.request + def get(self, request, format=None): + req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_id')) - 
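
# Editor's note (illustrative only, not part of the patch): the filter_queryset /
# special_lookup pair shown above parses the DataTables "search[value]" GET
# parameter as "<field><op><value>" clauses, optionally chained with '&' (all
# clauses must match) or '|' (any clause may match). A minimal sketch of search
# strings this view would accept, using only fields and operators that appear in
# the code above:
#
#   name=admin                   -> subdomain name contains "admin"
#   http_status>300              -> HTTP status greater than 300
#   content_length<1024          -> response body smaller than 1024 bytes
#   technology!wordpress         -> exclude subdomains where WordPress was detected
#   port=22                      -> an attached IP address exposes port 22
#   name=dev&http_status=200     -> both clauses must hold ('&' is AND)
#   port=22|port=3389            -> either clause may hold ('|' is OR)
#
# These strings are passed verbatim as the DataTables search value, e.g.
# request.GET['search[value]'] == 'http_status=200&technology=nginx'.
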
target_id = safe_int_cast(req.query_params.get('target_id')) - subdomain_name = req.query_params.get('subdomain_name') - pattern = req.query_params.get('pattern') + scan_id = safe_int_cast(req.query_params.get('scan_id')) + target_id = safe_int_cast(req.query_params.get('target_id')) + subdomain_name = req.query_params.get('subdomain_name') + pattern = req.query_params.get('pattern') - if scan_id: - endpoints = ( - EndPoint.objects - .filter(scan_history__id=scan_id) - ) - elif target_id: - endpoints = ( - EndPoint.objects - .filter(target_domain__id=target_id) - .distinct() - ) - else: - endpoints = EndPoint.objects.all() + if scan_id: + endpoints = ( + EndPoint.objects + .filter(scan_history__id=scan_id) + ) + elif target_id: + endpoints = ( + EndPoint.objects + .filter(target_domain__id=target_id) + .distinct() + ) + else: + endpoints = EndPoint.objects.all() - if subdomain_name: - endpoints = endpoints.filter(subdomain__name=subdomain_name) + if subdomain_name: + endpoints = endpoints.filter(subdomain__name=subdomain_name) - if pattern: - endpoints = endpoints.filter(matched_gf_patterns__icontains=pattern) + if pattern: + endpoints = endpoints.filter(matched_gf_patterns__icontains=pattern) - if 'only_urls' in req.query_params: - endpoints_serializer = EndpointOnlyURLsSerializer(endpoints, many=True) + if 'only_urls' in req.query_params: + endpoints_serializer = EndpointOnlyURLsSerializer(endpoints, many=True) - else: - endpoints_serializer = EndpointSerializer(endpoints, many=True) + else: + endpoints_serializer = EndpointSerializer(endpoints, many=True) - return Response({'endpoints': endpoints_serializer.data}) + return Response({'endpoints': endpoints_serializer.data}) class EndPointViewSet(viewsets.ModelViewSet): - queryset = EndPoint.objects.none() - serializer_class = EndpointSerializer - - def get_queryset(self): - req = self.request - - scan_id = safe_int_cast(req.query_params.get('scan_history')) - target_id = safe_int_cast(req.query_params.get('target_id')) - url_query = req.query_params.get('query_param') - subdomain_id = safe_int_cast(req.query_params.get('subdomain_id')) - project = req.query_params.get('project') - - endpoints_obj = EndPoint.objects.filter(scan_history__domain__project__slug=project) - - gf_tag = req.query_params.get( - 'gf_tag') if 'gf_tag' in req.query_params else None - - if scan_id: - endpoints = ( - endpoints_obj - .filter(scan_history__id=scan_id) - .distinct() - .order_by('id') - ) - else: - endpoints = endpoints_obj.distinct().order_by('id') - - if url_query: - endpoints = ( - endpoints - .filter(Q(target_domain__name=url_query)) - .distinct() - .order_by('id') - ) - - if gf_tag: - endpoints = endpoints.filter(matched_gf_patterns__icontains=gf_tag) - - if target_id: - endpoints = endpoints.filter(target_domain__id=target_id) - - if subdomain_id: - endpoints = endpoints.filter(subdomain__id=subdomain_id) - - if 'only_urls' in req.query_params: - self.serializer_class = EndpointOnlyURLsSerializer - - # Filter status code 404 and 0 - # endpoints = ( - # endpoints - # .exclude(http_status=0) - # .exclude(http_status=None) - # .exclude(http_status=404) - # ) - - self.queryset = endpoints - - return self.queryset - - def filter_queryset(self, qs): - qs = self.queryset.filter() - search_value = self.request.GET.get(u'search[value]', None) - _order_col = self.request.GET.get(u'order[0][column]', None) - _order_direction = self.request.GET.get(u'order[0][dir]', None) - if search_value or _order_col or _order_direction: - order_col = 'content_length' - if 
_order_col == '1': - order_col = 'http_url' - elif _order_col == '2': - order_col = 'http_status' - elif _order_col == '3': - order_col = 'page_title' - elif _order_col == '4': - order_col = 'matched_gf_patterns' - elif _order_col == '5': - order_col = 'content_type' - elif _order_col == '6': - order_col = 'content_length' - elif _order_col == '7': - order_col = 'techs' - elif _order_col == '8': - order_col = 'webserver' - elif _order_col == '9': - order_col = 'response_time' - if _order_direction == 'desc': - order_col = f'-{order_col}' - # if the search query is separated by = means, it is a specific lookup - # divide the search query into two half and lookup - if '=' in search_value or '&' in search_value or '|' in search_value or '>' in search_value or '<' in search_value or '!' in search_value: - if '&' in search_value: - complex_query = search_value.split('&') - for query in complex_query: - if query.strip(): - qs = qs & self.special_lookup(query.strip()) - elif '|' in search_value: - qs = Subdomain.objects.none() - complex_query = search_value.split('|') - for query in complex_query: - if query.strip(): - qs = self.special_lookup(query.strip()) | qs - else: - qs = self.special_lookup(search_value) - else: - qs = self.general_lookup(search_value) - return qs.order_by(order_col) - return qs - - def general_lookup(self, search_value): - return \ - self.queryset.filter(Q(http_url__icontains=search_value) | - Q(page_title__icontains=search_value) | - Q(http_status__icontains=search_value) | - Q(content_type__icontains=search_value) | - Q(webserver__icontains=search_value) | - Q(techs__name__icontains=search_value) | - Q(content_type__icontains=search_value) | - Q(matched_gf_patterns__icontains=search_value)) - - def special_lookup(self, search_value): - qs = self.queryset.filter() - if '=' in search_value: - search_param = search_value.split("=") - lookup_title = search_param[0].lower().strip() - lookup_content = search_param[1].lower().strip() - if 'http_url' in lookup_title: - qs = self.queryset.filter(http_url__icontains=lookup_content) - elif 'page_title' in lookup_title: - qs = ( - self.queryset - .filter(page_title__icontains=lookup_content) - ) - elif 'content_type' in lookup_title: - qs = ( - self.queryset - .filter(content_type__icontains=lookup_content) - ) - elif 'webserver' in lookup_title: - qs = self.queryset.filter(webserver__icontains=lookup_content) - elif 'technology' in lookup_title: - qs = ( - self.queryset - .filter(techs__name__icontains=lookup_content) - ) - elif 'gf_pattern' in lookup_title: - qs = ( - self.queryset - .filter(matched_gf_patterns__icontains=lookup_content) - ) - elif 'http_status' in lookup_title: - try: - int_http_status = int(lookup_content) - qs = self.queryset.filter(http_status=int_http_status) - except Exception as e: - print(e) - elif 'content_length' in lookup_title: - try: - int_http_status = int(lookup_content) - qs = self.queryset.filter(content_length=int_http_status) - except Exception as e: - print(e) - elif '>' in search_value: - search_param = search_value.split(">") - lookup_title = search_param[0].lower().strip() - lookup_content = search_param[1].lower().strip() - if 'http_status' in lookup_title: - try: - int_val = int(lookup_content) - qs = ( - self.queryset - .filter(http_status__gt=int_val) - ) - except Exception as e: - print(e) - elif 'content_length' in lookup_title: - try: - int_val = int(lookup_content) - qs = self.queryset.filter(content_length__gt=int_val) - except Exception as e: - print(e) - elif '<' in search_value: 
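
# Editor's note (illustrative sketch, not part of the patch): when a compound
# search value such as "http_status=200&technology=nginx" reaches
# filter_queryset, each clause is resolved through special_lookup() and the
# resulting QuerySets are combined with Python's & and | operators, which
# Django turns into a single query ANDing or ORing the underlying conditions.
# A minimal standalone illustration, assuming a configured Django project and
# that the Subdomain model is importable from startScan.models:
#
#   from startScan.models import Subdomain
#
#   status_200 = Subdomain.objects.filter(http_status=200)
#   nginx = Subdomain.objects.filter(technologies__name__icontains='nginx')
#
#   both = status_200 & nginx    # rows matching both clauses ('&' search)
#   either = status_200 | nginx  # rows matching either clause ('|' search)
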
- search_param = search_value.split("<") - lookup_title = search_param[0].lower().strip() - lookup_content = search_param[1].lower().strip() - if 'http_status' in lookup_title: - try: - int_val = int(lookup_content) - qs = self.queryset.filter(http_status__lt=int_val) - except Exception as e: - print(e) - elif 'content_length' in lookup_title: - try: - int_val = int(lookup_content) - qs = self.queryset.filter(content_length__lt=int_val) - except Exception as e: - print(e) - elif '!' in search_value: - search_param = search_value.split("!") - lookup_title = search_param[0].lower().strip() - lookup_content = search_param[1].lower().strip() - if 'http_url' in lookup_title: - qs = ( - self.queryset - .exclude(http_url__icontains=lookup_content) - ) - elif 'page_title' in lookup_title: - qs = ( - self.queryset - .exclude(page_title__icontains=lookup_content) - ) - elif 'content_type' in lookup_title: - qs = ( - self.queryset - .exclude(content_type__icontains=lookup_content) - ) - elif 'webserver' in lookup_title: - qs = ( - self.queryset - .exclude(webserver__icontains=lookup_content) - ) - elif 'technology' in lookup_title: - qs = ( - self.queryset - .exclude(techs__name__icontains=lookup_content) - ) - elif 'gf_pattern' in lookup_title: - qs = ( - self.queryset - .exclude(matched_gf_patterns__icontains=lookup_content) - ) - elif 'http_status' in lookup_title: - try: - int_http_status = int(lookup_content) - qs = self.queryset.exclude(http_status=int_http_status) - except Exception as e: - print(e) - elif 'content_length' in lookup_title: - try: - int_http_status = int(lookup_content) - qs = self.queryset.exclude(content_length=int_http_status) - except Exception as e: - print(e) - return qs + queryset = EndPoint.objects.none() + serializer_class = EndpointSerializer + + def get_queryset(self): + req = self.request + + scan_id = safe_int_cast(req.query_params.get('scan_history')) + target_id = safe_int_cast(req.query_params.get('target_id')) + url_query = req.query_params.get('query_param') + subdomain_id = safe_int_cast(req.query_params.get('subdomain_id')) + project = req.query_params.get('project') + + endpoints_obj = EndPoint.objects.filter(scan_history__domain__project__slug=project) + + gf_tag = req.query_params.get( + 'gf_tag') if 'gf_tag' in req.query_params else None + + if scan_id: + endpoints = ( + endpoints_obj + .filter(scan_history__id=scan_id) + .distinct() + .order_by('id') + ) + else: + endpoints = endpoints_obj.distinct().order_by('id') + + if url_query: + endpoints = ( + endpoints + .filter(Q(target_domain__name=url_query)) + .distinct() + .order_by('id') + ) + + if gf_tag: + endpoints = endpoints.filter(matched_gf_patterns__icontains=gf_tag) + + if target_id: + endpoints = endpoints.filter(target_domain__id=target_id) + + if subdomain_id: + endpoints = endpoints.filter(subdomain__id=subdomain_id) + + if 'only_urls' in req.query_params: + self.serializer_class = EndpointOnlyURLsSerializer + + # Filter status code 404 and 0 + # endpoints = ( + # endpoints + # .exclude(http_status=0) + # .exclude(http_status=None) + # .exclude(http_status=404) + # ) + + self.queryset = endpoints + + return self.queryset + + def filter_queryset(self, qs): + qs = self.queryset.filter() + search_value = self.request.GET.get(u'search[value]', None) + _order_col = self.request.GET.get(u'order[0][column]', None) + _order_direction = self.request.GET.get(u'order[0][dir]', None) + if search_value or _order_col or _order_direction: + order_col = 'content_length' + if _order_col == '1': + order_col = 
'http_url' + elif _order_col == '2': + order_col = 'http_status' + elif _order_col == '3': + order_col = 'page_title' + elif _order_col == '4': + order_col = 'matched_gf_patterns' + elif _order_col == '5': + order_col = 'content_type' + elif _order_col == '6': + order_col = 'content_length' + elif _order_col == '7': + order_col = 'techs' + elif _order_col == '8': + order_col = 'webserver' + elif _order_col == '9': + order_col = 'response_time' + if _order_direction == 'desc': + order_col = f'-{order_col}' + # if the search query is separated by = means, it is a specific lookup + # divide the search query into two half and lookup + if '=' in search_value or '&' in search_value or '|' in search_value or '>' in search_value or '<' in search_value or '!' in search_value: + if '&' in search_value: + complex_query = search_value.split('&') + for query in complex_query: + if query.strip(): + qs = qs & self.special_lookup(query.strip()) + elif '|' in search_value: + qs = Subdomain.objects.none() + complex_query = search_value.split('|') + for query in complex_query: + if query.strip(): + qs = self.special_lookup(query.strip()) | qs + else: + qs = self.special_lookup(search_value) + else: + qs = self.general_lookup(search_value) + return qs.order_by(order_col) + return qs + + def general_lookup(self, search_value): + return \ + self.queryset.filter(Q(http_url__icontains=search_value) | + Q(page_title__icontains=search_value) | + Q(http_status__icontains=search_value) | + Q(content_type__icontains=search_value) | + Q(webserver__icontains=search_value) | + Q(techs__name__icontains=search_value) | + Q(content_type__icontains=search_value) | + Q(matched_gf_patterns__icontains=search_value)) + + def special_lookup(self, search_value): + qs = self.queryset.filter() + if '=' in search_value: + search_param = search_value.split("=") + lookup_title = search_param[0].lower().strip() + lookup_content = search_param[1].lower().strip() + if 'http_url' in lookup_title: + qs = self.queryset.filter(http_url__icontains=lookup_content) + elif 'page_title' in lookup_title: + qs = ( + self.queryset + .filter(page_title__icontains=lookup_content) + ) + elif 'content_type' in lookup_title: + qs = ( + self.queryset + .filter(content_type__icontains=lookup_content) + ) + elif 'webserver' in lookup_title: + qs = self.queryset.filter(webserver__icontains=lookup_content) + elif 'technology' in lookup_title: + qs = ( + self.queryset + .filter(techs__name__icontains=lookup_content) + ) + elif 'gf_pattern' in lookup_title: + qs = ( + self.queryset + .filter(matched_gf_patterns__icontains=lookup_content) + ) + elif 'http_status' in lookup_title: + try: + int_http_status = int(lookup_content) + qs = self.queryset.filter(http_status=int_http_status) + except Exception as e: + print(e) + elif 'content_length' in lookup_title: + try: + int_http_status = int(lookup_content) + qs = self.queryset.filter(content_length=int_http_status) + except Exception as e: + print(e) + elif '>' in search_value: + search_param = search_value.split(">") + lookup_title = search_param[0].lower().strip() + lookup_content = search_param[1].lower().strip() + if 'http_status' in lookup_title: + try: + int_val = int(lookup_content) + qs = ( + self.queryset + .filter(http_status__gt=int_val) + ) + except Exception as e: + print(e) + elif 'content_length' in lookup_title: + try: + int_val = int(lookup_content) + qs = self.queryset.filter(content_length__gt=int_val) + except Exception as e: + print(e) + elif '<' in search_value: + search_param = 
search_value.split("<") + lookup_title = search_param[0].lower().strip() + lookup_content = search_param[1].lower().strip() + if 'http_status' in lookup_title: + try: + int_val = int(lookup_content) + qs = self.queryset.filter(http_status__lt=int_val) + except Exception as e: + print(e) + elif 'content_length' in lookup_title: + try: + int_val = int(lookup_content) + qs = self.queryset.filter(content_length__lt=int_val) + except Exception as e: + print(e) + elif '!' in search_value: + search_param = search_value.split("!") + lookup_title = search_param[0].lower().strip() + lookup_content = search_param[1].lower().strip() + if 'http_url' in lookup_title: + qs = ( + self.queryset + .exclude(http_url__icontains=lookup_content) + ) + elif 'page_title' in lookup_title: + qs = ( + self.queryset + .exclude(page_title__icontains=lookup_content) + ) + elif 'content_type' in lookup_title: + qs = ( + self.queryset + .exclude(content_type__icontains=lookup_content) + ) + elif 'webserver' in lookup_title: + qs = ( + self.queryset + .exclude(webserver__icontains=lookup_content) + ) + elif 'technology' in lookup_title: + qs = ( + self.queryset + .exclude(techs__name__icontains=lookup_content) + ) + elif 'gf_pattern' in lookup_title: + qs = ( + self.queryset + .exclude(matched_gf_patterns__icontains=lookup_content) + ) + elif 'http_status' in lookup_title: + try: + int_http_status = int(lookup_content) + qs = self.queryset.exclude(http_status=int_http_status) + except Exception as e: + print(e) + elif 'content_length' in lookup_title: + try: + int_http_status = int(lookup_content) + qs = self.queryset.exclude(content_length=int_http_status) + except Exception as e: + print(e) + return qs class DirectoryViewSet(viewsets.ModelViewSet): queryset = DirectoryFile.objects.none() @@ -2588,307 +2854,377 @@ def get_queryset(self): .order_by('id') class ProjectViewSet(viewsets.ModelViewSet): - serializer_class = ProjectSerializer - permission_classes = [IsAuthenticated] + serializer_class = ProjectSerializer + permission_classes = [IsAuthenticated] - def get_queryset(self): - return Project.objects.filter(user=self.request.user) + def get_queryset(self): + return Project.objects.filter(user=self.request.user) - def perform_create(self, serializer): - serializer.save(user=self.request.user) + def perform_create(self, serializer): + serializer.save(user=self.request.user) - def perform_update(self, serializer): - if serializer.instance.user != self.request.user: - raise PermissionDenied("You don't have permission to modify this project.") - serializer.save() + def perform_update(self, serializer): + if serializer.instance.user != self.request.user: + raise PermissionDenied("You don't have permission to modify this project.") + serializer.save() class VulnerabilityViewSet(viewsets.ModelViewSet): - queryset = Vulnerability.objects.none() - serializer_class = VulnerabilitySerializer - - def get_queryset(self): - req = self.request - scan_id = safe_int_cast(req.query_params.get('scan_history')) - target_id = safe_int_cast(req.query_params.get('target_id')) - domain = req.query_params.get('domain') - severity = req.query_params.get('severity') - subdomain_id = safe_int_cast(req.query_params.get('subdomain_id')) - subdomain_name = req.query_params.get('subdomain') - vulnerability_name = req.query_params.get('vulnerability_name') - slug = self.request.GET.get('project', None) - - if slug: - vulnerabilities = Vulnerability.objects.filter(scan_history__domain__project__slug=slug) - else: - vulnerabilities = 
Vulnerability.objects.all() - - if scan_id: - qs = ( - vulnerabilities - .filter(scan_history__id=scan_id) - .distinct() - ) - elif target_id: - qs = ( - vulnerabilities - .filter(target_domain__id=target_id) - .distinct() - ) - elif subdomain_name: - subdomains = Subdomain.objects.filter(name=subdomain_name) - qs = ( - vulnerabilities - .filter(subdomain__in=subdomains) - .distinct() - ) - else: - qs = vulnerabilities.distinct() - - if domain: - qs = qs.filter(Q(target_domain__name=domain)).distinct() - if vulnerability_name: - qs = qs.filter(Q(name=vulnerability_name)).distinct() - if severity: - qs = qs.filter(severity=severity) - if subdomain_id: - qs = qs.filter(subdomain__id=subdomain_id) - self.queryset = qs - return self.queryset - - def filter_queryset(self, qs): - qs = self.queryset.filter() - search_value = self.request.GET.get(u'search[value]', None) - _order_col = self.request.GET.get(u'order[0][column]', None) - _order_direction = self.request.GET.get(u'order[0][dir]', None) - if search_value or _order_col or _order_direction: - order_col = 'severity' - if _order_col == '1': - order_col = 'source' - elif _order_col == '3': - order_col = 'name' - elif _order_col == '7': - order_col = 'severity' - elif _order_col == '11': - order_col = 'http_url' - elif _order_col == '15': - order_col = 'open_status' - - if _order_direction == 'desc': - order_col = f'-{order_col}' - # if the search query is separated by = means, it is a specific lookup - # divide the search query into two half and lookup - operators = ['=', '&', '|', '>', '<', '!'] - if any(x in search_value for x in operators): - if '&' in search_value: - complex_query = search_value.split('&') - for query in complex_query: - if query.strip(): - qs = qs & self.special_lookup(query.strip()) - elif '|' in search_value: - qs = Subdomain.objects.none() - complex_query = search_value.split('|') - for query in complex_query: - if query.strip(): - qs = self.special_lookup(query.strip()) | qs - else: - qs = self.special_lookup(search_value) - else: - qs = self.general_lookup(search_value) - return qs.order_by(order_col) - return qs.order_by('-severity') - - def general_lookup(self, search_value): - qs = ( - self.queryset - .filter(Q(http_url__icontains=search_value) | - Q(target_domain__name__icontains=search_value) | - Q(template__icontains=search_value) | - Q(template_id__icontains=search_value) | - Q(name__icontains=search_value) | - Q(severity__icontains=search_value) | - Q(description__icontains=search_value) | - Q(extracted_results__icontains=search_value) | - Q(references__url__icontains=search_value) | - Q(cve_ids__name__icontains=search_value) | - Q(cwe_ids__name__icontains=search_value) | - Q(cvss_metrics__icontains=search_value) | - Q(cvss_score__icontains=search_value) | - Q(type__icontains=search_value) | - Q(open_status__icontains=search_value) | - Q(hackerone_report_id__icontains=search_value) | - Q(tags__name__icontains=search_value)) - ) - return qs - - def special_lookup(self, search_value): - qs = self.queryset.filter() - if '=' in search_value: - search_param = search_value.split("=") - lookup_title = search_param[0].lower().strip() - lookup_content = search_param[1].lower().strip() - if 'severity' in lookup_title: - severity_value = NUCLEI_SEVERITY_MAP.get(lookup_content, -1) - qs = ( - self.queryset - .filter(severity=severity_value) - ) - elif 'name' in lookup_title: - qs = ( - self.queryset - .filter(name__icontains=lookup_content) - ) - elif 'http_url' in lookup_title: - qs = ( - self.queryset - 
.filter(http_url__icontains=lookup_content) - ) - elif 'template' in lookup_title: - qs = ( - self.queryset - .filter(template__icontains=lookup_content) - ) - elif 'template_id' in lookup_title: - qs = ( - self.queryset - .filter(template_id__icontains=lookup_content) - ) - elif 'cve_id' in lookup_title or 'cve' in lookup_title: - qs = ( - self.queryset - .filter(cve_ids__name__icontains=lookup_content) - ) - elif 'cwe_id' in lookup_title or 'cwe' in lookup_title: - qs = ( - self.queryset - .filter(cwe_ids__name__icontains=lookup_content) - ) - elif 'cvss_metrics' in lookup_title: - qs = ( - self.queryset - .filter(cvss_metrics__icontains=lookup_content) - ) - elif 'cvss_score' in lookup_title: - qs = ( - self.queryset - .filter(cvss_score__exact=lookup_content) - ) - elif 'type' in lookup_title: - qs = ( - self.queryset - .filter(type__icontains=lookup_content) - ) - elif 'tag' in lookup_title: - qs = ( - self.queryset - .filter(tags__name__icontains=lookup_content) - ) - elif 'status' in lookup_title: - open_status = lookup_content == 'open' - qs = ( - self.queryset - .filter(open_status=open_status) - ) - elif 'description' in lookup_title: - qs = ( - self.queryset - .filter(Q(description__icontains=lookup_content) | - Q(template__icontains=lookup_content) | - Q(extracted_results__icontains=lookup_content)) - ) - elif '!' in search_value: - search_param = search_value.split("!") - lookup_title = search_param[0].lower().strip() - lookup_content = search_param[1].lower().strip() - if 'severity' in lookup_title: - severity_value = NUCLEI_SEVERITY_MAP.get(lookup_title, -1) - qs = ( - self.queryset - .exclude(severity=severity_value) - ) - elif 'name' in lookup_title: - qs = ( - self.queryset - .exclude(name__icontains=lookup_content) - ) - elif 'http_url' in lookup_title: - qs = ( - self.queryset - .exclude(http_url__icontains=lookup_content) - ) - elif 'template' in lookup_title: - qs = ( - self.queryset - .exclude(template__icontains=lookup_content) - ) - elif 'template_id' in lookup_title: - qs = ( - self.queryset - .exclude(template_id__icontains=lookup_content) - ) - elif 'cve_id' in lookup_title or 'cve' in lookup_title: - qs = ( - self.queryset - .exclude(cve_ids__icontains=lookup_content) - ) - elif 'cwe_id' in lookup_title or 'cwe' in lookup_title: - qs = ( - self.queryset - .exclude(cwe_ids__icontains=lookup_content) - ) - elif 'cvss_metrics' in lookup_title: - qs = ( - self.queryset - .exclude(cvss_metrics__icontains=lookup_content) - ) - elif 'cvss_score' in lookup_title: - qs = ( - self.queryset - .exclude(cvss_score__exact=lookup_content) - ) - elif 'type' in lookup_title: - qs = ( - self.queryset - .exclude(type__icontains=lookup_content) - ) - elif 'tag' in lookup_title: - qs = ( - self.queryset - .exclude(tags__icontains=lookup_content) - ) - elif 'status' in lookup_title: - open_status = lookup_content == 'open' - qs = ( - self.queryset - .exclude(open_status=open_status) - ) - elif 'description' in lookup_title: - qs = ( - self.queryset - .exclude(Q(description__icontains=lookup_content) | - Q(template__icontains=lookup_content) | - Q(extracted_results__icontains=lookup_content)) - ) - - elif '>' in search_value: - search_param = search_value.split(">") - lookup_title = search_param[0].lower().strip() - lookup_content = search_param[1].lower().strip() - if 'cvss_score' in lookup_title: - try: - val = float(lookup_content) - qs = self.queryset.filter(cvss_score__gt=val) - except Exception as e: - print(e) - - elif '<' in search_value: - search_param = 
search_value.split("<") - lookup_title = search_param[0].lower().strip() - lookup_content = search_param[1].lower().strip() - if 'cvss_score' in lookup_title: - try: - val = int(lookup_content) - qs = self.queryset.filter(cvss_score__lt=val) - except Exception as e: - print(e) - - return qs + queryset = Vulnerability.objects.none() + serializer_class = VulnerabilitySerializer + + def get_queryset(self): + req = self.request + scan_id = safe_int_cast(req.query_params.get('scan_history')) + target_id = safe_int_cast(req.query_params.get('target_id')) + domain = req.query_params.get('domain') + severity = req.query_params.get('severity') + subdomain_id = safe_int_cast(req.query_params.get('subdomain_id')) + subdomain_name = req.query_params.get('subdomain') + vulnerability_name = req.query_params.get('vulnerability_name') + slug = self.request.GET.get('project', None) + + if slug: + vulnerabilities = Vulnerability.objects.filter(scan_history__domain__project__slug=slug) + else: + vulnerabilities = Vulnerability.objects.all() + + if scan_id: + qs = ( + vulnerabilities + .filter(scan_history__id=scan_id) + .distinct() + ) + elif target_id: + qs = ( + vulnerabilities + .filter(target_domain__id=target_id) + .distinct() + ) + elif subdomain_name: + subdomains = Subdomain.objects.filter(name=subdomain_name) + qs = ( + vulnerabilities + .filter(subdomain__in=subdomains) + .distinct() + ) + else: + qs = vulnerabilities.distinct() + + if domain: + qs = qs.filter(Q(target_domain__name=domain)).distinct() + if vulnerability_name: + qs = qs.filter(Q(name=vulnerability_name)).distinct() + if severity: + qs = qs.filter(severity=severity) + if subdomain_id: + qs = qs.filter(subdomain__id=subdomain_id) + self.queryset = qs + return self.queryset + + def filter_queryset(self, qs): + qs = self.queryset.filter() + search_value = self.request.GET.get(u'search[value]', None) + _order_col = self.request.GET.get(u'order[0][column]', None) + _order_direction = self.request.GET.get(u'order[0][dir]', None) + if search_value or _order_col or _order_direction: + order_col = 'severity' + if _order_col == '1': + order_col = 'source' + elif _order_col == '3': + order_col = 'name' + elif _order_col == '7': + order_col = 'severity' + elif _order_col == '11': + order_col = 'http_url' + elif _order_col == '15': + order_col = 'open_status' + + if _order_direction == 'desc': + order_col = f'-{order_col}' + # if the search query is separated by = means, it is a specific lookup + # divide the search query into two half and lookup + operators = ['=', '&', '|', '>', '<', '!'] + if any(x in search_value for x in operators): + if '&' in search_value: + complex_query = search_value.split('&') + for query in complex_query: + if query.strip(): + qs = qs & self.special_lookup(query.strip()) + elif '|' in search_value: + qs = Subdomain.objects.none() + complex_query = search_value.split('|') + for query in complex_query: + if query.strip(): + qs = self.special_lookup(query.strip()) | qs + else: + qs = self.special_lookup(search_value) + else: + qs = self.general_lookup(search_value) + return qs.order_by(order_col) + return qs.order_by('-severity') + + def general_lookup(self, search_value): + qs = ( + self.queryset + .filter(Q(http_url__icontains=search_value) | + Q(target_domain__name__icontains=search_value) | + Q(template__icontains=search_value) | + Q(template_id__icontains=search_value) | + Q(name__icontains=search_value) | + Q(severity__icontains=search_value) | + Q(description__icontains=search_value) | + 
Q(extracted_results__icontains=search_value) | + Q(references__icontains=search_value) | + Q(cve_ids__name__icontains=search_value) | + Q(cwe_ids__name__icontains=search_value) | + Q(cvss_metrics__icontains=search_value) | + Q(cvss_score__icontains=search_value) | + Q(type__icontains=search_value) | + Q(open_status__icontains=search_value) | + Q(hackerone_report_id__icontains=search_value) | + Q(tags__name__icontains=search_value)) + ) + return qs + + def special_lookup(self, search_value): + qs = self.queryset.filter() + if '=' in search_value: + search_param = search_value.split("=") + lookup_title = search_param[0].lower().strip() + lookup_content = search_param[1].lower().strip() + if 'severity' in lookup_title: + severity_value = NUCLEI_SEVERITY_MAP.get(lookup_content, -1) + qs = ( + self.queryset + .filter(severity=severity_value) + ) + elif 'name' in lookup_title: + qs = ( + self.queryset + .filter(name__icontains=lookup_content) + ) + elif 'http_url' in lookup_title: + qs = ( + self.queryset + .filter(http_url__icontains=lookup_content) + ) + elif 'template' in lookup_title: + qs = ( + self.queryset + .filter(template__icontains=lookup_content) + ) + elif 'template_id' in lookup_title: + qs = ( + self.queryset + .filter(template_id__icontains=lookup_content) + ) + elif 'cve_id' in lookup_title or 'cve' in lookup_title: + qs = ( + self.queryset + .filter(cve_ids__name__icontains=lookup_content) + ) + elif 'cwe_id' in lookup_title or 'cwe' in lookup_title: + qs = ( + self.queryset + .filter(cwe_ids__name__icontains=lookup_content) + ) + elif 'cvss_metrics' in lookup_title: + qs = ( + self.queryset + .filter(cvss_metrics__icontains=lookup_content) + ) + elif 'cvss_score' in lookup_title: + qs = ( + self.queryset + .filter(cvss_score__exact=lookup_content) + ) + elif 'type' in lookup_title: + qs = ( + self.queryset + .filter(type__icontains=lookup_content) + ) + elif 'tag' in lookup_title: + qs = ( + self.queryset + .filter(tags__name__icontains=lookup_content) + ) + elif 'status' in lookup_title: + open_status = lookup_content == 'open' + qs = ( + self.queryset + .filter(open_status=open_status) + ) + elif 'description' in lookup_title: + qs = ( + self.queryset + .filter(Q(description__icontains=lookup_content) | + Q(template__icontains=lookup_content) | + Q(extracted_results__icontains=lookup_content)) + ) + elif '!' 
in search_value: + search_param = search_value.split("!") + lookup_title = search_param[0].lower().strip() + lookup_content = search_param[1].lower().strip() + if 'severity' in lookup_title: + severity_value = NUCLEI_SEVERITY_MAP.get(lookup_title, -1) + qs = ( + self.queryset + .exclude(severity=severity_value) + ) + elif 'name' in lookup_title: + qs = ( + self.queryset + .exclude(name__icontains=lookup_content) + ) + elif 'http_url' in lookup_title: + qs = ( + self.queryset + .exclude(http_url__icontains=lookup_content) + ) + elif 'template' in lookup_title: + qs = ( + self.queryset + .exclude(template__icontains=lookup_content) + ) + elif 'template_id' in lookup_title: + qs = ( + self.queryset + .exclude(template_id__icontains=lookup_content) + ) + elif 'cve_id' in lookup_title or 'cve' in lookup_title: + qs = ( + self.queryset + .exclude(cve_ids__icontains=lookup_content) + ) + elif 'cwe_id' in lookup_title or 'cwe' in lookup_title: + qs = ( + self.queryset + .exclude(cwe_ids__icontains=lookup_content) + ) + elif 'cvss_metrics' in lookup_title: + qs = ( + self.queryset + .exclude(cvss_metrics__icontains=lookup_content) + ) + elif 'cvss_score' in lookup_title: + qs = ( + self.queryset + .exclude(cvss_score__exact=lookup_content) + ) + elif 'type' in lookup_title: + qs = ( + self.queryset + .exclude(type__icontains=lookup_content) + ) + elif 'tag' in lookup_title: + qs = ( + self.queryset + .exclude(tags__icontains=lookup_content) + ) + elif 'status' in lookup_title: + open_status = lookup_content == 'open' + qs = ( + self.queryset + .exclude(open_status=open_status) + ) + elif 'description' in lookup_title: + qs = ( + self.queryset + .exclude(Q(description__icontains=lookup_content) | + Q(template__icontains=lookup_content) | + Q(extracted_results__icontains=lookup_content)) + ) + + elif '>' in search_value: + search_param = search_value.split(">") + lookup_title = search_param[0].lower().strip() + lookup_content = search_param[1].lower().strip() + if 'cvss_score' in lookup_title: + try: + val = float(lookup_content) + qs = self.queryset.filter(cvss_score__gt=val) + except Exception as e: + print(e) + + elif '<' in search_value: + search_param = search_value.split("<") + lookup_title = search_param[0].lower().strip() + lookup_content = search_param[1].lower().strip() + if 'cvss_score' in lookup_title: + try: + val = int(lookup_content) + qs = self.queryset.filter(cvss_score__lt=val) + except Exception as e: + print(e) + + return qs + +class LLMModelsManager(APIView): + def get(self, request): + """Get all available LLM models (GPT and Ollama) and currently selected model""" + try: + # Get default GPT models + all_models = DEFAULT_GPT_MODELS.copy() + + # Get Ollama models + try: + response = requests.get(f'{OLLAMA_INSTANCE}/api/tags') + if response.status_code == 200: + ollama_models = response.json().get('models', []) + date_format = "%Y-%m-%dT%H:%M:%S" + all_models.extend([{ + **model, + 'modified_at': datetime.strptime(model['modified_at'].split('.')[0], date_format), + 'is_local': True, + } for model in ollama_models]) + except Exception as e: + logger.error(f"Error fetching Ollama models: {str(e)}") + + # Get currently selected model + selected_model = OllamaSettings.objects.first() + selected_model_name = selected_model.selected_model if selected_model else 'gpt-3.5-turbo' + + # Mark selected model + for model in all_models: + if model['name'] == selected_model_name: + model['selected'] = True + + # Add model capabilities + for model in all_models: + # Strip tags from model name 
(e.g., "llama2:latest" -> "llama2") + base_model_name = model['name'].split(':')[0] + if base_model_name in MODEL_REQUIREMENTS: + model['capabilities'] = MODEL_REQUIREMENTS[base_model_name] + + return Response({ + 'status': True, + 'models': all_models, + 'selected_model': selected_model_name, + 'openai_key_error': not get_open_ai_key() and 'gpt' in selected_model_name + }) + + except Exception as e: + logger.error(f"Error in LLMModelsManager GET: {str(e)}") + return Response({ + 'status': False, + 'error': 'Failed to fetch LLM models', + 'message': str(e) + }, status=500) + +@api_view(['GET']) +def websocket_status(request): + """Check if WebSocket server is available""" + try: + channel_layer = get_channel_layer() + return Response({ + 'status': True, + 'websocket_enabled': bool(channel_layer), + 'websocket_endpoints': { + 'ollama_download': '/ws/ollama/download/{model_name}/', + } + }) + except Exception as e: + return Response({ + 'status': False, + 'error': str(e) + }, status=500) diff --git a/web/api/ws_urls.py b/web/api/ws_urls.py new file mode 100644 index 000000000..cf101a5d2 --- /dev/null +++ b/web/api/ws_urls.py @@ -0,0 +1,11 @@ +from django.urls import path +from . import consumers, views + +websocket_urlpatterns = [ + path('ollama/download//', consumers.OllamaDownloadConsumer.as_asgi()), +] + +# Normal HTTP URLs for WebSocket discovery +urlpatterns = [ + path('status/', views.websocket_status, name='websocket_status'), +] \ No newline at end of file diff --git a/web/config/default_yaml_config.yaml b/web/config/default_yaml_config.yaml index 4e31bce86..2b70b6415 100644 --- a/web/config/default_yaml_config.yaml +++ b/web/config/default_yaml_config.yaml @@ -124,7 +124,7 @@ vulnerability_scan: { 'rate_limit': 150, 'retries': 1, 'timeout': 5, - 'fetch_gpt_report': true, + 'fetch_llm_report': true, 'nuclei': { 'use_nuclei_config': false, 'severities': ['unknown', 'info', 'low', 'medium', 'high', 'critical'], diff --git a/web/dashboard/fixtures/dashboard.json b/web/dashboard/fixtures/dashboard.json index 75dde2cf9..da78d6e4e 100644 --- a/web/dashboard/fixtures/dashboard.json +++ b/web/dashboard/fixtures/dashboard.json @@ -4,34 +4,21 @@ "pk": 1, "fields": { "name": "Default", + "description": null, "slug": "default", - "insert_date": "2024-09-03T21:23:21.459Z" + "insert_date": "2024-11-13T01:59:02.841Z", + "users": [] } }, { "model": "dashboard.project", - "pk": 2, + "pk": 34, "fields": { - "name": "My Project", - "slug": "my-project", - "insert_date": "2024-09-04T00:32:08.839Z" - } -}, -{ - "model": "dashboard.project", - "pk": 3, - "fields": { - "name": "My Other Project", - "slug": "my-other-project", - "insert_date": "2024-09-04T00:32:31.475Z" - } -}, -{ - "model": "dashboard.ollamasettings", - "pk": 1, - "fields": { - "selected_model": "gpt-3", - "use_ollama": false + "name": "Another project", + "description": null, + "slug": "another-project", + "insert_date": "2024-11-13T03:05:09.478Z", + "users": [] } } ] diff --git a/web/dashboard/templates/dashboard/onboarding.html b/web/dashboard/templates/dashboard/onboarding.html index e34b8dd8c..560340e5a 100644 --- a/web/dashboard/templates/dashboard/onboarding.html +++ b/web/dashboard/templates/dashboard/onboarding.html @@ -58,7 +58,7 @@

Default API Keys
If you have API keys for these services, please enter them here.
- OpenAI keys will be used to generate vulnerability description, remediation, impact and vulnerability report writing using ChatGPT.
+ OpenAI keys will be used to generate vulnerability descriptions, remediation, impact and vulnerability report writing using an LLM.
{% if openai_key %} {% else %} diff --git a/web/fixtures/auth.json b/web/fixtures/auth.json index a22e89ef7..9be82d339 100644 --- a/web/fixtures/auth.json +++ b/web/fixtures/auth.json @@ -1551,797 +1551,630 @@ "model": "auth.permission", "pk": 173, "fields": { - "name": "Can add vulnerability reference", + "name": "Can add vulnerability tags", "content_type": 44, - "codename": "add_vulnerabilityreference" + "codename": "add_vulnerabilitytags" } }, { "model": "auth.permission", "pk": 174, "fields": { - "name": "Can change vulnerability reference", + "name": "Can change vulnerability tags", "content_type": 44, - "codename": "change_vulnerabilityreference" + "codename": "change_vulnerabilitytags" } }, { "model": "auth.permission", "pk": 175, "fields": { - "name": "Can delete vulnerability reference", + "name": "Can delete vulnerability tags", "content_type": 44, - "codename": "delete_vulnerabilityreference" + "codename": "delete_vulnerabilitytags" } }, { "model": "auth.permission", "pk": 176, "fields": { - "name": "Can view vulnerability reference", + "name": "Can view vulnerability tags", "content_type": 44, - "codename": "view_vulnerabilityreference" + "codename": "view_vulnerabilitytags" } }, { "model": "auth.permission", "pk": 177, "fields": { - "name": "Can add vulnerability tags", + "name": "Can add directory file", "content_type": 45, - "codename": "add_vulnerabilitytags" + "codename": "add_directoryfile" } }, { "model": "auth.permission", "pk": 178, "fields": { - "name": "Can change vulnerability tags", + "name": "Can change directory file", "content_type": 45, - "codename": "change_vulnerabilitytags" + "codename": "change_directoryfile" } }, { "model": "auth.permission", "pk": 179, "fields": { - "name": "Can delete vulnerability tags", + "name": "Can delete directory file", "content_type": 45, - "codename": "delete_vulnerabilitytags" + "codename": "delete_directoryfile" } }, { "model": "auth.permission", "pk": 180, "fields": { - "name": "Can view vulnerability tags", + "name": "Can view directory file", "content_type": 45, - "codename": "view_vulnerabilitytags" + "codename": "view_directoryfile" } }, { "model": "auth.permission", "pk": 181, "fields": { - "name": "Can add directory file", + "name": "Can add directory scan", "content_type": 46, - "codename": "add_directoryfile" + "codename": "add_directoryscan" } }, { "model": "auth.permission", "pk": 182, "fields": { - "name": "Can change directory file", + "name": "Can change directory scan", "content_type": 46, - "codename": "change_directoryfile" + "codename": "change_directoryscan" } }, { "model": "auth.permission", "pk": 183, "fields": { - "name": "Can delete directory file", + "name": "Can delete directory scan", "content_type": 46, - "codename": "delete_directoryfile" + "codename": "delete_directoryscan" } }, { "model": "auth.permission", "pk": 184, "fields": { - "name": "Can view directory file", + "name": "Can view directory scan", "content_type": 46, - "codename": "view_directoryfile" + "codename": "view_directoryscan" } }, { "model": "auth.permission", "pk": 185, "fields": { - "name": "Can add directory scan", + "name": "Can add cve id", "content_type": 47, - "codename": "add_directoryscan" + "codename": "add_cveid" } }, { "model": "auth.permission", "pk": 186, "fields": { - "name": "Can change directory scan", + "name": "Can change cve id", "content_type": 47, - "codename": "change_directoryscan" + "codename": "change_cveid" } }, { "model": "auth.permission", "pk": 187, "fields": { - "name": "Can delete directory scan", + 
"name": "Can delete cve id", "content_type": 47, - "codename": "delete_directoryscan" + "codename": "delete_cveid" } }, { "model": "auth.permission", "pk": 188, "fields": { - "name": "Can view directory scan", + "name": "Can view cve id", "content_type": 47, - "codename": "view_directoryscan" + "codename": "view_cveid" } }, { "model": "auth.permission", "pk": 189, "fields": { - "name": "Can add cve id", + "name": "Can add cwe id", "content_type": 48, - "codename": "add_cveid" + "codename": "add_cweid" } }, { "model": "auth.permission", "pk": 190, "fields": { - "name": "Can change cve id", + "name": "Can change cwe id", "content_type": 48, - "codename": "change_cveid" + "codename": "change_cweid" } }, { "model": "auth.permission", "pk": 191, "fields": { - "name": "Can delete cve id", + "name": "Can delete cwe id", "content_type": 48, - "codename": "delete_cveid" + "codename": "delete_cweid" } }, { "model": "auth.permission", "pk": 192, "fields": { - "name": "Can view cve id", + "name": "Can view cwe id", "content_type": 48, - "codename": "view_cveid" + "codename": "view_cweid" } }, { "model": "auth.permission", "pk": 193, "fields": { - "name": "Can add cwe id", + "name": "Can add waf", "content_type": 49, - "codename": "add_cweid" + "codename": "add_waf" } }, { "model": "auth.permission", "pk": 194, "fields": { - "name": "Can change cwe id", + "name": "Can change waf", "content_type": 49, - "codename": "change_cweid" + "codename": "change_waf" } }, { "model": "auth.permission", "pk": 195, "fields": { - "name": "Can delete cwe id", + "name": "Can delete waf", "content_type": 49, - "codename": "delete_cweid" + "codename": "delete_waf" } }, { "model": "auth.permission", "pk": 196, "fields": { - "name": "Can view cwe id", + "name": "Can view waf", "content_type": 49, - "codename": "view_cweid" + "codename": "view_waf" } }, { "model": "auth.permission", "pk": 197, "fields": { - "name": "Can add waf", + "name": "Can add country iso", "content_type": 50, - "codename": "add_waf" + "codename": "add_countryiso" } }, { "model": "auth.permission", "pk": 198, "fields": { - "name": "Can change waf", + "name": "Can change country iso", "content_type": 50, - "codename": "change_waf" + "codename": "change_countryiso" } }, { "model": "auth.permission", "pk": 199, "fields": { - "name": "Can delete waf", + "name": "Can delete country iso", "content_type": 50, - "codename": "delete_waf" + "codename": "delete_countryiso" } }, { "model": "auth.permission", "pk": 200, "fields": { - "name": "Can view waf", + "name": "Can view country iso", "content_type": 50, - "codename": "view_waf" + "codename": "view_countryiso" } }, { "model": "auth.permission", "pk": 201, "fields": { - "name": "Can add country iso", + "name": "Can add command", "content_type": 51, - "codename": "add_countryiso" + "codename": "add_command" } }, { "model": "auth.permission", "pk": 202, "fields": { - "name": "Can change country iso", + "name": "Can change command", "content_type": 51, - "codename": "change_countryiso" + "codename": "change_command" } }, { "model": "auth.permission", "pk": 203, "fields": { - "name": "Can delete country iso", + "name": "Can delete command", "content_type": 51, - "codename": "delete_countryiso" + "codename": "delete_command" } }, { "model": "auth.permission", "pk": 204, "fields": { - "name": "Can view country iso", + "name": "Can view command", "content_type": 51, - "codename": "view_countryiso" + "codename": "view_command" } }, { "model": "auth.permission", "pk": 205, "fields": { - "name": "Can add command", + 
"name": "Can add s3 bucket", "content_type": 52, - "codename": "add_command" + "codename": "add_s3bucket" } }, { "model": "auth.permission", "pk": 206, "fields": { - "name": "Can change command", + "name": "Can change s3 bucket", "content_type": 52, - "codename": "change_command" + "codename": "change_s3bucket" } }, { "model": "auth.permission", "pk": 207, "fields": { - "name": "Can delete command", + "name": "Can delete s3 bucket", "content_type": 52, - "codename": "delete_command" + "codename": "delete_s3bucket" } }, { "model": "auth.permission", "pk": 208, "fields": { - "name": "Can view command", + "name": "Can view s3 bucket", "content_type": 52, - "codename": "view_command" + "codename": "view_s3bucket" } }, { "model": "auth.permission", "pk": 209, "fields": { - "name": "Can add gpt vulnerability report", + "name": "Can add llm vulnerability report", "content_type": 53, - "codename": "add_gptvulnerabilityreport" + "codename": "add_llmvulnerabilityreport" } }, { "model": "auth.permission", "pk": 210, "fields": { - "name": "Can change gpt vulnerability report", + "name": "Can change llm vulnerability report", "content_type": 53, - "codename": "change_gptvulnerabilityreport" + "codename": "change_llmvulnerabilityreport" } }, { "model": "auth.permission", "pk": 211, "fields": { - "name": "Can delete gpt vulnerability report", + "name": "Can delete llm vulnerability report", "content_type": 53, - "codename": "delete_gptvulnerabilityreport" + "codename": "delete_llmvulnerabilityreport" } }, { "model": "auth.permission", "pk": 212, "fields": { - "name": "Can view gpt vulnerability report", + "name": "Can view llm vulnerability report", "content_type": 53, - "codename": "view_gptvulnerabilityreport" + "codename": "view_llmvulnerabilityreport" } }, { "model": "auth.permission", "pk": 213, - "fields": { - "name": "Can add s3 bucket", - "content_type": 54, - "codename": "add_s3bucket" - } -}, -{ - "model": "auth.permission", - "pk": 214, - "fields": { - "name": "Can change s3 bucket", - "content_type": 54, - "codename": "change_s3bucket" - } -}, -{ - "model": "auth.permission", - "pk": 215, - "fields": { - "name": "Can delete s3 bucket", - "content_type": 54, - "codename": "delete_s3bucket" - } -}, -{ - "model": "auth.permission", - "pk": 216, - "fields": { - "name": "Can view s3 bucket", - "content_type": 54, - "codename": "view_s3bucket" - } -}, -{ - "model": "auth.permission", - "pk": 217, "fields": { "name": "Can add todo note", - "content_type": 55, + "content_type": 54, "codename": "add_todonote" } }, { "model": "auth.permission", - "pk": 218, + "pk": 214, "fields": { "name": "Can change todo note", - "content_type": 55, + "content_type": 54, "codename": "change_todonote" } }, { "model": "auth.permission", - "pk": 219, + "pk": 215, "fields": { "name": "Can delete todo note", - "content_type": 55, + "content_type": 54, "codename": "delete_todonote" } }, { "model": "auth.permission", - "pk": 220, + "pk": 216, "fields": { "name": "Can view todo note", - "content_type": 55, + "content_type": 54, "codename": "view_todonote" } }, { "model": "auth.permission", - "pk": 221, + "pk": 217, "fields": { "name": "Can add crontab", - "content_type": 56, + "content_type": 55, "codename": "add_crontabschedule" } }, { "model": "auth.permission", - "pk": 222, + "pk": 218, "fields": { "name": "Can change crontab", - "content_type": 56, + "content_type": 55, "codename": "change_crontabschedule" } }, { "model": "auth.permission", - "pk": 223, + "pk": 219, "fields": { "name": "Can delete crontab", - 
"content_type": 56, + "content_type": 55, "codename": "delete_crontabschedule" } }, { "model": "auth.permission", - "pk": 224, + "pk": 220, "fields": { "name": "Can view crontab", - "content_type": 56, + "content_type": 55, "codename": "view_crontabschedule" } }, { "model": "auth.permission", - "pk": 225, + "pk": 221, "fields": { "name": "Can add interval", - "content_type": 57, + "content_type": 56, "codename": "add_intervalschedule" } }, { "model": "auth.permission", - "pk": 226, + "pk": 222, "fields": { "name": "Can change interval", - "content_type": 57, + "content_type": 56, "codename": "change_intervalschedule" } }, { "model": "auth.permission", - "pk": 227, + "pk": 223, "fields": { "name": "Can delete interval", - "content_type": 57, + "content_type": 56, "codename": "delete_intervalschedule" } }, { "model": "auth.permission", - "pk": 228, + "pk": 224, "fields": { "name": "Can view interval", - "content_type": 57, + "content_type": 56, "codename": "view_intervalschedule" } }, { "model": "auth.permission", - "pk": 229, + "pk": 225, "fields": { "name": "Can add periodic task", - "content_type": 58, + "content_type": 57, "codename": "add_periodictask" } }, { "model": "auth.permission", - "pk": 230, + "pk": 226, "fields": { "name": "Can change periodic task", - "content_type": 58, + "content_type": 57, "codename": "change_periodictask" } }, { "model": "auth.permission", - "pk": 231, + "pk": 227, "fields": { "name": "Can delete periodic task", - "content_type": 58, + "content_type": 57, "codename": "delete_periodictask" } }, { "model": "auth.permission", - "pk": 232, + "pk": 228, "fields": { "name": "Can view periodic task", - "content_type": 58, + "content_type": 57, "codename": "view_periodictask" } }, { "model": "auth.permission", - "pk": 233, + "pk": 229, "fields": { "name": "Can add periodic tasks", - "content_type": 59, + "content_type": 58, "codename": "add_periodictasks" } }, { "model": "auth.permission", - "pk": 234, + "pk": 230, "fields": { "name": "Can change periodic tasks", - "content_type": 59, + "content_type": 58, "codename": "change_periodictasks" } }, { "model": "auth.permission", - "pk": 235, + "pk": 231, "fields": { "name": "Can delete periodic tasks", - "content_type": 59, + "content_type": 58, "codename": "delete_periodictasks" } }, { "model": "auth.permission", - "pk": 236, + "pk": 232, "fields": { "name": "Can view periodic tasks", - "content_type": 59, + "content_type": 58, "codename": "view_periodictasks" } }, { "model": "auth.permission", - "pk": 237, + "pk": 233, "fields": { "name": "Can add solar event", - "content_type": 60, + "content_type": 59, "codename": "add_solarschedule" } }, { "model": "auth.permission", - "pk": 238, + "pk": 234, "fields": { "name": "Can change solar event", - "content_type": 60, + "content_type": 59, "codename": "change_solarschedule" } }, { "model": "auth.permission", - "pk": 239, + "pk": 235, "fields": { "name": "Can delete solar event", - "content_type": 60, + "content_type": 59, "codename": "delete_solarschedule" } }, { "model": "auth.permission", - "pk": 240, + "pk": 236, "fields": { "name": "Can view solar event", - "content_type": 60, + "content_type": 59, "codename": "view_solarschedule" } }, { "model": "auth.permission", - "pk": 241, + "pk": 237, "fields": { "name": "Can add clocked", - "content_type": 61, + "content_type": 60, "codename": "add_clockedschedule" } }, { "model": "auth.permission", - "pk": 242, + "pk": 238, "fields": { "name": "Can change clocked", - "content_type": 61, + "content_type": 60, "codename": 
"change_clockedschedule" } }, { "model": "auth.permission", - "pk": 243, + "pk": 239, "fields": { "name": "Can delete clocked", - "content_type": 61, + "content_type": 60, "codename": "delete_clockedschedule" } }, { "model": "auth.permission", - "pk": 244, + "pk": 240, "fields": { "name": "Can view clocked", - "content_type": 61, + "content_type": 60, "codename": "view_clockedschedule" } }, -{ - "model": "auth.permission", - "pk": 245, - "fields": { - "name": "Modify Scan Report", - "content_type": 4, - "codename": "modify_scan_report" - } -}, -{ - "model": "auth.permission", - "pk": 246, - "fields": { - "name": "Modify Scan Configurations", - "content_type": 4, - "codename": "modify_scan_configurations" - } -}, -{ - "model": "auth.permission", - "pk": 247, - "fields": { - "name": "Modify Wordlists", - "content_type": 4, - "codename": "modify_wordlists" - } -}, -{ - "model": "auth.permission", - "pk": 248, - "fields": { - "name": "Modify Targets", - "content_type": 4, - "codename": "modify_targets" - } -}, -{ - "model": "auth.permission", - "pk": 249, - "fields": { - "name": "Initiate Scans Subscans", - "content_type": 4, - "codename": "initiate_scans_subscans" - } -}, -{ - "model": "auth.permission", - "pk": 250, - "fields": { - "name": "Modify Scan Results", - "content_type": 4, - "codename": "modify_scan_results" - } -}, -{ - "model": "auth.permission", - "pk": 251, - "fields": { - "name": "Modify Interesting Lookup", - "content_type": 4, - "codename": "modify_interesting_lookup" - } -}, -{ - "model": "auth.group", - "pk": 1, - "fields": { - "name": "penetration_tester", - "permissions": [] - } -}, -{ - "model": "auth.group", - "pk": 2, - "fields": { - "name": "auditor", - "permissions": [] - } -}, { "model": "auth.user", "pk": 1, "fields": { - "password": "pbkdf2_sha256$260000$CqI5854efTfSzMuRGapUcC$czVBVTOeS0doZ2xNWBTDxIPbyAvkef+vB41oaDGNYDE=", - "last_login": "2024-09-03T21:23:15.084Z", + "password": "pbkdf2_sha256$260000$WrppgNE2XFSWfeCqKVIF0U$FM2wHJgt1Iw0dHZUGLiKtcMUWwyDFlo/Frsaz/jzJos=", + "last_login": "2024-11-13T01:58:59.003Z", "is_superuser": true, "username": "rengine", "first_name": "", "last_name": "", - "email": "rengine@example.com", + "email": "", "is_staff": true, "is_active": true, - "date_joined": "2024-09-03T21:22:52.707Z", + "date_joined": "2024-11-13T01:54:21.443Z", "groups": [], "user_permissions": [] } -}, -{ - "model": "auth.user", - "pk": 2, - "fields": { - "password": "pbkdf2_sha256$260000$CZZAPOMIBDEuFR6GWP44YY$zmorFyJzWeyX0+jnFnItiRwjsaPQ9S3g8KgqDNaluc0=", - "last_login": null, - "is_superuser": false, - "username": "pentest", - "first_name": "", - "last_name": "", - "email": "", - "is_staff": false, - "is_active": true, - "date_joined": "2024-09-04T00:31:18.262Z", - "groups": [ - 1 - ], - "user_permissions": [ - 249, - 251, - 246, - 245, - 250, - 248, - 247 - ] - } -}, -{ - "model": "auth.user", - "pk": 3, - "fields": { - "password": "pbkdf2_sha256$260000$8XXTKhzOwW3sDLL7kQmgjb$a1nISv6K/uW+KBw4yzQCkRJb6ydo7Nk/Y99RAqUGXQo=", - "last_login": null, - "is_superuser": false, - "username": "audit", - "first_name": "", - "last_name": "", - "email": "", - "is_staff": false, - "is_active": true, - "date_joined": "2024-09-04T00:31:43.172Z", - "groups": [ - 2 - ], - "user_permissions": [ - 251, - 245, - 250 - ] - } } ] diff --git a/web/fixtures/contenttypes.json b/web/fixtures/contenttypes.json new file mode 100644 index 000000000..30f1ccab2 --- /dev/null +++ b/web/fixtures/contenttypes.json @@ -0,0 +1,482 @@ +[ +{ + "model": "contenttypes.contenttype", + "pk": 1, + 
"fields": { + "app_label": "admin", + "model": "logentry" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 2, + "fields": { + "app_label": "auth", + "model": "permission" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 3, + "fields": { + "app_label": "auth", + "model": "group" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 4, + "fields": { + "app_label": "auth", + "model": "user" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 5, + "fields": { + "app_label": "contenttypes", + "model": "contenttype" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 6, + "fields": { + "app_label": "sessions", + "model": "session" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 7, + "fields": { + "app_label": "dashboard", + "model": "searchhistory" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 8, + "fields": { + "app_label": "dashboard", + "model": "project" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 9, + "fields": { + "app_label": "dashboard", + "model": "netlasapikey" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 10, + "fields": { + "app_label": "dashboard", + "model": "openaiapikey" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 11, + "fields": { + "app_label": "dashboard", + "model": "ollamasettings" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 12, + "fields": { + "app_label": "targetApp", + "model": "domain" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 13, + "fields": { + "app_label": "targetApp", + "model": "organization" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 14, + "fields": { + "app_label": "targetApp", + "model": "domaininfo" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 15, + "fields": { + "app_label": "targetApp", + "model": "domainregistration" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 16, + "fields": { + "app_label": "targetApp", + "model": "dnsrecord" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 17, + "fields": { + "app_label": "targetApp", + "model": "nameserver" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 18, + "fields": { + "app_label": "targetApp", + "model": "registrar" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 19, + "fields": { + "app_label": "targetApp", + "model": "whoisstatus" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 20, + "fields": { + "app_label": "targetApp", + "model": "historicalip" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 21, + "fields": { + "app_label": "targetApp", + "model": "relateddomain" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 22, + "fields": { + "app_label": "scanEngine", + "model": "configuration" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 23, + "fields": { + "app_label": "scanEngine", + "model": "enginetype" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 24, + "fields": { + "app_label": "scanEngine", + "model": "hackerone" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 25, + "fields": { + "app_label": "scanEngine", + "model": "installedexternaltool" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 26, + "fields": { + "app_label": "scanEngine", + "model": "interestinglookupmodel" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 27, + "fields": { + "app_label": "scanEngine", + "model": "notification" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 28, + "fields": { + "app_label": "scanEngine", + "model": "proxy" + } +}, +{ + "model": 
"contenttypes.contenttype", + "pk": 29, + "fields": { + "app_label": "scanEngine", + "model": "vulnerabilityreportsetting" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 30, + "fields": { + "app_label": "scanEngine", + "model": "wordlist" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 31, + "fields": { + "app_label": "startScan", + "model": "dork" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 32, + "fields": { + "app_label": "startScan", + "model": "email" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 33, + "fields": { + "app_label": "startScan", + "model": "employee" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 34, + "fields": { + "app_label": "startScan", + "model": "endpoint" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 35, + "fields": { + "app_label": "startScan", + "model": "ipaddress" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 36, + "fields": { + "app_label": "startScan", + "model": "port" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 37, + "fields": { + "app_label": "startScan", + "model": "scanhistory" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 38, + "fields": { + "app_label": "startScan", + "model": "subdomain" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 39, + "fields": { + "app_label": "startScan", + "model": "technology" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 40, + "fields": { + "app_label": "startScan", + "model": "vulnerability" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 41, + "fields": { + "app_label": "startScan", + "model": "subscan" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 42, + "fields": { + "app_label": "startScan", + "model": "scanactivity" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 43, + "fields": { + "app_label": "startScan", + "model": "metafinderdocument" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 44, + "fields": { + "app_label": "startScan", + "model": "vulnerabilitytags" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 45, + "fields": { + "app_label": "startScan", + "model": "directoryfile" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 46, + "fields": { + "app_label": "startScan", + "model": "directoryscan" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 47, + "fields": { + "app_label": "startScan", + "model": "cveid" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 48, + "fields": { + "app_label": "startScan", + "model": "cweid" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 49, + "fields": { + "app_label": "startScan", + "model": "waf" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 50, + "fields": { + "app_label": "startScan", + "model": "countryiso" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 51, + "fields": { + "app_label": "startScan", + "model": "command" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 52, + "fields": { + "app_label": "startScan", + "model": "s3bucket" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 53, + "fields": { + "app_label": "startScan", + "model": "llmvulnerabilityreport" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 54, + "fields": { + "app_label": "recon_note", + "model": "todonote" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 55, + "fields": { + "app_label": "django_celery_beat", + "model": "crontabschedule" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 56, + "fields": { + "app_label": 
"django_celery_beat", + "model": "intervalschedule" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 57, + "fields": { + "app_label": "django_celery_beat", + "model": "periodictask" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 58, + "fields": { + "app_label": "django_celery_beat", + "model": "periodictasks" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 59, + "fields": { + "app_label": "django_celery_beat", + "model": "solarschedule" + } +}, +{ + "model": "contenttypes.contenttype", + "pk": 60, + "fields": { + "app_label": "django_celery_beat", + "model": "clockedschedule" + } +} +] diff --git a/web/fixtures/default_scan_engines.yaml b/web/fixtures/default_scan_engines.yaml index 744861ba4..f02aa83f5 100644 --- a/web/fixtures/default_scan_engines.yaml +++ b/web/fixtures/default_scan_engines.yaml @@ -28,7 +28,7 @@ 'jpg', 'jpeg', 'gif', 'mp4', 'mpeg', 'mp3'],\r\n 'threads': 30\r\n}\r\nvulnerability_scan: {\r\n \ 'run_nuclei': true,\r\n 'run_dalfox': true,\r\n 'run_crlfuzz': true,\r\n \ 'enable_http_crawl': true,\r\n 'concurrency': 50,\r\n 'intensity': 'normal',\r\n - \ 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_gpt_report': + \ 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_llm_report': true,\r\n 'nuclei': {\r\n 'use_nuclei_config': false,\r\n 'severities': ['unknown', 'info', 'low', 'medium', 'high', 'critical']\r\n }\r\n}\r\nwaf_detection: {\r\n\r\n}\r\nscreenshot: {\r\n 'enable_http_crawl': true,\r\n 'intensity': 'normal',\r\n 'timeout': @@ -70,7 +70,7 @@ \ ],\r\n 'intensity': 'normal',\r\n 'documents_limit': 50\r\n}\r\nvulnerability_scan: {\r\n 'run_nuclei': true,\r\n 'run_dalfox': true,\r\n 'run_crlfuzz': true,\r\n \ 'enable_http_crawl': true,\r\n 'concurrency': 50,\r\n 'intensity': 'normal',\r\n - \ 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_gpt_report': + \ 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_llm_report': true,\r\n 'nuclei': {\r\n 'use_nuclei_config': false,\r\n 'severities': ['unknown', 'info', 'low', 'medium', 'high', 'critical']\r\n }\r\n}" default_engine: true @@ -97,7 +97,7 @@ \ ],\r\n 'intensity': 'normal',\r\n 'documents_limit': 50\r\n}\r\nvulnerability_scan: {\r\n 'run_nuclei': true,\r\n 'run_dalfox': true,\r\n 'run_crlfuzz': true,\r\n \ 'enable_http_crawl': false,\r\n 'concurrency': 50,\r\n 'intensity': 'normal',\r\n - \ 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_gpt_report': + \ 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_llm_report': true,\r\n 'nuclei': {\r\n 'use_nuclei_config': false,\r\n 'severities': ['low', 'medium', 'high', 'critical']\r\n }\r\n}" default_engine: true diff --git a/web/fixtures/rolepermissions.json b/web/fixtures/rolepermissions.json new file mode 100644 index 000000000..0d4f101c7 --- /dev/null +++ b/web/fixtures/rolepermissions.json @@ -0,0 +1,2 @@ +[ +] diff --git a/web/reNgine/asgi.py b/web/reNgine/asgi.py new file mode 100644 index 000000000..e4dd4eb60 --- /dev/null +++ b/web/reNgine/asgi.py @@ -0,0 +1,14 @@ +import os +from django.core.asgi import get_asgi_application +from channels.routing import ProtocolTypeRouter, URLRouter +from channels.auth import AuthMiddlewareStack +from .routing import websocket_urlpatterns + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reNgine.settings') + +application = ProtocolTypeRouter({ + "http": get_asgi_application(), + "websocket": AuthMiddlewareStack( + URLRouter(websocket_urlpatterns) + ), +}) \ No newline at end of file diff --git 
a/web/reNgine/definitions.py b/web/reNgine/definitions.py index ce1c710c7..d37f7cbab 100644 --- a/web/reNgine/definitions.py +++ b/web/reNgine/definitions.py @@ -23,7 +23,7 @@ AMASS_WORDLIST = 'amass_wordlist' AUTO_CALIBRATION = 'auto_calibration' CUSTOM_HEADER = 'custom_header' -FETCH_GPT_REPORT = 'fetch_gpt_report' +FETCH_LLM_REPORT = 'fetch_llm_report' RUN_NUCLEI = 'run_nuclei' RUN_CRLFUZZ = 'run_crlfuzz' RUN_DALFOX = 'run_dalfox' @@ -437,95 +437,5 @@ # 404 page url FOUR_OH_FOUR_URL = '/404/' - -############################################################################### -# OLLAMA DEFINITIONS -############################################################################### -OLLAMA_INSTANCE = 'http://ollama:11434' - -DEFAULT_GPT_MODELS = [ - { - 'name': 'gpt-3', - 'model': 'gpt-3', - 'modified_at': '', - 'details': { - 'family': 'GPT', - 'parameter_size': '~175B', - } - }, - { - 'name': 'gpt-3.5-turbo', - 'model': 'gpt-3.5-turbo', - 'modified_at': '', - 'details': { - 'family': 'GPT', - 'parameter_size': '~7B', - } - }, - { - 'name': 'gpt-4', - 'model': 'gpt-4', - 'modified_at': '', - 'details': { - 'family': 'GPT', - 'parameter_size': '~1.7T', - } - }, - { - 'name': 'gpt-4-turbo', - 'model': 'gpt-4', - 'modified_at': '', - 'details': { - 'family': 'GPT', - 'parameter_size': '~1.7T', - } - } -] - - - -# GPT Vulnerability Report Generator -VULNERABILITY_DESCRIPTION_SYSTEM_MESSAGE = """ - You are a highly skilled penetration tester who has recently completed a penetration testing. - You will be given with a - - Vulnerability title - - Vulnerable URL - - and some description about the vulnerability. - Your job is to write a detailed technical penetration testing report based on the given Vulnerability details. - The purpose of this report is to provide an in-depth analysis of the vulnerabilities discovered during the penetration testing engagement. - - The penetration testing report must contain all separated by \n\n - - - Vulnerability description - Include a detailed vulnerability description, include any known CVE IDs, any known existing vulnerabilities. - - Impact - Include what this vulnerability can impact for web applications. - - Remediation - Include steps to remediate this vulnerability. Separate each new remediations by - and a new line \n - - References - Include any references URL about this vulnerability, any existing CVE ID, or news articles etc. Separate each new references by - and a new line \n. Only include http urls - - Do not write 'Penetration Testing Report:' on the title. -""" - - -ATTACK_SUGGESTION_GPT_SYSTEM_PROMPT = """ - You are a highly skilled penetration tester who has recently completed a reconnaissance on a target. - As a penetration tester, I've conducted a thorough reconnaissance on a specific subdomain. - Based on my reconnaissance you will be given with a - - Subdomain Name - - Subdomain Page Title - - Open Ports if any detected - - HTTP Status - - Technologies Detected - - Content Type - - Web Server - - Page Content Length - I'm seeking insights into potential technical web application attacks that could be executed on this subdomain, along with explanations for why these attacks are feasible given the discovered information. - Please provide a detailed list of these attack types and their underlying technical rationales on every attacks you suggested. - Also suggest if any CVE ID, known exploits, existing vulnerabilities, any news articles URL related to the information provided to you. 
-""" - - # OSINT GooFuzz Path GOFUZZ_EXEC_PATH = 'GooFuzz' diff --git a/web/reNgine/gpt.py b/web/reNgine/gpt.py deleted file mode 100644 index 47af6c43f..000000000 --- a/web/reNgine/gpt.py +++ /dev/null @@ -1,134 +0,0 @@ -import openai -import re -from reNgine.common_func import get_open_ai_key, extract_between -from reNgine.definitions import VULNERABILITY_DESCRIPTION_SYSTEM_MESSAGE, ATTACK_SUGGESTION_GPT_SYSTEM_PROMPT, OLLAMA_INSTANCE -from langchain_community.llms import Ollama - -from dashboard.models import OllamaSettings -import logging - -logger = logging.getLogger(__name__) - -class GPTVulnerabilityReportGenerator: - - def __init__(self): - selected_model = OllamaSettings.objects.first() - self.model_name = selected_model.selected_model if selected_model else 'gpt-3.5-turbo' - self.use_ollama = selected_model.use_ollama if selected_model else False - self.openai_api_key = None - self.ollama = None - - def get_vulnerability_description(self, description): - """Generate Vulnerability Description using GPT. - - Args: - description (str): Vulnerability Description message to pass to GPT. - - Returns: - (dict) of { - 'description': (str) - 'impact': (str), - 'remediation': (str), - 'references': (list) of urls - } - """ - print(f"Generating Vulnerability Description for: {description}") - if self.use_ollama: - prompt = VULNERABILITY_DESCRIPTION_SYSTEM_MESSAGE + "\nUser: " + description - self.ollama = Ollama( - base_url=OLLAMA_INSTANCE, - model=self.model_name - ) - response_content = self.ollama(prompt) - else: - openai_api_key = get_open_ai_key() - if not openai_api_key: - return { - 'status': False, - 'error': 'OpenAI API Key not set' - } - try: - openai.api_key = openai_api_key - gpt_response = openai.ChatCompletion.create( - model=self.model_name, - messages=[ - {'role': 'system', 'content': VULNERABILITY_DESCRIPTION_SYSTEM_MESSAGE}, - {'role': 'user', 'content': description} - ] - ) - - response_content = gpt_response['choices'][0]['message']['content'] - except Exception as e: - return { - 'status': False, - 'error': str(e) - } - vuln_description_pattern = re.compile( - r"[Vv]ulnerability [Dd]escription:(.*?)(?:\n\n[Ii]mpact:|$)", - re.DOTALL - ) - impact_pattern = re.compile( - r"[Ii]mpact:(.*?)(?:\n\n[Rr]emediation:|$)", - re.DOTALL - ) - remediation_pattern = re.compile( - r"[Rr]emediation:(.*?)(?:\n\n[Rr]eferences:|$)", - re.DOTALL - ) - - description_section = extract_between(response_content, vuln_description_pattern) - impact_section = extract_between(response_content, impact_pattern) - remediation_section = extract_between(response_content, remediation_pattern) - references_start_index = response_content.find("References:") - references_section = response_content[references_start_index + len("References:"):].strip() - - url_pattern = re.compile(r'https://\S+') - urls = url_pattern.findall(references_section) - - return { - 'status': True, - 'description': description_section, - 'impact': impact_section, - 'remediation': remediation_section, - 'references': urls, - } - -class GPTAttackSuggestionGenerator: - - def __init__(self): - self.api_key = get_open_ai_key() - self.model_name = 'gpt-3.5-turbo' - if not self.api_key: - self.ollama = Ollama(base_url='http://ollama:11434', model="llama2-uncensored") - - def get_attack_suggestion(self, input): - ''' - input (str): input for gpt - ''' - try: - if not self.api_key: - prompt = ATTACK_SUGGESTION_GPT_SYSTEM_PROMPT + "\nUser: " + input - response_content = self.ollama(prompt) - else: - openai.api_key = self.api_key - 
gpt_response = openai.ChatCompletion.create( - model=self.model_name, - messages=[ - {'role': 'system', 'content': ATTACK_SUGGESTION_GPT_SYSTEM_PROMPT}, - {'role': 'user', 'content': input} - ] - ) - response_content = gpt_response['choices'][0]['message']['content'] - - return { - 'status': True, - 'description': response_content, - 'input': input - } - except ValueError as e: - logger.error("Error in get_attack_suggestion: %s", str(e), exc_info=True) - return { - 'status': False, - 'error': "An error occurred while processing your request.", - 'input': input - } diff --git a/web/reNgine/llm/config.py b/web/reNgine/llm/config.py new file mode 100644 index 000000000..39af74023 --- /dev/null +++ b/web/reNgine/llm/config.py @@ -0,0 +1,394 @@ +from typing import Dict, Any + +############################################################################### +# OLLAMA DEFINITIONS +############################################################################### + +OLLAMA_INSTANCE = 'http://ollama:11434' + +############################################################################### +# LLM SYSTEM PROMPTS +############################################################################### + +VULNERABILITY_CONTEXT = """ +You are an expert penetration tester specializing in web application security assessments. Your task is to analyze the following vulnerability information: + - Vulnerability title + - Vulnerable URL + - Vulnerability description + +Keep the tone technical and professional. Focus on actionable insights. Avoid generic statements. +""" + +VULNERABILITY_TECHNICAL_DESCRIPTION_PROMPT = """ +Provide a detailed technical description of the vulnerability, including: + - Detailed technical explanation + - Associated CVE IDs and CVSS scores if applicable + - Attack vectors and exploitation methods + - Any prerequisites or conditions required for exploitation +I don't want to see any other information in the response. +""" + +VULNERABILITY_BUSINESS_IMPACT_PROMPT = """ +Describe the business impact of this vulnerability, including: + - Direct security implications + - Potential business consequences + - Data exposure risks + - Compliance implications +I don't want to see any other information in the response. +""" + +VULNERABILITY_REMEDIATION_STEPS_PROMPT = """ +List the remediation steps for this vulnerability, including: + - Specific, actionable steps + - Code examples where relevant + - Configuration changes if needed + - Security controls to prevent similar issues + Format: Each step prefixed with "- " on a new line +I don't want to see any other information in the response. +""" + +VULNERABILITY_REFERENCES_PROMPT = """ +Provide references related to this vulnerability, focusing on: + - Validated HTTP/HTTPS URLs + - Official documentation, security advisories, and research papers + - Relevant CVE details and exploit databases + Format: Each reference prefixed with "- " on a new line +I don't want to see any other information in the response. +""" + +ATTACK_SUGGESTION_LLM_SYSTEM_PROMPT = """ +You are an advanced penetration tester specializing in web application security. Based on the reconnaissance data provided: + - Subdomain Name + - Page Title + - Open Ports + - HTTP Status + - Technologies Stack + - Content Type + - Web Server + - Content Length + +Provide a structured analysis in the following format: + +1. 
ATTACK SURFACE ANALYSIS + - Enumerate potential entry points + - Identify technology-specific vulnerabilities + - List version-specific known vulnerabilities + - Map attack surface to MITRE ATT&CK framework where applicable + +2. PRIORITIZED ATTACK VECTORS + For each suggested attack: + - Attack name and classification + - Technical rationale based on observed data + - Specific exploitation methodology + - Success probability assessment + - Potential impact rating + +3. RELEVANT SECURITY CONTEXT + - CVE IDs with CVSS scores + - Existing proof-of-concept exploits + - Recent security advisories + - Relevant threat intelligence + Only include verified HTTP/HTTPS URLs + +Focus on actionable, evidence-based suggestions. Prioritize attacks based on feasibility and impact. +Avoid theoretical attacks without supporting evidence from the reconnaissance data. +""" + +############################################################################### +# LLM CONFIGURATION +############################################################################### + +LLM_CONFIG: Dict[str, Any] = { + 'providers': { + 'openai': { + 'default_model': 'gpt-4', + 'models': [ + 'gpt-4-turbo', + 'gpt-4', + 'gpt-3.5-turbo', + 'gpt-3' + ], + 'api_version': '2024-02-15', + 'max_tokens': 2000, + 'temperature': 0.7, + }, + 'ollama': { + 'default_model': 'llama2', + 'models': [ + 'llama2', + 'mistral', + 'codellama', + 'gemma' + ], + 'timeout': 30, + 'max_retries': 3, + } + }, + 'ollama_url': OLLAMA_INSTANCE, + 'timeout': 30, + 'max_retries': 3, + 'prompts': { + 'vulnerability': { + 'context': VULNERABILITY_CONTEXT, + 'technical': VULNERABILITY_TECHNICAL_DESCRIPTION_PROMPT, + 'impact': VULNERABILITY_BUSINESS_IMPACT_PROMPT, + 'remediation': VULNERABILITY_REMEDIATION_STEPS_PROMPT, + 'references': VULNERABILITY_REFERENCES_PROMPT, + }, + 'attack': ATTACK_SUGGESTION_LLM_SYSTEM_PROMPT + } +} + +############################################################################### +# DEFAULT GPT MODELS +############################################################################### + +DEFAULT_GPT_MODELS = [ + { + 'name': 'gpt-3', + 'model': 'gpt-3', + 'modified_at': '', + 'details': { + 'family': 'GPT', + 'parameter_size': '~175B', + } + }, + { + 'name': 'gpt-3.5-turbo', + 'model': 'gpt-3.5-turbo', + 'modified_at': '', + 'details': { + 'family': 'GPT', + 'parameter_size': '~7B', + } + }, + { + 'name': 'gpt-4', + 'model': 'gpt-4', + 'modified_at': '', + 'details': { + 'family': 'GPT', + 'parameter_size': '~1.7T', + } + }, + { + 'name': 'gpt-4-turbo', + 'model': 'gpt-4', + 'modified_at': '', + 'details': { + 'family': 'GPT', + 'parameter_size': '~1.7T', + } + } +] + +############################################################################### +# MODEL CAPABILITIES +############################################################################### + +MODEL_REQUIREMENTS = { + # OpenAI Models + 'gpt-3': { + 'min_tokens': 64, + 'max_tokens': 2048, + 'supports_functions': True, + 'best_for': ['Basic analysis', 'General purpose tasks'], + 'provider': 'openai' + }, + 'gpt-3.5-turbo': { + 'min_tokens': 64, + 'max_tokens': 4096, + 'supports_functions': True, + 'best_for': ['Quick analysis', 'Basic suggestions', 'Cost effective solutions'], + 'provider': 'openai' + }, + 'gpt-4': { + 'min_tokens': 128, + 'max_tokens': 8192, + 'supports_functions': True, + 'best_for': ['Deep security analysis', 'Complex reasoning', 'Advanced security tasks'], + 'provider': 'openai' + }, + 'gpt-4-turbo': { + 'min_tokens': 128, + 'max_tokens': 128000, + 'supports_functions': 
True, + 'best_for': ['Complex analysis', 'Technical details', 'Latest AI capabilities'], + 'provider': 'openai' + }, + + # Llama Family Models + 'llama2': { + 'min_tokens': 32, + 'max_tokens': 4096, + 'supports_functions': False, + 'best_for': ['Local processing', 'Privacy focused tasks', 'Balanced performance'], + 'provider': 'ollama' + }, + 'llama2-uncensored': { + 'min_tokens': 32, + 'max_tokens': 4096, + 'supports_functions': False, + 'best_for': ['Unfiltered analysis', 'Security research', 'Red team operations'], + 'provider': 'ollama' + }, + 'llama3': { + 'min_tokens': 64, + 'max_tokens': 8192, + 'supports_functions': False, + 'best_for': ['Advanced reasoning', 'Improved context', 'Technical analysis'], + 'provider': 'ollama' + }, + 'llama3.1': { + 'min_tokens': 64, + 'max_tokens': 8192, + 'supports_functions': False, + 'best_for': ['Enhanced comprehension', 'Security assessment', 'Detailed analysis'], + 'provider': 'ollama' + }, + 'llama3.2': { + 'min_tokens': 64, + 'max_tokens': 16384, + 'supports_functions': False, + 'best_for': ['Long context', 'Complex security analysis', 'Advanced reasoning'], + 'provider': 'ollama' + }, + + # Other Specialized Models + 'mistral': { + 'min_tokens': 32, + 'max_tokens': 8192, + 'supports_functions': False, + 'best_for': ['Efficient processing', 'Technical analysis', 'Performance optimization'], + 'provider': 'ollama' + }, + 'mistral-medium': { + 'min_tokens': 32, + 'max_tokens': 8192, + 'supports_functions': False, + 'best_for': ['Balanced analysis', 'Improved accuracy', 'Technical tasks'], + 'provider': 'ollama' + }, + 'mistral-large': { + 'min_tokens': 64, + 'max_tokens': 16384, + 'supports_functions': False, + 'best_for': ['Deep technical analysis', 'Complex reasoning', 'High accuracy'], + 'provider': 'ollama' + }, + 'codellama': { + 'min_tokens': 32, + 'max_tokens': 4096, + 'supports_functions': False, + 'best_for': ['Code analysis', 'Vulnerability assessment', 'Technical documentation'], + 'provider': 'ollama' + }, + 'qwen2.5': { + 'min_tokens': 64, + 'max_tokens': 8192, + 'supports_functions': False, + 'best_for': ['Multilingual analysis', 'Efficient processing', 'Technical understanding'], + 'provider': 'ollama' + }, + 'gemma': { + 'min_tokens': 32, + 'max_tokens': 4096, + 'supports_functions': False, + 'best_for': ['Lightweight analysis', 'Quick assessment', 'General tasks'], + 'provider': 'ollama' + }, + 'solar': { + 'min_tokens': 64, + 'max_tokens': 8192, + 'supports_functions': False, + 'best_for': ['Creative analysis', 'Unique perspectives', 'Alternative approaches'], + 'provider': 'ollama' + }, + 'yi': { + 'min_tokens': 64, + 'max_tokens': 8192, + 'supports_functions': False, + 'best_for': ['Comprehensive analysis', 'Detailed explanations', 'Technical depth'], + 'provider': 'ollama' + } +} + +############################################################################### +# RECOMMENDED MODELS +############################################################################### + +RECOMMENDED_MODELS = { + 'llama2-uncensored': { + 'name': 'llama2-uncensored', + 'description': 'Optimized for security research and penetration testing, unrestricted responses', + 'tags': ['7b', '70b'], + 'size_options': { + '7b': '~4GB RAM', + '70b': '~35GB RAM' + } + }, + 'llama3.1': { + 'name': 'llama3.1', + 'description': 'Advanced model with enhanced security analysis capabilities and better context understanding', + 'tags': ['8b', '70b', '405b'], + 'size_options': { + '8b': '~4GB RAM', + '70b': '~35GB RAM', + '405b': '~200GB RAM' + } + }, + 'llama3.2': { + 
'name': 'llama3.2', + 'description': 'Latest LLaMA model with improved reasoning for complex security scenarios and vulnerability analysis', + 'tags': ['1b', '3b'], + 'size_options': { + '1b': '~1GB RAM', + '3b': '~2GB RAM' + } + }, + 'codellama': { + 'name': 'codellama', + 'description': 'Specialized for code analysis, vulnerability assessment, and secure coding practices', + 'tags': ['7b', '13b', '34b', '70b'], + 'size_options': { + '7b': '~4GB RAM', + '13b': '~8GB RAM', + '34b': '~18GB RAM', + '70b': '~35GB RAM' + } + }, + 'mistral': { + 'name': 'mistral', + 'description': 'Excellent for technical security analysis and threat modeling', + 'tags': ['7b'], + 'size_options': { + '7b': '~4GB RAM' + } + }, + 'dolphin-phi': { + 'name': 'dolphin-phi', + 'description': 'Fast and efficient for real-time security analysis, good balance of performance and accuracy', + 'tags': ['2.7b'], + 'size_options': { + '2.7b': '~2GB RAM' + } + }, + 'openchat': { + 'name': 'openchat', + 'description': 'Strong reasoning capabilities for complex security scenarios and attack chain analysis', + 'tags': ['7b'], + 'size_options': { + '7b': '~4GB RAM' + } + }, + 'solar': { + 'name': 'solar', + 'description': 'Advanced reasoning for security implications and vulnerability impact assessment', + 'tags': ['10.7b'], + 'size_options': { + '10.7b': '~6GB RAM' + } + } +} + +LLM_CONFIG['providers']['ollama']['recommended_models'] = RECOMMENDED_MODELS \ No newline at end of file diff --git a/web/reNgine/llm/llm.py b/web/reNgine/llm/llm.py new file mode 100644 index 000000000..d891df0b1 --- /dev/null +++ b/web/reNgine/llm/llm.py @@ -0,0 +1,265 @@ +from typing import Optional, Dict, Any +import logging +from abc import ABC, abstractmethod +import openai +from langchain_community.llms import Ollama +from reNgine.llm.config import LLM_CONFIG +from reNgine.llm.utils import get_default_llm_model +from reNgine.llm.validators import LLMProvider, LLMResponse +from reNgine.common_func import get_open_ai_key + +logger = logging.getLogger(__name__) + +class BaseLLMGenerator(ABC): + """Base class for LLM generators with common functionality""" + + def __init__(self, provider: Optional[LLMProvider] = None): + """Initialize the LLM generator with optional provider""" + self.api_key = get_open_ai_key() + self.config = LLM_CONFIG + self.model_name = self._get_model_name() + self.provider = provider or self._get_default_provider() + self.ollama = None + + if self.provider == LLMProvider.OLLAMA: + self._setup_ollama() + + @abstractmethod + def _get_model_name(self) -> str: + """Get the model name to use""" + pass + + @abstractmethod + def _get_default_provider(self) -> LLMProvider: + """Get the default provider based on configuration""" + pass + + def _setup_ollama(self) -> None: + """Setup Ollama client with configuration""" + ollama_config = self.config['providers']['ollama'] + self.ollama = Ollama( + base_url=self.config['ollama_url'], + model=self.model_name, + timeout=ollama_config['timeout'] + ) + + def _validate_input(self, input_data: str, model_name: str = None) -> str: + """Validate input data using Pydantic model""" + if not input_data or not isinstance(input_data, str): + raise ValueError("Input data must be a non-empty string") + + # Additional model validation if provided + if model_name and not isinstance(model_name, str): + raise ValueError("Model name must be a string") + + return input_data + +class LLMVulnerabilityReportGenerator(BaseLLMGenerator): + """Generator for vulnerability reports using LLM""" + + def 
_get_model_name(self) -> str: + """Get model name from database or default""" + return get_default_llm_model() + + def _get_default_provider(self) -> LLMProvider: + """Get default provider based on model requirements""" + model_name = self._get_model_name() + if model_name in self.config['providers']['openai']['models']: + return LLMProvider.OPENAI + return LLMProvider.OLLAMA + + def _get_provider_config(self) -> Dict[str, Any]: + """Get provider specific configuration""" + provider_key = self.provider.value + return self.config['providers'][provider_key] + + def _validate_input(self, input_data: str, model_name: str = None) -> str: + """Validate the input data and model name""" + if not input_data or not isinstance(input_data, str): + raise ValueError("Input data must be a non-empty string") + + # Additional model validation if provided + if model_name and not isinstance(model_name, str): + raise ValueError("Model name must be a string") + + return input_data + + def get_vulnerability_report(self, description: str, model_name: str = None) -> dict: + """ + Generate vulnerability report using LLM by asking specific questions for each section + + Args: + description: Raw vulnerability description + model_name: Optional model name to use + + Returns: + dict: Response containing structured data + """ + try: + validated_input = self._validate_input(description, model_name) + vulnerability_prompt = LLM_CONFIG['prompts']['vulnerability'] + context = vulnerability_prompt['context'] + + # Generate each section separately + technical = self._get_section_response(validated_input, context + vulnerability_prompt['technical']) + impact = self._get_section_response(validated_input, context + vulnerability_prompt['impact']) + remediation = self._get_section_response(validated_input, context + vulnerability_prompt['remediation']) + references = self._get_section_response(validated_input, context + vulnerability_prompt['references']) + + # Combine sections into a single response + response = { + "description": technical, + "impact": impact, + "remediation": remediation, + "references": references + } + + logger.debug(f'Response: {response}') + return LLMResponse( + status=True, + **response + ).to_dict() + + except Exception as e: + logger.error(f"Error in get_vulnerability_report: {str(e)}", exc_info=True) + return LLMResponse( + status=False, + error=str(e) + ).to_dict() + + def _get_section_response(self, input_data: str, prompt: str) -> str: + """ + Get response for a specific section using LLM + + Args: + input_data: Validated input data + prompt: Specific prompt for the section + + Returns: + str: Response content for the section + """ + try: + if self.provider == LLMProvider.OLLAMA: + response_content = self._get_ollama_response(prompt, input_data) + else: + response_content = self._get_openai_response(prompt, input_data, model_name=None) + + # Clean and return the response + return response_content.strip() + + except Exception as e: + logger.error(f"Error in _get_section_response: {str(e)}") + return "" + + def _get_ollama_response(self, prompt: str, description: str) -> str: + """Get response from Ollama""" + prompt = f"{prompt}\nUser: {description}" + logger.debug(f'Ollama Prompt: {prompt}') + response = self.ollama(prompt) + logger.debug(f'Ollama Response: {response}') + return str(response) if response is not None else "" + + def _get_openai_response(self, prompt: str, description: str, model_name: str = None) -> str: + """Get response from OpenAI""" + if not self.api_key: + raise 
ValueError("OpenAI API Key not set") + + openai.api_key = self.api_key + + response = openai.ChatCompletion.create( + model=model_name or self.model_name, + messages=[ + {'role': 'system', 'content': prompt}, + {'role': 'user', 'content': description} + ], + **self._get_provider_config() + ) + return response['choices'][0]['message']['content'] + +class LLMAttackSuggestionGenerator(BaseLLMGenerator): + """Generator for attack suggestions using LLM""" + + def _get_model_name(self) -> str: + """Get model name from database or default""" + return get_default_llm_model() + + def _get_default_provider(self) -> LLMProvider: + """Get default provider based on model requirements""" + model_name = self._get_model_name() + if model_name in self.config['providers']['openai']['models']: + return LLMProvider.OPENAI + return LLMProvider.OLLAMA + + def _get_provider_config(self) -> Dict[str, Any]: + """Get provider specific configuration""" + provider_key = self.provider.value + return self.config['providers'][provider_key] + + def _validate_input(self, input_data: str, model_name: str = None) -> str: + """Validate the input data and model name""" + if not input_data or not isinstance(input_data, str): + raise ValueError("Input data must be a non-empty string") + + # Additional model validation if provided + if model_name and not isinstance(model_name, str): + raise ValueError("Model name must be a string") + + return input_data + + def get_attack_suggestion(self, input_data: str, model_name: str = None) -> dict: + """ + Generate attack suggestions using LLM + + Args: + input_data: Reconnaissance data + + Returns: + dict: Response containing status and description + """ + try: + # Validate both input data and model name + validated_input = self._validate_input(input_data, model_name) + + # Get response from appropriate provider + if self.provider == LLMProvider.OLLAMA: + response_content = self._get_ollama_response(validated_input) + else: + response_content = self._get_openai_response(validated_input, model_name) + + return { + 'status': True, + 'description': response_content, + 'input': input_data, + 'model_name': model_name + } + + except Exception as e: + logger.error(f"Error in get_attack_suggestion: {str(e)}", exc_info=True) + return { + 'status': False, + 'error': str(e), + 'input': input_data, + 'model_name': model_name + } + + def _get_ollama_response(self, description: str) -> str: + """Get response from Ollama""" + prompt = f"{self.config['prompts']['attack']}\nUser: {description}" + return self.ollama(prompt) + + def _get_openai_response(self, description: str, model_name: str) -> str: + """Get response from OpenAI""" + if not self.api_key: + raise ValueError("OpenAI API Key not set") + + openai.api_key = self.api_key + + response = openai.ChatCompletion.create( + model=model_name, + messages=[ + {'role': 'system', 'content': self.config['prompts']['attack']}, + {'role': 'user', 'content': description} + ], + **self._get_provider_config() + ) + return response['choices'][0]['message']['content'] diff --git a/web/reNgine/llm/utils.py b/web/reNgine/llm/utils.py new file mode 100644 index 000000000..825f71be7 --- /dev/null +++ b/web/reNgine/llm/utils.py @@ -0,0 +1,87 @@ +from django.contrib import messages +from dashboard.models import OllamaSettings +from reNgine.llm.config import LLM_CONFIG +import logging +from markdown import markdown + +logger = logging.getLogger(__name__) + +def get_default_llm_model(): + """ + Get the default LLM model from database or fallback to default + Returns the 
model name as string + """ + try: + ollama_settings = OllamaSettings.objects.first() + if ollama_settings and ollama_settings.selected_model: + return ollama_settings.selected_model + except Exception as e: + logger.error(f"Error while retrieving default LLM model: {e}") + + # Fallback to default model from config based on provider + try: + if ollama_settings and ollama_settings.use_ollama: + return LLM_CONFIG['providers']['ollama']['default_model'] + return LLM_CONFIG['providers']['openai']['default_model'] + except Exception as e: + logger.error(f"Error while getting default model from config: {e}") + return 'gpt-3.5-turbo' # Ultimate fallback + +def validate_llm_model(request, model_name): + """Check if LLM model exists and is available""" + try: + # Check if model exists in LLMToolkit + if not LLMToolkit.is_model_available(model_name): + messages.info( + request, + f"Model {model_name} is not available. " + f'Configure your LLM models here.', + extra_tags='safe' + ) + return False + return True + except Exception as e: + logger.error(f"Error while validating LLM model: {e}") + return False + +def get_llm_vuln_input_description(title, path): + vulnerability_description = '' + vulnerability_description += f'Vulnerability Title: {title}' + # llm gives concise vulnerability description when a vulnerable URL is provided + vulnerability_description += f'\nVulnerable URL: {path}' + + return vulnerability_description + +def convert_markdown_to_html(markdown_text): + if markdown_text is None: + return "" + + # Extract LLM badge if present (at the beginning of the text) + llm_badge = "" + if markdown_text.startswith('[LLM:'): + llm_name = markdown_text[5:markdown_text.index(']')] + llm_badge = f'Generated by {llm_name}
' + markdown_text = markdown_text[markdown_text.index(']')+1:].strip() + + # Configure Markdown with specific options + html_content = markdown(markdown_text, + extensions=[ + 'fenced_code', + 'tables', + 'nl2br', + 'sane_lists', # Better list handling + 'def_list', # Definition lists support + ], + ) + + # Add Bootstrap classes and clean up formatting + html_content = (html_content + .replace('<ul>', '<ul class="list-unstyled">') + .replace('<li>', '<li class="mb-1">') + .replace('<ol>', '<ul>') # Convert ordered lists to unordered + .replace('</ol>', '</ul>') + .replace('\n\n', '
    ') + .replace('\n', '') + ) + + return llm_badge + html_content diff --git a/web/reNgine/llm/validators.py b/web/reNgine/llm/validators.py new file mode 100644 index 000000000..9860e55db --- /dev/null +++ b/web/reNgine/llm/validators.py @@ -0,0 +1,67 @@ +from typing import Optional, List +from pydantic import BaseModel, Field, validator +from enum import Enum + +class LLMProvider(str, Enum): + OPENAI = "openai" + OLLAMA = "ollama" + +class ModelCapabilities(BaseModel): + min_tokens: int + max_tokens: int + supports_functions: bool + best_for: List[str] + provider: str + +class LLMInputData(BaseModel): + description: str + llm_model: Optional[str] = Field(default=None) + provider: Optional[LLMProvider] = Field(default=None) + capabilities: Optional[ModelCapabilities] = Field(default=None) + + @validator('description') + def validate_description(cls, v): + if not v or len(v.strip()) < 10: + raise ValueError("Description must be at least 10 characters long") + return v.strip() + + class Config: + json_schema_extra = { + "example": { + "description": "SQL Injection vulnerability found in login form", + "llm_model": "gpt-3.5-turbo", + "provider": "openai", + "capabilities": { + "min_tokens": 64, + "max_tokens": 2048, + "supports_functions": True, + "best_for": ["quick_analysis"], + "provider": "openai" + } + } + } + +class LLMResponse: + def __init__(self, status: bool, description: str = None, impact: str = None, + remediation: str = None, references: list = None, error: str = None): + self.status = status + self.description = description + self.impact = impact + self.remediation = remediation + self.references = references or [] + self.error = error + + def get(self, key, default=None): + """Add dictionary-like get method""" + return getattr(self, key, default) + + def to_dict(self): + """Convert to dictionary""" + return { + 'status': self.status, + 'description': self.description, + 'impact': self.impact, + 'remediation': self.remediation, + 'references': self.references, + 'error': self.error + } \ No newline at end of file diff --git a/web/reNgine/routing.py b/web/reNgine/routing.py new file mode 100644 index 000000000..d3dbbc729 --- /dev/null +++ b/web/reNgine/routing.py @@ -0,0 +1,14 @@ +from channels.auth import AuthMiddlewareStack +from channels.routing import ProtocolTypeRouter, URLRouter +from django.urls import re_path +from api.consumers import OllamaDownloadConsumer + +websocket_urlpatterns = [ + re_path(r'^ws/ollama/download/(?P[\w\-\.]+)/$', OllamaDownloadConsumer.as_asgi()), +] + +application = ProtocolTypeRouter({ + 'websocket': AuthMiddlewareStack( + URLRouter(websocket_urlpatterns) + ), +}) \ No newline at end of file diff --git a/web/reNgine/settings.py b/web/reNgine/settings.py index 1a63d823e..8fe3f2dc1 100644 --- a/web/reNgine/settings.py +++ b/web/reNgine/settings.py @@ -50,7 +50,7 @@ DEFAULT_HTTP_TIMEOUT = env.int('DEFAULT_HTTP_TIMEOUT', default=5) # seconds DEFAULT_RETRIES = env.int('DEFAULT_RETRIES', default=1) DEFAULT_THREADS = env.int('DEFAULT_THREADS', default=30) -DEFAULT_GET_GPT_REPORT = env.bool('DEFAULT_GET_GPT_REPORT', default=True) +DEFAULT_GET_LLM_REPORT = env.bool('DEFAULT_GET_LLM_REPORT', default=True) # Globals ALLOWED_HOSTS = ['*'] @@ -94,7 +94,8 @@ 'django_extensions', 'mathfilters', 'drf_yasg', - 'rolepermissions' + 'rolepermissions', + 'channels', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', @@ -357,3 +358,18 @@ def show_toolbar(request): INSTALLED_APPS.append('debug_toolbar') 
MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware') + +# Channels configuration +ASGI_APPLICATION = 'reNgine.routing.application' + +CHANNEL_LAYERS = { + 'default': { + 'BACKEND': 'channels_redis.core.RedisChannelLayer', + 'CONFIG': { + 'hosts': [('redis', 6379)], + }, + }, +} + +# WebSocket settings +WEBSOCKET_ACCEPT_ALL = True # For development, change in production diff --git a/web/reNgine/tasks.py b/web/reNgine/tasks.py index a3ed329fc..dd7c9f9ea 100644 --- a/web/reNgine/tasks.py +++ b/web/reNgine/tasks.py @@ -28,12 +28,12 @@ from metafinder.extractor import extract_metadata_from_google_search from reNgine.celery import app -from reNgine.gpt import GPTVulnerabilityReportGenerator +from reNgine.llm.llm import LLMVulnerabilityReportGenerator +from reNgine.llm.utils import get_llm_vuln_input_description, convert_markdown_to_html from reNgine.celery_custom_task import RengineTask from reNgine.common_func import * from reNgine.definitions import * from reNgine.settings import * -from reNgine.gpt import * from reNgine.utilities import * from scanEngine.models import (EngineType, InstalledExternalTool, Notification, Proxy) from startScan.models import * @@ -2108,7 +2108,7 @@ def vulnerability_scan(self, urls=[], ctx={}, description=None): return None @app.task(name='nuclei_individual_severity_module', queue='main_scan_queue', base=RengineTask, bind=True) -def nuclei_individual_severity_module(self, cmd, severity, enable_http_crawl, should_fetch_gpt_report, ctx={}, description=None): +def nuclei_individual_severity_module(self, cmd, severity, enable_http_crawl, should_fetch_llm_report, ctx={}, description=None): ''' This celery task will run vulnerability scan in parallel. All severities supplied should run in parallel as grouped tasks. 
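The CHANNEL_LAYERS block added to settings.py above points Channels at a Redis backend ('redis', 6379), so any process that loads the Django settings — including the Celery workers now serving llm_queue — can publish events to the WebSocket groups routed in web/reNgine/routing.py. A minimal sketch of that pattern follows; the helper name and the ollama-download-<model> group naming are illustrative assumptions, not part of this diff.

# Sketch only, not part of the diff: push a progress event from synchronous
# worker code through the Redis channel layer configured in settings.py.
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer

def publish_ollama_progress(model_name: str, message: str) -> None:
    """Broadcast a progress message to every WebSocket client in the model's group."""
    channel_layer = get_channel_layer()  # RedisChannelLayer built from CHANNEL_LAYERS
    async_to_sync(channel_layer.group_send)(
        f"ollama-download-{model_name}",  # assumed group name, mirrors the ws/ollama/download/ route
        {"type": "download_progress", "message": message},  # dispatched to a download_progress handler
    )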
@@ -2266,11 +2266,10 @@ def nuclei_individual_severity_module(self, cmd, severity, enable_http_crawl, sh } self.notify(fields=fields) - # after vulnerability scan is done, we need to run gpt if - # should_fetch_gpt_report and openapi key exists + # after vulnerability scan is done, we need to run llm if + # should_fetch_llm_report and openapi key exists - if should_fetch_gpt_report and OpenAiAPIKey.objects.all().first(): - logger.info('Getting Vulnerability GPT Report') + if should_fetch_llm_report and OpenAiAPIKey.objects.exists(): vulns = Vulnerability.objects.filter( scan_history__id=self.scan_id ).filter( @@ -2279,7 +2278,7 @@ def nuclei_individual_severity_module(self, cmd, severity, enable_http_crawl, sh severity=0 ) # find all unique vulnerabilities based on path and title - # all unique vulnerability will go thru gpt function and get report + # all unique vulnerability will go thru llm function and get report # once report is got, it will be matched with other vulnerabilities and saved unique_vulns = set() for vuln in vulns: @@ -2288,79 +2287,110 @@ def nuclei_individual_severity_module(self, cmd, severity, enable_http_crawl, sh unique_vulns = list(unique_vulns) with concurrent.futures.ThreadPoolExecutor(max_workers=DEFAULT_THREADS) as executor: - future_to_gpt = {executor.submit(get_vulnerability_gpt_report, vuln): vuln for vuln in unique_vulns} + future_to_llm = {executor.submit(llm_vulnerability_report, vuln): vuln for vuln in unique_vulns} # Wait for all tasks to complete - for future in concurrent.futures.as_completed(future_to_gpt): - gpt = future_to_gpt[future] + for future in concurrent.futures.as_completed(future_to_llm): + vuln = future_to_llm[future] try: future.result() except Exception as e: - logger.error(f"Exception for Vulnerability {vuln}: {e}") + logger.error(f"Exception for Vulnerability {vuln[0]} - {vuln[1]}: {e}") # Display title and path return None +@app.task(name='llm_vulnerability_report', bind=False, queue='llm_queue') +def llm_vulnerability_report(vulnerability_id=None, vuln_tuple=None): + """ + Generate and store Vulnerability Report using LLM. 
+ Can be called either with a vulnerability_id or a vuln_tuple (title, path) -def get_vulnerability_gpt_report(vuln): - title = vuln[0] - path = vuln[1] - logger.info(f'Getting GPT Report for {title}, PATH: {path}') - # check if in db already exists - stored = GPTVulnerabilityReport.objects.filter( - url_path=path - ).filter( - title=title - ).first() - if stored: - response = { - 'description': stored.description, - 'impact': stored.impact, - 'remediation': stored.remediation, - 'references': [url.url for url in stored.references.all()] - } - else: - report = GPTVulnerabilityReportGenerator() - vulnerability_description = get_gpt_vuln_input_description( - title, - path - ) - response = report.get_vulnerability_description(vulnerability_description) - add_gpt_description_db( - title, - path, - response.get('description'), - response.get('impact'), - response.get('remediation'), - response.get('references', []) - ) + Args: + vulnerability_id (int, optional): Vulnerability ID to fetch Description + vuln_tuple (tuple, optional): Tuple containing (title, path) + + Returns: + dict: LLM response containing description, impact, remediation and references + """ + logger.info('Getting LLM Vulnerability Description') + try: + # Get title and path from either vulnerability_id or vuln_tuple + if vulnerability_id: + lookup_vulnerability = Vulnerability.objects.get(id=vulnerability_id) + lookup_url = urlparse(lookup_vulnerability.http_url) + title = lookup_vulnerability.name + path = lookup_url.path + elif vuln_tuple: + title, path = vuln_tuple + else: + raise ValueError("Either vulnerability_id or vuln_tuple must be provided") + logger.info(f'Processing vulnerability: {title}, PATH: {path}') - for vuln in Vulnerability.objects.filter(name=title, http_url__icontains=path): - vuln.description = response.get('description', vuln.description) - vuln.impact = response.get('impact') - vuln.remediation = response.get('remediation') - vuln.is_gpt_used = True - vuln.save() + # Check if report already exists in database + stored = LLMVulnerabilityReport.objects.filter( + url_path=path, + title=title + ).first() - for url in response.get('references', []): - ref, created = VulnerabilityReference.objects.get_or_create(url=url) - vuln.references.add(ref) + if stored: + response = { + 'status': True, + 'description': stored.formatted_description, + 'impact': stored.formatted_impact, + 'remediation': stored.formatted_remediation, + 'references': stored.formatted_references, + } + logger.info(f'Found stored report: {stored}') + else: + # Generate new report + vulnerability_description = get_llm_vuln_input_description(title, path) + llm_generator = LLMVulnerabilityReportGenerator() + response = llm_generator.get_vulnerability_report(vulnerability_description) + + # Store new report in database + llm_report = LLMVulnerabilityReport() + llm_report.url_path = path + llm_report.title = title + llm_report.description = response.get('description') + llm_report.impact = response.get('impact') + llm_report.remediation = response.get('remediation') + llm_report.references = response.get('references') + llm_report.save() + logger.info('Added new report to database') + + # Update all matching vulnerabilities + vulnerabilities = Vulnerability.objects.filter( + name=title, + http_url__icontains=path + ) + + for vuln in vulnerabilities: + # Update vulnerability fields + vuln.description = response.get('description', vuln.description) + vuln.impact = response.get('impact') + vuln.remediation = response.get('remediation') + 
vuln.is_llm_used = True + vuln.references = response.get('references') + vuln.save() + logger.info(f'Updated vulnerability {vuln.id} with LLM report') + response['description'] = convert_markdown_to_html(response.get('description', '')) + response['impact'] = convert_markdown_to_html(response.get('impact', '')) + response['remediation'] = convert_markdown_to_html(response.get('remediation', '')) + response['references'] = convert_markdown_to_html(response.get('references', '')) -def add_gpt_description_db(title, path, description, impact, remediation, references): - gpt_report = GPTVulnerabilityReport() - gpt_report.url_path = path - gpt_report.title = title - gpt_report.description = description - gpt_report.impact = impact - gpt_report.remediation = remediation - gpt_report.save() + return response + + except Exception as e: + error_msg = f"Error in get_vulnerability_report: {str(e)}" + logger.error(error_msg) + return { + 'status': False, + 'error': error_msg + } - for url in references: - ref, created = VulnerabilityReference.objects.get_or_create(url=url) - gpt_report.references.add(ref) - gpt_report.save() @app.task(name='nuclei_scan', queue='main_scan_queue', base=RengineTask, bind=True) def nuclei_scan(self, urls=[], ctx={}, description=None): @@ -2386,7 +2416,7 @@ def nuclei_scan(self, urls=[], ctx={}, description=None): custom_header = config.get(CUSTOM_HEADER) or self.yaml_configuration.get(CUSTOM_HEADER) if custom_header: custom_header = generate_header_param(custom_header, 'common') - should_fetch_gpt_report = config.get(FETCH_GPT_REPORT, DEFAULT_GET_GPT_REPORT) + should_fetch_llm_report = config.get(FETCH_LLM_REPORT, DEFAULT_GET_LLM_REPORT) proxy = get_random_proxy() nuclei_specific_config = config.get('nuclei', {}) use_nuclei_conf = nuclei_specific_config.get(USE_NUCLEI_CONFIG, False) @@ -2474,7 +2504,7 @@ def nuclei_scan(self, urls=[], ctx={}, description=None): cmd, severity, enable_http_crawl, - should_fetch_gpt_report, + should_fetch_llm_report, ctx=custom_ctx, description=f'Nuclei Scan with severity {severity}' ) @@ -2500,7 +2530,7 @@ def dalfox_xss_scan(self, urls=[], ctx={}, description=None): description (str, optional): Task description shown in UI. 
""" vuln_config = self.yaml_configuration.get(VULNERABILITY_SCAN) or {} - should_fetch_gpt_report = vuln_config.get(FETCH_GPT_REPORT, DEFAULT_GET_GPT_REPORT) + should_fetch_llm_report = vuln_config.get(FETCH_LLM_REPORT, DEFAULT_GET_LLM_REPORT) dalfox_config = vuln_config.get(DALFOX) or {} custom_header = dalfox_config.get(CUSTOM_HEADER) or self.yaml_configuration.get(CUSTOM_HEADER) if custom_header: @@ -2593,11 +2623,11 @@ def dalfox_xss_scan(self, urls=[], ctx={}, description=None): if not vuln: continue - # after vulnerability scan is done, we need to run gpt if - # should_fetch_gpt_report and openapi key exists + # after vulnerability scan is done, we need to run llm if + # should_fetch_llm_report and openapi key exists - if should_fetch_gpt_report and OpenAiAPIKey.objects.all().first(): - logger.info('Getting Dalfox Vulnerability GPT Report') + if should_fetch_llm_report and OpenAiAPIKey.objects.all().first(): + logger.info('Getting Dalfox Vulnerability LLM Report') vulns = Vulnerability.objects.filter( scan_history__id=self.scan_id ).filter( @@ -2611,15 +2641,15 @@ def dalfox_xss_scan(self, urls=[], ctx={}, description=None): _vulns.append((vuln.name, vuln.http_url)) with concurrent.futures.ThreadPoolExecutor(max_workers=DEFAULT_THREADS) as executor: - future_to_gpt = {executor.submit(get_vulnerability_gpt_report, vuln): vuln for vuln in _vulns} + future_to_llm = {executor.submit(llm_vulnerability_report, vuln): vuln for vuln in _vulns} # Wait for all tasks to complete - for future in concurrent.futures.as_completed(future_to_gpt): - gpt = future_to_gpt[future] + for future in concurrent.futures.as_completed(future_to_llm): + vuln = future_to_llm[future] try: future.result() except Exception as e: - logger.error(f"Exception for Vulnerability {vuln}: {e}") + logger.error(f"Exception for Vulnerability {vuln[0]} - {vuln[1]}: {e}") # Display title and path return results @@ -2632,7 +2662,7 @@ def crlfuzz_scan(self, urls=[], ctx={}, description=None): description (str, optional): Task description shown in UI. 
""" vuln_config = self.yaml_configuration.get(VULNERABILITY_SCAN) or {} - should_fetch_gpt_report = vuln_config.get(FETCH_GPT_REPORT, DEFAULT_GET_GPT_REPORT) + should_fetch_llm_report = vuln_config.get(FETCH_LLM_REPORT, DEFAULT_GET_LLM_REPORT) custom_header = vuln_config.get(CUSTOM_HEADER) or self.yaml_configuration.get(CUSTOM_HEADER) if custom_header: custom_header = generate_header_param(custom_header, 'common') @@ -2719,11 +2749,11 @@ def crlfuzz_scan(self, urls=[], ctx={}, description=None): if not vuln: continue - # after vulnerability scan is done, we need to run gpt if - # should_fetch_gpt_report and openapi key exists + # after vulnerability scan is done, we need to run llm if + # should_fetch_llm_report and openapi key exists - if should_fetch_gpt_report and OpenAiAPIKey.objects.all().first(): - logger.info('Getting CRLFuzz Vulnerability GPT Report') + if should_fetch_llm_report and OpenAiAPIKey.objects.all().first(): + logger.info('Getting CRLFuzz Vulnerability LLM Report') vulns = Vulnerability.objects.filter( scan_history__id=self.scan_id ).filter( @@ -2737,15 +2767,15 @@ def crlfuzz_scan(self, urls=[], ctx={}, description=None): _vulns.append((vuln.name, vuln.http_url)) with concurrent.futures.ThreadPoolExecutor(max_workers=DEFAULT_THREADS) as executor: - future_to_gpt = {executor.submit(get_vulnerability_gpt_report, vuln): vuln for vuln in _vulns} + future_to_llm = {executor.submit(llm_vulnerability_report, vuln): vuln for vuln in _vulns} # Wait for all tasks to complete - for future in concurrent.futures.as_completed(future_to_gpt): - gpt = future_to_gpt[future] + for future in concurrent.futures.as_completed(future_to_llm): + vuln = future_to_llm[future] try: future.result() except Exception as e: - logger.error(f"Exception for Vulnerability {vuln}: {e}") + logger.error(f"Exception for Vulnerability {vuln[0]} - {vuln[1]}: {e}") # Display title and path return results @@ -4532,11 +4562,9 @@ def save_vulnerability(**vuln_data): vuln.save() # Save vuln reference - for url in references or []: - ref, created = VulnerabilityReference.objects.get_or_create(url=url) - if created: - vuln.references.add(ref) - vuln.save() + if references: + vuln.references = references + vuln.save() # Save subscan id in vuln object if subscan: @@ -4835,71 +4863,6 @@ def query_ip_history(domain): return get_domain_historical_ip_address(domain) - -@app.task(name='gpt_vulnerability_description', bind=False, queue='gpt_queue') -def gpt_vulnerability_description(vulnerability_id): - """Generate and store Vulnerability Description using GPT. - - Args: - vulnerability_id (Vulnerability Model ID): Vulnerability ID to fetch Description. 
- """ - logger.info('Getting GPT Vulnerability Description') - try: - lookup_vulnerability = Vulnerability.objects.get(id=vulnerability_id) - lookup_url = urlparse(lookup_vulnerability.http_url) - path = lookup_url.path - except Exception as e: - return { - 'status': False, - 'error': str(e) - } - - # check in db GPTVulnerabilityReport model if vulnerability description and path matches - stored = GPTVulnerabilityReport.objects.filter(url_path=path).filter(title=lookup_vulnerability.name).first() - if stored: - response = { - 'status': True, - 'description': stored.description, - 'impact': stored.impact, - 'remediation': stored.remediation, - 'references': [url.url for url in stored.references.all()] - } - else: - vulnerability_description = get_gpt_vuln_input_description( - lookup_vulnerability.name, - path - ) - # one can add more description here later - - gpt_generator = GPTVulnerabilityReportGenerator() - response = gpt_generator.get_vulnerability_description(vulnerability_description) - add_gpt_description_db( - lookup_vulnerability.name, - path, - response.get('description'), - response.get('impact'), - response.get('remediation'), - response.get('references', []) - ) - - # for all vulnerabilities with the same vulnerability name this description has to be stored. - # also the consition is that the url must contain a part of this. - - for vuln in Vulnerability.objects.filter(name=lookup_vulnerability.name, http_url__icontains=path): - vuln.description = response.get('description', vuln.description) - vuln.impact = response.get('impact') - vuln.remediation = response.get('remediation') - vuln.is_gpt_used = True - vuln.save() - - for url in response.get('references', []): - ref, created = VulnerabilityReference.objects.get_or_create(url=url) - vuln.references.add(ref) - vuln.save() - - return response - - @app.task(name='run_wafw00f', bind=False, queue='run_command_queue') def run_wafw00f(url): try: diff --git a/web/reNgine/urls.py b/web/reNgine/urls.py index c8fbb3715..661c60091 100644 --- a/web/reNgine/urls.py +++ b/web/reNgine/urls.py @@ -62,5 +62,6 @@ 'page_not_found/', page_not_found, name='page_not_found'), + path('ws/', include('api.ws_urls')), ] + static(settings.MEDIA_URL, document_root=settings.RENGINE_RESULTS) + \ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) diff --git a/web/reNgine/utilities.py b/web/reNgine/utilities.py index d985d2e00..a23558571 100644 --- a/web/reNgine/utilities.py +++ b/web/reNgine/utilities.py @@ -67,16 +67,6 @@ def format(self, record): record.__dict__.setdefault('task_id', '') return super().format(record) - -def get_gpt_vuln_input_description(title, path): - vulnerability_description = '' - vulnerability_description += f'Vulnerability Title: {title}' - # gpt gives concise vulnerability description when a vulnerable URL is provided - vulnerability_description += f'\nVulnerable URL: {path}' - - return vulnerability_description - - def replace_nulls(obj): if isinstance(obj, str): return obj.replace("\x00", "") diff --git a/web/recon_note/fixtures/recon_note.json b/web/recon_note/fixtures/recon_note.json index 0e9be820a..1892458d3 100644 --- a/web/recon_note/fixtures/recon_note.json +++ b/web/recon_note/fixtures/recon_note.json @@ -3,11 +3,37 @@ "model": "recon_note.todonote", "pk": 1, "fields": { - "title": "Check for vuln", - "description": "There's a lot", + "title": "SQLi", + "description": "SQLi", "scan_history": 1, - "subdomain": 24, + "subdomain": 88, "is_done": false, + "is_important": true, + "project": 1 + } +}, +{ + 
"model": "recon_note.todonote", + "pk": 2, + "fields": { + "title": "XSS", + "description": "", + "scan_history": 1, + "subdomain": 88, + "is_done": false, + "is_important": false, + "project": 1 + } +}, +{ + "model": "recon_note.todonote", + "pk": 3, + "fields": { + "title": "SSRF", + "description": "", + "scan_history": 1, + "subdomain": 88, + "is_done": true, "is_important": false, "project": 1 } diff --git a/web/recon_note/static/note/js/todo.js b/web/recon_note/static/note/js/todo.js index 174df53cd..077b28dc7 100644 --- a/web/recon_note/static/note/js/todo.js +++ b/web/recon_note/static/note/js/todo.js @@ -192,7 +192,7 @@ const dynamicBadgeNotification = function(setTodoCategoryCount) { } }; - // Mettre à jour les badges en fonction de la catégorie + // Update badges based on the category if (todoCategoryCount === 'allList' || todoCategoryCount === undefined) { updateBadge(getBadgeTodoAllListDiv, get_TodoListElementsCount); } diff --git a/web/scanEngine/fixtures/scanEngine.json b/web/scanEngine/fixtures/scanEngine.json index 030b7b148..753a79da6 100644 --- a/web/scanEngine/fixtures/scanEngine.json +++ b/web/scanEngine/fixtures/scanEngine.json @@ -4,8 +4,8 @@ "pk": 1, "fields": { "engine_name": "Full Scan", - "yaml_configuration": "subdomain_discovery: {\r\n 'uses_tools': ['subfinder', 'ctfr', 'sublist3r', 'tlsx', 'oneforall', 'netlas'],\r\n 'enable_http_crawl': true,\r\n 'threads': 30,\r\n 'timeout': 5,\r\n}\r\nhttp_crawl: {}\r\nport_scan: {\r\n 'enable_http_crawl': true,\r\n 'timeout': 5,\r\n # 'exclude_ports': [],\r\n # 'exclude_subdomains': [],\r\n 'ports': ['top-100'],\r\n 'rate_limit': 150,\r\n 'threads': 30,\r\n 'passive': false,\r\n # 'use_naabu_config': false,\r\n # 'enable_nmap': true,\r\n # 'nmap_cmd': '',\r\n # 'nmap_script': '',\r\n # 'nmap_script_args': ''\r\n}\r\nosint: {\r\n 'discover': [\r\n 'emails',\r\n 'metainfo',\r\n 'employees'\r\n ],\r\n 'dorks': [\r\n 'login_pages',\r\n 'admin_panels',\r\n 'dashboard_pages',\r\n 'stackoverflow',\r\n 'social_media',\r\n 'project_management',\r\n 'code_sharing',\r\n 'config_files',\r\n 'jenkins',\r\n 'wordpress_files',\r\n 'php_error',\r\n 'exposed_documents',\r\n 'db_files',\r\n 'git_exposed'\r\n ],\r\n 'intensity': 'normal',\r\n 'documents_limit': 50\r\n}\r\ndir_file_fuzz: {\r\n 'auto_calibration': true,\r\n 'enable_http_crawl': true,\r\n 'rate_limit': 150,\r\n 'extensions': [],\r\n 'follow_redirect': false,\r\n 'max_time': 0,\r\n 'match_http_status': [200, 204],\r\n 'recursive_level': 0,\r\n 'stop_on_error': false,\r\n 'timeout': 5,\r\n 'threads': 30,\r\n 'wordlist_name': 'default', # fuzz-Bo0oM\r\n}\r\nfetch_url: {\r\n 'uses_tools': ['gospider', 'hakrawler', 'waybackurls', 'katana', 'gau'],\r\n 'remove_duplicate_endpoints': true,\r\n 'duplicate_fields': ['content_length', 'page_title'],\r\n 'follow_redirect': false,\r\n 'enable_http_crawl': true,\r\n 'gf_patterns': ['debug_logic', 'idor', 'interestingEXT', 'interestingparams', 'interestingsubs', 'lfi', 'rce', 'redirect', 'sqli', 'ssrf', 'ssti', 'xss'],\r\n 'ignore_file_extensions': ['png', 'jpg', 'jpeg', 'gif', 'mp4', 'mpeg', 'mp3'],\r\n 'threads': 30\r\n}\r\nvulnerability_scan: {\r\n 'run_nuclei': true,\r\n 'run_dalfox': true,\r\n 'run_crlfuzz': true,\r\n 'enable_http_crawl': true,\r\n 'concurrency': 50,\r\n 'intensity': 'normal',\r\n 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_gpt_report': true,\r\n 'nuclei': {\r\n 'use_nuclei_config': false,\r\n 'severities': ['unknown', 'info', 'low', 'medium', 'high', 'critical']\r\n }\r\n}\r\nwaf_detection: 
{\r\n\r\n}\r\nscreenshot: {\r\n 'enable_http_crawl': true,\r\n 'intensity': 'normal',\r\n 'timeout': 10,\r\n 'threads': 40\r\n}\r\n\r\n# custom_header: \"Cookie: Test\"", - "default_engine": true + "yaml_configuration": "# Global vars for all tools\r\n#\r\n# Custom header - FFUF, Nuclei, Dalfox, CRL Fuzz, HTTPx, Fetch URL (Hakrawler, Katana, Gospider)\r\n# custom_header: {\r\n# 'Cookie':'Test',\r\n# 'User-Agent': 'Mozilla/5.0',\r\n# 'Custom-Header': 'My custom header'\r\n# }\r\n# 'user_agent': '' # Dalfox only\r\n# 'enable_http_crawl': true # All tools\r\n# 'timeout': 10 # Subdomain discovery, Screenshot, Port scan, FFUF, Nuclei \r\n# 'threads': 30 # All tools\r\n# 'rate_limit': 150 # Port scan, FFUF, Nuclei\r\n# 'intensity': 'normal' # Screenshot (grab only the root endpoints of each subdomain), Nuclei (reduce number of endpoints to scan), OSINT (not implemented yet)\r\n# 'retries': 1 # Nuclei\r\n\r\nsubdomain_discovery: {\r\n 'uses_tools': ['subfinder', 'ctfr', 'sublist3r', 'tlsx', 'oneforall', 'netlas'], # amass-passive, amass-active, All\r\n 'enable_http_crawl': true,\r\n 'threads': 30,\r\n 'timeout': 5,\r\n # 'use_subfinder_config': false,\r\n # 'use_amass_config': false,\r\n # 'amass_wordlist': 'deepmagic.com-prefixes-top50000'\r\n}\r\nhttp_crawl: {\r\n # 'custom_header': {\r\n # 'Cookie':'Test',\r\n # 'User-Agent': 'Mozilla/5.0'\r\n # },\r\n # 'threads': 30,\r\n # 'follow_redirect': false\r\n}\r\nport_scan: {\r\n 'enable_http_crawl': true,\r\n 'timeout': 5,\r\n # 'exclude_ports': [],\r\n # 'exclude_subdomains': [],\r\n 'ports': ['top-100'],\r\n 'rate_limit': 150,\r\n 'threads': 30,\r\n 'passive': false,\r\n # 'use_naabu_config': false,\r\n # 'enable_nmap': true,\r\n # 'nmap_cmd': '',\r\n # 'nmap_script': '',\r\n # 'nmap_script_args': ''\r\n}\r\nosint: {\r\n 'discover': [\r\n 'emails',\r\n 'metainfo',\r\n 'employees'\r\n ],\r\n 'dorks': [\r\n 'login_pages',\r\n 'admin_panels',\r\n 'dashboard_pages',\r\n 'stackoverflow',\r\n 'social_media',\r\n 'project_management',\r\n 'code_sharing',\r\n 'config_files',\r\n 'jenkins',\r\n 'wordpress_files',\r\n 'php_error',\r\n 'exposed_documents',\r\n 'db_files',\r\n 'git_exposed'\r\n ],\r\n # 'custom_dorks': [],\r\n 'intensity': 'normal',\r\n 'documents_limit': 50\r\n}\r\ndir_file_fuzz: {\r\n # 'custom_header': {\r\n # 'Cookie':'Test',\r\n # 'User-Agent': 'Mozilla/5.0',\r\n # 'Custom-Header': 'My custom header'\r\n # },\r\n 'auto_calibration': true,\r\n 'enable_http_crawl': true,\r\n 'rate_limit': 150,\r\n 'extensions': [],\r\n # 'extensions': ['html', 'php','git','yaml','conf','cnf','config','gz','env','log','db','mysql','bak','asp','aspx','txt','conf','sql','json','yml','pdf'],\r\n 'follow_redirect': false,\r\n 'max_time': 0,\r\n 'match_http_status': [200, 204],\r\n 'recursive_level': 0,\r\n 'stop_on_error': false,\r\n 'timeout': 5,\r\n 'threads': 30,\r\n 'wordlist_name': 'default', # fuzz-Bo0oM\r\n}\r\nfetch_url: {\r\n # 'custom_header': {\r\n # 'Cookie':'Test',\r\n # 'User-Agent': 'Mozilla/5.0',\r\n # 'Custom-Header': 'My custom header'\r\n # },\r\n 'uses_tools': ['gospider', 'hakrawler', 'waybackurls', 'katana', 'gau'],\r\n 'remove_duplicate_endpoints': true,\r\n 'duplicate_fields': ['content_length', 'page_title'],\r\n 'follow_redirect': false,\r\n 'enable_http_crawl': true,\r\n 'gf_patterns': ['debug_logic', 'idor', 'interestingEXT', 'interestingparams', 'interestingsubs', 'lfi', 'rce', 'redirect', 'sqli', 'ssrf', 'ssti', 'xss'],\r\n 'ignore_file_extensions': ['png', 'jpg', 'jpeg', 'gif', 'mp4', 'mpeg', 'mp3'],\r\n 'threads': 30,\r\n # 
'exclude_subdomains': false\r\n}\r\nvulnerability_scan: {\r\n # 'custom_header': {\r\n # 'Cookie':'Test',\r\n # 'User-Agent': 'Mozilla/5.0',\r\n # 'Custom-Header': 'My custom header'\r\n # },\r\n 'run_nuclei': true,\r\n 'run_dalfox': false,\r\n 'run_crlfuzz': false,\r\n 'run_s3scanner': false,\r\n 'enable_http_crawl': true,\r\n 'concurrency': 50,\r\n 'intensity': 'normal',\r\n 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_gpt_report': true,\r\n 'nuclei': {\r\n 'use_nuclei_config': false,\r\n 'severities': ['unknown', 'info', 'low', 'medium', 'high', 'critical'],\r\n # 'tags': [], # Nuclei tags (https://github.com/projectdiscovery/nuclei-templates)\r\n # 'templates': [], # Nuclei templates (https://github.com/projectdiscovery/nuclei-templates)\r\n # 'custom_templates': [] # Nuclei custom templates uploaded in reNgine\r\n }\r\n}\r\nwaf_detection: {\r\n 'enable_http_crawl': true\r\n}\r\nscreenshot: {\r\n 'enable_http_crawl': true,\r\n 'intensity': 'normal',\r\n 'timeout': 10,\r\n 'threads': 40\r\n}", + "default_engine": null } }, { @@ -31,7 +31,7 @@ "pk": 4, "fields": { "engine_name": "Vulnerability Scan", - "yaml_configuration": "subdomain_discovery: {\r\n 'uses_tools': ['subfinder', 'ctfr', 'sublist3r', 'tlsx', 'oneforall', 'netlas'],\r\n 'enable_http_crawl': true,\r\n 'threads': 30,\r\n 'timeout': 5,\r\n}\r\nhttp_crawl: {}\r\nosint: {\r\n 'discover': [\r\n 'emails',\r\n 'metainfo',\r\n 'employees'\r\n ],\r\n 'dorks': [\r\n 'login_pages',\r\n 'admin_panels',\r\n 'dashboard_pages',\r\n 'stackoverflow',\r\n 'social_media',\r\n 'project_management',\r\n 'code_sharing',\r\n 'config_files',\r\n 'jenkins',\r\n 'wordpress_files',\r\n 'php_error',\r\n 'exposed_documents',\r\n 'db_files',\r\n 'git_exposed'\r\n ],\r\n 'intensity': 'normal',\r\n 'documents_limit': 50\r\n}\r\nvulnerability_scan: {\r\n 'run_nuclei': true,\r\n 'run_dalfox': true,\r\n 'run_crlfuzz': true,\r\n 'enable_http_crawl': true,\r\n 'concurrency': 50,\r\n 'intensity': 'normal',\r\n 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_gpt_report': true,\r\n 'nuclei': {\r\n 'use_nuclei_config': false,\r\n 'severities': ['unknown', 'info', 'low', 'medium', 'high', 'critical']\r\n }\r\n}", + "yaml_configuration": "subdomain_discovery: {\r\n 'uses_tools': ['subfinder', 'ctfr', 'sublist3r', 'tlsx', 'oneforall', 'netlas'],\r\n 'enable_http_crawl': true,\r\n 'threads': 30,\r\n 'timeout': 5,\r\n}\r\nhttp_crawl: {}\r\nosint: {\r\n 'discover': [\r\n 'emails',\r\n 'metainfo',\r\n 'employees'\r\n ],\r\n 'dorks': [\r\n 'login_pages',\r\n 'admin_panels',\r\n 'dashboard_pages',\r\n 'stackoverflow',\r\n 'social_media',\r\n 'project_management',\r\n 'code_sharing',\r\n 'config_files',\r\n 'jenkins',\r\n 'wordpress_files',\r\n 'php_error',\r\n 'exposed_documents',\r\n 'db_files',\r\n 'git_exposed'\r\n ],\r\n 'intensity': 'normal',\r\n 'documents_limit': 50\r\n}\r\nvulnerability_scan: {\r\n 'run_nuclei': true,\r\n 'run_dalfox': true,\r\n 'run_crlfuzz': true,\r\n 'enable_http_crawl': true,\r\n 'concurrency': 50,\r\n 'intensity': 'normal',\r\n 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_llm_report': true,\r\n 'nuclei': {\r\n 'use_nuclei_config': false,\r\n 'severities': ['unknown', 'info', 'low', 'medium', 'high', 'critical']\r\n }\r\n}", "default_engine": true } }, @@ -49,19 +49,10 @@ "pk": 6, "fields": { "engine_name": "reNgine Recommended", - "yaml_configuration": "subdomain_discovery: {\r\n 'uses_tools': ['subfinder', 'ctfr', 'sublist3r', 'tlsx', 'oneforall', 'netlas'],\r\n 'enable_http_crawl': 
true,\r\n 'threads': 30,\r\n 'timeout': 5,\r\n}\r\nhttp_crawl: {}\r\nosint: {\r\n 'discover': [\r\n 'emails',\r\n 'metainfo'\r\n ],\r\n 'dorks': [\r\n 'login_pages',\r\n 'admin_panels',\r\n 'dashboard_pages',\r\n 'config_files',\r\n 'exposed_documents',\r\n ],\r\n 'intensity': 'normal',\r\n 'documents_limit': 50\r\n}\r\nvulnerability_scan: {\r\n 'run_nuclei': true,\r\n 'run_dalfox': true,\r\n 'run_crlfuzz': true,\r\n 'enable_http_crawl': false,\r\n 'concurrency': 50,\r\n 'intensity': 'normal',\r\n 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_gpt_report': true,\r\n 'nuclei': {\r\n 'use_nuclei_config': false,\r\n 'severities': ['low', 'medium', 'high', 'critical']\r\n }\r\n}", + "yaml_configuration": "subdomain_discovery: {\r\n 'uses_tools': ['subfinder', 'ctfr', 'sublist3r', 'tlsx', 'oneforall', 'netlas'],\r\n 'enable_http_crawl': true,\r\n 'threads': 30,\r\n 'timeout': 5,\r\n}\r\nhttp_crawl: {}\r\nosint: {\r\n 'discover': [\r\n 'emails',\r\n 'metainfo'\r\n ],\r\n 'dorks': [\r\n 'login_pages',\r\n 'admin_panels',\r\n 'dashboard_pages',\r\n 'config_files',\r\n 'exposed_documents',\r\n ],\r\n 'intensity': 'normal',\r\n 'documents_limit': 50\r\n}\r\nvulnerability_scan: {\r\n 'run_nuclei': true,\r\n 'run_dalfox': true,\r\n 'run_crlfuzz': true,\r\n 'enable_http_crawl': false,\r\n 'concurrency': 50,\r\n 'intensity': 'normal',\r\n 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_llm_report': true,\r\n 'nuclei': {\r\n 'use_nuclei_config': false,\r\n 'severities': ['low', 'medium', 'high', 'critical']\r\n }\r\n}", "default_engine": true } }, -{ - "model": "scanEngine.enginetype", - "pk": 7, - "fields": { - "engine_name": "Full (perso)", - "yaml_configuration": "# Global vars for all tools\r\n#\r\n# Custom header - FFUF, Nuclei, Dalfox, CRL Fuzz, HTTPx, Fetch URL (Hakrawler, Katana, Gospider)\r\n# custom_header: {\r\n# 'Cookie':'Test',\r\n# 'User-Agent': 'Mozilla/5.0',\r\n# 'Custom-Header': 'My custom header'\r\n# }\r\n# 'user_agent': '' # Dalfox only\r\n# 'enable_http_crawl': true # All tools\r\n# 'timeout': 10 # Subdomain discovery, Screenshot, Port scan, FFUF, Nuclei \r\n# 'threads': 30 # All tools\r\n# 'rate_limit': 150 # Port scan, FFUF, Nuclei\r\n# 'intensity': 'normal' # Screenshot (grab only the root endpoints of each subdomain), Nuclei (reduce number of endpoints to scan), OSINT (not implemented yet)\r\n# 'retries': 1 # Nuclei\r\n\r\nsubdomain_discovery: {\r\n 'uses_tools': ['subfinder', 'ctfr', 'sublist3r', 'tlsx', 'oneforall', 'netlas'], # amass-passive, amass-active, All\r\n 'enable_http_crawl': true,\r\n 'threads': 30,\r\n 'timeout': 5,\r\n # 'use_subfinder_config': false,\r\n # 'use_amass_config': false,\r\n # 'amass_wordlist': 'deepmagic.com-prefixes-top50000'\r\n}\r\nhttp_crawl: {\r\n # 'custom_header': {\r\n # 'Cookie':'Test',\r\n # 'User-Agent': 'Mozilla/5.0'\r\n # },\r\n # 'threads': 30,\r\n # 'follow_redirect': false\r\n}\r\nport_scan: {\r\n 'enable_http_crawl': true,\r\n 'timeout': 5,\r\n # 'exclude_ports': [],\r\n # 'exclude_subdomains': [],\r\n 'ports': ['top-100'],\r\n 'rate_limit': 150,\r\n 'threads': 30,\r\n 'passive': false,\r\n # 'use_naabu_config': false,\r\n # 'enable_nmap': true,\r\n # 'nmap_cmd': '',\r\n # 'nmap_script': '',\r\n # 'nmap_script_args': ''\r\n}\r\nosint: {\r\n 'discover': [\r\n 'emails',\r\n 'metainfo',\r\n 'employees'\r\n ],\r\n 'dorks': [\r\n 'login_pages',\r\n 'admin_panels',\r\n 'dashboard_pages',\r\n 'stackoverflow',\r\n 'social_media',\r\n 'project_management',\r\n 'code_sharing',\r\n 'config_files',\r\n 
'jenkins',\r\n 'wordpress_files',\r\n 'php_error',\r\n 'exposed_documents',\r\n 'db_files',\r\n 'git_exposed'\r\n ],\r\n # 'custom_dorks': [],\r\n 'intensity': 'normal',\r\n 'documents_limit': 50\r\n}\r\ndir_file_fuzz: {\r\n # 'custom_header': {\r\n # 'Cookie':'Test',\r\n # 'User-Agent': 'Mozilla/5.0',\r\n # 'Custom-Header': 'My custom header'\r\n # },\r\n 'auto_calibration': true,\r\n 'enable_http_crawl': true,\r\n 'rate_limit': 150,\r\n 'extensions': [],\r\n 'follow_redirect': false,\r\n 'max_time': 0,\r\n 'match_http_status': [200, 204],\r\n 'recursive_level': 0,\r\n 'stop_on_error': false,\r\n 'timeout': 5,\r\n 'threads': 30,\r\n 'wordlist_name': 'default', # fuzz-Bo0oM,\r\n}\r\nfetch_url: {\r\n # 'custom_header': {\r\n # 'Cookie':'Test',\r\n # 'User-Agent': 'Mozilla/5.0',\r\n # 'Custom-Header': 'My custom header'\r\n # },\r\n 'uses_tools': ['gospider', 'hakrawler', 'waybackurls', 'katana', 'gau'],\r\n 'remove_duplicate_endpoints': true,\r\n 'duplicate_fields': ['content_length', 'page_title'],\r\n 'follow_redirect': false,\r\n 'enable_http_crawl': true,\r\n 'gf_patterns': ['debug_logic', 'idor', 'interestingEXT', 'interestingparams', 'interestingsubs', 'lfi', 'rce', 'redirect', 'sqli', 'ssrf', 'ssti', 'xss'],\r\n 'ignore_file_extensions': ['png', 'jpg', 'jpeg', 'gif', 'mp4', 'mpeg', 'mp3'],\r\n 'threads': 30,\r\n # 'exclude_subdomains': false\r\n}\r\nvulnerability_scan: {\r\n # 'custom_header': {\r\n # 'Cookie':'Test',\r\n # 'User-Agent': 'Mozilla/5.0',\r\n # 'Custom-Header': 'My custom header'\r\n # },\r\n 'run_nuclei': true,\r\n 'run_dalfox': false,\r\n 'run_crlfuzz': false,\r\n 'run_s3scanner': false,\r\n 'enable_http_crawl': true,\r\n 'concurrency': 50,\r\n 'intensity': 'normal',\r\n 'rate_limit': 150,\r\n 'retries': 1,\r\n 'timeout': 5,\r\n 'fetch_gpt_report': true,\r\n 'nuclei': {\r\n 'use_nuclei_config': false,\r\n 'severities': ['unknown', 'info', 'low', 'medium', 'high', 'critical'],\r\n # 'tags': [], # Nuclei tags (https://github.com/projectdiscovery/nuclei-templates)\r\n # 'templates': [], # Nuclei templates (https://github.com/projectdiscovery/nuclei-templates)\r\n # 'custom_templates': [] # Nuclei custom templates uploaded in reNgine\r\n }\r\n}\r\nwaf_detection: {\r\n 'enable_http_crawl': true\r\n}\r\nscreenshot: {\r\n 'enable_http_crawl': true,\r\n 'intensity': 'normal',\r\n 'timeout': 10,\r\n 'threads': 40\r\n}", - "default_engine": false - } -}, { "model": "scanEngine.interestinglookupmodel", "pk": 1, @@ -73,6 +64,17 @@ "condition_200_http_lookup": false } }, +{ + "model": "scanEngine.interestinglookupmodel", + "pk": 2, + "fields": { + "keywords": "sql, ftp", + "custom_type": true, + "title_lookup": true, + "url_lookup": true, + "condition_200_http_lookup": true + } +}, { "model": "scanEngine.installedexternaltool", "pk": 1, diff --git a/web/scanEngine/templates/scanEngine/settings/llm_toolkit.html b/web/scanEngine/templates/scanEngine/settings/llm_toolkit.html index cf5434590..6b40f913c 100644 --- a/web/scanEngine/templates/scanEngine/settings/llm_toolkit.html +++ b/web/scanEngine/templates/scanEngine/settings/llm_toolkit.html @@ -14,7 +14,7 @@ {% endblock breadcrumb_title %} {% block page_title %} -LLM Toolkit (Beta) +LLM Toolkit {% endblock page_title %} {% block main_content %} @@ -44,43 +44,56 @@
    {{installed_models|length}} available Models
    Warning: A GPT model is currently selected and requires an API key to be set. Please set the API key in the API Vault.
{% endif %} -
+
{% for model in installed_models %} -
-
-
- + `; - content += `
-
    `; + let references = response.result.references; - for (var reference in response.result.references) { - content += `
  • ${response.result.references[reference]}
  • `; + // Check if references is a string representation of an array + if (typeof references === 'string' && references.startsWith('[') && references.endsWith(']')) { + // Remove the brackets and split by comma + references = references.slice(1, -1).split(',').map(ref => ref.trim().replace(/^'|'$/g, '')); } - - content += `
`; - - + + // Generate HTML content + let referencesContent = ''; + if (Array.isArray(references)) { + referencesContent = '
    '; + references.forEach(ref => { + referencesContent += `
  • ${ref}
  • `; + }); + referencesContent += '
'; + } else { + referencesContent = `

${references}

`; + } + + content += `
+ ${referencesContent} +
`; + content += `
    `; @@ -2749,10 +2766,10 @@ function validURL(str) { // checks for valid http url var pattern = new RegExp('^(https?:\\/\\/)?'+ // protocol '((([a-z\\d]([a-z\\d-]*[a-z\\d])*)\\.)+[a-z]{2,}|'+ // domain name - '((\\d{1,3}\\.){3}\\d{1,3}))'+ // OR ip (v4) address - '(\\:\\d+)?(\\/[-a-z\\d%_.~+]*)*'+ // port and path - '(\\?[;&a-z\\d%_.~+=-]*)?'+ // query string - '(\\#[-a-z\\d_]*)?$','i'); // fragment locator + '((\\\d{1,3}\\.){3}\\d{1,3}))'+ // OR ip (v4) address + '(\\\:\\d+)?(\\/[-a-z\\d%_.~+]*)*'+ // port and path + '(\\\?[;&a-z\\d%_.~+=-]*)?'+ // query string + '(\\\#[-a-z\\d_]*)?$','i'); // fragment locator return !!pattern.test(str); } @@ -2866,7 +2883,7 @@ function render_vuln_offcanvas(vuln){ body += `

    Severity: ${vuln.severity}
    Type: ${vuln.type.toUpperCase()}
    Source: ${vuln.source.toUpperCase()}

    `; if (vuln.description) { - description = vuln.description.replace(new RegExp('\r?\n','g'), '
    '); + // Sanitize with DOMPurify before inserting into the DOM body += ``; } @@ -2894,7 +2911,7 @@ function render_vuln_offcanvas(vuln){
    -

    ${impact}

    +

    ${DOMPurify.sanitize(impact)}

`; } @@ -2911,7 +2928,7 @@ function render_vuln_offcanvas(vuln){
-

${remediation}

+

${DOMPurify.sanitize(remediation)}

`; } @@ -3095,31 +3112,41 @@ function render_vuln_offcanvas(vuln){
`; - if (vuln.references.length) { - body += `
-
- - References - -
-
-
    `; + let references = vuln.references; - vuln.references.forEach(reference => { - body += `
  • ${htmlEncode(reference.url)}
  • `; - }); + // Check if references is a string representation of an array + if (typeof references === 'string' && references.startsWith('[') && references.endsWith(']')) { + // Remove the brackets and split by comma + references = references.slice(1, -1).split(',').map(ref => ref.trim().replace(/^'|'$/g, '')); + } - body += ` -
-
-
`; + // Generate HTML content + let referencesContent = ''; + if (Array.isArray(references)) { + referencesContent = '
    '; + references.forEach(ref => { + referencesContent += `
  • ${ref}
  • `; + }); + referencesContent += '
'; + } else { + referencesContent = `

${references}

`; } - if (vuln.is_gpt_used) { - body += `(GPT was used to generate vulnerability details.)`; + body += `
+
+ + References + +
+
+ ${referencesContent} +
+
`; + + if (vuln.is_llm_used) { + body += `(LLM was used to generate vulnerability details.)`; } @@ -3143,7 +3170,7 @@ function showSwalLoader(title, text){ }); } -async function send_gpt_api_request(endpoint_url, vuln_id){ +async function send_llm_api_request(endpoint_url, vuln_id){ const api = `${endpoint_url}?format=json&id=${vuln_id}`; try { const response = await fetch(api, { @@ -3156,23 +3183,22 @@ async function send_gpt_api_request(endpoint_url, vuln_id){ if (!response.ok) { throw new Error('Request failed'); } - const data = await response.json(); - return data; + return await response.json(); } catch (error) { throw new Error('Request failed'); } } -async function fetch_gpt_vuln_details(endpoint_url, id, title) { +async function fetch_llm_vuln_details(endpoint_url, id, title) { var loader_title = "Loading..."; - var text = 'Please wait while the GPT is generating vulnerability description.' + var text = 'Please wait while the LLM is generating vulnerability description.'; try { showSwalLoader(loader_title, text); - const data = await send_gpt_api_request(endpoint_url, id); + const data = await send_llm_api_request(endpoint_url, id); Swal.close(); if (data.status) { - render_gpt_vuln_modal(data, title); + render_llm_vuln_modal(data, title); } else{ Swal.close(); @@ -3194,7 +3220,10 @@ async function fetch_gpt_vuln_details(endpoint_url, id, title) { } -function render_gpt_vuln_modal(data, title){ +function render_llm_vuln_modal(data, title){ + // Change modal size to xl + $('#modal_dialog .modal-dialog').removeClass('modal-lg').addClass('modal-xl'); + $('#modal_dialog .modal-title').empty(); $('#modal_dialog .modal-text').empty(); $('#modal_dialog .modal-footer').empty(); @@ -3208,16 +3237,13 @@ function render_gpt_vuln_modal(data, title){

    Remediation
    ${data.remediation}
    References
-
+   ${data.references}
    `;
- data.references.forEach(reference => {
-   modal_content += `${reference}`;
- });
-
- modal_content += '
'; - - $('#modal_dialog .modal-text').append(modal_content); + // Sanitize with DOMPurify before inserting into the DOM + $('#modal_dialog .modal-text').append( + DOMPurify.sanitize(modal_content) + ); $('#modal_dialog').modal('show'); } @@ -3250,60 +3276,283 @@ function endpoint_datatable_col_visibility(endpoint_table){ } -async function send_gpt__attack_surface_api_request(endpoint_url, subdomain_id){ - const api = `${endpoint_url}?format=json&subdomain_id=${subdomain_id}`; - try { - const response = await fetch(api, { - method: 'GET', - credentials: "same-origin", - headers: { - "X-CSRFToken": getCookie("csrftoken") +async function send_llm__attack_surface_api_request(endpoint_url, id, force_regenerate = false, check_only = false, llm_model = null) { + const params = new URLSearchParams({ + subdomain_id: id, + force_regenerate: force_regenerate, + check_only: check_only + }); + + // Only add llm_model if it's not null + if (llm_model) { + params.append('llm_model', llm_model); + } + + const response = await fetch(`${endpoint_url}?${params}`); + return await response.json(); +} + +async function regenerateAttackSurface(endpoint_url, id) { + try { + // Show model selection dialog with force_regenerate flag + await showModelSelectionDialog(endpoint_url, id, true); + } catch (error) { + console.error(error); + Swal.fire({ + icon: 'error', + title: 'Error', + text: 'Something went wrong while regenerating the analysis.', + }); + } +} + +async function show_attack_surface_modal(endpoint_url, id) { + try { + // First check if we have cached results without triggering analysis + const initialResponse = await send_llm__attack_surface_api_request(endpoint_url, id, false, true); + + if (initialResponse.status && initialResponse.description) { + showAttackSurfaceModal(initialResponse, endpoint_url, id); + return; + } + + // If no cached results, show model selection + await showModelSelectionDialog(endpoint_url, id); + } catch (error) { + console.error(error); + Swal.fire({ + icon: 'error', + title: 'Error', + text: 'Something went wrong!', + }); + } +} + +async function showModelSelectionDialog(endpoint_url, id, force_regenerate = false) { + try { + // Fetch models from the unified endpoint that combines GPT and Ollama models + const response = await fetch('/api/tools/llm_models'); + const data = await response.json(); + + if (!data.status) { + throw new Error(data.error || 'Failed to fetch models'); + } + + // Change modal size to xl + $('#modal_dialog .modal-dialog').removeClass('modal-lg').addClass('modal-xl'); + + // Keep all the existing model selection code + window.generateAttackSurface = async () => { + const selectedModel = $('input[name="llm_model"]:checked').val(); + if (!selectedModel) { + Swal.fire({ + title: 'Error', + text: 'Please select a model', + icon: 'error' + }); + return; + } + + try { + // Update selected model in database first + const encoded_model = encodeURIComponent(selectedModel); + const updateResponse = await fetch(`/api/tool/ollama/${encoded_model}/`, { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'X-CSRFToken': getCookie('csrftoken') + }, + body: JSON.stringify({ model: selectedModel }) + }); + + const updateData = await updateResponse.json(); + if (!updateData.status) { + throw new Error('Failed to update selected model'); } - }); - if (!response.ok) { - throw new Error('Request failed'); - } - const data = await response.json(); - return data; - } catch (error) { - throw new Error('Request failed'); - } -} - - -async function 
show_attack_surface_modal(endpoint_url, id){ - var loader_title = "Loading..."; - var text = 'Please wait while the GPT is generating attack surface.' - try { - showSwalLoader(loader_title, text); - const data = await send_gpt__attack_surface_api_request(endpoint_url,id); - Swal.close(); - if (data.status) { - $('#modal_dialog .modal-title').html(`Attack Surface Suggestion for ${data.subdomain_name} (BETA)`); - $('#modal_dialog .modal-text').empty(); - $('#modal_dialog .modal-text').append(data.description.replace(new RegExp('\r?\n','g'), '
')); - $('#modal_dialog').modal('show'); - } - else{ - Swal.close(); - Swal.fire({ - icon: 'error', - title: 'Oops...', - text: data.error, - }); - } - } catch (error) { - console.error(error); - Swal.close(); - Swal.fire({ - icon: 'error', - title: 'Oops...', - text: 'Something went wrong!', - }); - } + + // Then proceed with attack surface analysis + var loader_title = "Loading..."; + var text = 'Please wait while the LLM is generating attack surface.'; + showSwalLoader(loader_title, text); + const data = await send_llm__attack_surface_api_request(endpoint_url, id, force_regenerate, false, selectedModel); + Swal.close(); + + if (data.status) { + showAttackSurfaceModal(data, endpoint_url, id); + } else { + Swal.fire({ + icon: 'error', + title: 'Oops...', + text: data.error, + }); + } + } catch (error) { + console.error(error); + Swal.close(); + Swal.fire({ + icon: 'error', + title: 'Oops...', + text: 'Something went wrong!', + }); + } + }; + + // Continue with existing model selection UI code... + const allModels = data.models; + const selectedModel = data.selected_model; + + let modelOptions = ''; + allModels.forEach(model => { + const modelName = model.name; + const capabilities = model.capabilities || {}; + const isLocal = model.is_local || false; + + modelOptions += ` +
+        ${modelName}
+        ${modelName === selectedModel ? 'Selected' : ''}
+        ${!isLocal ? 'Remote Model - API Key Required' : 'Locally installed model'}
+        ${isLocal ? 'Local model' : 'OpenAI'}
+        ${model.details ? `
+          ${model.details.family}
+          ${model.details.parameter_size} Parameters
+        ` : ''}
+        Best for:
+        ${capabilities.best_for ? capabilities.best_for.map(cap =>
+          `${cap}`
+        ).join('') : 'General analysis'}
+      `;
+    });
+
+    $('#modal_dialog .modal-title').html('Select LLM Model for Attack Surface Analysis');
+    $('#modal_dialog .modal-text').empty();
+    $('#modal_dialog .modal-text').append(`
+      Select the LLM model to use:
+      ${modelOptions}
+
+ `); + + $('#modal_dialog').modal('show'); + } catch (error) { + console.error(error); + Swal.fire({ + icon: 'error', + title: 'Error', + text: 'Unable to fetch LLM models. Please check configuration.', + footer: 'Configure LLM models' + }); + } +} + +async function deleteAttackSurfaceAnalysis(endpoint_url, id) { + try { + const result = await Swal.fire({ + title: 'Delete Analysis?', + text: "This will permanently delete the current attack surface analysis. This action cannot be undone.", + icon: 'warning', + showCancelButton: true, + confirmButtonColor: '#d33', + cancelButtonColor: '#3085d6', + confirmButtonText: 'Yes, delete it!', + cancelButtonText: 'Cancel' + }); + + if (result.isConfirmed) { + showSwalLoader("Deleting...", "Please wait while the analysis is being deleted."); + + const response = await fetch(`${endpoint_url}?subdomain_id=${id}`, { + method: 'DELETE', + headers: { + 'X-CSRFToken': getCookie('csrftoken') + } + }); + + const data = await response.json(); + Swal.close(); + + if (data.status) { + Swal.fire({ + icon: 'success', + title: 'Deleted!', + text: 'The analysis has been deleted successfully.', + showConfirmButton: false, + timer: 1500 + }); + $('#modal_dialog').modal('hide'); + } else { + throw new Error(data.error || 'Failed to delete analysis'); + } + } + } catch (error) { + console.error(error); + Swal.fire({ + icon: 'error', + title: 'Error', + text: error.message || 'Something went wrong while deleting the analysis!' + }); + } +} + +function showAttackSurfaceModal(data, endpoint_url, id) { + $('#modal_dialog .modal-dialog').removeClass('modal-lg').addClass('modal-xl'); + $('#modal_dialog .modal-title').html(`Attack Surface Suggestion for ${data.subdomain_name}`); + $('#modal_dialog .modal-text').empty(); + $('#modal_dialog .modal-text').append( + DOMPurify.sanitize(data.description) + + `
+
+ + +
+
` + ); + $('#modal_dialog').modal('show'); } - function convertToCamelCase(inputString) { // Converts camel case string to title // Split the input string by underscores @@ -3328,3 +3577,80 @@ function handleHashInUrl(){ } } } + +function showLLMModelSelectionModal(callback) { + $('#modal_dialog .modal-title').html('Select LLM Model'); + $('#modal_dialog .modal-text').empty(); + + // Get available models + fetch('/api/tool/ollama/') + .then(response => response.json()) + .then(data => { + const models = data.models; + const selectedModel = data.selected_model; + + let modelOptions = ''; + models.forEach(model => { + modelOptions += ` +
+
+        `;
+    });
+
+    $('#modal_dialog .modal-text').append(`
+      Select the LLM model to use for vulnerability analysis:
+      ${modelOptions}
+
+ `); + + $('#modal_dialog').modal('show'); + }); +} + +function selectLLMModel() { + const selectedModel = $('input[name="llm_model"]:checked').val(); + if (!selectedModel) { + Swal.fire({ + title: 'Error', + text: 'Please select a model', + icon: 'error' + }); + return; + } + + // Update selected model in database + const encoded_model = encodeURIComponent(selectedModel); + fetch(`/api/tool/ollama/${encoded_model}/`, { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'X-CSRFToken': getCookie('csrftoken') + }, + body: JSON.stringify({ model: selectedModel }) + }) + .then(response => response.json()) + .then(data => { + if (data.status) { + $('#modal_dialog').modal('hide'); + // Continue with scan + startScan(); + } else { + Swal.fire({ + title: 'Error', + text: 'Unable to set selected model', + icon: 'error' + }); + } + }); +} diff --git a/web/static/custom/vuln_datatables.js b/web/static/custom/vuln_datatables.js index 10f9ec4c3..de06abd32 100644 --- a/web/static/custom/vuln_datatables.js +++ b/web/static/custom/vuln_datatables.js @@ -33,7 +33,7 @@ const vuln_datatable_columns = [ {'data': 'template_id'}, {'data': 'impact'}, {'data': 'remediation'}, - {'data': 'is_gpt_used'}, + {'data': 'is_llm_used'}, ]; const vuln_datatable_page_length = 50; diff --git a/web/targetApp/fixtures/targetApp.json b/web/targetApp/fixtures/targetApp.json index 0a23be7c3..0db1fffe2 100644 --- a/web/targetApp/fixtures/targetApp.json +++ b/web/targetApp/fixtures/targetApp.json @@ -1,507 +1,4 @@ [ -{ - "model": "targetApp.historicalip", - "pk": 1, - "fields": { - "ip": "44.228.249.3", - "location": "Boardman - United States", - "owner": "AMAZON-02", - "last_seen": "AMAZON-02" - } -}, -{ - "model": "targetApp.historicalip", - "pk": 2, - "fields": { - "ip": "18.192.172.30", - "location": "Frankfurt am Main - Germany", - "owner": "AMAZON-02", - "last_seen": "AMAZON-02" - } -}, -{ - "model": "targetApp.historicalip", - "pk": 3, - "fields": { - "ip": "176.28.50.165", - "location": "Strasbourg - France", - "owner": "Host Europe GmbH", - "last_seen": "Host Europe GmbH" - } -}, -{ - "model": "targetApp.historicalip", - "pk": 4, - "fields": { - "ip": "50.116.82.164", - "location": "United States", - "owner": "UNIFIEDLAYER-AS-1", - "last_seen": "UNIFIEDLAYER-AS-1" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 1, - "fields": { - "name": "2xax.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 2, - "fields": { - "name": "accunetix.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 3, - "fields": { - "name": "acunetix.asia" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 4, - "fields": { - "name": "acunetix.at" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 5, - "fields": { - "name": "acunetix.biz" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 6, - "fields": { - "name": "acunetix.co" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 7, - "fields": { - "name": "acunetix.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 8, - "fields": { - "name": "acunetix.in" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 9, - "fields": { - "name": "acunetix.info" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 10, - "fields": { - "name": "acunetix.jp" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 11, - "fields": { - "name": "acunetix.net" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 12, - "fields": { - "name": "acunetix.org" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 13, - "fields": { - 
"name": "acunetix.tw" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 14, - "fields": { - "name": "bxss.me" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 15, - "fields": { - "name": "free-security-audit.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 16, - "fields": { - "name": "free-security-scan.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 17, - "fields": { - "name": "mbsa-reporter.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 18, - "fields": { - "name": "mbsareporter.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 19, - "fields": { - "name": "networkdefender.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 20, - "fields": { - "name": "networkdefender.net" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 21, - "fields": { - "name": "security-analyzer-reporter.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 22, - "fields": { - "name": "sitesecurityaudit.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 23, - "fields": { - "name": "sitesecuritymonitor.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 24, - "fields": { - "name": "sitesecuritymonitor.net" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 25, - "fields": { - "name": "sql-injection-audit.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 26, - "fields": { - "name": "sql-injection-scan.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 27, - "fields": { - "name": "sqlinjectionscanner.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 28, - "fields": { - "name": "vulnweb.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 29, - "fields": { - "name": "web-security-audit.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 30, - "fields": { - "name": "web-site-defender.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 31, - "fields": { - "name": "web-sitedefender.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 32, - "fields": { - "name": "website-audit.org" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 33, - "fields": { - "name": "website-defender.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 34, - "fields": { - "name": "websitedefender.asia" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 35, - "fields": { - "name": "websitedefender.at" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 36, - "fields": { - "name": "websitedefender.co" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 37, - "fields": { - "name": "websitedefender.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 38, - "fields": { - "name": "websitesecuritycenter.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 39, - "fields": { - "name": "websitesecuritymonitor.com" - } -}, -{ - "model": "targetApp.relateddomain", - "pk": 40, - "fields": { - "name": "webvulnerabilityscanner.com" - } -}, -{ - "model": "targetApp.registrar", - "pk": 1, - "fields": { - "name": "Eurodns S.A.", - "phone": "+352.27220150", - "email": "legalservices@eurodns.com", - "url": null - } -}, -{ - "model": "targetApp.domainregistration", - "pk": 1, - "fields": { - "name": "Acunetix Acunetix", - "organization": "Acunetix Ltd", - "address": "3rd Floor,, J&C Building,, Road Town", - "city": "Tortola", - "state": null, - "zip_code": "VG1110", - "country": "VG", - "email": "administrator@acunetix.com", - "phone": "+1.23456789", - "fax": null, - "id_str": null - } -}, -{ - "model": 
"targetApp.domainregistration", - "pk": 2, - "fields": { - "name": null, - "organization": null, - "address": null, - "city": null, - "state": null, - "zip_code": null, - "country": null, - "email": null, - "phone": null, - "fax": null, - "id_str": null - } -}, -{ - "model": "targetApp.whoisstatus", - "pk": 1, - "fields": { - "name": "clienttransferprohibited" - } -}, -{ - "model": "targetApp.nameserver", - "pk": 1, - "fields": { - "name": "ns1.eurodns.com" - } -}, -{ - "model": "targetApp.nameserver", - "pk": 2, - "fields": { - "name": "ns2.eurodns.com" - } -}, -{ - "model": "targetApp.nameserver", - "pk": 3, - "fields": { - "name": "ns3.eurodns.com" - } -}, -{ - "model": "targetApp.nameserver", - "pk": 4, - "fields": { - "name": "ns4.eurodns.com" - } -}, -{ - "model": "targetApp.dnsrecord", - "pk": 1, - "fields": { - "name": "44.228.249.3", - "type": "a" - } -}, -{ - "model": "targetApp.dnsrecord", - "pk": 2, - "fields": { - "name": "v=spf1 -all", - "type": "txt" - } -}, -{ - "model": "targetApp.dnsrecord", - "pk": 3, - "fields": { - "name": "google-site-verification=4lqorv-lti-d4gpxtbeqwmfnwff7uaazqc9gzvhukbw", - "type": "txt" - } -}, -{ - "model": "targetApp.domaininfo", - "pk": 1, - "fields": { - "dnssec": false, - "created": "2010-06-14T00:00:00Z", - "updated": "2023-05-26T10:04:20Z", - "expires": "2025-06-13T00:00:00Z", - "geolocation_iso": "VG", - "registrar": 1, - "registrant": 1, - "admin": 2, - "tech": 2, - "whois_server": "whois.eurodns.com", - "status": [ - 1 - ], - "name_servers": [ - 1, - 2, - 3, - 4 - ], - "dns_records": [ - 1, - 2, - 3 - ], - "related_domains": [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - 32, - 33, - 34, - 35, - 36, - 37, - 38, - 39, - 40 - ], - "related_tlds": [], - "similar_domains": [], - "historical_ips": [ - 1, - 2, - 3, - 4 - ] - } -}, { "model": "targetApp.domain", "pk": 1, @@ -510,10 +7,10 @@ "h1_team_handle": "", "ip_address_cidr": null, "description": "", - "insert_date": "2024-09-03T21:23:39.098Z", - "start_scan_date": "2024-09-04T19:29:56.563Z", + "insert_date": "2024-11-13T02:02:14.438Z", + "start_scan_date": "2024-11-13T02:04:14.663Z", "request_headers": null, - "domain_info": 1, + "domain_info": null, "project": 1 } } diff --git a/web/targetApp/templates/target/summary.html b/web/targetApp/templates/target/summary.html index 0eac1dfc3..2807360a9 100644 --- a/web/targetApp/templates/target/summary.html +++ b/web/targetApp/templates/target/summary.html @@ -1150,7 +1150,7 @@

- + @@ -1534,7 +1534,7 @@

diff --git a/web/templates/report/template.html b/web/templates/report/template.html index f3de39c47..f31f010f7 100644 --- a/web/templates/report/template.html +++ b/web/templates/report/template.html @@ -936,7 +936,7 @@

Vulnerability Breakdown by Severity

        {% regroup all_vulnerabilities by get_path as grouped_vulnerabilities %}
        {% for vulnerabilities in grouped_vulnerabilities %}
        {% for vulnerability in vulnerabilities.list %}
-
+

{{vulnerability.name}}
in {{vulnerabilities.grouper}}
@@ -986,17 +986,17 @@

        {% if vulnerability.description %}
        Description
-       {{vulnerability.description|linebreaks}}
+       {{vulnerability.description|linebreaks|safe}}
        {% endif %}
        {% if vulnerability.impact %}
        Impact
-       {{vulnerability.impact|linebreaks}}
+       {{vulnerability.impact|linebreaks|safe}}
        {% endif %}
        {% if vulnerability.remediation %}
        Remediation
-       {{vulnerability.remediation|linebreaks}}
+       {{vulnerability.remediation|linebreaks|safe}}
        {% endif %}
Vulnerable URL(s)
@@ -1020,15 +1020,19 @@

        {% endfor %}
        {% endfor %} -->
-       {% if vulnerability.references.all %}
+       {% if vulnerability.references %}
        References
-       {% for ref in vulnerability.references.all %}
-       {{ref}}
-       {% endfor %}
+       {% if vulnerability.references|length > 0 and vulnerability.references.0 %}
+       {% for reference in vulnerability.references %}
+       {{ reference }}
+       {% endfor %}
+       {% else %}
+       {{ vulnerability.references }}
+       {% endif %}
        {% endif %}
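The {% else %} branch above prints vulnerability.references verbatim when it is not an indexable list, mirroring the client-side normalization this patch adds in custom.js. Purely for illustration, a server-side equivalent could look like the sketch below; the helper name normalize_references is hypothetical and not part of this patch.

# Illustrative sketch only, not part of the patch: mirrors the custom.js logic
# that turns a stringified list such as "['https://a', 'https://b']" into a real list.
def normalize_references(references):
    if isinstance(references, str) and references.startswith('[') and references.endswith(']'):
        return [ref.strip().strip("'\"") for ref in references[1:-1].split(',') if ref.strip()]
    return references if isinstance(references, list) else [references]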

diff --git a/web/tests/test_llm.py b/web/tests/test_llm.py new file mode 100644 index 000000000..917a73b44 --- /dev/null +++ b/web/tests/test_llm.py @@ -0,0 +1,137 @@ +from unittest.mock import patch +from django.test import TestCase +from rest_framework import status +from django.urls import reverse + +from reNgine.llm.config import MODEL_REQUIREMENTS +from reNgine.llm.llm import LLMVulnerabilityReportGenerator, LLMAttackSuggestionGenerator +from reNgine.llm.validators import LLMProvider +from utils.test_base import BaseTestCase + + +class TestLLMBase(BaseTestCase): + """Base test class for LLM functionality.""" + + def setUp(self): + super().setUp() + self.data_generator.create_project_base() + self.mock_llm_response = { + "status": True, + "description": "Test vulnerability description", + "impact": "Test impact description", + "remediation": "Test remediation steps", + "references": ["https://test.com/ref1", "https://test.com/ref2"] + } + + +class TestLLMVulnerabilityReport(TestLLMBase): + """Test cases for LLM Vulnerability Report Generator.""" + + def setUp(self): + super().setUp() + self.generator = LLMVulnerabilityReportGenerator() + + @patch('reNgine.llm.llm.LLMVulnerabilityReportGenerator._get_openai_response') + def test_get_vulnerability_report_success(self, mock_get_response): + """Test successful vulnerability report generation.""" + mock_get_response.return_value = "Test section content" + + response = self.generator.get_vulnerability_report("Test input") + self.assertTrue(response["status"]) + self.assertIsNotNone(response["description"]) + self.assertIsNotNone(response["impact"]) + self.assertIsNotNone(response["remediation"]) + self.assertIsNotNone(response["references"]) + + def test_validate_input_success(self): + """Test input validation success.""" + input_data = "Detailed vulnerability description for testing" + validated = self.generator._validate_input(input_data) + self.assertEqual(validated, input_data) + + @patch('reNgine.llm.llm.LLMVulnerabilityReportGenerator._get_section_response') + def test_get_vulnerability_report_failure(self, mock_get_section): + """Test vulnerability report generation failure.""" + # Mock section response to raise an exception + mock_get_section.side_effect = Exception("API Error") + + response = self.generator.get_vulnerability_report("Test input") + self.assertFalse(response["status"]) + self.assertIsNotNone(response["error"]) + self.assertEqual(response["error"], "API Error") + + +class TestLLMAttackSuggestion(TestLLMBase): + """Test cases for LLM Attack Suggestion Generator.""" + + def setUp(self): + super().setUp() + self.generator = LLMAttackSuggestionGenerator() + + @patch('reNgine.llm.llm.LLMAttackSuggestionGenerator.get_attack_suggestion') + def test_get_attack_suggestion_success(self, mock_get_suggestion): + """Test successful attack suggestion generation.""" + mock_suggestion = "Test attack suggestion" + mock_get_suggestion.return_value = { + "status": True, + "description": mock_suggestion, + "input": "Test input", + "model_name": None + } + + api_url = reverse("api:llm_get_possible_attacks") + response = self.client.get( + api_url, + {"subdomain_id": self.data_generator.subdomain.id} + ) + + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertTrue(response.data["status"]) + # Check if the suggestion is part of the formatted HTML response + self.assertIn(mock_suggestion, response.data["description"]) + + def test_validate_input_success(self): + """Test input validation success.""" + input_data = "Detailed 
reconnaissance data for testing" + validated = self.generator._validate_input(input_data) + self.assertEqual(validated, input_data) + + @patch('reNgine.llm.llm.LLMAttackSuggestionGenerator._get_openai_response') + def test_get_attack_suggestion_failure(self, mock_get_response): + """Test attack suggestion generation failure.""" + mock_get_response.side_effect = Exception("API Error") + + response = self.generator.get_attack_suggestion("Test input") + self.assertFalse(response["status"]) + self.assertIsNotNone(response["error"]) + + def test_get_provider_config(self): + """Test provider configuration retrieval""" + generator = LLMAttackSuggestionGenerator(provider=LLMProvider.OLLAMA) + config = generator._get_provider_config() + self.assertIn('default_model', config) + self.assertIn('models', config) + self.assertIn('timeout', config) + + def test_model_capabilities(self): + """Test model capabilities access""" + generator = LLMAttackSuggestionGenerator() + model_name = generator._get_model_name() + self.assertIn(model_name, MODEL_REQUIREMENTS) + self.assertIn('provider', MODEL_REQUIREMENTS[model_name]) + + +class TestLLMProviders(TestCase): + """Test cases for LLM providers configuration.""" + + def test_openai_provider_config(self): + """Test OpenAI provider configuration.""" + generator = LLMVulnerabilityReportGenerator(provider=LLMProvider.OPENAI) + self.assertEqual(generator.provider, LLMProvider.OPENAI) + self.assertIsNone(generator.ollama) + + def test_ollama_provider_config(self): + """Test Ollama provider configuration.""" + generator = LLMVulnerabilityReportGenerator(provider=LLMProvider.OLLAMA) + self.assertEqual(generator.provider, LLMProvider.OLLAMA) + self.assertIsNotNone(generator.ollama) \ No newline at end of file
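The generators exercised by these tests can also be driven directly when debugging provider configuration. The snippet below is an illustrative sketch based only on the imports and calls used in the tests above (the constructors, the LLMProvider enum, and result dictionaries keyed by status/description/error); it assumes a configured OpenAI API key or a reachable Ollama instance, and the example inputs are made up.

# Illustrative usage sketch based on the calls exercised by web/tests/test_llm.py.
from reNgine.llm.llm import LLMVulnerabilityReportGenerator, LLMAttackSuggestionGenerator
from reNgine.llm.validators import LLMProvider

# Vulnerability report via the default provider
report_gen = LLMVulnerabilityReportGenerator()
report = report_gen.get_vulnerability_report("Reflected XSS in the q parameter of /search")
if report["status"]:
    print(report["description"], report["impact"], report["remediation"], report["references"])
else:
    print("Report generation failed:", report["error"])

# Attack-surface suggestion against a local Ollama model
attack_gen = LLMAttackSuggestionGenerator(provider=LLMProvider.OLLAMA)
suggestion = attack_gen.get_attack_suggestion("Subdomain admin.example.com exposing SSH, HTTP and HTTPS")
print(suggestion["description"] if suggestion["status"] else suggestion["error"])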