Merge pull request #7 from MMquant/develop
Develop
MMquant authored Jan 6, 2019
2 parents b546655 + dec9b5b commit 73c827e
Showing 2 changed files with 101 additions and 51 deletions.
117 changes: 71 additions & 46 deletions DNSweeper.py
@@ -1,3 +1,5 @@
#!/usr/bin/env python3

# MIT License
#
# Copyright (c) 2018 Petr Javorik
@@ -114,12 +116,12 @@ def __init__(self):
}

self.exclude_subdomains = []
self.scraped_subdomains = []

# others
self.public_resolvers_remote_source = PUBLIC_RESOLVERS_REMOTE_SOURCE
self.public_resolvers_local_source = PUBLIC_RESOLVERS_LOCAL_SOURCE


################################################################################
# CORE
################################################################################
@@ -139,7 +141,7 @@ async def _query_sweep_resolvers(self, name, query_type, nameserver):
except aiodns.error.DNSError as e:
result = e

return {'ns': nameserver,'name': name ,'type': query_type, 'result': result}
return {'ns': nameserver, 'name': name, 'type': query_type, 'result': result}

@staticmethod
async def _query_sweep_names(name, query_type, resolver):
@@ -173,7 +175,8 @@ def _get_records(self, names, query_type, resolvers, sweep_mode):
elapsed_time = end - start

request_count = len(coros)
self.simple_log('## Event loop finished {} requests in {:.1f} seconds'.format(request_count, elapsed_time), 2)
self.simple_log('## Event loop finished {} requests in {:.1f} seconds'.format(request_count, elapsed_time),
2)
self.simple_log('## which is {:.1f} requests per second'.format(request_count / elapsed_time), 2)

elif sweep_mode == 'names':
@@ -209,15 +212,15 @@ def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]

chunk_size = len(resolvers) - RESOLVERS_SWEEP_RESERVE if len(resolvers) > RESOLVERS_SWEEP_RESERVE else len(resolvers)
chunk_size = len(resolvers) - RESOLVERS_SWEEP_RESERVE if len(resolvers) > RESOLVERS_SWEEP_RESERVE else len(
resolvers)
self.simple_log('## Calculated chunk_size: {}'.format(chunk_size), 2)
chunk_list = list(chunks(names, chunk_size))
self.simple_log('## Calculated chunk_size list length: {}'.format(len(chunk_list)), 2)
chunk_n = 0
requests_processed = 0
outer_start = time.time()
for chunk in chunk_list:

coros = [self._query_sweep_names(name, query_type, resolver) for name in chunk]
tasks = asyncio.gather(*coros, return_exceptions=True)

@@ -229,7 +232,8 @@ def chunks(l, n):
elapsed_time = end - start

request_count = len(coros)
self.simple_log('## Chunk event loop finished {} requests in {:.1f} seconds'.format(request_count, elapsed_time), 2)
self.simple_log(
'## Chunk event loop finished {} requests in {:.1f} seconds'.format(request_count, elapsed_time), 2)
self.simple_log('## which is {:.1f} requests per second'.format(request_count / elapsed_time), 2)

requests_processed += request_count
@@ -264,7 +268,8 @@ def _reliability_filter(public_resolvers, min_reliability):
if all(_filter):
reliable_resolvers.append(resolver[0])

self.simple_log('### {} public resolvers with reliability >= {}'.format(len(reliable_resolvers), min_reliability), 3)
self.simple_log(
'### {} public resolvers with reliability >= {}'.format(len(reliable_resolvers), min_reliability), 3)
return reliable_resolvers

# Param validation
@@ -323,7 +328,7 @@ def combine_resolvers(self, testing_domain, min_reliability, pub_ns_limit):
public_verified_resolvers.append(resolver['ns'])
else:
self.simple_log('# Resolver {} returned A records {} which are not in trusted resolvers A records'.
format(resolver['ns'], A_records), 1)
format(resolver['ns'], A_records), 1)

# Merge public and trusted resolvers and remove duplicates.
all_verified_resolvers = set(public_verified_resolvers + data.TRUSTED_RESOLVERS)
@@ -367,7 +372,8 @@ def garbage_query_filter(self, testing_name, resolvers):
for record in records:
if type(record['result']) is not aiodns.error.DNSError:
A_records = self.extract_A_record(record['result'])
self.simple_log('## Resolver {} resolves {} to {}'.format(record['ns'], random_subdomain, A_records), 2)
self.simple_log(
'## Resolver {} resolves {} to {}'.format(record['ns'], random_subdomain, A_records), 2)
bad_resolvers.append(record['ns'])
resolvers.difference_update(bad_resolvers)

@@ -455,10 +461,16 @@ def bruteforce(self, domain, payload, resolvers):

def bruteforce_recursive(self, domains, payload, resolvers):

for domain in domains:
filtered_domains = self.remove_excluded_subdomains(domains, self.exclude_subdomains)
for domain in filtered_domains:
self.simple_log('# Performing recursive bruteforce on {}'.format(domain), 1)
result = self.bruteforce(domain, payload, resolvers)
domains.extend(self.extract_names(result))
names = self.extract_names(result)
# Prevent duplicates if scraped subdomains contain a mix of 1st, 2nd, 3rd, ... level domains.
for name in names:
if name not in filtered_domains:
filtered_domains.extend(names)
domains.extend(names)

################################################################################
# HELPERS
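
The `bruteforce_recursive` change in the hunk above filters out excluded subdomains before looping and avoids re-queuing names that were already discovered at an earlier recursion level. A condensed, standalone sketch of that pattern (the `fake_resolve` helper, the `EXISTING` set and all names are invented for illustration; the real code issues asynchronous DNS queries via aiodns):

```
# Standalone sketch, not the committed code: recursion with exclusion and
# de-duplication, in the spirit of bruteforce_recursive above.

EXISTING = {'dev.test-domain.com', 'mail.dev.test-domain.com'}  # pretend DNS zone

def fake_resolve(candidate):
    """Stand-in for a real DNS query: only names in EXISTING 'resolve'."""
    return candidate in EXISTING

def bruteforce_recursive_sketch(domains, payload, excluded):
    # Drop out-of-scope entries first, as remove_excluded_subdomains() does.
    queue = [d for d in domains if d not in excluded]
    discovered = []
    for domain in queue:                      # queue grows while iterating
        for word in payload:
            candidate = '{}.{}'.format(word, domain)
            if fake_resolve(candidate) and candidate not in queue:
                queue.append(candidate)       # recurse into the new name later
                discovered.append(candidate)
    return discovered

print(bruteforce_recursive_sketch(
    ['test-domain.com', 'old.test-domain.com'],
    ['dev', 'mail'],
    excluded={'old.test-domain.com'}))
# ['dev.test-domain.com', 'mail.dev.test-domain.com']
```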
@@ -576,7 +588,6 @@ def to_json(data):

return data_json


ips = list(set(ips))
for ip in ips:
if not self.ipv4_validate(ip):
@@ -598,7 +609,6 @@ def _check_tcp_limit(self):
rlimit_nofile_soft, rlimit_nofile_hard), 3)

if rlimit_nofile_soft < RLIMIT_NOFILE_TEMP:

new_limit = RLIMIT_NOFILE_TEMP if RLIMIT_NOFILE_TEMP < rlimit_nofile_hard else rlimit_nofile_hard
resource.setrlimit(resource.RLIMIT_NOFILE, (new_limit, rlimit_nofile_hard))
self.simple_log('### Maximum number of opened file descriptors temporarily set to: {}'.format(
@@ -607,8 +617,8 @@ def _check_tcp_limit(self):
if platform.system() == 'Darwin':

# Kernel limits
kern_maxfilesperproc = subprocess.check_output(["sysctl", "kern.maxfilesperproc"])
kern_maxfilesperproc = int(kern_maxfilesperproc.decode().rstrip().split(':')[1].strip())
kern_maxfilesperproc = subprocess.check_output(["sysctl", "kern.maxfilesperproc"])
kern_maxfilesperproc = int(kern_maxfilesperproc.decode().rstrip().split(':')[1].strip())
self.simple_log('### Maximum number of opened file descriptors by kernel: {}'.format(
kern_maxfilesperproc), 3)

@@ -617,7 +627,6 @@ def _check_tcp_limit(self):
self.simple_log('### Maximum number of opened file descriptors by ulimit (Soft, Hard): {}, {}'.format(
rlimit_nofile_soft, rlimit_nofile_hard), 3)


if rlimit_nofile_soft < RLIMIT_NOFILE_TEMP:
new_limit = RLIMIT_NOFILE_TEMP if RLIMIT_NOFILE_TEMP < kern_maxfilesperproc else kern_maxfilesperproc
resource.setrlimit(resource.RLIMIT_NOFILE, (new_limit, new_limit))
@@ -644,7 +653,7 @@ def extract_PTR_records(self, resolvers_PTR_results):
}
PTR_records.append(record)

except UnicodeError:
except TypeError:

self.simple_log('### Unicode error in A records: {}'.format(arpa_ip), 3)

@@ -664,8 +673,8 @@ def extract_A_records(self, resolvers_A_results):
}
A_records.append(record)

except UnicodeError:
self.simple_log('### Unicode error in A records: {}'.format(resolver), 3)
except TypeError:
self.simple_log('### TypeError in A records: {}'.format(resolver), 3)

return A_records

@@ -759,6 +768,8 @@ def __init__(self):
self.dnsw.args.update(vars(self.args))
if self.dnsw.args['exclude_file']:
self.dnsw.exclude_subdomains = self.read_file(self.dnsw.args['exclude_file'])
if self.dnsw.args['file_input']:
self.dnsw.scraped_subdomains = self.read_file(self.dnsw.args['file_input'])

# Cache
self.filtered_resolvers = []
@@ -796,15 +807,15 @@ def parse_args(self):
'in all IPs from discovered ASN netblocks and optionally '
'filters output with given REGEX.')
parser_enumerate.set_defaults(func=self.enumerate)
input_group = parser_enumerate.add_mutually_exclusive_group(required=True)
input_group.add_argument('-f',
metavar='FILE',
help='Path to file with (scraped) subdomains',
dest='file_input')
input_group.add_argument('-d',
metavar='DOMAIN',
help='Domain (ie. test_domain.com)',
dest='domain_input')
input_group_enumerate = parser_enumerate.add_mutually_exclusive_group(required=True)
input_group_enumerate.add_argument('-f',
metavar='FILE',
help='Path to file with (scraped) subdomains',
dest='file_input')
input_group_enumerate.add_argument('-d',
metavar='DOMAIN',
help='Domain (ie. test_domain.com)',
dest='domain_input')
parser_enumerate.add_argument('-p',
metavar='FILE',
help='Path to file with bruteforce payload',
@@ -897,11 +908,15 @@ def parse_args(self):
'permanently rotated - each new query is resolved with '
'different resolver. Payload is mangled with -i input.')
parser_bruteforce.set_defaults(func=self.bruteforce)
parser_bruteforce.add_argument('-d',
metavar='DOMAIN',
help='Domain or subdomain',
required=True,
dest='domain_input')
input_group_bruteforce = parser_bruteforce.add_mutually_exclusive_group(required=True)
input_group_bruteforce.add_argument('-f',
metavar='FILE',
help='Path to file with (scraped) subdomains',
dest='file_input')
input_group_bruteforce.add_argument('-d',
metavar='DOMAIN',
help='Domain (ie. test_domain.com)',
dest='domain_input')
parser_bruteforce.add_argument('-p',
metavar='FILE',
help='Path to file with bruteforce payload',
@@ -926,13 +941,18 @@ def parse_args(self):
metavar='FILE',
required=False,
dest='bruteforce_recursive')
parser_bruteforce.add_argument('--exclude',
help='File with subdomains that should not be used in recursive bruteforce. '
'(improves speed)',
metavar='FILE',
required=False,
dest='exclude_file')
parser_bruteforce.add_argument('-v',
help='Verbosity, -v, -vv, -vvv',
action='count',
default=0,
dest='verbosity')


################################################################################
# forward_lookup command parser
################################################################################
@@ -1074,11 +1094,19 @@ def bruteforce(self):
bruteforce_subdomains = self.dnsw.extract_names(bruteforce_records)

if self.dnsw.args['bruteforce_recursive']:
self.simple_log('# Performing recursive bruteforce ...', 1)
payload = self.read_file(self.dnsw.args['bruteforce_recursive'])
if self.dnsw.scraped_subdomains:
self.simple_log('# Merging scraped subdomains with simple bruteforce result ...', 1)
bruteforce_subdomains.extend(self.dnsw.scraped_subdomains)
bruteforce_subdomains = list(set(bruteforce_subdomains))

self.dnsw.bruteforce_recursive(bruteforce_subdomains, payload, self.filtered_resolvers)
bruteforce_subdomains = list(set(bruteforce_subdomains))

# Output
self.simple_log('Bruteforce discovered {} subdomains...'.format(len(bruteforce_subdomains)), 0)
self.simple_log('Bruteforce discovered {} new subdomains...'.format(
len(bruteforce_subdomains) - len(self.dnsw.scraped_subdomains)), 0)
file_name = os.path.join(self.dnsw.args['output_dir'], 'bruteforce_result.json')
self.write_file(file_name, bruteforce_subdomains)

@@ -1092,13 +1120,13 @@ def forward_lookup(self):
self.simple_log('Performing forward-lookup...', 0)
self.load_resolvers()

subdomains = []
if self.dnsw.args['file_input']:
subdomains = self.read_file(self.dnsw.args['file_input'])
if self.bruteforce_result:
self.simple_log('# Extending input with previously bruteforced subdomains...', 1)
subdomains.extend(self.bruteforce_result)
subdomains = list(set(subdomains))
# Scraped subdomains already included in bruteforce_result!
subdomains = self.bruteforce_result
self.simple_log('# Performing forward-lookup on previous bruteforce result...', 1)
else:
subdomains = self.dnsw.scraped_subdomains
self.simple_log('# Performing forward-lookup on scraped subdomains...', 1)

if self.dnsw.args['fast_sweep']:
A_records = self.dnsw.forward_lookup_fast(subdomains, self.filtered_resolvers)
@@ -1208,10 +1236,8 @@ def get_domain_name(self):

if self.dnsw.args['domain_input']:
domain_name = self.dnsw.args['domain_input']
elif self.dnsw.args['file_input']:
with open(self.dnsw.args['file_input']) as f:
domain_name = f.readline().strip('\n')
domain_name = '.'.join(domain_name.split('.')[-2:])
elif self.dnsw.scraped_subdomains:
domain_name = '.'.join(self.dnsw.scraped_subdomains[0].split('.')[-2:])
else:
raise ValueError('domain_input or file_input missing')

@@ -1253,6 +1279,5 @@ def read_file(self, file_name):


if __name__ == '__main__':

app = App()
app.run_command()
35 changes: 30 additions & 5 deletions readme.md
@@ -81,7 +81,7 @@ Input domain directly

**Custom payload file**

`$ python3 DNSweeper.py enumerate -f scraped_subdomains.txt -p path/to/payload`
`$ python3 DNSweeper.py enumerate -f scraped_subdomains.txt -p path/to/large_payload`

**Custom output directory**

@@ -91,6 +91,10 @@ Input domain directly

`$ python3 DNSweeper.py enumerate -f scraped_subdomains.txt --no-bruteforce -v`

**Exclude out-of-scope subdomains from enumeration**

`$ python3 DNSweeper.py enumerate -f scraped_subdomains.txt --exclude out_of_scope.txt -v`

<br>

#### `resolvers`
@@ -111,6 +115,10 @@ Input domain directly

`$ python3 DNSweeper.py bruteforce -d testing_domain.com`

**Simple bruteforce with `large_payload` first, then recursive bruteforce with `small_payload`, using a file as input**

`$ python3 DNSweeper.py bruteforce -f scraped_subdomains.txt -p path/to/large_payload --bruteforce-recursive path/to/small_payload`

<br>

#### `forward_lookup`
@@ -131,8 +139,7 @@ Input domain directly

`$ python3 DNSweeper.py asn_reverse_lookup -f ips.txt`

**Use custom regexp to filter gathered PTR records. Filtered records are stored in**
`result/asn_reverse_lookup_regex_ptr.json`
**Use custom regexp to filter gathered PTR records. Filtered records are stored in `result/asn_reverse_lookup_regex_ptr.json`**

`$ python3 DNSweeper.py asn_reverse_lookup -f ips.txt -r admin`

@@ -199,11 +206,25 @@ R/gov[0-9]*\.
```

If you scraped subdomains such as

```
static1.test-domain.com
static2.test-domain.com
static3.test-domain.com
...
```
You can improve DNSweeper performance by enumerating just one of these subdomains. Add the following regex to your `--exclude` file:

`R/^(?!static1\.test-domain\.com)(static[0-9]*\.test-domain\.com)`

This regex leaves `static1.test-domain.com` for further enumeration and excludes the other subdomains of the same type.
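
A quick, standalone way to sanity-check such an exclusion regex before handing it to DNSweeper (plain Python `re`; how DNSweeper itself strips the `R/` prefix is not shown here):

```
import re

# The exclusion regex from the example above, without the R/ prefix.
pattern = re.compile(r'^(?!static1\.test-domain\.com)(static[0-9]*\.test-domain\.com)')

for sub in ['static1.test-domain.com',
            'static2.test-domain.com',
            'static3.test-domain.com']:
    verdict = 'excluded' if pattern.match(sub) else 'kept for enumeration'
    print('{}: {}'.format(sub, verdict))
# static1.test-domain.com: kept for enumeration
# static2.test-domain.com: excluded
# static3.test-domain.com: excluded
```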

<br>

#### `--bruteforce-recursive FILE`

Bruteforce recursively and use payload from FILE. Default bruteforce wordlist or custom wordlist is not used here. Use smaller wordlist for recursive bruteforcing (5k-10k).
Enable recursive bruteforce and use the payload from FILE. Neither the default simple bruteforce wordlist nor the `-p` custom wordlist is used here. Use a smaller wordlist (5k-10k entries) for recursive bruteforce.
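
For example, one possible invocation (the payload path is a placeholder):

`$ python3 DNSweeper.py bruteforce -d testing_domain.com --bruteforce-recursive path/to/small_payload`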

<br>

@@ -371,7 +392,11 @@ Quit VMware completely and upgrade to the latest VMware virtual NIC by editing `
* optimize code for Windows (maximum count of opened file descriptors)
* tune-up resolvers filtering - adding more filters, upgrade current filtering
* upgrade installation process (create package?)
* DNSweeper has poor performance when running in VMware. See [this](https://github.com/saghul/aiodns/issues/51) issue.


## Changelog

* 2019-01-06 The bruteforce command now accepts file input. Recursive bruteforce also runs on scraped subdomains.

## Contribution
