forked from ustayready/ShredHound
-
Notifications
You must be signed in to change notification settings - Fork 0
/
shred.py
58 lines (47 loc) · 1.58 KB
/
shred.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import json
import argparse
import sys
import os
# Command-line interface: parsed at import time so `args` is available
# module-wide (preserves the script's original design).
parser = argparse.ArgumentParser()
_arg_specs = [
    ('--output', dict(type=str, required=True, help='Output folder for chunked JSON files')),
    ('--filename', dict(type=str, required=True, help='Name of the BloodHound JSON file')),
    ('--chunks', dict(type=int, default=100, help='Number of chunks to split the BloodHound JSON file into')),
]
for _flag, _opts in _arg_specs:
    parser.add_argument(_flag, **_opts)
args = parser.parse_args()
def main(args):
    """Split a BloodHound JSON export into multiple smaller chunk files.

    Reads ``args.filename``, splits its ``data`` array into up to
    ``args.chunks`` pieces, and writes each piece to
    ``<args.output>/chunk_<idx>.json`` with a rebuilt ``meta`` section
    carrying the same type/version and a count matching the chunk length.
    """
    data = import_json(args.filename)
    data_type = data['meta']['type']
    version = data['meta']['version']
    # Create the output folder up front so writing chunk files cannot fail
    # with FileNotFoundError ('os' was imported but previously unused, and
    # the directory was never created).
    os.makedirs(args.output, exist_ok=True)
    for idx, chunked in enumerate(json_chunks(data, args.chunks)):
        new_file = {
            "data": list(chunked),
            "meta": {
                "type": data_type,
                "version": version,
                # Count computed directly instead of incrementing per item.
                "count": len(chunked),
            },
        }
        write_json(new_file, os.path.join(args.output, f'chunk_{idx}.json'))
def write_json(data, filename):
    """Serialize ``data`` as JSON to ``filename``, overwriting any existing file."""
    with open(filename, 'w') as sink:
        sink.write(json.dumps(data))
def import_json(filename):
    """Load and return the parsed JSON contents of ``filename``."""
    with open(filename) as source:
        return json.load(source)
def json_chunks(data, chunks):
    """Split ``data['data']`` into at most ``chunks`` roughly equal slices.

    Returns a list of lists covering every item exactly once. Returns an
    empty list for empty input or a non-positive ``chunks``.

    Fixes two defects in the original:
    - ``total_count // chunks`` was 0 whenever there were fewer items than
      requested chunks, making ``range(..., 0)`` raise ``ValueError``.
    - Floor division produced MORE than ``chunks`` slices when the item
      count was not evenly divisible; ceiling division keeps it bounded.
    Also sizes chunks from the actual data length rather than trusting the
    file's ``meta.count``, which may disagree.
    """
    items = data['data']
    total_count = len(items)
    if total_count == 0 or chunks <= 0:
        return []
    # Ceiling division; max(1, ...) guards against a zero step for range().
    chunk_size = max(1, -(-total_count // chunks))
    return [items[i:i + chunk_size] for i in range(0, total_count, chunk_size)]
# Entry point: run only when executed as a script, not on import.
# `args` is the module-level argparse.Namespace parsed above.
if __name__ == '__main__':
    main(args)