Add process_batch_size argument to control memory
cpuhrsch committed Nov 30, 2023
1 parent 35db2a9 commit ec9a2bf
Showing 3 changed files with 8 additions and 1 deletion.
amg_example/amg_example.py (8 additions, 1 deletion)
@@ -41,7 +41,7 @@ def show_anns(anns):
 
 sam = sam_model_fast_registry[model_type](checkpoint=sam_checkpoint)
 sam.to(device=device)
-mask_generator = SamAutomaticMaskGenerator(sam)
+mask_generator = SamAutomaticMaskGenerator(sam, process_batch_size=8)
 
 # Run thrice for warmup
 masks = mask_generator.generate(image)
@@ -69,3 +69,10 @@ def show_anns(anns):
 
 # Save a GPU trace
 profiler_runner(f"amg_example_trace.json.gz", mask_generator.generate, image)
+
+# Write out memory usage
+max_memory_allocated_bytes = torch.cuda.max_memory_allocated()
+_, total_memory = torch.cuda.mem_get_info()
+max_memory_allocated_percentage = int(100 * (max_memory_allocated_bytes / total_memory))
+max_memory_allocated_bytes = max_memory_allocated_bytes >> 20
+print(f"memory(MiB): {max_memory_allocated_bytes} memory(%): {max_memory_allocated_percentage}")
Binary file modified amg_example/amg_example_trace.json.gz
Binary file modified amg_example/dog_mask_fast.png
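The new process_batch_size argument caps how much work SamAutomaticMaskGenerator processes per forward pass, so smaller values lower peak GPU memory at some throughput cost; the example above settles on 8. A minimal sketch of how one might compare peak memory across batch sizes, mirroring the setup in amg_example.py (the model type, checkpoint path, and image path here are assumptions, not values from the commit):

    import cv2
    import torch
    from segment_anything_fast import sam_model_fast_registry, SamAutomaticMaskGenerator

    # Placeholder setup; paths and model type are assumed for illustration.
    image = cv2.cvtColor(cv2.imread("dog.jpg"), cv2.COLOR_BGR2RGB)
    sam = sam_model_fast_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
    sam.to(device="cuda")

    for batch_size in (2, 4, 8):
        # Reset the peak-memory counter so each run is measured independently.
        torch.cuda.reset_peak_memory_stats()
        mask_generator = SamAutomaticMaskGenerator(sam, process_batch_size=batch_size)
        masks = mask_generator.generate(image)
        peak_mib = torch.cuda.max_memory_allocated() >> 20  # bytes -> MiB
        print(f"process_batch_size={batch_size}: peak memory {peak_mib} MiB")

The >> 20 shift is the same bytes-to-MiB conversion used in the diff, and the percentage printed there is peak allocation divided by the total device memory reported by torch.cuda.mem_get_info().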