Skip to content

Commit

Permalink
new diffusion
Browse files Browse the repository at this point in the history
  • Loading branch information
pierre.delaunay committed Jul 19, 2024
1 parent 4a1f505 commit 3065756
Show file tree
Hide file tree
Showing 16 changed files with 561 additions and 208 deletions.
35 changes: 31 additions & 4 deletions benchmarks/_templates/simple/Makefile
Original file line number Diff line number Diff line change
@@ -1,4 +1,31 @@
# Use the caller's MILABENCH_BASE when set, otherwise default to "base".
# `?=` only assigns when the variable is undefined (same effect as an ifndef
# guard), and the value is deliberately unquoted: make treats quotes as
# literal characters, so MILABENCH_BASE="base" would export the quotes as
# part of the value seen by milabench.
MILABENCH_BASE ?= base

export MILABENCH_BASE

BENCH_NAME = template
MILABENCH_CONFIG = dev.yaml
MILABENCH_ARGS = --config $(MILABENCH_CONFIG) --base $(MILABENCH_BASE)

# Every target here is a command, not a file it produces: declare them phony
# so a stray file with the same name can never shadow them.
.PHONY: all install prepare tests single gpus nodes

# Chain the whole workflow through prerequisites. (Listing the target names
# on a recipe line instead would hand them to the shell, which would invoke
# the coreutils `install` program rather than the make targets.)
all: install prepare single gpus nodes

install:
	milabench install $(MILABENCH_ARGS) --force

prepare:
	milabench prepare $(MILABENCH_ARGS)

tests: install prepare
	milabench run $(MILABENCH_ARGS)

single:
	milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-single

gpus:
	milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-gpus

nodes:
	milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-nodes
31 changes: 31 additions & 0 deletions benchmarks/_templates/stdout/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# Use the caller's MILABENCH_BASE when set, otherwise default to "base".
# `?=` only assigns when the variable is undefined (same effect as an ifndef
# guard), and the value is deliberately unquoted: make treats quotes as
# literal characters, so MILABENCH_BASE="base" would export the quotes as
# part of the value seen by milabench.
MILABENCH_BASE ?= base

export MILABENCH_BASE

BENCH_NAME = template
MILABENCH_CONFIG = dev.yaml
MILABENCH_ARGS = --config $(MILABENCH_CONFIG) --base $(MILABENCH_BASE)

# Every target here is a command, not a file it produces: declare them phony
# so a stray file with the same name can never shadow them.
.PHONY: all install prepare tests single gpus nodes

# Chain the whole workflow through prerequisites. (Listing the target names
# on a recipe line instead would hand them to the shell, which would invoke
# the coreutils `install` program rather than the make targets.)
all: install prepare single gpus nodes

install:
	milabench install $(MILABENCH_ARGS) --force

prepare:
	milabench prepare $(MILABENCH_ARGS)

tests: install prepare
	milabench run $(MILABENCH_ARGS)

single:
	milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-single

gpus:
	milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-gpus

nodes:
	milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-nodes
4 changes: 4 additions & 0 deletions benchmarks/_templates/stdout/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@

# Template

Rewrite this README to explain what the benchmark is!
34 changes: 34 additions & 0 deletions benchmarks/_templates/stdout/benchfile.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
from milabench.pack import Package


class Template(Package):
    """Milabench package definition for this template benchmark.

    Milabench discovers this class through the module-level ``__pack__``
    symbol and drives the benchmark lifecycle through it:
    ``install()`` -> ``prepare()`` -> ``build_run_plan()``.
    """

    # Requirements file installed by install(). It can be empty or absent.
    base_requirements = "requirements.in"

    # The preparation script called by prepare(). It must be executable,
    # but it can be any type of script. It can be empty or absent.
    prepare_script = "prepare.py"

    # The main script called by run(). It must be a Python file. It has to
    # be present. (Plain literal: the original f-string had no placeholders.)
    main_script = "main.py"

    # You can remove the functions below if you don't need to modify them.

    def make_env(self):
        # Return a dict of environment variables for prepare_script and
        # main_script.
        return super().make_env()

    async def install(self):
        await super().install()

    async def prepare(self):
        await super().prepare()  # super() call executes prepare_script

    def build_run_plan(self):
        # Route the benchmark's stdout through milabench's stdout-based
        # metric collection instead of the default voir instrumentation.
        return super().build_run_plan().use_stdout()


__pack__ = Template
36 changes: 36 additions & 0 deletions benchmarks/_templates/stdout/dev.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# Base definition shared by every variant below; not runnable by itself
# (leading underscore marks it as a template entry).
_template:
  inherits: _defaults
  definition: .
  install-variant: unpinned
  install_group: torch

  # Extra CLI arguments forwarded to main.py — uncomment as needed.
  #argv:
  #  --train_batch_size: 32
  #  --num_epochs: 5


# One machine, one process per GPU.
template-single:
  inherits: _template

  num_machines: 1
  plan:
    method: per_gpu

# One machine, a single job (which can use every GPU).
template-gpus:
  inherits: _template

  num_machines: 1
  plan:
    method: njobs
    n: 1

# Multi-node variant: a single job spanning two machines.
template-nodes:
  inherits: _template

  num_machines: 2
  plan:
    method: njobs
    n: 1

  # Only run when the cluster actually has enough nodes.
  requires_capabilities:
    - "len(nodes) >= ${num_machines}"
50 changes: 50 additions & 0 deletions benchmarks/_templates/stdout/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
# This is the script run by milabench run (by default)

# It is possible to use a script from a GitHub repo if it is cloned using
# clone_subtree in the benchfile.py, in which case this file can simply
# be deleted.

import time
import random

import torchcompat.core as accelerator
from benchmate.observer import BenchObserver


def criterion(*args, **kwargs):
    """Stand-in loss function: draw a sample from a standard normal.

    Accepts (and ignores) any arguments so it can later be replaced by a
    real criterion (e.g. a torch loss) without changing the call sites.
    """
    # Pass mu/sigma explicitly: the zero-argument form of normalvariate()
    # only exists on Python 3.11+, while this is equivalent on all versions.
    return random.normalvariate(0, 1)


def main():
    """Template training loop instrumented for milabench.

    There is no real model here: the loop iterates over a dummy dataloader,
    produces a random "loss" via criterion(), and reports it through
    BenchObserver so milabench can collect throughput/loss metrics.
    Milabench is expected to stop the process once it has enough data.
    """
    device = accelerator.fetch_device(0)  # <= This is your cuda device

    observer = BenchObserver(
        # Each element of the dummy dataloader counts as one sample.
        batch_size_fn=lambda batch: 1,
        # Emit metrics on stdout (this is the "stdout" template variant).
        stdout=True,
    )
    # optimizer = observer.optimizer(optimizer)
    # criterion = observer.criterion(criterion)

    dataloader = list(range(6000))

    for epoch in range(10000):
        for i in observer.iterate(dataloader):
            # avoid .item()
            # avoid torch.cuda; use accelerator from torchcompat instead
            # avoid torch.cuda.synchronize or accelerator.synchronize

            # y = model(i)
            loss = criterion()
            # loss.backward()
            # optimizer.step()

            observer.record_loss(loss)

            # Simulate per-batch work so milabench has time to observe.
            time.sleep(0.1)

    # Sentinels: these lines should never be reached in a normal benchmark
    # run — milabench is supposed to terminate the script long before the
    # loops finish. Reaching them with large epoch/i values means milabench
    # failed to stop the training script.
    assert epoch < 2, "milabench stopped the train script before the end of training"
    assert i < 72, "milabench stopped the train script before the end of training"


if __name__ == "__main__":
    main()
16 changes: 16 additions & 0 deletions benchmarks/_templates/stdout/prepare.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
#!/usr/bin/env python
"""Dataset preparation hook, executed by `milabench prepare`.

Milabench runs this script with the benchmark's environment set up; it
should download or generate whatever data main.py needs, idempotently.
"""

import os

if __name__ == "__main__":
    # If you need the whole configuration:
    # config = json.loads(os.environ["MILABENCH_CONFIG"])

    # Milabench points this variable at the benchmark's data directory.
    data_directory = os.environ["MILABENCH_DIR_DATA"]

    # Download (or generate) the needed dataset(s). You are responsible
    # to check if it has already been properly downloaded or not, and to
    # do nothing if it has been.
    print("Hello I am doing some data stuff!")

    # If there is nothing to download or generate, just delete this file.
2 changes: 2 additions & 0 deletions benchmarks/_templates/stdout/requirements.in
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
voir>=0.2.9,<0.3
torch
35 changes: 31 additions & 4 deletions benchmarks/_templates/voir/Makefile
Original file line number Diff line number Diff line change
@@ -1,4 +1,31 @@
# Use the caller's MILABENCH_BASE when set, otherwise default to "base".
# `?=` only assigns when the variable is undefined (same effect as an ifndef
# guard), and the value is deliberately unquoted: make treats quotes as
# literal characters, so MILABENCH_BASE="base" would export the quotes as
# part of the value seen by milabench.
MILABENCH_BASE ?= base

export MILABENCH_BASE

BENCH_NAME = template
MILABENCH_CONFIG = dev.yaml
MILABENCH_ARGS = --config $(MILABENCH_CONFIG) --base $(MILABENCH_BASE)

# Every target here is a command, not a file it produces: declare them phony
# so a stray file with the same name can never shadow them.
.PHONY: all install prepare tests single gpus nodes

# Chain the whole workflow through prerequisites. (Listing the target names
# on a recipe line instead would hand them to the shell, which would invoke
# the coreutils `install` program rather than the make targets.)
all: install prepare single gpus nodes

install:
	milabench install $(MILABENCH_ARGS) --force

prepare:
	milabench prepare $(MILABENCH_ARGS)

tests: install prepare
	milabench run $(MILABENCH_ARGS)

single:
	milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-single

gpus:
	milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-gpus

nodes:
	milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-nodes
34 changes: 26 additions & 8 deletions benchmarks/diffusion/Makefile
Original file line number Diff line number Diff line change
@@ -1,17 +1,35 @@
# Use the caller's MILABENCH_BASE when set, otherwise default to "base".
# `?=` only assigns when the variable is undefined (same effect as an ifndef
# guard), and the value is deliberately unquoted: make treats quotes as
# literal characters, so MILABENCH_BASE="base" would export the quotes as
# part of the value seen by milabench.
MILABENCH_BASE ?= base

export MILABENCH_BASE

BENCH_NAME = diffusion
MILABENCH_CONFIG = dev.yaml
MILABENCH_ARGS = --config $(MILABENCH_CONFIG) --base $(MILABENCH_BASE)

# Every target here is a command, not a file it produces: declare them phony
# so a stray file with the same name can never shadow them.
.PHONY: all install prepare tests debug single gpus nodes

# Chain the whole workflow through prerequisites. (Listing the target names
# on a recipe line instead would hand them to the shell, which would invoke
# the coreutils `install` program rather than the make targets.)
all: install prepare single gpus nodes

install:
	milabench install $(MILABENCH_ARGS) --force

prepare:
	milabench prepare $(MILABENCH_ARGS)

tests: install prepare
	milabench run $(MILABENCH_ARGS)

# Each recipe line runs in its own shell, so `export CUDA_VISIBLE_DEVICES=0`
# on a separate line never reaches the milabench invocation; set the
# variable on the same command line instead.
debug:
	CUDA_VISIBLE_DEVICES=0 milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-gpus

single:
	milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-single

gpus:
	milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-gpus

nodes:
	milabench run $(MILABENCH_ARGS) --select $(BENCH_NAME)-nodes
Loading

0 comments on commit 3065756

Please sign in to comment.