Commit a0107b4

update templates

pierre.delaunay committed Jul 11, 2024
1 parent 127d18c commit a0107b4
Showing 26 changed files with 398 additions and 57 deletions.
14 changes: 14 additions & 0 deletions .github/workflows/tests_unit.yml
@@ -50,6 +50,20 @@ jobs:
           #
           poetry install --with dev
 
+      - name: Simple Template
+        run: |
+          source $(poetry env info -p)/bin/activate
+          milabench new --name simplebench --template simple
+          cd benchmarks/simplebench
+          make tests
+
+      - name: Voir Template
+        run: |
+          source $(poetry env info -p)/bin/activate
+          milabench new --name voirbench --template voir
+          cd benchmarks/voirbench
+          make tests
+
       - name: tests
         run: |
           source $(poetry env info -p)/bin/activate
5 changes: 5 additions & 0 deletions .gitignore
@@ -47,3 +47,8 @@ dry/
 
 stderr.txt
 stdout.txt
+
+base/
+
+benchmarks/simple
+benchmarks/voir
4 changes: 4 additions & 0 deletions benchmarks/_templates/simple/Makefile
@@ -0,0 +1,4 @@
+tests:
+	milabench install --config dev.yaml --base base
+	milabench prepare --config dev.yaml --base base
+	milabench run --config dev.yaml --base base
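For context, the `tests` target added above just chains milabench's three phases against the template's dev config. A rough Python equivalent of the same sequence, assuming `milabench` is on the PATH (a sketch, not part of the commit):

import subprocess

# Same sequence as the Makefile target: install deps, prepare data, run the benchmark.
for phase in ("install", "prepare", "run"):
    subprocess.run(
        ["milabench", phase, "--config", "dev.yaml", "--base", "base"],
        check=True,
    )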
2 changes: 1 addition & 1 deletion benchmarks/_templates/simple/README.md
@@ -1,4 +1,4 @@
 
-# {{STEM}}
+# Template
 
 Rewrite this README to explain what the benchmark is!
6 changes: 2 additions & 4 deletions benchmarks/_templates/simple/benchfile.py
@@ -1,7 +1,7 @@
 from milabench.pack import Package
 
 
-class TheBenchmark(Package):
+class Template(Package):
     # Requirements file installed by install(). It can be empty or absent.
     base_requirements = "requirements.in"
 
@@ -26,8 +26,6 @@ async def install(self):
     async def prepare(self):
         await super().prepare()  # super() call executes prepare_script
 
-    async def run(self):
-        return await super().run()
 
 
-__pack__ = TheBenchmark
+__pack__ = Template
2 changes: 1 addition & 1 deletion benchmarks/_templates/simple/dev.yaml
@@ -1,5 +1,5 @@
 
-{{STEM}}:
+template:
   inherits: _defaults
   definition: .
   install-variant: unpinned
20 changes: 16 additions & 4 deletions benchmarks/_templates/simple/main.py
@@ -5,33 +5,45 @@
 # be deleted.
 
 import time
+import random
 
 import torchcompat.core as accelerator
 from benchmate.observer import BenchObserver
 
 
+def criterion(*args, **kwargs):
+    return random.normalvariate()
+
+
 def main():
     device = accelerator.fetch_device(0)  # <= This is your cuda device
 
-    observer = BenchObserver(batch_size_fn=lambda batch: 1)
+    observer = BenchObserver(
+        batch_size_fn=lambda batch: 1
+    )
+    # optimizer = observer.optimizer(optimizer)
+    # criterion = observer.criterion(criterion)
 
-    dataloader = [1, 2, 3, 4]
+    dataloader = list(range(6000))
 
-    for epoch in range(10):
+    for epoch in range(10000):
         for i in observer.iterate(dataloader):
             # avoid .item()
             # avoid torch.cuda; use accelerator from torchcompat instead
             # avoid torch.cuda.synchronize or accelerator.synchronize
 
             # y = model(i)
             # loss = criterion(y)
+            loss = criterion()
             # loss.backward()
             # optimizer.step()
 
+            observer.record_loss(loss)
+
             time.sleep(0.1)
 
-    assert epoch < 2, "milabench stopped the train script before the end of training"
+    assert i < 72, "milabench stopped the train script before the end of training"
 
 
 if __name__ == "__main__":
     main()
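The commented-out hooks in this template (`observer.optimizer(...)`, `observer.criterion(...)`) hint at how a real benchmark wires in a model. A minimal sketch under that assumption — the linear model, tensor shapes, and SGD settings here are illustrative only, not part of the template:

import torch
import torchcompat.core as accelerator
from benchmate.observer import BenchObserver


def main():
    device = accelerator.fetch_device(0)
    model = torch.nn.Linear(16, 2).to(device)

    observer = BenchObserver(batch_size_fn=lambda batch: batch[0].shape[0])
    criterion = observer.criterion(torch.nn.CrossEntropyLoss())
    optimizer = observer.optimizer(torch.optim.SGD(model.parameters(), lr=0.01))

    # Synthetic data stands in for a real dataloader.
    dataloader = [
        (torch.randn(64, 16, device=device), torch.randint(0, 2, (64,), device=device))
        for _ in range(100)
    ]

    for epoch in range(10000):
        for x, y in observer.iterate(dataloader):
            optimizer.zero_grad()
            loss = criterion(model(x), y)  # the wrapped criterion records the loss
            loss.backward()
            optimizer.step()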
File renamed without changes.
1 change: 1 addition & 0 deletions benchmarks/_templates/simple/requirements.in
@@ -1 +1,2 @@
 voir>=0.2.9,<0.3
+torch
12 changes: 0 additions & 12 deletions benchmarks/_templates/simple/voirfile.py
@@ -26,25 +26,13 @@ class Config:
 
 @configurable
 def instrument_main(ov, options: Config):
-    try:
-        import torch
-    except ImportError:
-        torch = None
-
     yield ov.phases.init
 
     if options.dash:
         ov.require(dash)
 
     ov.require(
         log("value", "progress", "rate", "units", "loss", "gpudata", context="task"),
-        rate(
-            interval=options.interval,
-            skip=options.skip,
-            sync=torch.cuda.synchronize
-            if torch and torch.cuda.is_available()
-            else None,
-        ),
         early_stop(n=options.stop, key="rate", task="train"),
         monitor_monogpu(poll_interval=options.gpu_poll),
     )
4 changes: 4 additions & 0 deletions benchmarks/_templates/voir/Makefile
@@ -0,0 +1,4 @@
+tests:
+	milabench install --config dev.yaml --base base
+	milabench prepare --config dev.yaml --base base
+	milabench run --config dev.yaml --base base
4 changes: 4 additions & 0 deletions benchmarks/_templates/voir/README.md
@@ -0,0 +1,4 @@
+
+# Template
+
+Rewrite this README to explain what the benchmark is!
42 changes: 42 additions & 0 deletions benchmarks/_templates/voir/benchfile.py
@@ -0,0 +1,42 @@
+from milabench.pack import Package
+
+
+SOURCE_DIR = "src"
+REPO_URL = "https://github.com/Delaunay/extern_example.git"
+BRANCH = "a524286ab6364bca6729dd6ef4936e175a87c7e4"
+
+
+class Template(Package):
+    # Requirements file installed by install(). It can be empty or absent.
+    base_requirements = "requirements.in"
+
+    # The preparation script called by prepare(). It must be executable,
+    # but it can be any type of script. It can be empty or absent.
+    prepare_script = "prepare.py"
+
+    # The main script called by run(). It must be a Python file. It has to
+    # be present.
+    main_script = f"{SOURCE_DIR}/main.py"
+
+    # You can remove the functions below if you don't need to modify them.
+
+    def make_env(self):
+        # Return a dict of environment variables for prepare_script and
+        # main_script.
+        return super().make_env()
+
+    async def install(self):
+        await super().install()
+
+        source_destination = self.dirs.code / SOURCE_DIR
+        if not source_destination.exists():
+            source_destination.clone_subtree(
+                REPO_URL, BRANCH
+            )
+
+    async def prepare(self):
+        await super().prepare()  # super() call executes prepare_script
+
+
+
+__pack__ = Template
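The `make_env` stub above is the hook for passing extra environment variables down to prepare.py and main.py. A hypothetical override, sketched only to show the shape — the variable name is invented for illustration:

def make_env(self):
    # Everything the parent provides, plus one extra knob for the scripts.
    return {**super().make_env(), "TEMPLATE_NUM_WORKERS": "4"}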
7 changes: 7 additions & 0 deletions benchmarks/_templates/voir/dev.yaml
@@ -0,0 +1,7 @@
+
+template:
+  inherits: _defaults
+  definition: .
+  install-variant: unpinned
+  plan:
+    method: per_gpu
16 changes: 16 additions & 0 deletions benchmarks/_templates/voir/prepare.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+import os
+
+if __name__ == "__main__":
+    # If you need the whole configuration:
+    # config = json.loads(os.environ["MILABENCH_CONFIG"])
+
+    data_directory = os.environ["MILABENCH_DIR_DATA"]
+
+    # Download (or generate) the needed dataset(s). You are responsible
+    # to check if it has already been properly downloaded or not, and to
+    # do nothing if it has been.
+    print("Hello I am doing some data stuff!")
+
+    # If there is nothing to download or generate, just delete this file.
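The comment in prepare.py leaves the idempotency check to the benchmark author. One common pattern is a marker file, sketched here — the `DONE` marker and `template-data` directory are invented conventions, not milabench features:

import os

if __name__ == "__main__":
    data_directory = os.environ["MILABENCH_DIR_DATA"]
    marker = os.path.join(data_directory, "template-data", "DONE")

    if not os.path.exists(marker):
        os.makedirs(os.path.dirname(marker), exist_ok=True)
        # ... download or generate the dataset here ...
        open(marker, "w").close()  # reruns become no-ops once the marker exists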
2 changes: 2 additions & 0 deletions benchmarks/_templates/voir/requirements.in
@@ -0,0 +1,2 @@
+voir>=0.2.9,<0.3
+torch
69 changes: 69 additions & 0 deletions benchmarks/_templates/voir/voirfile.py
@@ -0,0 +1,69 @@
+from dataclasses import dataclass
+
+from voir.phase import StopProgram
+from voir import configurable
+from voir.instruments import dash, early_stop, log
+from benchmate.monitor import monitor_monogpu
+from benchmate.observer import BenchObserver
+
+
+@dataclass
+class Config:
+    """voir configuration"""
+
+    # Whether to display the dash or not
+    dash: bool = False
+
+    # How often to log the rates
+    interval: str = "1s"
+
+    # Number of rates to skip before logging
+    skip: int = 5
+
+    # Number of rates to log before stopping
+    stop: int = 20
+
+    # Number of seconds between each gpu poll
+    gpu_poll: int = 3
+
+
+@configurable
+def instrument_main(ov, options: Config):
+    yield ov.phases.init
+
+
+    yield ov.phases.load_script
+
+    if options.dash:
+        ov.require(dash)
+
+    ov.require(
+        log("value", "progress", "rate", "units", "loss", "gpudata", context="task"),
+        early_stop(n=options.stop, key="rate", task="train"),
+        monitor_monogpu(poll_interval=options.gpu_poll),
+    )
+
+    #
+    # Insert milabench tools
+    #
+    observer = BenchObserver(
+        earlystop=options.stop + options.skip,
+        batch_size_fn=lambda x: 1
+    )
+
+    probe = ov.probe("//my_dataloader_creator() as loader", overridable=True)
+    probe['loader'].override(observer.loader)
+
+    probe = ov.probe("//my_criterion_creator() as criterion", overridable=True)
+    probe['criterion'].override(observer.criterion)
+
+    probe = ov.probe("//my_optimizer_creator() as optimizer", overridable=True)
+    probe['optimizer'].override(observer.optimizer)
+
+    #
+    # Run the benchmark
+    #
+    try:
+        yield ov.phases.run_script
+    except StopProgram:
+        print("early stopped")
1 change: 0 additions & 1 deletion benchmarks/torchvision_ddp/benchfile.py
@@ -8,7 +8,6 @@ class TorchvisionBenchmarkDDP(Package):
 
     def build_run_plan(self) -> "Command":
         import milabench.commands as cmd
-
         pack = cmd.PackCommand(self, *self.argv, lazy=True)
         pack = cmd.ActivatorCommand(pack, use_stdout=True)
         return pack
4 changes: 2 additions & 2 deletions benchmate/benchmate/observer.py
@@ -68,7 +68,7 @@ def override_return_value(self, function, override):
         raise RuntimeError("Not running through voir")
 
     def iterate(self, iterator):
-        return self.loader(loader)
+        return self.loader(iterator)
 
     def loader(self, loader):
         """Wrap a dataloader or an iterable which enable accurate measuring of time spent in the loop's body"""
@@ -94,7 +94,7 @@ def new_backward(*args, **kwargs):
 
             loss.backward = new_backward
 
-            self.record_loss(loss.detach())
+            self.record_loss(loss)
             return loss
 
         return wrapped
5 changes: 4 additions & 1 deletion docs/index.rst
@@ -7,12 +7,15 @@ Welcome to milabench's documentation!
    :caption: Contents:
 
    usage.rst
+   recipes.rst
+   new_benchmarks.rst
+
    docker.rst
    dev-usage.rst
-   new_benchmarks.rst
    reference.rst
+   sizer.rst
 
 
 Indices and tables
 ==================
 