Skip to content

Commit

Permalink
benchmarks implemented
Browse files Browse the repository at this point in the history
  • Loading branch information
bbengfort committed Nov 12, 2024
1 parent 5e1f23f commit 675f508
Show file tree
Hide file tree
Showing 3 changed files with 177 additions and 0 deletions.
27 changes: 27 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,33 @@ $ which construe
$ construe --help
```

## Basic Benchmarks

The basic benchmarks implement the batched dot product benchmarks from the [PyTorch benchmark tutorial](https://pytorch.org/tutorials/recipes/recipes/benchmark.html). These benchmarks can be run using `construe basic`; for example:

```
$ construe basic -e "MacBook Pro 2022 M1" -o results-macbook.pickle
```

The `-e` flag specifies the environment for comparison purposes and the `-o` flag saves the measurements out to disk as a Pickle file that can be loaded for comparison to other environments later.

Command usage is as follows:

```
Usage: construe basic [OPTIONS]

Options:
-e, --env TEXT name of the experimental environment for
comparison (default is hostname)
-o, --saveto TEXT path to write the measurements pickle data to
-t, --num-threads INTEGER specify number of threads for benchmark (default
to maximum)
-F, --fuzz / --no-fuzz fuzz the tensor sizes of the inputs to the
benchmark
-S, --seed INTEGER set the random seed for random generation
-h, --help Show this message and exit.
```

## Releases

To release the construe library and deploy to PyPI run the following commands:
Expand Down
39 changes: 39 additions & 0 deletions construe/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import click

from .version import get_version
from .basic import BasicBenchmark


CONTEXT_SETTINGS = {
Expand All @@ -18,6 +19,44 @@ def main():
pass


@main.command()
@click.option(
    "-e",
    "--env",
    default=None,
    help="name of the experimental environment for comparison (default is hostname)",
)
@click.option(
    "-o",
    "--saveto",
    default=None,
    help="path to write the measurements pickle data to",
)
@click.option(
    "-t",
    "--num-threads",
    default=None,
    type=int,
    help="specify number of threads for benchmark (default to maximum)",
)
@click.option(
    "-F",
    "--fuzz/--no-fuzz",
    default=False,
    help="fuzz the tensor sizes of the inputs to the benchmark",
)
@click.option(
    "-S",
    "--seed",
    default=None,
    type=int,
    help="set the random seed for random generation",
)
def basic(**kwargs):
    """
    Runs basic dot product benchmarks and prints a comparison table.
    """
    # The click options map directly onto the BasicBenchmark keyword
    # arguments, so they can be passed through unmodified.
    BasicBenchmark(**kwargs).run()


if __name__ == "__main__":
main(
prog_name="construe",
Expand Down
111 changes: 111 additions & 0 deletions construe/basic.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,16 @@
"""
Benchmarks basic dot product torch operators.
See: https://pytorch.org/tutorials/recipes/recipes/benchmark.html
"""

import torch
import pickle
import platform
import torch.utils.benchmark as benchmark

from itertools import product
from torch.utils.benchmark import Fuzzer, FuzzedParameter, FuzzedTensor


def batched_dot_mul_sum(a, b):
Expand All @@ -19,3 +27,106 @@ def batched_dot_bmm(a, b):
a = a.reshape(-1, 1, a.shape[-1])
b = b.reshape(-1, b.shape[-1], 1)
return torch.bmm(a, b).flatten(-3)


class BasicBenchmark(object):
    """
    Benchmarks the batched dot product implementations (mul/sum vs bmm)
    across a range of tensor sizes using ``torch.utils.benchmark`` timers
    and prints a comparison table of the measurements.

    Parameters
    ----------
    env : str, optional
        Name of the experimental environment for comparison purposes;
        defaults to the hostname of the current machine.
    saveto : str, optional
        Path to write the pickled measurement results to; if not specified
        the results are not saved to disk.
    num_threads : int, optional
        Number of threads used by each benchmark timer; defaults to the
        maximum number of threads available to torch.
    fuzz : bool, default=False
        If True, benchmark randomly generated tensors from :meth:`fuzzer`
        instead of the static grid of tensor sizes from :meth:`static`.
    seed : int, optional
        Random seed for the fuzzer's random tensor generation.
    num_samples : int, default=10
        Number of fuzzed tensors to benchmark when ``fuzz`` is True.
    min_run_time : float, default=1.0
        Minimum number of seconds each timer measurement runs for.
    """

    def __init__(
        self,
        env=None,
        saveto=None,
        num_threads=None,
        fuzz=False,
        seed=None,
        num_samples=10,
        min_run_time=1.0,
    ):
        # Default to the maximum number of threads available to torch.
        if num_threads is None:
            num_threads = torch.get_num_threads()

        # Default to the hostname so results from different machines can
        # be told apart when compared later.
        if env is None:
            env = platform.node()

        self.env = env
        self.saveto = saveto
        self.num_threads = num_threads
        self.fuzz = fuzz
        self.seed = seed
        self.num_samples = num_samples
        self.min_run_time = min_run_time

    def run(self):
        """
        Time both batched dot implementations on every input tensor, print
        the comparison table, and pickle the raw measurements if ``saveto``
        was specified.
        """
        results = []

        # Either draw fuzzed random tensors or use the static size grid.
        if self.fuzz:
            dataset = self.fuzzer().take(self.num_samples)
        else:
            dataset = self.static()

        # Shared timer kwargs so all measurements group under one label in
        # the comparison table.
        kwargs = {
            "label": "Batched Dot",
            "num_threads": self.num_threads,
            "env": self.env,
        }

        for tensors, tensor_params, params in dataset:
            # Identify each row of the table by tensor size and layout.
            sub_label = f"{params['k0']:<6} x {params['k1']:<4} {'' if tensor_params['x']['is_contiguous'] else '(discontiguous)'}"  # noqa
            results.append(
                self._measure(
                    "batched_dot_mul_sum", "mul/sum", tensors, sub_label, kwargs
                )
            )
            results.append(
                self._measure(
                    "batched_dot_bmm", "bmm", tensors, sub_label, kwargs
                )
            )

        if self.saveto is not None:
            with open(self.saveto, "wb") as f:
                pickle.dump(results, f)

        compare = benchmark.Compare(results)
        compare.print()

    def _measure(self, func, description, tensors, sub_label, kwargs):
        """
        Time a single batched dot implementation on one tensor input and
        return the resulting Measurement.
        """
        return benchmark.Timer(
            stmt=f"{func}(x, x)",
            setup=f"from construe.basic import {func}",
            globals=tensors,
            sub_label=sub_label,
            description=description,
            **kwargs,
        ).blocked_autorange(min_run_time=self.min_run_time)

    def fuzzer(self):
        """
        Generates random tensors with 128 to 10000000 elements and sizes k0 and k1
        chosen from a loguniform distribution in [1, 10000], 40% of which will be
        discontiguous on average.
        """
        return Fuzzer(
            parameters=[
                FuzzedParameter(
                    "k0", minval=1, maxval=10000, distribution="loguniform"
                ),
                FuzzedParameter(
                    "k1", minval=1, maxval=10000, distribution="loguniform"
                ),
            ],
            tensors=[
                FuzzedTensor(
                    "x",
                    size=("k0", "k1"),
                    min_elements=128,
                    max_elements=10000000,
                    probability_contiguous=0.6,
                )
            ],
            seed=self.seed,
        )

    def static(self):
        """
        Yields (tensors, tensor_params, params) triples for a fixed grid of
        contiguous all-ones tensors whose sizes are the cross product of
        [16, 64, 1024, 16384]; a deterministic counterpart to the fuzzer.
        """
        sizes = [16, 64, 1024, 16384]
        for k0, k1 in product(sizes, sizes):
            params = {"k0": k0, "k1": k1}

            # torch.ones allocates a contiguous tensor, hence the static
            # layout metadata below.
            tensors = {"x": torch.ones((k0, k1))}
            tensor_params = {"x": {"is_contiguous": True}}

            yield tensors, tensor_params, params

0 comments on commit 675f508

Please sign in to comment.