Skip to content

Commit

Permalink
fix: add mode to benchmarking, refactor slightly
Browse files Browse the repository at this point in the history
  • Loading branch information
autumnjolitz committed Jul 30, 2024
1 parent 9bf8489 commit 0d951cf
Show file tree
Hide file tree
Showing 3 changed files with 66 additions and 35 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -13,3 +13,4 @@ build/*
.pytype/*
instruct/about.py
python*/
.coverage
97 changes: 63 additions & 34 deletions instruct/__main__.py
Original file line number Diff line number Diff line change
@@ -1,39 +1,46 @@
from __future__ import annotations

import timeit
from typing import Literal, Union

try:
    # ``typing.assert_never`` first appeared in Python 3.11; fall back to the
    # backport for older interpreters.
    from typing import assert_never
except ImportError:
    from typing_extensions import assert_never

from instruct import SimpleBase, clear

# Reference ``clear`` at module level so linters treat the import as used;
# it is handed to the timeit globals in the clearing benchmarks below.
clear

# Statement body timed by the field-assignment benchmarks; ``t`` is supplied
# via timeit's ``globals``.
test_statement = """
t.name_or_id = 1
t.name_or_id += 1
"""

class TestH(SimpleBase, history=True):
    """One-field model with ``history=True`` — presumably enables change
    tracking; used to compare against the plain variants below."""

    name_or_id: Union[int, str]

    def __init__(self, **kwargs):
        # Pre-assign a default so the instance always starts with the field set.
        self.name_or_id = 1
        super().__init__(**kwargs)


class Test(SimpleBase):
    """Baseline one-field model ("safeties on" in the benchmark output)."""

    name_or_id: Union[int, str]

    def __init__(self, **kwargs):
        # Pre-assign a default so the instance always starts with the field set.
        self.name_or_id = 1
        super().__init__(**kwargs)


class TestOptimized(SimpleBase, fast=True):
    """One-field model with ``fast=True`` ("safeties off" in the benchmark
    output) — presumably skips validation on assignment; TODO confirm against
    instruct's docs."""

    name_or_id: Union[int, str]

    def __init__(self, **kwargs):
        # Pre-assign a default so the instance always starts with the field set.
        self.name_or_id = 1
        super().__init__(**kwargs)


class ComplexTest(Base):
class ComplexTest(SimpleBase):
id: int
name: str
type: int
Expand All @@ -51,51 +58,72 @@ class Next(ComplexTest):
next: int


US_IN_S: int = 1_000_000  # microseconds per second


def main(unit: Literal["ns", "us"] = "us", limit: int = 10_000) -> None:
    """Run the instruct micro-benchmarks and print per-round timings.

    :param unit: display unit for the allocation section ("us" or "ns");
        the later sections always print nanoseconds (see note below).
    :param limit: number of ``timeit`` iterations per measurement.
    :raises AssertionError: via ``assert_never`` if ``unit`` is not "us"/"ns".
    """

    def report(label: str, ttl: float) -> None:
        # ``ttl`` is total seconds for ``limit`` rounds; convert to per-round
        # microseconds, then scale into the currently selected display unit.
        per_round_us = (ttl * US_IN_S) / limit
        print("{}: {} {}".format(label, fmt.format(per_round_us * multiplier), unit))

    if unit == "us":
        multiplier = 1
        fmt = "{:.2f}"
    elif unit == "ns":
        multiplier = 1000
        fmt = "{:.0f}"
    else:
        assert_never(unit)

    ttl = timeit.timeit('t = Test(name_or_id="name")', number=limit, globals={"Test": Test})
    print("Overhead of allocation")
    report("one field, safeties on", ttl)

    ttl = timeit.timeit(
        't = TestOptimized(name_or_id="name")',
        number=limit,
        globals={"TestOptimized": TestOptimized},
    )
    report("one field, safeties off", ttl)

    # NOTE(review): the remaining sections unconditionally switch to
    # nanoseconds, overriding the requested ``unit`` — presumably because
    # these per-operation costs are sub-microsecond; confirm intent.
    unit = "ns"
    multiplier = 1_000
    fmt = "{:.0f}"

    print("Overhead of setting a field")
    ttl = timeit.timeit(test_statement, number=limit, globals={"t": Test()})
    report("Test with safeties", ttl)

    ttl = timeit.timeit(
        test_statement,
        number=limit,
        globals={
            "t": TestOptimized(),
        },
    )
    report("Test without safeties", ttl)

    print("Overhead of clearing/setting")
    ttl = timeit.timeit(
        "clear(t);t.name_or_id = 1",
        number=limit,
        globals={
            "t": Test(name_or_id="name"),
            "clear": clear,
        },
    )
    report("Test with safeties", ttl)

    ttl = timeit.timeit(
        "clear(t);t.name_or_id = 1",
        number=limit,
        globals={
            "t": TestOptimized(name_or_id="name"),
            "clear": clear,
        },
    )
    report("Test without safeties", ttl)


if __name__ == "__main__":
Expand All @@ -113,7 +141,8 @@ def main(count=1_000_000):
subparsers = parser.add_subparsers()
benchmark = subparsers.add_parser("benchmark")
benchmark.set_defaults(mode="benchmark")
benchmark.add_argument("count", default=1_000_000, type=int, nargs="?")
benchmark.add_argument("unit", choices=["us", "ns"], default="us", nargs="?")
benchmark.add_argument("limit", type=int, default=main.__defaults__[-1], nargs="?")
if PyCallGraph is not None:
callgraph = subparsers.add_parser("callgraph")
callgraph.set_defaults(mode="callgraph")
Expand All @@ -123,7 +152,7 @@ def main(count=1_000_000):
if not args.mode:
raise SystemExit("Use benchmark or callgraph")
if args.mode == "benchmark":
main(args.count)
main(args.unit, args.limit)
if PyCallGraph and args.mode == "callgraph":
names = [random.choice((("test",) * 10) + (-1, None)) for _ in range(1000)]
ids = [random.randint(1, 232) for _ in range(1000)]
Expand Down
3 changes: 2 additions & 1 deletion tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -1015,6 +1015,7 @@ def benchmark(
context: Context,
type_: Union[Type[UnitValue], Type[str], Literal["UnitValue", "str"]] = "str",
*,
mode: Literal["us", "ns"] = "us",
count: Optional[int] = None,
) -> Union[UnitValue, Tuple[str, ...]]:
if type_ == "UnitValue":
Expand All @@ -1023,7 +1024,7 @@ def benchmark(
type_ = str
assert type_ in (str, UnitValue)
python_bin = _.python_path(str, silent=True)
fh = context.run(f"{python_bin} -m instruct benchmark {count or ''}", hide="stdout")
fh = context.run(f"{python_bin} -m instruct benchmark {mode} {count or ''}", hide="stdout")
assert fh is not None
tests = []
section = None
Expand Down

0 comments on commit 0d951cf

Please sign in to comment.