From a595d0ab4788cab24bdf1ef9612618d3e9551809 Mon Sep 17 00:00:00 2001 From: Pierre Delaunay Date: Tue, 17 Oct 2023 12:38:00 -0400 Subject: [PATCH 01/36] Update cuda --- .github/workflows/tests.yml | 4 + .pin/constraints-cuda-torch.txt | 185 +++++++++--------- .../accelerate_opt/requirements.cuda.txt | 106 +++++----- benchmarks/dlrm/requirements.cuda.txt | 126 +++++------- benchmarks/huggingface/requirements.cuda.txt | 62 +++--- benchmarks/rwkv/requirements.cuda.txt | 74 +++---- benchmarks/stargan/requirements.cuda.txt | 47 ++--- benchmarks/super-slomo/requirements.cuda.txt | 51 +++-- benchmarks/timm/requirements.cuda.txt | 54 +++-- benchmarks/torchvision/requirements.cuda.txt | 49 +++-- constraints/rocm.txt | 2 +- milabench/_version.py | 6 +- milabench/pack.py | 2 +- 13 files changed, 356 insertions(+), 412 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 8e0cb45d2..a81dd15d5 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -76,6 +76,10 @@ jobs: poetry lock --no-update poetry install + - name: pin + run: | + poetry run milabench pin --config config/standard.yaml + - name: tests run: | export PATH="/opt/rocm/bin:$PATH" diff --git a/.pin/constraints-cuda-torch.txt b/.pin/constraints-cuda-torch.txt index 9db708f95..6e40fb118 100644 --- a/.pin/constraints-cuda-torch.txt +++ b/.pin/constraints-cuda-torch.txt @@ -2,78 +2,81 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=.pin/constraints-cuda-torch.txt --resolver=backtracking .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/dlrm/requirements.in benchmarks/huggingface/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in +# pip-compile --config=pyproject.toml --output-file=.pin/constraints-cuda-torch.txt --resolver=backtracking .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/dlrm/requirements.in benchmarks/huggingface/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in # --extra-index-url https://download.pytorch.org/whl/cu118 -absl-py==1.4.0 +absl-py==2.0.0 # via tensorboard -accelerate==0.19.0 +accelerate==0.23.0 # via -r benchmarks/accelerate_opt/requirements.in -aiohttp==3.8.4 +aiohttp==3.8.6 # via # datasets # fsspec aiosignal==1.3.1 # via aiohttp +annotated-types==0.6.0 + # via pydantic antlr4-python3-runtime==4.9.3 # via omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via giving -async-timeout==4.0.2 +async-timeout==4.0.3 # via aiohttp attrs==23.1.0 # via aiohttp cachetools==5.3.1 # via google-auth -certifi==2023.5.7 +certifi==2023.7.22 # via requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # aiohttp # requests -cmake==3.26.3 - # via triton codefind==0.1.3 # via ptera -datasets==2.12.0 +datasets==2.14.5 # via # -r benchmarks/accelerate_opt/requirements.in # evaluate deepspeed==0.8.3 - # via -r benchmarks/accelerate_opt/requirements.in -dill==0.3.6 + # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/rwkv/requirements.in +dill==0.3.7 # via # datasets # evaluate # multiprocess -docker==6.1.2 +docker==6.1.3 # via torchx docstring-parser==0.8.1 # via torchx -evaluate==0.4.0 +evaluate==0.4.1 # via -r 
benchmarks/accelerate_opt/requirements.in executing==1.2.0 # via varname -fbgemm-gpu==0.4.1 +fbgemm-gpu==0.5.0+cu118 # via torchrec -filelock==3.12.0 +filelock==3.12.4 # via # huggingface-hub # torch # torchx # transformers # triton -frozenlist==1.3.3 +frozenlist==1.4.0 # via # aiohttp # aiosignal -fsspec[http]==2023.5.0 +fsspec[http]==2023.6.0 # via # datasets # evaluate # huggingface-hub # pytorch-lightning + # torch # torchx future==0.18.3 # via -r benchmarks/dlrm/requirements.in @@ -81,7 +84,7 @@ giving==0.4.2 # via # ptera # voir -google-auth==2.19.0 +google-auth==2.23.3 # via # google-auth-oauthlib # tensorboard @@ -89,39 +92,39 @@ google-auth-oauthlib==1.0.0 # via tensorboard graphviz==0.20.1 # via torchviz -grpcio==1.54.2 +grpcio==1.59.0 # via tensorboard hjson==3.1.0 # via deepspeed -huggingface-hub==0.14.1 +huggingface-hub==0.17.3 # via # -r benchmarks/timm/requirements.in + # accelerate # datasets # evaluate + # tokenizers # transformers idna==3.4 # via # requests # yarl -importlib-metadata==6.6.0 +importlib-metadata==6.8.0 # via # markdown # torchx -iopath==0.1.10 - # via torchrec jinja2==3.1.2 # via torch -joblib==1.2.0 +joblib==1.3.2 # via scikit-learn -lightning-utilities==0.8.0 - # via pytorch-lightning -lit==16.0.5 - # via triton -markdown==3.4.3 +lightning-utilities==0.9.0 + # via + # pytorch-lightning + # torchmetrics +markdown==3.5 # via tensorboard -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # jinja2 # werkzeug @@ -133,7 +136,7 @@ multidict==6.0.4 # via # aiohttp # yarl -multiprocess==0.70.14 +multiprocess==0.70.15 # via # datasets # evaluate @@ -141,13 +144,13 @@ mypy-extensions==1.0.0 # via typing-inspect networkx==3.1 # via torch -ninja==1.11.1 +ninja==1.11.1.1 # via # -r benchmarks/rwkv/requirements.in # deepspeed -numpy==1.24.3 +numpy==1.26.1 # via - # -r benchmarks/dlrm/requirements.in + # -r benchmarks/stargan/requirements.in # -r benchmarks/super-slomo/requirements.in # accelerate # datasets @@ -168,13 +171,13 @@ oauthlib==3.2.2 # via requests-oauthlib omegaconf==2.3.0 # via voir -onnx==1.14.0 +onnx==1.14.1 # via -r benchmarks/dlrm/requirements.in -opencv-python==4.7.0.72 +opencv-python==4.8.1.78 # via -r benchmarks/super-slomo/requirements.in ovld==0.3.2 # via voir -packaging==23.1 +packaging==23.2 # via # accelerate # datasets @@ -186,20 +189,17 @@ packaging==23.1 # pytorch-lightning # torchmetrics # transformers -pandas==2.0.2 +pandas==2.1.1 # via # datasets # evaluate - # torchrec -pillow==9.5.0 +pillow==10.1.0 # via torchvision -portalocker==2.7.0 - # via iopath -protobuf==4.23.2 +protobuf==4.24.4 # via # onnx # tensorboard -psutil==5.9.5 +psutil==5.9.6 # via # accelerate # deepspeed @@ -207,7 +207,7 @@ ptera==1.4.1 # via voir py-cpuinfo==9.0.0 # via deepspeed -pyarrow==12.0.0 +pyarrow==13.0.0 # via datasets pyasn1==0.5.0 # via @@ -215,15 +215,17 @@ pyasn1==0.5.0 # rsa pyasn1-modules==0.3.0 # via google-auth -pydantic==1.10.8 +pydantic==2.4.2 # via deepspeed +pydantic-core==2.10.1 + # via pydantic pydot==1.4.2 # via -r benchmarks/dlrm/requirements.in -pygments==2.15.1 +pygments==2.16.1 # via rich pynvml==11.5.0 # via voir -pyparsing==3.0.9 +pyparsing==3.1.1 # via pydot pyre-extensions==0.0.30 # via torchx @@ -231,9 +233,9 @@ python-dateutil==2.8.2 # via pandas pytorch-lightning==1.9.5 # via -r benchmarks/rwkv/requirements.in -pytz==2023.3 +pytz==2023.3.post1 # via pandas -pyyaml==6.0 +pyyaml==6.0.1 # via # -r benchmarks/timm/requirements.in # accelerate @@ -245,7 +247,7 @@ pyyaml==6.0 # transformers 
reactivex==4.0.4 # via giving -regex==2023.5.5 +regex==2023.10.3 # via transformers requests==2.31.0 # via @@ -262,44 +264,42 @@ requests==2.31.0 requests-oauthlib==1.3.1 # via google-auth-oauthlib responses==0.18.0 - # via - # datasets - # evaluate -rich==13.3.5 + # via evaluate +rich==13.6.0 # via # -r benchmarks/accelerate_opt/requirements.in # voir rsa==4.9 # via google-auth -safetensors==0.3.1 - # via -r benchmarks/timm/requirements.in -scikit-learn==1.2.2 +safetensors==0.4.0 + # via + # -r benchmarks/timm/requirements.in + # transformers +scikit-learn==1.3.1 # via -r benchmarks/dlrm/requirements.in -scipy==1.10.1 +scipy==1.11.3 # via scikit-learn six==1.16.0 # via # asttokens - # google-auth # python-dateutil + # tensorboard sympy==1.12 # via torch tabulate==0.9.0 - # via - # torchrec - # torchx -tensorboard==2.13.0 + # via torchx +tensorboard==2.14.1 # via -r benchmarks/dlrm/requirements.in -tensorboard-data-server==0.7.0 +tensorboard-data-server==0.7.1 # via tensorboard -threadpoolctl==3.1.0 +threadpoolctl==3.2.0 # via scikit-learn -tokenizers==0.13.3 +tokenizers==0.14.1 # via transformers -torch==2.0.1+cu118 +torch==2.1.0+cu118 # via - # -r benchmarks/dlrm/requirements.in - # -r benchmarks/timm/requirements.in + # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/torchvision/requirements.in # accelerate # deepspeed # pytorch-lightning @@ -307,16 +307,15 @@ torch==2.0.1+cu118 # torchmetrics # torchvision # torchviz - # triton -torchaudio==2.0.2+cu118 +torchaudio==2.1.0+cu118 # via -r benchmarks/accelerate_opt/requirements.in -torchmetrics==0.11.4 +torchmetrics==1.0.3 # via # pytorch-lightning # torchrec -torchrec==0.4.0 +torchrec==0.5.0+cu118 # via -r benchmarks/dlrm/requirements.in -torchvision==0.15.2+cu118 +torchvision==0.16.0+cu118 # via # -r benchmarks/super-slomo/requirements.in # -r benchmarks/torchvision/requirements.in @@ -324,31 +323,30 @@ torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in torchx==0.5.0 # via -r benchmarks/dlrm/requirements.in -tqdm==4.65.0 +tqdm==4.66.1 # via - # -r benchmarks/dlrm/requirements.in + # -r benchmarks/super-slomo/requirements.in # -r benchmarks/torchvision/requirements.in # datasets # deepspeed # evaluate # huggingface-hub - # iopath # pytorch-lightning # torchrec # transformers -transformers==4.29.2 +transformers==4.34.0 # via # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/huggingface/requirements.in -triton==2.0.0 +triton==2.1.0 # via torch -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # huggingface-hub - # iopath # lightning-utilities # onnx # pydantic + # pydantic-core # pyre-extensions # pytorch-lightning # reactivex @@ -358,10 +356,9 @@ typing-inspect==0.9.0 # via pyre-extensions tzdata==2023.3 # via pandas -urllib3==1.26.16 +urllib3==1.26.17 # via # docker - # google-auth # requests # responses # torchx @@ -369,21 +366,19 @@ varname==0.10.0 # via giving voir==0.2.10 # via - # -r benchmarks/dlrm/requirements.in - # -r benchmarks/timm/requirements.in -websocket-client==1.5.2 + # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/torchvision/requirements.in +websocket-client==1.6.4 # via docker -werkzeug==2.3.4 - # via tensorboard -wheel==0.40.0 +werkzeug==3.0.0 # via tensorboard -xxhash==3.2.0 +xxhash==3.4.1 # via # datasets # evaluate yarl==1.9.2 # via aiohttp -zipp==3.15.0 +zipp==3.17.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/benchmarks/accelerate_opt/requirements.cuda.txt 
b/benchmarks/accelerate_opt/requirements.cuda.txt index 1f4194c6b..3eb4a0dba 100644 --- a/benchmarks/accelerate_opt/requirements.cuda.txt +++ b/benchmarks/accelerate_opt/requirements.cuda.txt @@ -2,13 +2,13 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/accelerate_opt/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-opt.txt benchmarks/accelerate_opt/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/accelerate_opt/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-opt.txt benchmarks/accelerate_opt/requirements.in # --extra-index-url https://download.pytorch.org/whl/cu118 -accelerate==0.19.0 +accelerate==0.23.0 # via -r benchmarks/accelerate_opt/requirements.in -aiohttp==3.8.4 +aiohttp==3.8.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -17,15 +17,19 @@ aiosignal==1.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp +annotated-types==0.6.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pydantic antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -async-timeout==4.0.2 +async-timeout==4.0.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp @@ -33,59 +37,56 @@ attrs==23.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # requests -cmake==3.26.3 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton codefind==0.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -datasets==2.12.0 +datasets==2.14.5 # via # -r benchmarks/accelerate_opt/requirements.in # evaluate deepspeed==0.8.3 # via -r benchmarks/accelerate_opt/requirements.in -dill==0.3.6 +dill==0.3.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # evaluate # multiprocess -evaluate==0.4.0 +evaluate==0.4.1 # via -r benchmarks/accelerate_opt/requirements.in executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # torch # transformers # triton -frozenlist==1.3.3 +frozenlist==1.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.5.0 +fsspec[http]==2023.6.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # evaluate # huggingface-hub + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -95,11 +96,13 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -huggingface-hub==0.14.1 +huggingface-hub==0.17.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # accelerate # datasets # evaluate + # tokenizers # transformers idna==3.4 # via @@ -110,15 +113,11 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -lit==16.0.5 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -135,7 +134,7 @@ multidict==6.0.4 # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # yarl 
-multiprocess==0.70.14 +multiprocess==0.70.15 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -144,11 +143,11 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -ninja==1.11.1 +ninja==1.11.1.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -numpy==1.24.3 +numpy==1.26.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -167,7 +166,7 @@ ovld==0.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -packaging==23.1 +packaging==23.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -176,16 +175,16 @@ packaging==23.1 # evaluate # huggingface-hub # transformers -pandas==2.0.2 +pandas==2.1.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # evaluate -pillow==9.5.0 +pillow==10.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision -psutil==5.9.5 +psutil==5.9.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -198,15 +197,19 @@ py-cpuinfo==9.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -pyarrow==12.0.0 +pyarrow==13.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets -pydantic==1.10.8 +pydantic==2.4.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -pygments==2.15.1 +pydantic-core==2.10.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pydantic +pygments==2.16.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -218,11 +221,11 @@ python-dateutil==2.8.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -pytz==2023.3 +pytz==2023.3.post1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -234,7 +237,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -regex==2023.5.5 +regex==2023.10.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # transformers @@ -251,12 +254,15 @@ requests==2.31.0 responses==0.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # datasets # evaluate -rich==13.3.5 +rich==13.6.0 # via # -r benchmarks/accelerate_opt/requirements.in # voir +safetensors==0.4.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # transformers six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -266,23 +272,22 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -tokenizers==0.13.3 +tokenizers==0.14.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # transformers -torch==2.0.1+cu118 +torch==2.1.0+cu118 # via # -r benchmarks/accelerate_opt/requirements.in # accelerate # deepspeed # torchaudio # torchvision - # triton -torchaudio==2.0.2+cu118 +torchaudio==2.1.0+cu118 # via -r benchmarks/accelerate_opt/requirements.in -torchvision==0.15.2+cu118 +torchvision==0.16.0+cu118 # via -r benchmarks/accelerate_opt/requirements.in -tqdm==4.65.0 +tqdm==4.66.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -290,24 +295,25 @@ tqdm==4.65.0 # evaluate # huggingface-hub # transformers -transformers==4.29.2 +transformers==4.34.0 # via -r benchmarks/accelerate_opt/requirements.in -triton==2.0.0 +triton==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # pydantic + # pydantic-core # reactivex # torch tzdata==2023.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-cuda-torch.txt # 
requests @@ -318,7 +324,7 @@ varname==0.10.0 # giving voir==0.2.10 # via -r benchmarks/accelerate_opt/requirements.in -xxhash==3.2.0 +xxhash==3.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets diff --git a/benchmarks/dlrm/requirements.cuda.txt b/benchmarks/dlrm/requirements.cuda.txt index 98abcb0cf..67c8619f1 100644 --- a/benchmarks/dlrm/requirements.cuda.txt +++ b/benchmarks/dlrm/requirements.cuda.txt @@ -2,11 +2,11 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/dlrm/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-dlrm.txt benchmarks/dlrm/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/dlrm/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-dlrm.txt benchmarks/dlrm/requirements.in # --extra-index-url https://download.pytorch.org/whl/cu118 -absl-py==1.4.0 +absl-py==2.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard @@ -14,7 +14,7 @@ antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving @@ -22,23 +22,19 @@ cachetools==5.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # google-auth -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -cmake==3.26.3 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton codefind==0.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -docker==6.1.2 +docker==6.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchx @@ -50,19 +46,20 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -fbgemm-gpu==0.4.1 +fbgemm-gpu==0.5.0+cu118 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchrec -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch # torchx # triton -fsspec==2023.5.0 +fsspec==2023.6.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # torch # torchx future==0.18.3 # via -r benchmarks/dlrm/requirements.in @@ -71,7 +68,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -google-auth==2.19.0 +google-auth==2.23.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # google-auth-oauthlib @@ -84,7 +81,7 @@ graphviz==0.20.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchviz -grpcio==1.54.2 +grpcio==1.59.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard @@ -92,36 +89,32 @@ idna==3.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -importlib-metadata==6.6.0 +importlib-metadata==6.8.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # markdown # torchx -iopath==0.1.10 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # torchrec jinja2==3.1.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -joblib==1.2.0 +joblib==1.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # scikit-learn -lit==16.0.5 +lightning-utilities==0.9.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # triton -markdown==3.4.3 + # torchmetrics +markdown==3.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -142,11 +135,10 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -numpy==1.24.3 +numpy==1.26.1 # via # -r benchmarks/dlrm/requirements.in # onnx - # pandas # scikit-learn # scipy # tensorboard @@ -159,26 +151,19 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -onnx==1.14.0 +onnx==1.14.1 # via -r benchmarks/dlrm/requirements.in ovld==0.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -packaging==23.1 +packaging==23.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # docker + # lightning-utilities # torchmetrics -pandas==2.0.2 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # torchrec -portalocker==2.7.0 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # iopath -protobuf==4.23.2 +protobuf==4.24.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # onnx @@ -198,7 +183,7 @@ pyasn1-modules==0.3.0 # google-auth pydot==1.4.2 # via -r benchmarks/dlrm/requirements.in -pygments==2.15.1 +pygments==2.16.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -206,7 +191,7 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pyparsing==3.0.9 +pyparsing==3.1.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pydot @@ -214,15 +199,7 @@ pyre-extensions==0.0.30 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchx -python-dateutil==2.8.2 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # pandas -pytz==2023.3 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # pandas -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf @@ -241,7 +218,7 @@ requests-oauthlib==1.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # google-auth-oauthlib -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -249,9 +226,9 @@ rsa==4.9 # via # -c .pin/../.pin/constraints-cuda-torch.txt # google-auth -scikit-learn==1.2.2 +scikit-learn==1.3.1 # via -r benchmarks/dlrm/requirements.in -scipy==1.10.1 +scipy==1.11.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # scikit-learn @@ -259,8 +236,7 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens - # google-auth - # python-dateutil + # tensorboard sympy==1.12 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -268,47 +244,44 @@ sympy==1.12 tabulate==0.9.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # torchrec # torchx -tensorboard==2.13.0 +tensorboard==2.14.1 # via -r benchmarks/dlrm/requirements.in -tensorboard-data-server==0.7.0 +tensorboard-data-server==0.7.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard -threadpoolctl==3.1.0 +threadpoolctl==3.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # scikit-learn -torch==2.0.1+cu118 +torch==2.1.0+cu118 # via # -r benchmarks/dlrm/requirements.in # torchmetrics # torchviz - # triton -torchmetrics==0.11.4 +torchmetrics==1.0.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchrec -torchrec==0.4.0 +torchrec==0.5.0+cu118 # via -r benchmarks/dlrm/requirements.in torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in torchx==0.5.0 # via -r benchmarks/dlrm/requirements.in -tqdm==4.65.0 +tqdm==4.66.1 # via # -r benchmarks/dlrm/requirements.in - # iopath # torchrec -triton==2.0.0 +triton==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # iopath + # lightning-utilities # onnx # 
pyre-extensions # reactivex @@ -318,15 +291,10 @@ typing-inspect==0.9.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pyre-extensions -tzdata==2023.3 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # pandas -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-cuda-torch.txt # docker - # google-auth # requests # torchx varname==0.10.0 @@ -335,19 +303,15 @@ varname==0.10.0 # giving voir==0.2.10 # via -r benchmarks/dlrm/requirements.in -websocket-client==1.5.2 +websocket-client==1.6.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # docker -werkzeug==2.3.4 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # tensorboard -wheel==0.40.0 +werkzeug==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard -zipp==3.15.0 +zipp==3.17.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # importlib-metadata diff --git a/benchmarks/huggingface/requirements.cuda.txt b/benchmarks/huggingface/requirements.cuda.txt index 70053a636..8d9e91fa5 100644 --- a/benchmarks/huggingface/requirements.cuda.txt +++ b/benchmarks/huggingface/requirements.cuda.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/huggingface/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-hf.txt benchmarks/huggingface/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/huggingface/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-hf.txt benchmarks/huggingface/requirements.in # --extra-index-url https://download.pytorch.org/whl/cu118 @@ -10,22 +10,18 @@ antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -cmake==3.26.3 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton codefind==0.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -34,25 +30,27 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # torch # transformers # triton -fsspec==2023.5.0 +fsspec==2023.6.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.14.1 +huggingface-hub==0.17.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # tokenizers # transformers idna==3.4 # via @@ -62,15 +60,11 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -lit==16.0.5 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -86,7 +80,7 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -numpy==1.24.3 +numpy==1.26.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # transformers @@ -98,7 +92,7 @@ ovld==0.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -packaging==23.1 +packaging==23.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub @@ -107,7 
+101,7 @@ ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pygments==2.15.1 +pygments==2.16.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -115,7 +109,7 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub @@ -125,7 +119,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -regex==2023.5.5 +regex==2023.10.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # transformers @@ -134,10 +128,14 @@ requests==2.31.0 # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # transformers -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir +safetensors==0.4.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # transformers six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -146,32 +144,30 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -tokenizers==0.13.3 +tokenizers==0.14.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # transformers -torch==2.0.1+cu118 - # via - # -r benchmarks/huggingface/requirements.in - # triton -tqdm==4.65.0 +torch==2.1.0+cu118 + # via -r benchmarks/huggingface/requirements.in +tqdm==4.66.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # transformers -transformers==4.29.2 +transformers==4.34.0 # via -r benchmarks/huggingface/requirements.in -triton==2.0.0 +triton==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # reactivex # torch -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests diff --git a/benchmarks/rwkv/requirements.cuda.txt b/benchmarks/rwkv/requirements.cuda.txt index 2c0c866e4..4a52cb3c3 100644 --- a/benchmarks/rwkv/requirements.cuda.txt +++ b/benchmarks/rwkv/requirements.cuda.txt @@ -2,11 +2,11 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/rwkv/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-rwkv.txt benchmarks/rwkv/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/rwkv/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-rwkv.txt benchmarks/rwkv/requirements.in # --extra-index-url https://download.pytorch.org/whl/cu118 -aiohttp==3.8.4 +aiohttp==3.8.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # fsspec @@ -14,15 +14,19 @@ aiosignal==1.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp +annotated-types==0.6.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pydantic antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -async-timeout==4.0.2 +async-timeout==4.0.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp @@ -30,19 +34,15 @@ attrs==23.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # requests -cmake==3.26.3 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton codefind==0.1.3 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt @@ -53,20 +53,21 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton -frozenlist==1.3.3 +frozenlist==1.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.5.0 +fsspec[http]==2023.6.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pytorch-lightning + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -85,19 +86,16 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -lightning-utilities==0.8.0 +lightning-utilities==0.9.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pytorch-lightning -lit==16.0.5 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton -markdown-it-py==2.2.0 + # torchmetrics +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -118,11 +116,11 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -ninja==1.11.1 +ninja==1.11.1.1 # via # -r benchmarks/rwkv/requirements.in # deepspeed -numpy==1.24.3 +numpy==1.26.1 # via # -r benchmarks/rwkv/requirements.in # deepspeed @@ -136,14 +134,14 @@ ovld==0.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -packaging==23.1 +packaging==23.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed # lightning-utilities # pytorch-lightning # torchmetrics -psutil==5.9.5 +psutil==5.9.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed @@ -155,11 +153,15 @@ py-cpuinfo==9.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -pydantic==1.10.8 +pydantic==2.4.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -pygments==2.15.1 +pydantic-core==2.10.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pydantic +pygments==2.16.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -169,7 +171,7 @@ pynvml==11.5.0 # voir pytorch-lightning==1.9.5 # via -r benchmarks/rwkv/requirements.in -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf @@ -182,7 +184,7 @@ requests==2.31.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # fsspec -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -194,35 +196,35 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -torch==2.0.1+cu118 +torch==2.1.0+cu118 # via # -r benchmarks/rwkv/requirements.in # deepspeed # pytorch-lightning # torchmetrics - # triton -torchmetrics==0.11.4 +torchmetrics==1.0.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pytorch-lightning -tqdm==4.65.0 +tqdm==4.66.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed # pytorch-lightning -triton==2.0.0 +triton==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # lightning-utilities # pydantic + # pydantic-core # pytorch-lightning # reactivex # torch -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -230,7 +232,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir==0.2.9 +voir==0.2.10 # via -r benchmarks/rwkv/requirements.in yarl==1.9.2 # via diff --git a/benchmarks/stargan/requirements.cuda.txt 
b/benchmarks/stargan/requirements.cuda.txt index 16485fb78..a84334b2e 100644 --- a/benchmarks/stargan/requirements.cuda.txt +++ b/benchmarks/stargan/requirements.cuda.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/stargan/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-stargan.txt benchmarks/stargan/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/stargan/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-stargan.txt benchmarks/stargan/requirements.in # --extra-index-url https://download.pytorch.org/whl/cu118 @@ -10,22 +10,18 @@ antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -cmake==3.26.3 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton codefind==0.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -34,11 +30,15 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton +fsspec==2023.6.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -52,15 +52,11 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -lit==16.0.5 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -76,7 +72,7 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -numpy==1.24.3 +numpy==1.26.1 # via # -r benchmarks/stargan/requirements.in # torchvision @@ -88,7 +84,7 @@ ovld==0.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pillow==9.5.0 +pillow==10.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision @@ -96,7 +92,7 @@ ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pygments==2.15.1 +pygments==2.16.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -104,7 +100,7 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf @@ -116,7 +112,7 @@ requests==2.31.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -128,23 +124,22 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -torch==2.0.1+cu118 +torch==2.1.0+cu118 # via # -r benchmarks/stargan/requirements.in # torchvision - # triton -torchvision==0.15.2+cu118 +torchvision==0.16.0+cu118 # via -r benchmarks/stargan/requirements.in -triton==2.0.0 +triton==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # reactivex # torch -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests diff --git 
a/benchmarks/super-slomo/requirements.cuda.txt b/benchmarks/super-slomo/requirements.cuda.txt index 40caddc4d..4b3196fec 100644 --- a/benchmarks/super-slomo/requirements.cuda.txt +++ b/benchmarks/super-slomo/requirements.cuda.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/super-slomo/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-super-slomo.txt benchmarks/super-slomo/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/super-slomo/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-super-slomo.txt benchmarks/super-slomo/requirements.in # --extra-index-url https://download.pytorch.org/whl/cu118 @@ -10,22 +10,18 @@ antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -cmake==3.26.3 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton codefind==0.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -34,11 +30,15 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton +fsspec==2023.6.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -52,15 +52,11 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -lit==16.0.5 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -76,7 +72,7 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -numpy==1.24.3 +numpy==1.26.1 # via # -r benchmarks/super-slomo/requirements.in # opencv-python @@ -85,13 +81,13 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opencv-python==4.7.0.72 +opencv-python==4.8.1.78 # via -r benchmarks/super-slomo/requirements.in ovld==0.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pillow==9.5.0 +pillow==10.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision @@ -99,7 +95,7 @@ ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pygments==2.15.1 +pygments==2.16.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -107,7 +103,7 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf @@ -119,7 +115,7 @@ requests==2.31.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -131,25 +127,24 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -torch==2.0.1+cu118 +torch==2.1.0+cu118 # via # -r benchmarks/super-slomo/requirements.in # torchvision - # triton -torchvision==0.15.2+cu118 +torchvision==0.16.0+cu118 # via -r benchmarks/super-slomo/requirements.in -tqdm==4.65.0 +tqdm==4.66.1 # via -r 
benchmarks/super-slomo/requirements.in -triton==2.0.0 +triton==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # reactivex # torch -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests diff --git a/benchmarks/timm/requirements.cuda.txt b/benchmarks/timm/requirements.cuda.txt index e23485257..577888870 100644 --- a/benchmarks/timm/requirements.cuda.txt +++ b/benchmarks/timm/requirements.cuda.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/timm/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-timm.txt benchmarks/timm/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/timm/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-timm.txt benchmarks/timm/requirements.in # --extra-index-url https://download.pytorch.org/whl/cu118 @@ -10,22 +10,18 @@ antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -cmake==3.26.3 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton codefind==0.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -34,22 +30,23 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # torch # triton -fsspec==2023.5.0 +fsspec==2023.6.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.14.1 +huggingface-hub==0.17.3 # via -r benchmarks/timm/requirements.in idna==3.4 # via @@ -59,15 +56,11 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -lit==16.0.5 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -83,7 +76,7 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -numpy==1.24.3 +numpy==1.26.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision @@ -95,11 +88,11 @@ ovld==0.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -packaging==23.1 +packaging==23.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub -pillow==9.5.0 +pillow==10.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision @@ -107,7 +100,7 @@ ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pygments==2.15.1 +pygments==2.16.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -115,7 +108,7 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pyyaml==6.0 +pyyaml==6.0.1 # via # -r benchmarks/timm/requirements.in # huggingface-hub @@ -129,11 +122,11 @@ requests==2.31.0 # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # torchvision -rich==13.3.5 +rich==13.6.0 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # voir -safetensors==0.3.1 +safetensors==0.4.0 # via -r benchmarks/timm/requirements.in six==1.16.0 # via @@ -143,28 +136,27 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -torch==2.0.1+cu118 +torch==2.1.0+cu118 # via # -r benchmarks/timm/requirements.in # torchvision - # triton -torchvision==0.15.2+cu118 +torchvision==0.16.0+cu118 # via -r benchmarks/timm/requirements.in -tqdm==4.65.0 +tqdm==4.66.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub -triton==2.0.0 +triton==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # reactivex # torch -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests diff --git a/benchmarks/torchvision/requirements.cuda.txt b/benchmarks/torchvision/requirements.cuda.txt index aa60bdcfb..ea1d5e2c8 100644 --- a/benchmarks/torchvision/requirements.cuda.txt +++ b/benchmarks/torchvision/requirements.cuda.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/torchvision/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-torchvision.txt benchmarks/torchvision/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/torchvision/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-torchvision.txt benchmarks/torchvision/requirements.in # --extra-index-url https://download.pytorch.org/whl/cu118 @@ -10,22 +10,18 @@ antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -cmake==3.26.3 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton codefind==0.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -34,11 +30,15 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton +fsspec==2023.6.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -52,15 +52,11 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -lit==16.0.5 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # triton -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -76,7 +72,7 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -numpy==1.24.3 +numpy==1.26.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision @@ -88,7 +84,7 @@ ovld==0.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pillow==9.5.0 +pillow==10.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision @@ -96,7 +92,7 @@ ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pygments==2.15.1 +pygments==2.16.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -104,7 +100,7 @@ pynvml==11.5.0 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # voir -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf @@ -116,7 +112,7 @@ requests==2.31.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -128,25 +124,24 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -torch==2.0.1+cu118 +torch==2.1.0+cu118 # via # -r benchmarks/torchvision/requirements.in # torchvision - # triton -torchvision==0.15.2+cu118 +torchvision==0.16.0+cu118 # via -r benchmarks/torchvision/requirements.in -tqdm==4.65.0 +tqdm==4.66.1 # via -r benchmarks/torchvision/requirements.in -triton==2.0.0 +triton==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # reactivex # torch -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests diff --git a/constraints/rocm.txt b/constraints/rocm.txt index 8d434fa36..dd6ef8a59 100644 --- a/constraints/rocm.txt +++ b/constraints/rocm.txt @@ -1,2 +1,2 @@ ---extra-index-url https://download.pytorch.org/whl/rocm5.4.2/ +--extra-index-url https://download.pytorch.org/whl/rocm5.6/ deepspeed==0.8.3 diff --git a/milabench/_version.py b/milabench/_version.py index 5a191cf45..1d65f2645 100644 --- a/milabench/_version.py +++ b/milabench/_version.py @@ -1,5 +1,5 @@ """This file is generated, do not modify""" -__tag__ = "cce4c99" -__commit__ = "cce4c994559481334f9c15b01caa2d4e7b0cf82a" -__date__ = "2023-07-04 17:33:49 -0400" +__tag__ = "v0.0.6-23-ga210276" +__commit__ = "a2102768d7a3a8ce81d9599daa9b40ca412e7121" +__date__ = "2023-07-26 12:25:40 -0400" diff --git a/milabench/pack.py b/milabench/pack.py index 2656fec55..a1bf5c3cb 100644 --- a/milabench/pack.py +++ b/milabench/pack.py @@ -376,7 +376,7 @@ async def pin( ivar = self.config.get("install_variant", None) if ivar == "unpinned": raise Exception("Cannot pin the 'unpinned' variant.") - assert self.phase == "pin" + # assert self.phase == "pin" for base_reqs, reqs in self.requirements_map().items(): if not base_reqs.exists(): raise FileNotFoundError( From b732226c4803b615f5b0f75a4db96990128d6360 Mon Sep 17 00:00:00 2001 From: Pierre Delaunay Date: Tue, 17 Oct 2023 13:07:00 -0400 Subject: [PATCH 02/36] update rocm --- .pin/constraints-rocm-torch.txt | 185 +++++++++--------- .../accelerate_opt/requirements.rocm.txt | 103 +++++----- benchmarks/dlrm/requirements.rocm.txt | 128 +++++------- benchmarks/huggingface/requirements.rocm.txt | 56 +++--- benchmarks/rwkv/requirements.rocm.txt | 71 ++++--- benchmarks/stargan/requirements.rocm.txt | 44 +++-- benchmarks/super-slomo/requirements.rocm.txt | 48 ++--- benchmarks/timm/requirements.rocm.txt | 51 ++--- benchmarks/torchvision/requirements.rocm.txt | 46 +++-- 9 files changed, 376 insertions(+), 356 deletions(-) diff --git a/.pin/constraints-rocm-torch.txt b/.pin/constraints-rocm-torch.txt index efd674cce..5f1ce62c1 100644 --- a/.pin/constraints-rocm-torch.txt +++ b/.pin/constraints-rocm-torch.txt @@ -2,78 +2,83 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=.pin/constraints-rocm-torch.txt --resolver=backtracking .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/dlrm/requirements.in benchmarks/huggingface/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in 
benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in +# pip-compile --config=pyproject.toml --output-file=.pin/constraints-rocm-torch.txt --resolver=backtracking .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/dlrm/requirements.in benchmarks/huggingface/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.4.2/ +--extra-index-url https://download.pytorch.org/whl/rocm5.6/ -absl-py==1.4.0 +absl-py==2.0.0 # via tensorboard -accelerate==0.19.0 +accelerate==0.23.0 # via -r benchmarks/accelerate_opt/requirements.in -aiohttp==3.8.4 +aiohttp==3.8.6 # via # datasets # fsspec aiosignal==1.3.1 # via aiohttp +annotated-types==0.6.0 + # via pydantic antlr4-python3-runtime==4.9.3 # via omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via giving -async-timeout==4.0.2 +async-timeout==4.0.3 # via aiohttp attrs==23.1.0 # via aiohttp cachetools==5.3.1 # via google-auth -certifi==2023.5.7 +certifi==2023.7.22 # via requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # aiohttp # requests -cmake==3.26.3 +cmake==3.27.7 # via pytorch-triton-rocm codefind==0.1.3 # via ptera -datasets==2.12.0 +datasets==2.14.5 # via # -r benchmarks/accelerate_opt/requirements.in # evaluate deepspeed==0.8.3 - # via -r benchmarks/accelerate_opt/requirements.in -dill==0.3.6 + # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/rwkv/requirements.in +dill==0.3.7 # via # datasets # evaluate # multiprocess -docker==6.1.2 +docker==6.1.3 # via torchx docstring-parser==0.8.1 # via torchx -evaluate==0.4.0 +evaluate==0.4.1 # via -r benchmarks/accelerate_opt/requirements.in executing==1.2.0 # via varname -fbgemm-gpu==0.4.1 +fbgemm-gpu==0.5.0 # via torchrec -filelock==3.12.0 +filelock==3.12.4 # via # huggingface-hub # pytorch-triton-rocm # torch # torchx # transformers -frozenlist==1.3.3 +frozenlist==1.4.0 # via # aiohttp # aiosignal -fsspec[http]==2023.5.0 +fsspec[http]==2023.6.0 # via # datasets # evaluate # huggingface-hub # pytorch-lightning + # torch # torchx future==0.18.3 # via -r benchmarks/dlrm/requirements.in @@ -81,7 +86,7 @@ giving==0.4.2 # via # ptera # voir -google-auth==2.19.0 +google-auth==2.23.3 # via # google-auth-oauthlib # tensorboard @@ -89,39 +94,41 @@ google-auth-oauthlib==1.0.0 # via tensorboard graphviz==0.20.1 # via torchviz -grpcio==1.54.2 +grpcio==1.59.0 # via tensorboard hjson==3.1.0 # via deepspeed -huggingface-hub==0.14.1 +huggingface-hub==0.17.3 # via # -r benchmarks/timm/requirements.in + # accelerate # datasets # evaluate + # tokenizers # transformers idna==3.4 # via # requests # yarl -importlib-metadata==6.6.0 +importlib-metadata==6.8.0 # via # markdown # torchx -iopath==0.1.10 - # via torchrec jinja2==3.1.2 # via torch -joblib==1.2.0 +joblib==1.3.2 # via scikit-learn -lightning-utilities==0.8.0 - # via pytorch-lightning -lit==16.0.5 +lightning-utilities==0.9.0 + # via + # pytorch-lightning + # torchmetrics +lit==17.0.2 # via pytorch-triton-rocm -markdown==3.4.3 +markdown==3.5 # via tensorboard -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # jinja2 # werkzeug @@ -133,7 +140,7 @@ multidict==6.0.4 # via # aiohttp # yarl -multiprocess==0.70.14 +multiprocess==0.70.15 # via # datasets # evaluate @@ -141,18 +148,19 @@ mypy-extensions==1.0.0 # via 
typing-inspect networkx==3.1 # via torch -ninja==1.11.1 +ninja==1.11.1.1 # via # -r benchmarks/rwkv/requirements.in # deepspeed -numpy==1.24.3 +numpy==1.26.1 # via # -r benchmarks/dlrm/requirements.in - # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/stargan/requirements.in # accelerate # datasets # deepspeed # evaluate + # fbgemm-gpu # onnx # opencv-python # pandas @@ -168,13 +176,13 @@ oauthlib==3.2.2 # via requests-oauthlib omegaconf==2.3.0 # via voir -onnx==1.14.0 +onnx==1.14.1 # via -r benchmarks/dlrm/requirements.in -opencv-python==4.7.0.72 +opencv-python==4.8.1.78 # via -r benchmarks/super-slomo/requirements.in ovld==0.3.2 # via voir -packaging==23.1 +packaging==23.2 # via # accelerate # datasets @@ -186,20 +194,17 @@ packaging==23.1 # pytorch-lightning # torchmetrics # transformers -pandas==2.0.2 +pandas==2.1.1 # via # datasets # evaluate - # torchrec -pillow==9.5.0 +pillow==10.1.0 # via torchvision -portalocker==2.7.0 - # via iopath -protobuf==4.23.2 +protobuf==4.24.4 # via # onnx # tensorboard -psutil==5.9.5 +psutil==5.9.6 # via # accelerate # deepspeed @@ -207,7 +212,7 @@ ptera==1.4.1 # via voir py-cpuinfo==9.0.0 # via deepspeed -pyarrow==12.0.0 +pyarrow==13.0.0 # via datasets pyasn1==0.5.0 # via @@ -215,15 +220,17 @@ pyasn1==0.5.0 # rsa pyasn1-modules==0.3.0 # via google-auth -pydantic==1.10.8 +pydantic==2.4.2 # via deepspeed +pydantic-core==2.10.1 + # via pydantic pydot==1.4.2 # via -r benchmarks/dlrm/requirements.in -pygments==2.15.1 +pygments==2.16.1 # via rich pynvml==11.5.0 # via voir -pyparsing==3.0.9 +pyparsing==3.1.1 # via pydot pyre-extensions==0.0.30 # via torchx @@ -231,11 +238,11 @@ python-dateutil==2.8.2 # via pandas pytorch-lightning==1.9.5 # via -r benchmarks/rwkv/requirements.in -pytorch-triton-rocm==2.0.2 +pytorch-triton-rocm==2.1.0 # via torch -pytz==2023.3 +pytz==2023.3.post1 # via pandas -pyyaml==6.0 +pyyaml==6.0.1 # via # -r benchmarks/timm/requirements.in # accelerate @@ -247,7 +254,7 @@ pyyaml==6.0 # transformers reactivex==4.0.4 # via giving -regex==2023.5.5 +regex==2023.10.3 # via transformers requests==2.31.0 # via @@ -264,43 +271,41 @@ requests==2.31.0 requests-oauthlib==1.3.1 # via google-auth-oauthlib responses==0.18.0 - # via - # datasets - # evaluate -rich==13.3.5 + # via evaluate +rich==13.6.0 # via # -r benchmarks/accelerate_opt/requirements.in # voir rsa==4.9 # via google-auth -safetensors==0.3.1 - # via -r benchmarks/timm/requirements.in -scikit-learn==1.2.2 +safetensors==0.4.0 + # via + # -r benchmarks/timm/requirements.in + # transformers +scikit-learn==1.3.1 # via -r benchmarks/dlrm/requirements.in -scipy==1.10.1 +scipy==1.11.3 # via scikit-learn six==1.16.0 # via # asttokens - # google-auth # python-dateutil + # tensorboard sympy==1.12 # via torch tabulate==0.9.0 - # via - # torchrec - # torchx -tensorboard==2.13.0 + # via torchx +tensorboard==2.14.1 # via -r benchmarks/dlrm/requirements.in -tensorboard-data-server==0.7.0 +tensorboard-data-server==0.7.1 # via tensorboard -threadpoolctl==3.1.0 +threadpoolctl==3.2.0 # via scikit-learn -tokenizers==0.13.3 +tokenizers==0.14.1 # via transformers -torch==2.0.1+rocm5.4.2 +torch==2.1.0+rocm5.6 # via - # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/stargan/requirements.in # -r benchmarks/torchvision/requirements.in # accelerate # deepspeed @@ -310,45 +315,44 @@ torch==2.0.1+rocm5.4.2 # torchmetrics # torchvision # torchviz -torchaudio==2.0.2+rocm5.4.2 +torchaudio==2.1.0+rocm5.6 # via -r benchmarks/accelerate_opt/requirements.in -torchmetrics==0.11.4 
+torchmetrics==1.0.3 # via # pytorch-lightning # torchrec -torchrec==0.4.0 +torchrec==0.5.0 # via -r benchmarks/dlrm/requirements.in -torchvision==0.15.2+rocm5.4.2 +torchvision==0.16.0+rocm5.6 # via - # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/stargan/requirements.in # -r benchmarks/torchvision/requirements.in torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in torchx==0.5.0 # via -r benchmarks/dlrm/requirements.in -tqdm==4.65.0 +tqdm==4.66.1 # via - # -r benchmarks/dlrm/requirements.in + # -r benchmarks/super-slomo/requirements.in # -r benchmarks/torchvision/requirements.in # datasets # deepspeed # evaluate # huggingface-hub - # iopath # pytorch-lightning # torchrec # transformers -transformers==4.29.2 +transformers==4.34.0 # via # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/huggingface/requirements.in -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # huggingface-hub - # iopath # lightning-utilities # onnx # pydantic + # pydantic-core # pyre-extensions # pytorch-lightning # reactivex @@ -358,10 +362,9 @@ typing-inspect==0.9.0 # via pyre-extensions tzdata==2023.3 # via pandas -urllib3==1.26.16 +urllib3==1.26.17 # via # docker - # google-auth # requests # responses # torchx @@ -369,21 +372,19 @@ varname==0.10.0 # via giving voir==0.2.10 # via - # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/stargan/requirements.in # -r benchmarks/torchvision/requirements.in -websocket-client==1.5.2 +websocket-client==1.6.4 # via docker -werkzeug==2.3.4 - # via tensorboard -wheel==0.40.0 +werkzeug==3.0.0 # via tensorboard -xxhash==3.2.0 +xxhash==3.4.1 # via # datasets # evaluate yarl==1.9.2 # via aiohttp -zipp==3.15.0 +zipp==3.17.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/benchmarks/accelerate_opt/requirements.rocm.txt b/benchmarks/accelerate_opt/requirements.rocm.txt index 09cbce64f..4bddec1f0 100644 --- a/benchmarks/accelerate_opt/requirements.rocm.txt +++ b/benchmarks/accelerate_opt/requirements.rocm.txt @@ -2,13 +2,13 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/accelerate_opt/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-opt.txt benchmarks/accelerate_opt/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/accelerate_opt/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-opt.txt benchmarks/accelerate_opt/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.4.2/ +--extra-index-url https://download.pytorch.org/whl/rocm5.6/ -accelerate==0.19.0 +accelerate==0.23.0 # via -r benchmarks/accelerate_opt/requirements.in -aiohttp==3.8.4 +aiohttp==3.8.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets @@ -17,15 +17,19 @@ aiosignal==1.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp +annotated-types==0.6.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # pydantic antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -async-timeout==4.0.2 +async-timeout==4.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp @@ -33,16 +37,16 @@ attrs==23.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests 
-charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # requests -cmake==3.26.3 +cmake==3.27.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -50,42 +54,43 @@ codefind==0.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera -datasets==2.12.0 +datasets==2.14.5 # via # -r benchmarks/accelerate_opt/requirements.in # evaluate deepspeed==0.8.3 # via -r benchmarks/accelerate_opt/requirements.in -dill==0.3.6 +dill==0.3.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # evaluate # multiprocess -evaluate==0.4.0 +evaluate==0.4.1 # via -r benchmarks/accelerate_opt/requirements.in executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # pytorch-triton-rocm # torch # transformers -frozenlist==1.3.3 +frozenlist==1.4.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.5.0 +fsspec[http]==2023.6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # evaluate # huggingface-hub + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -95,11 +100,13 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -huggingface-hub==0.14.1 +huggingface-hub==0.17.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt + # accelerate # datasets # evaluate + # tokenizers # transformers idna==3.4 # via @@ -110,15 +117,15 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==16.0.5 +lit==17.0.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -135,7 +142,7 @@ multidict==6.0.4 # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # yarl -multiprocess==0.70.14 +multiprocess==0.70.15 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets @@ -144,11 +151,11 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -ninja==1.11.1 +ninja==1.11.1.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -numpy==1.24.3 +numpy==1.26.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # accelerate @@ -167,7 +174,7 @@ ovld==0.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -packaging==23.1 +packaging==23.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # accelerate @@ -176,16 +183,16 @@ packaging==23.1 # evaluate # huggingface-hub # transformers -pandas==2.0.2 +pandas==2.1.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # evaluate -pillow==9.5.0 +pillow==10.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision -psutil==5.9.5 +psutil==5.9.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt # accelerate @@ -198,15 +205,19 @@ py-cpuinfo==9.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -pyarrow==12.0.0 +pyarrow==13.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets -pydantic==1.10.8 +pydantic==2.4.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -pygments==2.15.1 +pydantic-core==2.10.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # pydantic +pygments==2.16.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -218,15 +229,15 @@ python-dateutil==2.8.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # 
pandas -pytorch-triton-rocm==2.0.2 +pytorch-triton-rocm==2.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -pytz==2023.3 +pytz==2023.3.post1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pandas -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # accelerate @@ -238,7 +249,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -regex==2023.5.5 +regex==2023.10.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # transformers @@ -255,12 +266,15 @@ requests==2.31.0 responses==0.18.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # datasets # evaluate -rich==13.3.5 +rich==13.6.0 # via # -r benchmarks/accelerate_opt/requirements.in # voir +safetensors==0.4.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # transformers six==1.16.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -270,11 +284,11 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -tokenizers==0.13.3 +tokenizers==0.14.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # transformers -torch==2.0.1+rocm5.4.2 +torch==2.1.0+rocm5.6 # via # -r benchmarks/accelerate_opt/requirements.in # accelerate @@ -282,11 +296,11 @@ torch==2.0.1+rocm5.4.2 # pytorch-triton-rocm # torchaudio # torchvision -torchaudio==2.0.2+rocm5.4.2 +torchaudio==2.1.0+rocm5.6 # via -r benchmarks/accelerate_opt/requirements.in -torchvision==0.15.2+rocm5.4.2 +torchvision==0.16.0+rocm5.6 # via -r benchmarks/accelerate_opt/requirements.in -tqdm==4.65.0 +tqdm==4.66.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets @@ -294,20 +308,21 @@ tqdm==4.65.0 # evaluate # huggingface-hub # transformers -transformers==4.29.2 +transformers==4.34.0 # via -r benchmarks/accelerate_opt/requirements.in -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # pydantic + # pydantic-core # reactivex # torch tzdata==2023.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pandas -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -318,7 +333,7 @@ varname==0.10.0 # giving voir==0.2.10 # via -r benchmarks/accelerate_opt/requirements.in -xxhash==3.2.0 +xxhash==3.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets diff --git a/benchmarks/dlrm/requirements.rocm.txt b/benchmarks/dlrm/requirements.rocm.txt index a8d29307e..1d040803f 100644 --- a/benchmarks/dlrm/requirements.rocm.txt +++ b/benchmarks/dlrm/requirements.rocm.txt @@ -2,11 +2,11 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/dlrm/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-dlrm.txt benchmarks/dlrm/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/dlrm/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-dlrm.txt benchmarks/dlrm/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.4.2/ +--extra-index-url https://download.pytorch.org/whl/rocm5.6/ -absl-py==1.4.0 +absl-py==2.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # tensorboard @@ -14,7 +14,7 @@ antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving @@ -22,15 +22,15 @@ cachetools==5.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # google-auth -certifi==2023.5.7 +certifi==2023.7.22 # via # 
-c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -cmake==3.26.3 +cmake==3.27.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -38,7 +38,7 @@ codefind==0.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera -docker==6.1.2 +docker==6.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchx @@ -50,19 +50,20 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -fbgemm-gpu==0.4.1 +fbgemm-gpu==0.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchrec -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch # torchx -fsspec==2023.5.0 +fsspec==2023.6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt + # torch # torchx future==0.18.3 # via -r benchmarks/dlrm/requirements.in @@ -71,7 +72,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-rocm-torch.txt # ptera # voir -google-auth==2.19.0 +google-auth==2.23.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # google-auth-oauthlib @@ -84,7 +85,7 @@ graphviz==0.20.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchviz -grpcio==1.54.2 +grpcio==1.59.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # tensorboard @@ -92,36 +93,36 @@ idna==3.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -importlib-metadata==6.6.0 +importlib-metadata==6.8.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # markdown # torchx -iopath==0.1.10 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # torchrec jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -joblib==1.2.0 +joblib==1.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # scikit-learn -lit==16.0.5 +lightning-utilities==0.9.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # torchmetrics +lit==17.0.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm -markdown==3.4.3 +markdown==3.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # tensorboard -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -142,11 +143,11 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -numpy==1.24.3 +numpy==1.26.1 # via # -r benchmarks/dlrm/requirements.in + # fbgemm-gpu # onnx - # pandas # scikit-learn # scipy # tensorboard @@ -159,26 +160,19 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -onnx==1.14.0 +onnx==1.14.1 # via -r benchmarks/dlrm/requirements.in ovld==0.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -packaging==23.1 +packaging==23.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # docker + # lightning-utilities # torchmetrics -pandas==2.0.2 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # torchrec -portalocker==2.7.0 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # iopath -protobuf==4.23.2 +protobuf==4.24.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # onnx @@ -198,7 +192,7 @@ pyasn1-modules==0.3.0 # google-auth pydot==1.4.2 # via -r benchmarks/dlrm/requirements.in -pygments==2.15.1 +pygments==2.16.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -206,7 +200,7 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pyparsing==3.0.9 +pyparsing==3.1.1 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # pydot @@ -214,19 +208,11 @@ pyre-extensions==0.0.30 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchx -python-dateutil==2.8.2 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pandas -pytorch-triton-rocm==2.0.2 +pytorch-triton-rocm==2.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -pytz==2023.3 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pandas -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf @@ -245,7 +231,7 @@ requests-oauthlib==1.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # google-auth-oauthlib -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir @@ -253,9 +239,9 @@ rsa==4.9 # via # -c .pin/../.pin/constraints-rocm-torch.txt # google-auth -scikit-learn==1.2.2 +scikit-learn==1.3.1 # via -r benchmarks/dlrm/requirements.in -scipy==1.10.1 +scipy==1.11.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # scikit-learn @@ -263,8 +249,7 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # asttokens - # google-auth - # python-dateutil + # tensorboard sympy==1.12 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -272,43 +257,41 @@ sympy==1.12 tabulate==0.9.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # torchrec # torchx -tensorboard==2.13.0 +tensorboard==2.14.1 # via -r benchmarks/dlrm/requirements.in -tensorboard-data-server==0.7.0 +tensorboard-data-server==0.7.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # tensorboard -threadpoolctl==3.1.0 +threadpoolctl==3.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # scikit-learn -torch==2.0.1+rocm5.4.2 +torch==2.1.0+rocm5.6 # via # -r benchmarks/dlrm/requirements.in # pytorch-triton-rocm # torchmetrics # torchviz -torchmetrics==0.11.4 +torchmetrics==1.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchrec -torchrec==0.4.0 +torchrec==0.5.0 # via -r benchmarks/dlrm/requirements.in torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in torchx==0.5.0 # via -r benchmarks/dlrm/requirements.in -tqdm==4.65.0 +tqdm==4.66.1 # via # -r benchmarks/dlrm/requirements.in - # iopath # torchrec -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # iopath + # lightning-utilities # onnx # pyre-extensions # reactivex @@ -318,15 +301,10 @@ typing-inspect==0.9.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pyre-extensions -tzdata==2023.3 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pandas -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-rocm-torch.txt # docker - # google-auth # requests # torchx varname==0.10.0 @@ -335,19 +313,15 @@ varname==0.10.0 # giving voir==0.2.10 # via -r benchmarks/dlrm/requirements.in -websocket-client==1.5.2 +websocket-client==1.6.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # docker -werkzeug==2.3.4 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # tensorboard -wheel==0.40.0 +werkzeug==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # tensorboard -zipp==3.15.0 +zipp==3.17.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # importlib-metadata diff --git a/benchmarks/huggingface/requirements.rocm.txt b/benchmarks/huggingface/requirements.rocm.txt index b8d76be1f..e0068b2ad 100644 --- a/benchmarks/huggingface/requirements.rocm.txt +++ b/benchmarks/huggingface/requirements.rocm.txt @@ -2,27 +2,27 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# 
pip-compile --output-file=benchmarks/huggingface/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-hf.txt benchmarks/huggingface/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/huggingface/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-hf.txt benchmarks/huggingface/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.4.2/ +--extra-index-url https://download.pytorch.org/whl/rocm5.6/ antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -cmake==3.26.3 +cmake==3.27.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -34,25 +34,27 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # pytorch-triton-rocm # torch # transformers -fsspec==2023.5.0 +fsspec==2023.6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera # voir -huggingface-hub==0.14.1 +huggingface-hub==0.17.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt + # tokenizers # transformers idna==3.4 # via @@ -62,15 +64,15 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==16.0.5 +lit==17.0.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -86,7 +88,7 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -numpy==1.24.3 +numpy==1.26.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # transformers @@ -98,7 +100,7 @@ ovld==0.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -packaging==23.1 +packaging==23.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub @@ -107,7 +109,7 @@ ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pygments==2.15.1 +pygments==2.16.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -115,11 +117,11 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pytorch-triton-rocm==2.0.2 +pytorch-triton-rocm==2.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub @@ -129,7 +131,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -regex==2023.5.5 +regex==2023.10.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # transformers @@ -138,10 +140,14 @@ requests==2.31.0 # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # transformers -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir +safetensors==0.4.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # transformers six==1.16.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -150,28 +156,28 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -tokenizers==0.13.3 +tokenizers==0.14.1 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # transformers -torch==2.0.1+rocm5.4.2 +torch==2.1.0+rocm5.6 # via # -r benchmarks/huggingface/requirements.in # pytorch-triton-rocm -tqdm==4.65.0 +tqdm==4.66.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # transformers -transformers==4.29.2 +transformers==4.34.0 # via -r benchmarks/huggingface/requirements.in -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # reactivex # torch -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests diff --git a/benchmarks/rwkv/requirements.rocm.txt b/benchmarks/rwkv/requirements.rocm.txt index 79710b03d..ba94a506d 100644 --- a/benchmarks/rwkv/requirements.rocm.txt +++ b/benchmarks/rwkv/requirements.rocm.txt @@ -2,11 +2,11 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/rwkv/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-rwkv.txt benchmarks/rwkv/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/rwkv/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-rwkv.txt benchmarks/rwkv/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.4.2/ +--extra-index-url https://download.pytorch.org/whl/rocm5.6/ -aiohttp==3.8.4 +aiohttp==3.8.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt # fsspec @@ -14,15 +14,19 @@ aiosignal==1.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp +annotated-types==0.6.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # pydantic antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -async-timeout==4.0.2 +async-timeout==4.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp @@ -30,16 +34,16 @@ attrs==23.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # requests -cmake==3.26.3 +cmake==3.27.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -53,20 +57,21 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch -frozenlist==1.3.3 +frozenlist==1.4.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.5.0 +fsspec[http]==2023.6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-lightning + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -85,19 +90,20 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lightning-utilities==0.8.0 +lightning-utilities==0.9.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-lightning -lit==16.0.5 + # torchmetrics +lit==17.0.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -118,11 +124,11 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # 
torch -ninja==1.11.1 +ninja==1.11.1.1 # via # -r benchmarks/rwkv/requirements.in # deepspeed -numpy==1.24.3 +numpy==1.26.1 # via # -r benchmarks/rwkv/requirements.in # deepspeed @@ -136,14 +142,14 @@ ovld==0.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -packaging==23.1 +packaging==23.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed # lightning-utilities # pytorch-lightning # torchmetrics -psutil==5.9.5 +psutil==5.9.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed @@ -155,11 +161,15 @@ py-cpuinfo==9.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -pydantic==1.10.8 +pydantic==2.4.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -pygments==2.15.1 +pydantic-core==2.10.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # pydantic +pygments==2.16.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -169,11 +179,11 @@ pynvml==11.5.0 # voir pytorch-lightning==1.9.5 # via -r benchmarks/rwkv/requirements.in -pytorch-triton-rocm==2.0.2 +pytorch-triton-rocm==2.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf @@ -186,7 +196,7 @@ requests==2.31.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # fsspec -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir @@ -198,31 +208,32 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -torch==2.0.1+rocm5.4.2 +torch==2.1.0+rocm5.6 # via # -r benchmarks/rwkv/requirements.in # deepspeed # pytorch-lightning # pytorch-triton-rocm # torchmetrics -torchmetrics==0.11.4 +torchmetrics==1.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-lightning -tqdm==4.65.0 +tqdm==4.66.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed # pytorch-lightning -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # lightning-utilities # pydantic + # pydantic-core # pytorch-lightning # reactivex # torch -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -230,7 +241,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir==0.2.9 +voir==0.2.10 # via -r benchmarks/rwkv/requirements.in yarl==1.9.2 # via diff --git a/benchmarks/stargan/requirements.rocm.txt b/benchmarks/stargan/requirements.rocm.txt index 594c94949..a5ae5e902 100644 --- a/benchmarks/stargan/requirements.rocm.txt +++ b/benchmarks/stargan/requirements.rocm.txt @@ -2,27 +2,27 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/stargan/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-stargan.txt benchmarks/stargan/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/stargan/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-stargan.txt benchmarks/stargan/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.4.2/ +--extra-index-url https://download.pytorch.org/whl/rocm5.6/ antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # requests -cmake==3.26.3 +cmake==3.27.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -34,11 +34,15 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch +fsspec==2023.6.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -52,15 +56,15 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==16.0.5 +lit==17.0.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -76,7 +80,7 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -numpy==1.24.3 +numpy==1.26.1 # via # -r benchmarks/stargan/requirements.in # torchvision @@ -88,7 +92,7 @@ ovld==0.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pillow==9.5.0 +pillow==10.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision @@ -96,7 +100,7 @@ ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pygments==2.15.1 +pygments==2.16.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -104,11 +108,11 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pytorch-triton-rocm==2.0.2 +pytorch-triton-rocm==2.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf @@ -120,7 +124,7 @@ requests==2.31.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir @@ -132,19 +136,19 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -torch==2.0.1+rocm5.4.2 +torch==2.1.0+rocm5.6 # via # -r benchmarks/stargan/requirements.in # pytorch-triton-rocm # torchvision -torchvision==0.15.2+rocm5.4.2 +torchvision==0.16.0+rocm5.6 # via -r benchmarks/stargan/requirements.in -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # reactivex # torch -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests diff --git a/benchmarks/super-slomo/requirements.rocm.txt b/benchmarks/super-slomo/requirements.rocm.txt index a3557abc5..e185e1ceb 100644 --- a/benchmarks/super-slomo/requirements.rocm.txt +++ b/benchmarks/super-slomo/requirements.rocm.txt @@ -2,27 +2,27 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/super-slomo/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-super-slomo.txt benchmarks/super-slomo/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/super-slomo/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-super-slomo.txt benchmarks/super-slomo/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.4.2/ +--extra-index-url https://download.pytorch.org/whl/rocm5.6/ antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving 
-certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -cmake==3.26.3 +cmake==3.27.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -34,11 +34,15 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch +fsspec==2023.6.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -52,15 +56,15 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==16.0.5 +lit==17.0.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -76,7 +80,7 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -numpy==1.24.3 +numpy==1.26.1 # via # -r benchmarks/super-slomo/requirements.in # opencv-python @@ -85,13 +89,13 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -opencv-python==4.7.0.72 +opencv-python==4.8.1.78 # via -r benchmarks/super-slomo/requirements.in ovld==0.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pillow==9.5.0 +pillow==10.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision @@ -99,7 +103,7 @@ ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pygments==2.15.1 +pygments==2.16.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -107,11 +111,11 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pytorch-triton-rocm==2.0.2 +pytorch-triton-rocm==2.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf @@ -123,7 +127,7 @@ requests==2.31.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir @@ -135,21 +139,21 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -torch==2.0.1+rocm5.4.2 +torch==2.1.0+rocm5.6 # via # -r benchmarks/super-slomo/requirements.in # pytorch-triton-rocm # torchvision -torchvision==0.15.2+rocm5.4.2 +torchvision==0.16.0+rocm5.6 # via -r benchmarks/super-slomo/requirements.in -tqdm==4.65.0 +tqdm==4.66.1 # via -r benchmarks/super-slomo/requirements.in -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # reactivex # torch -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests diff --git a/benchmarks/timm/requirements.rocm.txt b/benchmarks/timm/requirements.rocm.txt index 12864565b..8474489a7 100644 --- a/benchmarks/timm/requirements.rocm.txt +++ b/benchmarks/timm/requirements.rocm.txt @@ -2,27 +2,27 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/timm/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-timm.txt benchmarks/timm/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/timm/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-timm.txt 
benchmarks/timm/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.4.2/ +--extra-index-url https://download.pytorch.org/whl/rocm5.6/ antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -cmake==3.26.3 +cmake==3.27.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -34,22 +34,23 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # pytorch-triton-rocm # torch -fsspec==2023.5.0 +fsspec==2023.6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera # voir -huggingface-hub==0.14.1 +huggingface-hub==0.17.3 # via -r benchmarks/timm/requirements.in idna==3.4 # via @@ -59,15 +60,15 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==16.0.5 +lit==17.0.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -83,7 +84,7 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -numpy==1.24.3 +numpy==1.26.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision @@ -95,11 +96,11 @@ ovld==0.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -packaging==23.1 +packaging==23.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub -pillow==9.5.0 +pillow==10.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision @@ -107,7 +108,7 @@ ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pygments==2.15.1 +pygments==2.16.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -115,11 +116,11 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pytorch-triton-rocm==2.0.2 +pytorch-triton-rocm==2.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -pyyaml==6.0 +pyyaml==6.0.1 # via # -r benchmarks/timm/requirements.in # huggingface-hub @@ -133,11 +134,11 @@ requests==2.31.0 # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # torchvision -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -safetensors==0.3.1 +safetensors==0.4.0 # via -r benchmarks/timm/requirements.in six==1.16.0 # via @@ -147,24 +148,24 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -torch==2.0.1+rocm5.4.2 +torch==2.1.0+rocm5.6 # via # -r benchmarks/timm/requirements.in # pytorch-triton-rocm # torchvision -torchvision==0.15.2+rocm5.4.2 +torchvision==0.16.0+rocm5.6 # via -r benchmarks/timm/requirements.in -tqdm==4.65.0 +tqdm==4.66.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # reactivex # torch -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests diff --git 
a/benchmarks/torchvision/requirements.rocm.txt b/benchmarks/torchvision/requirements.rocm.txt index 09ebf1695..aab1c06f0 100644 --- a/benchmarks/torchvision/requirements.rocm.txt +++ b/benchmarks/torchvision/requirements.rocm.txt @@ -2,27 +2,27 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --output-file=benchmarks/torchvision/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-torchvision.txt benchmarks/torchvision/requirements.in +# pip-compile --config=pyproject.toml --output-file=benchmarks/torchvision/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-torchvision.txt benchmarks/torchvision/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.4.2/ +--extra-index-url https://download.pytorch.org/whl/rocm5.6/ antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf -asttokens==2.2.1 +asttokens==2.4.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -certifi==2023.5.7 +certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.1.0 +charset-normalizer==3.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -cmake==3.26.3 +cmake==3.27.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -34,11 +34,15 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.12.0 +filelock==3.12.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch +fsspec==2023.6.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # torch giving==0.4.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -52,15 +56,15 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==16.0.5 +lit==17.0.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm -markdown-it-py==2.2.0 +markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.2 +markupsafe==2.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -76,7 +80,7 @@ networkx==3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -numpy==1.24.3 +numpy==1.26.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision @@ -88,7 +92,7 @@ ovld==0.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pillow==9.5.0 +pillow==10.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision @@ -96,7 +100,7 @@ ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pygments==2.15.1 +pygments==2.16.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -104,11 +108,11 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pytorch-triton-rocm==2.0.2 +pytorch-triton-rocm==2.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -pyyaml==6.0 +pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # omegaconf @@ -120,7 +124,7 @@ requests==2.31.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision -rich==13.3.5 +rich==13.6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir @@ -132,21 +136,21 @@ sympy==1.12 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -torch==2.0.1+rocm5.4.2 +torch==2.1.0+rocm5.6 # via # -r benchmarks/torchvision/requirements.in # pytorch-triton-rocm # torchvision -torchvision==0.15.2+rocm5.4.2 +torchvision==0.16.0+rocm5.6 # via -r benchmarks/torchvision/requirements.in -tqdm==4.65.0 +tqdm==4.66.1 # via -r 
benchmarks/torchvision/requirements.in -typing-extensions==4.6.2 +typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # reactivex # torch -urllib3==1.26.16 +urllib3==1.26.17 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests From f3533d050ad2e3b0ce28cd1032a5f4b413498110 Mon Sep 17 00:00:00 2001 From: Pierre Delaunay Date: Tue, 17 Oct 2023 13:09:50 -0400 Subject: [PATCH 03/36] Add pin command in the CI --- .github/workflows/tests.yml | 4 +- .pin/constraints-cpu-torch.txt | 385 ++++++++++++++++++ .../accelerate_opt/requirements.cpu.txt | 335 +++++++++++++++ benchmarks/dlrm/requirements.cpu.txt | 320 +++++++++++++++ benchmarks/huggingface/requirements.cpu.txt | 179 ++++++++ benchmarks/rwkv/requirements.cpu.txt | 240 +++++++++++ benchmarks/stargan/requirements.cpu.txt | 151 +++++++ benchmarks/super-slomo/requirements.cpu.txt | 156 +++++++ benchmarks/timm/requirements.cpu.txt | 168 ++++++++ benchmarks/torchvision/requirements.cpu.txt | 153 +++++++ 10 files changed, 2090 insertions(+), 1 deletion(-) create mode 100644 .pin/constraints-cpu-torch.txt create mode 100644 benchmarks/accelerate_opt/requirements.cpu.txt create mode 100644 benchmarks/dlrm/requirements.cpu.txt create mode 100644 benchmarks/huggingface/requirements.cpu.txt create mode 100644 benchmarks/rwkv/requirements.cpu.txt create mode 100644 benchmarks/stargan/requirements.cpu.txt create mode 100644 benchmarks/super-slomo/requirements.cpu.txt create mode 100644 benchmarks/timm/requirements.cpu.txt create mode 100644 benchmarks/torchvision/requirements.cpu.txt diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a81dd15d5..5eb69d068 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -78,7 +78,9 @@ jobs: - name: pin run: | - poetry run milabench pin --config config/standard.yaml + MILABENCH_GPU_ARCH=cuda poetry run milabench pin -c constraints/cuda.txt --config config/standard.yaml + MILABENCH_GPU_ARCH=rocm poetry run milabench pin -c constraints/rocm.txt --config config/standard.yaml + git diff --stat - name: tests run: | diff --git a/.pin/constraints-cpu-torch.txt b/.pin/constraints-cpu-torch.txt new file mode 100644 index 000000000..9ad402d9e --- /dev/null +++ b/.pin/constraints-cpu-torch.txt @@ -0,0 +1,385 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --config=pyproject.toml --output-file=.pin/constraints-cpu-torch.txt --resolver=backtracking .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/dlrm/requirements.in benchmarks/huggingface/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in +# +--extra-index-url https://download.pytorch.org/whl/cu118 + +absl-py==2.0.0 + # via tensorboard +accelerate==0.23.0 + # via -r benchmarks/accelerate_opt/requirements.in +aiohttp==3.8.6 + # via + # datasets + # fsspec +aiosignal==1.3.1 + # via aiohttp +annotated-types==0.6.0 + # via pydantic +antlr4-python3-runtime==4.9.3 + # via omegaconf +asttokens==2.4.0 + # via giving +async-timeout==4.0.3 + # via aiohttp +attrs==23.1.0 + # via aiohttp +cachetools==5.3.1 + # via google-auth +certifi==2023.7.22 + # via requests +charset-normalizer==3.3.0 + # via + # aiohttp + # requests +codefind==0.1.3 + # via ptera +datasets==2.14.5 + # via + # -r benchmarks/accelerate_opt/requirements.in + # evaluate +deepspeed==0.8.3 + # via + # -r 
benchmarks/accelerate_opt/requirements.in + # -r benchmarks/rwkv/requirements.in +dill==0.3.7 + # via + # datasets + # evaluate + # multiprocess +docker==6.1.3 + # via torchx +docstring-parser==0.8.1 + # via torchx +evaluate==0.4.1 + # via -r benchmarks/accelerate_opt/requirements.in +executing==1.2.0 + # via varname +fbgemm-gpu==0.5.0+cu118 + # via torchrec +filelock==3.12.4 + # via + # huggingface-hub + # torch + # torchx + # transformers + # triton +frozenlist==1.4.0 + # via + # aiohttp + # aiosignal +fsspec[http]==2023.6.0 + # via + # datasets + # evaluate + # huggingface-hub + # pytorch-lightning + # torch + # torchx +future==0.18.3 + # via -r benchmarks/dlrm/requirements.in +giving==0.4.2 + # via + # ptera + # voir +google-auth==2.23.3 + # via + # google-auth-oauthlib + # tensorboard +google-auth-oauthlib==1.0.0 + # via tensorboard +graphviz==0.20.1 + # via torchviz +grpcio==1.59.0 + # via tensorboard +hjson==3.1.0 + # via deepspeed +huggingface-hub==0.17.3 + # via + # -r benchmarks/timm/requirements.in + # accelerate + # datasets + # evaluate + # tokenizers + # transformers +idna==3.4 + # via + # requests + # yarl +importlib-metadata==6.8.0 + # via + # markdown + # torchx +jinja2==3.1.2 + # via torch +joblib==1.3.2 + # via scikit-learn +lightning-utilities==0.9.0 + # via + # pytorch-lightning + # torchmetrics +markdown==3.5 + # via tensorboard +markdown-it-py==3.0.0 + # via rich +markupsafe==2.1.3 + # via + # jinja2 + # werkzeug +mdurl==0.1.2 + # via markdown-it-py +mpmath==1.3.0 + # via sympy +multidict==6.0.4 + # via + # aiohttp + # yarl +multiprocess==0.70.15 + # via + # datasets + # evaluate +mypy-extensions==1.0.0 + # via typing-inspect +networkx==3.1 + # via torch +ninja==1.11.1.1 + # via + # -r benchmarks/rwkv/requirements.in + # deepspeed +numpy==1.26.1 + # via + # -r benchmarks/rwkv/requirements.in + # -r benchmarks/stargan/requirements.in + # accelerate + # datasets + # deepspeed + # evaluate + # onnx + # opencv-python + # pandas + # pyarrow + # pytorch-lightning + # scikit-learn + # scipy + # tensorboard + # torchmetrics + # torchvision + # transformers +oauthlib==3.2.2 + # via requests-oauthlib +omegaconf==2.3.0 + # via voir +onnx==1.14.1 + # via -r benchmarks/dlrm/requirements.in +opencv-python==4.8.1.78 + # via -r benchmarks/super-slomo/requirements.in +ovld==0.3.2 + # via voir +packaging==23.2 + # via + # accelerate + # datasets + # deepspeed + # docker + # evaluate + # huggingface-hub + # lightning-utilities + # pytorch-lightning + # torchmetrics + # transformers +pandas==2.1.1 + # via + # datasets + # evaluate +pillow==10.1.0 + # via torchvision +protobuf==4.24.4 + # via + # onnx + # tensorboard +psutil==5.9.6 + # via + # accelerate + # deepspeed +ptera==1.4.1 + # via voir +py-cpuinfo==9.0.0 + # via deepspeed +pyarrow==13.0.0 + # via datasets +pyasn1==0.5.0 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.3.0 + # via google-auth +pydantic==2.4.2 + # via deepspeed +pydantic-core==2.10.1 + # via pydantic +pydot==1.4.2 + # via -r benchmarks/dlrm/requirements.in +pygments==2.16.1 + # via rich +pynvml==11.5.0 + # via voir +pyparsing==3.1.1 + # via pydot +pyre-extensions==0.0.30 + # via torchx +python-dateutil==2.8.2 + # via pandas +pytorch-lightning==1.9.5 + # via -r benchmarks/rwkv/requirements.in +pytz==2023.3.post1 + # via pandas +pyyaml==6.0.1 + # via + # -r benchmarks/timm/requirements.in + # accelerate + # datasets + # huggingface-hub + # omegaconf + # pytorch-lightning + # torchx + # transformers +reactivex==4.0.4 + # via giving +regex==2023.10.3 + # via 
transformers +requests==2.31.0 + # via + # datasets + # docker + # evaluate + # fsspec + # huggingface-hub + # requests-oauthlib + # responses + # tensorboard + # torchvision + # transformers +requests-oauthlib==1.3.1 + # via google-auth-oauthlib +responses==0.18.0 + # via evaluate +rich==13.6.0 + # via + # -r benchmarks/accelerate_opt/requirements.in + # voir +rsa==4.9 + # via google-auth +safetensors==0.4.0 + # via + # -r benchmarks/timm/requirements.in + # transformers +scikit-learn==1.3.1 + # via -r benchmarks/dlrm/requirements.in +scipy==1.11.3 + # via scikit-learn +six==1.16.0 + # via + # asttokens + # python-dateutil + # tensorboard +sympy==1.12 + # via torch +tabulate==0.9.0 + # via torchx +tensorboard==2.14.1 + # via -r benchmarks/dlrm/requirements.in +tensorboard-data-server==0.7.1 + # via tensorboard +threadpoolctl==3.2.0 + # via scikit-learn +tokenizers==0.14.1 + # via transformers +torch==2.1.0+cu118 + # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/stargan/requirements.in + # accelerate + # deepspeed + # pytorch-lightning + # torchaudio + # torchmetrics + # torchvision + # torchviz +torchaudio==2.1.0+cu118 + # via -r benchmarks/accelerate_opt/requirements.in +torchmetrics==1.0.3 + # via + # pytorch-lightning + # torchrec +torchrec==0.5.0+cu118 + # via -r benchmarks/dlrm/requirements.in +torchvision==0.16.0+cu118 + # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/stargan/requirements.in +torchviz==0.0.2 + # via -r benchmarks/dlrm/requirements.in +torchx==0.5.0 + # via -r benchmarks/dlrm/requirements.in +tqdm==4.66.1 + # via + # -r benchmarks/dlrm/requirements.in + # -r benchmarks/super-slomo/requirements.in + # datasets + # deepspeed + # evaluate + # huggingface-hub + # pytorch-lightning + # torchrec + # transformers +transformers==4.34.0 + # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/huggingface/requirements.in +triton==2.1.0 + # via torch +typing-extensions==4.8.0 + # via + # huggingface-hub + # lightning-utilities + # onnx + # pydantic + # pydantic-core + # pyre-extensions + # pytorch-lightning + # reactivex + # torch + # typing-inspect +typing-inspect==0.9.0 + # via pyre-extensions +tzdata==2023.3 + # via pandas +urllib3==1.26.17 + # via + # docker + # requests + # responses + # torchx +varname==0.10.0 + # via giving +voir==0.2.10 + # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/stargan/requirements.in +websocket-client==1.6.4 + # via docker +werkzeug==3.0.0 + # via tensorboard +xxhash==3.4.1 + # via + # datasets + # evaluate +yarl==1.9.2 + # via aiohttp +zipp==3.17.0 + # via importlib-metadata + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/benchmarks/accelerate_opt/requirements.cpu.txt b/benchmarks/accelerate_opt/requirements.cpu.txt new file mode 100644 index 000000000..8a243548d --- /dev/null +++ b/benchmarks/accelerate_opt/requirements.cpu.txt @@ -0,0 +1,335 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --config=pyproject.toml --output-file=benchmarks/accelerate_opt/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-opt.txt benchmarks/accelerate_opt/requirements.in +# +--extra-index-url https://download.pytorch.org/whl/cu118 + +accelerate==0.23.0 + # via -r benchmarks/accelerate_opt/requirements.in +aiohttp==3.8.6 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # datasets + # fsspec +aiosignal==1.3.1 + # via + # -c 
.pin/../.pin/constraints-cpu-torch.txt + # aiohttp +annotated-types==0.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # pydantic +antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf +asttokens==2.4.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +async-timeout==4.0.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp +attrs==23.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp +certifi==2023.7.22 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +charset-normalizer==3.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp + # requests +codefind==0.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera +datasets==2.14.5 + # via + # -r benchmarks/accelerate_opt/requirements.in + # evaluate +deepspeed==0.8.3 + # via -r benchmarks/accelerate_opt/requirements.in +dill==0.3.7 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # datasets + # evaluate + # multiprocess +evaluate==0.4.1 + # via -r benchmarks/accelerate_opt/requirements.in +executing==1.2.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # varname +filelock==3.12.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # torch + # transformers + # triton +frozenlist==1.4.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp + # aiosignal +fsspec[http]==2023.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # datasets + # evaluate + # huggingface-hub + # torch +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera + # voir +hjson==3.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # deepspeed +huggingface-hub==0.17.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # accelerate + # datasets + # evaluate + # tokenizers + # transformers +idna==3.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests + # yarl +jinja2==3.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +markupsafe==2.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # jinja2 +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # markdown-it-py +mpmath==1.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # sympy +multidict==6.0.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp + # yarl +multiprocess==0.70.15 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # datasets + # evaluate +networkx==3.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +ninja==1.11.1.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # deepspeed +numpy==1.26.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # accelerate + # datasets + # deepspeed + # evaluate + # pandas + # pyarrow + # torchvision + # transformers +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +ovld==0.3.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +packaging==23.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # accelerate + # datasets + # deepspeed + # evaluate + # huggingface-hub + # transformers +pandas==2.1.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # datasets + # evaluate +pillow==10.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchvision +psutil==5.9.6 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # accelerate + # deepspeed +ptera==1.4.1 + # via + # -c 
.pin/../.pin/constraints-cpu-torch.txt + # voir +py-cpuinfo==9.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # deepspeed +pyarrow==13.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # datasets +pydantic==2.4.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # deepspeed +pydantic-core==2.10.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # pydantic +pygments==2.16.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +pynvml==11.5.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +python-dateutil==2.8.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # pandas +pytz==2023.3.post1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # pandas +pyyaml==6.0.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # accelerate + # datasets + # huggingface-hub + # omegaconf + # transformers +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +regex==2023.10.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # transformers +requests==2.31.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # datasets + # evaluate + # fsspec + # huggingface-hub + # responses + # torchvision + # transformers +responses==0.18.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # evaluate +rich==13.6.0 + # via + # -r benchmarks/accelerate_opt/requirements.in + # voir +safetensors==0.4.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # transformers +six==1.16.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # asttokens + # python-dateutil +sympy==1.12 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +tokenizers==0.14.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # transformers +torch==2.1.0+cu118 + # via + # -r benchmarks/accelerate_opt/requirements.in + # accelerate + # deepspeed + # torchaudio + # torchvision +torchaudio==2.1.0+cu118 + # via -r benchmarks/accelerate_opt/requirements.in +torchvision==0.16.0+cu118 + # via -r benchmarks/accelerate_opt/requirements.in +tqdm==4.66.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # datasets + # deepspeed + # evaluate + # huggingface-hub + # transformers +transformers==4.34.0 + # via -r benchmarks/accelerate_opt/requirements.in +triton==2.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +typing-extensions==4.8.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # pydantic + # pydantic-core + # reactivex + # torch +tzdata==2023.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # pandas +urllib3==1.26.17 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests + # responses +varname==0.10.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +voir==0.2.10 + # via -r benchmarks/accelerate_opt/requirements.in +xxhash==3.4.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # datasets + # evaluate +yarl==1.9.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp diff --git a/benchmarks/dlrm/requirements.cpu.txt b/benchmarks/dlrm/requirements.cpu.txt new file mode 100644 index 000000000..0dba4a2c0 --- /dev/null +++ b/benchmarks/dlrm/requirements.cpu.txt @@ -0,0 +1,320 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --config=pyproject.toml --output-file=benchmarks/dlrm/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-dlrm.txt benchmarks/dlrm/requirements.in +# +--extra-index-url https://download.pytorch.org/whl/cu118 + 
+absl-py==2.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # tensorboard +antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf +asttokens==2.4.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +cachetools==5.3.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # google-auth +certifi==2023.7.22 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +charset-normalizer==3.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +codefind==0.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera +docker==6.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchx +docstring-parser==0.8.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchx +executing==1.2.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # varname +fbgemm-gpu==0.5.0+cu118 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchrec +filelock==3.12.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch + # torchx + # triton +fsspec==2023.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch + # torchx +future==0.18.3 + # via -r benchmarks/dlrm/requirements.in +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera + # voir +google-auth==2.23.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # google-auth-oauthlib + # tensorboard +google-auth-oauthlib==1.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # tensorboard +graphviz==0.20.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchviz +grpcio==1.59.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # tensorboard +idna==3.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +importlib-metadata==6.8.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # markdown + # torchx +jinja2==3.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +joblib==1.3.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # scikit-learn +lightning-utilities==0.9.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchmetrics +markdown==3.5 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # tensorboard +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +markupsafe==2.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # jinja2 + # werkzeug +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # markdown-it-py +mpmath==1.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # sympy +mypy-extensions==1.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # typing-inspect +networkx==3.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +numpy==1.26.1 + # via + # -r benchmarks/dlrm/requirements.in + # onnx + # scikit-learn + # scipy + # tensorboard + # torchmetrics +oauthlib==3.2.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests-oauthlib +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +onnx==1.14.1 + # via -r benchmarks/dlrm/requirements.in +ovld==0.3.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +packaging==23.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # docker + # lightning-utilities + # torchmetrics +protobuf==4.24.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # onnx + # tensorboard +ptera==1.4.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pyasn1==0.5.0 + # via + # -c 
.pin/../.pin/constraints-cpu-torch.txt + # pyasn1-modules + # rsa +pyasn1-modules==0.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # google-auth +pydot==1.4.2 + # via -r benchmarks/dlrm/requirements.in +pygments==2.16.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +pynvml==11.5.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pyparsing==3.1.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # pydot +pyre-extensions==0.0.30 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchx +pyyaml==6.0.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf + # torchx +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +requests==2.31.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # docker + # requests-oauthlib + # tensorboard +requests-oauthlib==1.3.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # google-auth-oauthlib +rich==13.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +rsa==4.9 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # google-auth +scikit-learn==1.3.1 + # via -r benchmarks/dlrm/requirements.in +scipy==1.11.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # scikit-learn +six==1.16.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # asttokens + # tensorboard +sympy==1.12 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +tabulate==0.9.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchx +tensorboard==2.14.1 + # via -r benchmarks/dlrm/requirements.in +tensorboard-data-server==0.7.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # tensorboard +threadpoolctl==3.2.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # scikit-learn +torch==2.1.0+cu118 + # via + # -r benchmarks/dlrm/requirements.in + # torchmetrics + # torchviz +torchmetrics==1.0.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchrec +torchrec==0.5.0+cu118 + # via -r benchmarks/dlrm/requirements.in +torchviz==0.0.2 + # via -r benchmarks/dlrm/requirements.in +torchx==0.5.0 + # via -r benchmarks/dlrm/requirements.in +tqdm==4.66.1 + # via + # -r benchmarks/dlrm/requirements.in + # torchrec +triton==2.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +typing-extensions==4.8.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # lightning-utilities + # onnx + # pyre-extensions + # reactivex + # torch + # typing-inspect +typing-inspect==0.9.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # pyre-extensions +urllib3==1.26.17 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # docker + # requests + # torchx +varname==0.10.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +voir==0.2.10 + # via -r benchmarks/dlrm/requirements.in +websocket-client==1.6.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # docker +werkzeug==3.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # tensorboard +zipp==3.17.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # importlib-metadata + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/benchmarks/huggingface/requirements.cpu.txt b/benchmarks/huggingface/requirements.cpu.txt new file mode 100644 index 000000000..5d0a261ec --- /dev/null +++ b/benchmarks/huggingface/requirements.cpu.txt @@ -0,0 +1,179 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --config=pyproject.toml 
--output-file=benchmarks/huggingface/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-hf.txt benchmarks/huggingface/requirements.in +# +--extra-index-url https://download.pytorch.org/whl/cu118 + +antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf +asttokens==2.4.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +certifi==2023.7.22 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +charset-normalizer==3.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +codefind==0.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera +executing==1.2.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # varname +filelock==3.12.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # torch + # transformers + # triton +fsspec==2023.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # torch +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera + # voir +huggingface-hub==0.17.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # tokenizers + # transformers +idna==3.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +jinja2==3.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +markupsafe==2.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # jinja2 +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # markdown-it-py +mpmath==1.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # sympy +networkx==3.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +numpy==1.26.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # transformers +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +ovld==0.3.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +packaging==23.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # transformers +ptera==1.4.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pygments==2.16.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +pynvml==11.5.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pyyaml==6.0.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # omegaconf + # transformers +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +regex==2023.10.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # transformers +requests==2.31.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # transformers +rich==13.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +safetensors==0.4.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # transformers +six==1.16.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # asttokens +sympy==1.12 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +tokenizers==0.14.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # transformers +torch==2.1.0+cu118 + # via -r benchmarks/huggingface/requirements.in +tqdm==4.66.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # transformers +transformers==4.34.0 + # via -r benchmarks/huggingface/requirements.in +triton==2.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +typing-extensions==4.8.0 + # via + # -c 
.pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # reactivex + # torch +urllib3==1.26.17 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +varname==0.10.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +voir==0.2.10 + # via -r benchmarks/huggingface/requirements.in diff --git a/benchmarks/rwkv/requirements.cpu.txt b/benchmarks/rwkv/requirements.cpu.txt new file mode 100644 index 000000000..2b5a8181e --- /dev/null +++ b/benchmarks/rwkv/requirements.cpu.txt @@ -0,0 +1,240 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --config=pyproject.toml --output-file=benchmarks/rwkv/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-rwkv.txt benchmarks/rwkv/requirements.in +# +--extra-index-url https://download.pytorch.org/whl/cu118 + +aiohttp==3.8.6 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # fsspec +aiosignal==1.3.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp +annotated-types==0.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # pydantic +antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf +asttokens==2.4.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +async-timeout==4.0.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp +attrs==23.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp +certifi==2023.7.22 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +charset-normalizer==3.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp + # requests +codefind==0.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera +deepspeed==0.8.3 + # via -r benchmarks/rwkv/requirements.in +executing==1.2.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # varname +filelock==3.12.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch + # triton +frozenlist==1.4.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp + # aiosignal +fsspec[http]==2023.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # pytorch-lightning + # torch +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera + # voir +hjson==3.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # deepspeed +idna==3.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests + # yarl +jinja2==3.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +lightning-utilities==0.9.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # pytorch-lightning + # torchmetrics +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +markupsafe==2.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # jinja2 +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # markdown-it-py +mpmath==1.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # sympy +multidict==6.0.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp + # yarl +networkx==3.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +ninja==1.11.1.1 + # via + # -r benchmarks/rwkv/requirements.in + # deepspeed +numpy==1.26.1 + # via + # -r benchmarks/rwkv/requirements.in + # deepspeed + # pytorch-lightning + # torchmetrics +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +ovld==0.3.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +packaging==23.2 + # via + # -c 
.pin/../.pin/constraints-cpu-torch.txt + # deepspeed + # lightning-utilities + # pytorch-lightning + # torchmetrics +psutil==5.9.6 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # deepspeed +ptera==1.4.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +py-cpuinfo==9.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # deepspeed +pydantic==2.4.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # deepspeed +pydantic-core==2.10.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # pydantic +pygments==2.16.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +pynvml==11.5.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pytorch-lightning==1.9.5 + # via -r benchmarks/rwkv/requirements.in +pyyaml==6.0.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf + # pytorch-lightning +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +requests==2.31.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # fsspec +rich==13.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +six==1.16.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # asttokens +sympy==1.12 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +torch==2.1.0+cu118 + # via + # -r benchmarks/rwkv/requirements.in + # deepspeed + # pytorch-lightning + # torchmetrics +torchmetrics==1.0.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # pytorch-lightning +tqdm==4.66.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # deepspeed + # pytorch-lightning +triton==2.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +typing-extensions==4.8.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # lightning-utilities + # pydantic + # pydantic-core + # pytorch-lightning + # reactivex + # torch +urllib3==1.26.17 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +varname==0.10.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +voir==0.2.10 + # via -r benchmarks/rwkv/requirements.in +yarl==1.9.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # aiohttp diff --git a/benchmarks/stargan/requirements.cpu.txt b/benchmarks/stargan/requirements.cpu.txt new file mode 100644 index 000000000..e2bce7b04 --- /dev/null +++ b/benchmarks/stargan/requirements.cpu.txt @@ -0,0 +1,151 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --config=pyproject.toml --output-file=benchmarks/stargan/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-stargan.txt benchmarks/stargan/requirements.in +# +--extra-index-url https://download.pytorch.org/whl/cu118 + +antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf +asttokens==2.4.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +certifi==2023.7.22 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +charset-normalizer==3.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +codefind==0.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera +executing==1.2.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # varname +filelock==3.12.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch + # triton +fsspec==2023.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera + # voir +idna==3.4 + # via + # -c 
.pin/../.pin/constraints-cpu-torch.txt + # requests +jinja2==3.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +markupsafe==2.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # jinja2 +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # markdown-it-py +mpmath==1.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # sympy +networkx==3.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +numpy==1.26.1 + # via + # -r benchmarks/stargan/requirements.in + # torchvision +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +ovld==0.3.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pillow==10.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchvision +ptera==1.4.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pygments==2.16.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +pynvml==11.5.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pyyaml==6.0.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +requests==2.31.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchvision +rich==13.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +six==1.16.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # asttokens +sympy==1.12 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +torch==2.1.0+cu118 + # via + # -r benchmarks/stargan/requirements.in + # torchvision +torchvision==0.16.0+cu118 + # via -r benchmarks/stargan/requirements.in +triton==2.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +typing-extensions==4.8.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # reactivex + # torch +urllib3==1.26.17 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +varname==0.10.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +voir==0.2.10 + # via -r benchmarks/stargan/requirements.in diff --git a/benchmarks/super-slomo/requirements.cpu.txt b/benchmarks/super-slomo/requirements.cpu.txt new file mode 100644 index 000000000..c8d469b4b --- /dev/null +++ b/benchmarks/super-slomo/requirements.cpu.txt @@ -0,0 +1,156 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --config=pyproject.toml --output-file=benchmarks/super-slomo/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-super-slomo.txt benchmarks/super-slomo/requirements.in +# +--extra-index-url https://download.pytorch.org/whl/cu118 + +antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf +asttokens==2.4.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +certifi==2023.7.22 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +charset-normalizer==3.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +codefind==0.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera +executing==1.2.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # varname +filelock==3.12.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch + # triton +fsspec==2023.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera + # voir 
+idna==3.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +jinja2==3.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +markupsafe==2.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # jinja2 +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # markdown-it-py +mpmath==1.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # sympy +networkx==3.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +numpy==1.26.1 + # via + # -r benchmarks/super-slomo/requirements.in + # opencv-python + # torchvision +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +opencv-python==4.8.1.78 + # via -r benchmarks/super-slomo/requirements.in +ovld==0.3.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pillow==10.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchvision +ptera==1.4.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pygments==2.16.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +pynvml==11.5.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pyyaml==6.0.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +requests==2.31.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchvision +rich==13.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +six==1.16.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # asttokens +sympy==1.12 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +torch==2.1.0+cu118 + # via + # -r benchmarks/super-slomo/requirements.in + # torchvision +torchvision==0.16.0+cu118 + # via -r benchmarks/super-slomo/requirements.in +tqdm==4.66.1 + # via -r benchmarks/super-slomo/requirements.in +triton==2.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +typing-extensions==4.8.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # reactivex + # torch +urllib3==1.26.17 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +varname==0.10.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +voir==0.2.10 + # via -r benchmarks/super-slomo/requirements.in diff --git a/benchmarks/timm/requirements.cpu.txt b/benchmarks/timm/requirements.cpu.txt new file mode 100644 index 000000000..2c905249a --- /dev/null +++ b/benchmarks/timm/requirements.cpu.txt @@ -0,0 +1,168 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --config=pyproject.toml --output-file=benchmarks/timm/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-timm.txt benchmarks/timm/requirements.in +# +--extra-index-url https://download.pytorch.org/whl/cu118 + +antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf +asttokens==2.4.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +certifi==2023.7.22 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +charset-normalizer==3.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +codefind==0.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera +executing==1.2.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # varname +filelock==3.12.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # torch + # 
triton +fsspec==2023.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # torch +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera + # voir +huggingface-hub==0.17.3 + # via -r benchmarks/timm/requirements.in +idna==3.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +jinja2==3.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +markupsafe==2.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # jinja2 +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # markdown-it-py +mpmath==1.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # sympy +networkx==3.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +numpy==1.26.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchvision +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +ovld==0.3.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +packaging==23.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub +pillow==10.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchvision +ptera==1.4.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pygments==2.16.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +pynvml==11.5.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pyyaml==6.0.1 + # via + # -r benchmarks/timm/requirements.in + # huggingface-hub + # omegaconf +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +requests==2.31.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # torchvision +rich==13.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +safetensors==0.4.0 + # via -r benchmarks/timm/requirements.in +six==1.16.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # asttokens +sympy==1.12 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +torch==2.1.0+cu118 + # via + # -r benchmarks/timm/requirements.in + # torchvision +torchvision==0.16.0+cu118 + # via -r benchmarks/timm/requirements.in +tqdm==4.66.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub +triton==2.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +typing-extensions==4.8.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # huggingface-hub + # reactivex + # torch +urllib3==1.26.17 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +varname==0.10.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +voir==0.2.10 + # via -r benchmarks/timm/requirements.in diff --git a/benchmarks/torchvision/requirements.cpu.txt b/benchmarks/torchvision/requirements.cpu.txt new file mode 100644 index 000000000..88b82e687 --- /dev/null +++ b/benchmarks/torchvision/requirements.cpu.txt @@ -0,0 +1,153 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --config=pyproject.toml --output-file=benchmarks/torchvision/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-torchvision.txt benchmarks/torchvision/requirements.in +# +--extra-index-url https://download.pytorch.org/whl/cu118 + +antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf +asttokens==2.4.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +certifi==2023.7.22 + # via + # 
-c .pin/../.pin/constraints-cpu-torch.txt + # requests +charset-normalizer==3.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +codefind==0.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera +executing==1.2.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # varname +filelock==3.12.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch + # triton +fsspec==2023.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # ptera + # voir +idna==3.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +jinja2==3.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +markupsafe==2.1.3 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # jinja2 +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # markdown-it-py +mpmath==1.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # sympy +networkx==3.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +numpy==1.26.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchvision +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +ovld==0.3.2 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pillow==10.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchvision +ptera==1.4.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pygments==2.16.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # rich +pynvml==11.5.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +pyyaml==6.0.1 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # omegaconf +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +requests==2.31.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torchvision +rich==13.6.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # voir +six==1.16.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # asttokens +sympy==1.12 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +torch==2.1.0+cu118 + # via + # -r benchmarks/torchvision/requirements.in + # torchvision +torchvision==0.16.0+cu118 + # via -r benchmarks/torchvision/requirements.in +tqdm==4.66.1 + # via -r benchmarks/torchvision/requirements.in +triton==2.1.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # torch +typing-extensions==4.8.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # reactivex + # torch +urllib3==1.26.17 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # requests +varname==0.10.0 + # via + # -c .pin/../.pin/constraints-cpu-torch.txt + # giving +voir==0.2.10 + # via -r benchmarks/torchvision/requirements.in From df4bf03d8a76752fe0e70ef0a32b6f36bb101c27 Mon Sep 17 00:00:00 2001 From: Pierre Delaunay Date: Wed, 18 Oct 2023 10:53:44 -0400 Subject: [PATCH 04/36] Add script to launch milabench on slurm --- milabench/cli.py | 232 ++++--------------------------- milabench/schedule.py | 151 ++++++++++++++++++++ milabench/scripts/milabench.bash | 90 ++++++++++++ 3 files changed, 267 insertions(+), 206 deletions(-) create mode 100644 milabench/schedule.py create mode 100644 milabench/scripts/milabench.bash diff --git a/milabench/cli.py b/milabench/cli.py index d163114ff..673ad9257 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -33,6 +33,7 @@ from .report import make_report from .slurm import 
expand_node_list
 from .summary import aggregate, make_summary
+from .schedule import launch_milabench
 
 
 def main(argv=None):
@@ -263,14 +264,17 @@ def _read_reports(*runs):
                 pth = XPath(parent) / file
                 with pth.open() as f:
                     lines = f.readlines()
-                    try:
-                        data = [json.loads(line) for line in lines]
-                    except Exception:
-                        import traceback
-
-                        print(f"Could not parse line inside {pth}\n\t- {line}")
-                        traceback.print_exc()
-                    else:
+                    data = []
+
+                    for line in lines:
+                        try:
+                            data.append(json.loads(line))
+                        except Exception:
+                            import traceback
+                            print(f"Could not parse line inside {pth}\n\t- {line}")
+                            traceback.print_exc()
+
+                    if len(data) == 0:
                         all_data[str(pth)] = data
 
     return all_data
@@ -357,7 +357,7 @@ def run():
         report: Option & bool = True
 
         # Which type of dashboard to show (short, long, or no)
-        dash: Option & str = os.environ.get("MILABENCH_DASH", "long")
+        dash: Option & str = os.getenv("MILABENCH_DASH", "long")
 
         noterm: Option & bool = os.getenv("MILABENCH_NOTERM", "0") == "1"
 
@@ -734,201 +738,17 @@ def publish():
         backend = SQLAlchemy(uri, meta_override=meta)
         publish_archived_run(backend, folder)
 
-    def container():
-        """Build a container image (might not work properly at the moment)."""
-
-        # Configuration file
-        # [positional]
-        config_file: Option & str = None
-
-        config = _get_multipack(config, return_config=True)
-        config_file = XPath(config["defaults"]["config_file"])
-        config_base = XPath(config["defaults"]["config_base"])
-        benchmarks = config["benchmarks"]
-
-        # The container type to create
-        type: Option & str = None
-
-        # Include the dataset in the image
-        include_data: Option & bool = False
-
-        # Optional path to copy build dir to, instead of building the image.
-        # This directory must not exist and will be created.
-        output_dir: Option & str = None
-
-        # File in which to generate the SIF image (Singularity).
-        # Defaults to milabench.sif.
-        # [alias: -o]
-        output_file: Option & str = None
-
-        # Optional python version to use for the image, ignored for
-        # conda-based benchmarks. Can be specified as any of
-        # ('3', '3.9', '3.9.2')
-        python_version: Option & str = "3.9"
-
-        # Milabench source to clone from
-        milabench: Option & str = "v2"
-
-        # The tag for the generated container
-        tag: Option & str = None
-
-        if type not in ["docker", "singularity"]:
-            sys.exit(f"Unsupported type {type}")
-
-        with tempfile.TemporaryDirectory() as base:
-            root = XPath(base)
-
-            common_base = config_base
-
-            # Figure out common base between the benchmark config and all
-            # the benchmarks.
- for defn in benchmarks.values(): - pack = XPath(defn["definition"]).expanduser() - while not pack.is_relative_to(common_base): - common_base = common_base.parent - - def _transfer(pth): - dest = root / pth.relative_to(common_base) - shutil.copytree(pth, dest, dirs_exist_ok=True) - - for defn in benchmarks.values(): - _transfer(XPath(defn["definition"])) - - _transfer(config_base) - - # We check all configs since they may not have all the same setting - use_conda = any( - defn["venv"]["type"] == "conda" for defn in benchmarks.values() - ) - - if "//" not in milabench: - milabench = ( - f"git+https://github.com/mila-iqia/milabench.git@{milabench}" - ) - - if type == "docker": - if output_file is not None: - sys.exit("Error: --output-file only valid with Singularity") - tag = tag or "milabench" - with (root / "Dockerfile").open("w") as f: - f.write( - dockerfile_template( - milabench_req=milabench, - include_data=include_data, - use_conda=use_conda, - python_version=python_version, - config_file=config_file.relative_to(common_base), - ) - ) - if output_dir: - root.copy(output_dir) - else: - subprocess.check_call(["docker", "build", ".", "-t", tag], cwd=root) - - elif type == "singularity": - if tag is not None: - sys.exit("Error: --tag only valid with Docker") - output_file = output_file or "milabench.sif" - - with (root / "milabench.def").open("w") as f: - f.write( - singularitydef_template( - milabench_req=milabench, - include_data=include_data, - use_conda=use_conda, - python_version=python_version, - config_file=config_file.relative_to(common_base), - ) - ) - if output_dir: - root.copy(output_dir) - else: - user = os.environ["USER"] - filename = str(XPath(output_file).absolute()) - singularity = subprocess.check_output( - ["which", "singularity"] - ).strip() - subprocess.check_call( - ["sudo", singularity, "build", filename, "milabench.def"], - cwd=root, - ) - subprocess.check_call(["sudo", "chown", f"{user}:{user}", filename]) - - -def dockerfile_template( - milabench_req, include_data, use_conda, python_version, config_file -): - conda_clean = "conda clean -a" if use_conda else "echo" - return f""" -FROM { 'continuumio/miniconda3' if use_conda else f'python:{python_version}-slim' } - -RUN apt-get update && apt-get install --no-install-suggests --no-install-recommends -y \ - git \ - wget \ - patch \ - && apt-get clean - -RUN mkdir /bench && mkdir /base -ENV MILABENCH_BASE /base -# This is to signal to milabench to use that as fallback -ENV VIRTUAL_ENV /base/venv/_ -ENV MILABENCH_CONFIG /bench/{ config_file } -ENV HEADLESS 1 -WORKDIR /base - -RUN echo '{ milabench_req }' > /version.txt - -COPY / /bench - -RUN pip install -U pip && \ - pip install -r /version.txt && \ - milabench install && \ - { conda_clean } && \ - pip cache purge - -{ 'RUN milabench prepare' if include_data else '' } - -CMD ["milabench", "run"] -""" - - -def singularitydef_template( - milabench_req, include_data, use_conda, python_version, config_file -): - conda_clean = "conda clean -a" if use_conda else "echo" - return f"""\ -BootStrap: docker -From: { 'continuumio/miniconda3' if use_conda else f'python:{python_version}-slim' } - -%files - . 
/bench
-%environment
-    export MILABENCH_BASE=/base
-    export MILABENCH_CONFIG=/bench/{ config_file }
-    export HEADLESS=1
-%post
-    export MILABENCH_BASE=/base
-    export MILABENCH_CONFIG=/bench/{ config_file }
-    export HEADLESS=1
-
-    apt-get update && apt-get install --no-install-suggests --no-install-recommends -y git wget patch
-    apt-get clean
-
-    mkdir /base
-    cd /bench
-
-    echo '{ milabench_req }' > /version.txt
-    pip install -U pip && \
-    pip install -r /version.txt && \
-    milabench install && \
-    { conda_clean } && \
-    pip cache purge
-{ ' milabench prepare' if include_data else '' }
-
-    chmod -R o+rwx /base /bench
-
-%runscript
-    milabench run
-"""
+    def schedule():
+        """Launch a slurm job to run milabench"""
+
+        # tail -f on the slurm job
+        sync: Option & bool = False
+
+        # Print the command and return without running it
+        dry: Option & bool = False
+
+        launch_milabench(
+            None,
+            dry,
+            sync
+        )
\ No newline at end of file
diff --git a/milabench/schedule.py b/milabench/schedule.py
new file mode 100644
index 000000000..fcbf2d5d2
--- /dev/null
+++ b/milabench/schedule.py
@@ -0,0 +1,151 @@
+
+from dataclasses import dataclass
+import re
+import importlib_resources
+import subprocess
+
+
+def popen(cmd, callback=None):
+    def println(line):
+        print(line, end="")
+
+    if callback is None:
+        callback=println
+
+    with subprocess.Popen(
+        cmd,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+        text=True,
+        shell=False,
+    ) as process:
+        def readoutput():
+            process.stdout.flush()
+            line = process.stdout.readline()
+
+            if callback:
+                callback(line)
+
+        try:
+            while process.poll() is None:
+                readoutput()
+
+            readoutput()
+            return 0
+
+        except KeyboardInterrupt:
+            print("Stopping due to user interrupt")
+            process.kill()
+            return -1
+
+def sbatch(args, sync=False, tags=None, **kwargs):
+    jobid_regex = re.compile(r"Submitted batch job (?P<jobid>[0-9]*)")
+    jobid = None
+
+    def readline(line):
+        nonlocal jobid
+
+        if match := jobid_regex.match(line):
+            data = match.groupdict()
+            jobid = data["jobid"]
+
+        print(line, end="")
+
+    code = popen(['sbatch'] + args, readline)
+
+    if jobid is not None and sync:
+        try:
+            subprocess.run(["touch", f"slurm-{jobid}.out"])
+            subprocess.run(["tail", "-f", f"slurm-{jobid}.out"])
+        except KeyboardInterrupt:
+            pass
+
+    return code, jobid
+
+
+def shell(cmd):
+    return subprocess.check_output(
+        cmd.split(" "),
+        stderr=subprocess.STDOUT,
+        text=True
+    ).strip()
+
+
+class SlurmBatchOptions:
+    pass
+
+
+@dataclass
+class SetupOptions:
+    branch: str = "master"
+    origin: str = "https://github.com/mila-iqia/milabench.git"
+    config: str = "milabench/config/standard.yaml"
+    env: str = "./env"
+    python: str = "3.9"
+
+    def deduce_remote(self, branch):
+        prefix = "refs/heads/"
+
+        # Fetch all remotes
+        remotes = shell("get remote").splitlines()
+        possible_remotes = []
+
+        # Find remotes that have our branch
+        for remote in remotes:
+            branches = shell(f"git ls-remote --heads {remote}").splitlines()
+
+            for ref, name in branches.split('\t'):
+                name = name[len(prefix):]
+
+                if branch == name:
+                    possible_remotes.append(remote)
+
+        if len(possible_remotes) == 1:
+            return possible_remotes[0]
+
+        raise RuntimeError(f"Multiple suitable remotes found {possible_remotes}")
+
+    def deduce_from_repository(self, remote="origin"):
+        self.branch = shell("git rev-parse --abbrev-ref HEAD")
+
+        if remote is None:
+            remote = self.deduce_remote(self.branch)
+
+        self.origin = shell(f"git remote get-url {remote}")
+
+    def arguments(self):
+        return [
+            "-b", self.branch,
+            "-o", self.origin,
+            "-c", self.config,
+            "-e", self.env,
+            "-p", self.python,
+        ]
+
+
+def launch_milabench(sbatch_args=None, dry: bool = False, sync: bool = False):
+    sbatch_script = importlib_resources.files(__name__) / "scripts" / "milabench.bash"
+    sbatch_script = str(sbatch_script)
+
+    if sbatch_args is None:
+        sbatch_args = [
+            "--gpus-per-task=1",
+            "--cpus-per-task=4",
+            "--time=01:00:00",
+            "--ntasks-per-node=1",
+            "--mem=32G"
+        ]
+
+    script_args = SetupOptions()
+    script_args.deduce_from_repository()
+    script_args = script_args.arguments()
+
+    cmd = ["sbatch"] + sbatch_args + [sbatch_script] + script_args
+
+    if dry:
+        print(' '.join(cmd))
+        code = 0
+    else:
+        code, _ = sbatch(cmd, sync=sync, tags=None)
+
+    return code
\ No newline at end of file
diff --git a/milabench/scripts/milabench.bash b/milabench/scripts/milabench.bash
new file mode 100644
index 000000000..fbb3dfa3d
--- /dev/null
+++ b/milabench/scripts/milabench.bash
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+function usage() {
+    echo "Usage: $0 [-m] [-p]"
+    echo " -h Display this help message."
+    echo " -b BRANCH Branch to checkout (default: master)"
+    echo " -o ORIGIN Origin to use (default: github/mila/milabench)"
+    echo " -c CONFIG Configuration (default: milabench/config/standard.yaml)"
+    echo " -e ENV Environment (default: ./env)"
+    echo " -p PYTHON Python version (default: 3.9)"
+    echo " ARGUMENT Any additional argument you want to process."
+    exit 1
+}
+
+PYTHON="3.9"
+BRANCH="master"
+ORIGIN="https://github.com/mila-iqia/milabench.git"
+CONFIG="$SLURM_TMPDIR/milabench/config/standard.yaml"
+BASE="$SLURM_TMPDIR/base"
+ENV="./env"
+REMAINING_ARGS=""
+
+while getopts ":hm:p:e:b:o:c:" opt; do
+    case $opt in
+        h)
+            usage
+            ;;
+        p)
+            PYTHON="$OPTARG"
+            ;;
+        b)
+            BRANCH="$OPTARG"
+            ;;
+        o)
+            ORIGIN="$OPTARG"
+            ;;
+        c)
+            CONFIG="$OPTARG"
+            ;;
+        e)
+            ENV="$OPTARG"
+            ;;
+        :)
+            echo "Option -$OPTARG requires an argument." >&2
+            usage
+            ;;
+    esac
+done
+
+shift "$((OPTIND-1))"
+REMAINING_ARGS="$@"
+
+echo " PYTHON: $PYTHON"
+echo " branch: $BRANCH"
+echo " origin: $ORIGIN"
+echo " config: $CONFIG"
+echo " env: $ENV"
+echo " args: $REMAINING_ARGS"
+#
+# Fix problem with conda saying it is not "init properly"
+#
+CONDA_EXEC="$(which conda)"
+CONDA_BASE=$(dirname $CONDA_EXEC)
+source $CONDA_BASE/../etc/profile.d/conda.sh
+
+#
+# Create a new environment
+#
+if [ ! -d "$ENV" ] && [ "$ENV" != "base" ] && [ ! -d "$CONDA_ENVS/$ENV" ]; then
+    conda create --prefix $ENV python=$PYTHON -y
+fi
+conda activate $ENV
+
+#
+# Fetch the repo
+#
+cd $SLURM_TMPDIR
+git clone --single-branch -d 1 -b $BRANCH $ORIGIN
+python -m pip install milabench
+
+SYSTEM="$SLURM_TMPDIR/system.yaml"
+
+milabench slurm_system
+milabench slurm_system > $SYSTE
+
+milabench install --config $CONFIG --system $SYSTEM --base $BASE $REMAINING_ARGS
+milabench prepare --config $CONFIG --system $SYSTEM --base $BASE $REMAINING_ARGS
+milabench run --config $CONFIG --system $SYSTEM --base $BASE $REMAINING_ARGS
+
+milabench summary $SLURM_TMPDIR/base/runs/

From 0a10c92558b38ea54636cd41b11e123169296b19 Mon Sep 17 00:00:00 2001
From: "pierre.delaunay"
Date: Wed, 18 Oct 2023 11:24:40 -0400
Subject: [PATCH 05/36] Add importlib_resources dependency

---
 milabench/_version.py            |  6 +++---
 milabench/schedule.py            | 18 ++++++++++--------
 milabench/scripts/milabench.bash |  6 +++---
 pyproject.toml                   |  3 ++-
 4 files changed, 18 insertions(+), 15 deletions(-)
 mode change 100644 => 100755 milabench/scripts/milabench.bash

diff --git a/milabench/_version.py b/milabench/_version.py
index 1d65f2645..5a1f304fd 100644
--- a/milabench/_version.py
+++ b/milabench/_version.py
@@ -1,5 +1,5 @@
 """This file is generated, do not modify"""
 
-__tag__ = "v0.0.6-23-ga210276"
-__commit__ = "a2102768d7a3a8ce81d9599daa9b40ca412e7121"
-__date__ = "2023-07-26 12:25:40 -0400"
+__tag__ = "v0.0.6-27-gdf4bf03"
+__commit__ = "df4bf03d8a76752fe0e70ef0a32b6f36bb101c27"
+__date__ = "2023-10-18 10:53:44 -0400"
diff --git a/milabench/schedule.py b/milabench/schedule.py
index fcbf2d5d2..3c887ef0f 100644
--- a/milabench/schedule.py
+++ b/milabench/schedule.py
@@ -83,21 +83,22 @@ class SetupOptions:
     env: str = "./env"
     python: str = "3.9"
 
-    def deduce_remote(self, branch):
+    def deduce_remote(self, current_branch):
         prefix = "refs/heads/"
 
         # Fetch all remotes
-        remotes = shell("get remote").splitlines()
+        remotes = shell("git remote").splitlines()
         possible_remotes = []
 
         # Find remotes that have our branch
         for remote in remotes:
             branches = shell(f"git ls-remote --heads {remote}").splitlines()
 
-            for ref, name in branches.split('\t'):
+            for branch in branches:
+                _, name = branch.split('\t')
                 name = name[len(prefix):]
-
-                if branch == name:
+
+                if current_branch == name:
                     possible_remotes.append(remote)
 
         if len(possible_remotes) == 1:
@@ -105,7 +106,7 @@ def deduce_remote(self, branch):
 
         raise RuntimeError(f"Multiple suitable remotes found {possible_remotes}")
 
-    def deduce_from_repository(self, remote="origin"):
+    def deduce_from_repository(self, remote=None):
         self.branch = shell("git rev-parse --abbrev-ref HEAD")
 
         if remote is None:
@@ -129,6 +130,7 @@ def launch_milabench(sbatch_args=None, dry: bool = False, sync: bool = False):
 
     if sbatch_args is None:
         sbatch_args = [
+            "--ntasks=1",
            "--gpus-per-task=1",
            "--cpus-per-task=4",
            "--time=01:00:00",
@@ -140,10 +142,10 @@ def launch_milabench(sbatch_args=None, dry: bool = False, sync: bool = False):
     script_args.deduce_from_repository()
     script_args = script_args.arguments()
 
-    cmd = ["sbatch"] + sbatch_args + [sbatch_script] + script_args
+    cmd = sbatch_args + [sbatch_script] + script_args
 
     if dry:
-        print(' '.join(cmd))
+        print("sbatch " + ' '.join(cmd))
         code = 0
     else:
         code, _ = sbatch(cmd, sync=sync, tags=None)
diff --git a/milabench/scripts/milabench.bash b/milabench/scripts/milabench.bash
old mode 100644
new mode 100755
index fbb3dfa3d..9032ff832
--- a/milabench/scripts/milabench.bash
+++ b/milabench/scripts/milabench.bash
@@ -75,13 +75,13 @@ conda activate
$ENV # Fetch the repo # cd $SLURM_TMPDIR -git clone --single-branch -d 1 -b $BRANCH $ORIGIN -python -m pip install milabench +git clone --single-branch --depth 1 -b $BRANCH $ORIGIN +python -m pip install ./milabench SYSTEM="$SLURM_TMPDIR/system.yaml" milabench slurm_system -milabench slurm_system > $SYSTE +milabench slurm_system > $SYSTEM milabench install --config $CONFIG --system $SYSTEM --base $BASE $REMAINING_ARGS milabench prepare --config $CONFIG --system $SYSTEM --base $BASE $REMAINING_ARGS diff --git a/pyproject.toml b/pyproject.toml index 8dca0260f..4d66117b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ authors = ["Olivier Breuleux "] license = "MIT" [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.8,<4.0" giving = "^0.4.0" ptera = "^1.2.0" coleo = "^0.3.0" @@ -32,6 +32,7 @@ pymongo = "^4.3.3" psycopg2-binary = {version = "^2.9.6", optional = true} py-cpuinfo = "^9.0.0" psutil = "^5.9.5" +importlib-resources = "^6.1.0" [tool.poetry.dev-dependencies] From 23a7f50d95a86dfbb90d821dde29716e78d98b70 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Wed, 18 Oct 2023 13:48:09 -0400 Subject: [PATCH 06/36] - --- milabench/executors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/milabench/executors.py b/milabench/executors.py index 5e5f9a3d8..eae0107dc 100644 --- a/milabench/executors.py +++ b/milabench/executors.py @@ -425,7 +425,7 @@ def _find_node_config(self) -> Dict: def is_local(self): localnode = self.pack.config["system"]["self"] - + # self is none; the node we are currently # on is not part of the system; we are running # milabench remotely, sending remote commands to From f57eceeddd6ecc777ebbba9d109bce5ab59e9989 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Wed, 18 Oct 2023 14:19:11 -0400 Subject: [PATCH 07/36] - --- milabench/executors.py | 2 +- milabench/schedule.py | 2 +- milabench/scripts/milabench.bash | 29 +++++++++++++++++++++++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) diff --git a/milabench/executors.py b/milabench/executors.py index eae0107dc..41d308919 100644 --- a/milabench/executors.py +++ b/milabench/executors.py @@ -660,7 +660,7 @@ def _argv(self, **_) -> List: ngpu = len(get_gpu_info()["gpus"].values()) nproc = ngpu * num_machines - assert nproc > 0 + assert nproc > 0, f"nproc: {nproc} num_machines: {num_machines} ngpu: {ngpu}" deepspeed_argv = ( [ diff --git a/milabench/schedule.py b/milabench/schedule.py index 3c887ef0f..7a494469c 100644 --- a/milabench/schedule.py +++ b/milabench/schedule.py @@ -131,7 +131,7 @@ def launch_milabench(sbatch_args=None, dry: bool = False, sync: bool = False): if sbatch_args is None: sbatch_args = [ "--ntasks=1", - "--gpus-per-task=1", + "--gpus-per-task=rtx8000:1", "--cpus-per-task=4", "--time=01:00:00", "--ntasks-per-node=1", diff --git a/milabench/scripts/milabench.bash b/milabench/scripts/milabench.bash index 9032ff832..54abc46c7 100755 --- a/milabench/scripts/milabench.bash +++ b/milabench/scripts/milabench.bash @@ -71,6 +71,11 @@ if [ ! -d "$ENV" ] && [ "$ENV" != "base" ] && [ ! 
-d "$CONDA_ENVS/$ENV" ]; then fi conda activate $ENV +export HF_HOME=$BASE/cache +export HF_DATASETS_CACHE=$BASE/cache +export TORCH_HOME=$BASE/cache +export XDG_CACHE_HOME=$BASE/cache + # # Fetch the repo # @@ -80,11 +85,35 @@ python -m pip install ./milabench SYSTEM="$SLURM_TMPDIR/system.yaml" +echo "" +echo "System" +echo "------" + milabench slurm_system milabench slurm_system > $SYSTEM +module load cuda/11.8 + +echo "" +echo "Install" +echo "-------" milabench install --config $CONFIG --system $SYSTEM --base $BASE $REMAINING_ARGS + +echo "" +echo "Prepare" +echo "-------" milabench prepare --config $CONFIG --system $SYSTEM --base $BASE $REMAINING_ARGS + +echo "" +echo "Run" +echo "---" milabench run --config $CONFIG --system $SYSTEM --base $BASE $REMAINING_ARGS +echo "" +echo "Report" +echo "------" milabench summary $SLURM_TMPDIR/base/runs/ + +echo "----" +echo "Done" +echo "" From ff08fae4e9715a4a425d86ba9b994118d475254d Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 13:32:25 -0400 Subject: [PATCH 08/36] Tweaks --- benchmarks/rwkv/requirements.in | 3 ++- milabench/cli.py | 2 +- milabench/config.py | 3 +++ milabench/schedule.py | 42 +++++++++++++++++++++++++++++--- milabench/scripts/milabench.bash | 15 +++++++++++- 5 files changed, 59 insertions(+), 6 deletions(-) diff --git a/benchmarks/rwkv/requirements.in b/benchmarks/rwkv/requirements.in index 79d763f72..29a8180d0 100644 --- a/benchmarks/rwkv/requirements.in +++ b/benchmarks/rwkv/requirements.in @@ -3,4 +3,5 @@ torch deepspeed pytorch-lightning<2.0 ninja -voir>=0.2.9,<0.3 +voir>=0.2.10,<0.3 +pydantic<2 diff --git a/milabench/cli.py b/milabench/cli.py index 673ad9257..4c2825357 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -357,7 +357,7 @@ def run(): report: Option & bool = True # Which type of dashboard to show (short, long, or no) - dash: Option & str = os.environ.get("MILABENCH_DASH", "long") + dash: Option & str = os.getenv("MILABENCH_DASH", "long") noterm: Option & bool = os.getenv("MILABENCH_NOTERM", "0") == "1" diff --git a/milabench/config.py b/milabench/config.py index da29da294..bdb9e1de7 100644 --- a/milabench/config.py +++ b/milabench/config.py @@ -137,6 +137,9 @@ def resolve_addresses(nodes): or (hostname in ("localhost", socket.gethostname())) or len(ip_list.intersection(ipaddrlist)) > 0 ) + print() + print("HERE", hostname, socket.gethostname()) + print() node["local"] = is_local if is_local: diff --git a/milabench/schedule.py b/milabench/schedule.py index 7a494469c..78f1fb945 100644 --- a/milabench/schedule.py +++ b/milabench/schedule.py @@ -3,6 +3,7 @@ import re import importlib_resources import subprocess +import requests def popen(cmd, callback=None): @@ -133,9 +134,9 @@ def launch_milabench(sbatch_args=None, dry: bool = False, sync: bool = False): "--ntasks=1", "--gpus-per-task=rtx8000:1", "--cpus-per-task=4", - "--time=01:00:00", + "--time=03:00:00", "--ntasks-per-node=1", - "--mem=32G" + "--mem=64G" ] script_args = SetupOptions() @@ -150,4 +151,39 @@ def launch_milabench(sbatch_args=None, dry: bool = False, sync: bool = False): else: code, _ = sbatch(cmd, sync=sync, tags=None) - return code \ No newline at end of file + return code + + +def post_comment_on_pr(owner, branch, comment, access_token=None): + url = "https://api.github.com/repos/mila-iqia/milabench/pulls" + + response = requests.get(url, params={"head": f"{owner}:{branch}"}) + + if response.status_code != 200: + raise RuntimeError(response) + + pull_requests = response.json() + + if not pull_requests: + raise 
RuntimeError("No matching pull requests found.") + + pr = pull_requests[0] + post_url = pr["_links"]["review_comments"]["href"] + + data = { + "body": comment, + } + + headers = { + "Authorization": f"Bearer {access_token}", + "Accept": "application/vnd.github.v3+json" + } + + response = requests.post( + post_url, + json=data, + headers=headers + ) + + if response.status_code != 201: + raise RuntimeError(response) diff --git a/milabench/scripts/milabench.bash b/milabench/scripts/milabench.bash index 54abc46c7..67760334d 100755 --- a/milabench/scripts/milabench.bash +++ b/milabench/scripts/milabench.bash @@ -3,6 +3,7 @@ function usage() { echo "Usage: $0 [-m] [-p]" echo " -h Display this help message." + echo " -b arch GPU arch (default: cuda)" echo " -b BRANCH Branch to checkout (default: master)" echo " -o ORIGIN Origin to use (default: github/mila/milabench)" echo " -c CONFIG Configuration (default: milabench/config/standard.yaml)" @@ -12,6 +13,7 @@ function usage() { exit 1 } +ARCH="cuda" PYTHON="3.9" BRANCH="master" ORIGIN="https://github.com/mila-iqia/milabench.git" @@ -40,6 +42,9 @@ while getopts ":hm:p:e:b:o:c:" opt; do e) ENV="$OPTARG" ;; + a) + ARCH="$OPTARG" + ;; :) echo "Option -$OPTARG requires an argument." >&2 usage @@ -75,6 +80,11 @@ export HF_HOME=$BASE/cache export HF_DATASETS_CACHE=$BASE/cache export TORCH_HOME=$BASE/cache export XDG_CACHE_HOME=$BASE/cache +export MILABENCH_GPU_ARCH=$ARCH + +export MILABENCH_DASH=no +export MILABENCH_NOTERM=1 +export PYTHONUNBUFFERED=1 # # Fetch the repo @@ -84,6 +94,7 @@ git clone --single-branch --depth 1 -b $BRANCH $ORIGIN python -m pip install ./milabench SYSTEM="$SLURM_TMPDIR/system.yaml" +unset CUDA_VISIBLE_DEVICES echo "" echo "System" @@ -92,6 +103,8 @@ echo "------" milabench slurm_system milabench slurm_system > $SYSTEM + +nvidia-smi module load cuda/11.8 echo "" @@ -109,7 +122,7 @@ echo "Run" echo "---" milabench run --config $CONFIG --system $SYSTEM --base $BASE $REMAINING_ARGS -echo "" +# echo "" echo "Report" echo "------" milabench summary $SLURM_TMPDIR/base/runs/ From 86ceec1cf8861a54a48e13f7b1b39dd01944ab23 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 14:41:59 -0400 Subject: [PATCH 09/36] Twweaks --- milabench/cli.py | 5 +++-- milabench/config.py | 42 ++++++++++++++++++++++++++---------------- 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/milabench/cli.py b/milabench/cli.py index 4c2825357..b3ef50f89 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -383,8 +383,9 @@ def run(): # Terminal Formatter slows down the dashboard, # if lots of info needs to be printed # in particular rwkv - TerminalFormatter() if not noterm else None, - dash_class and dash_class(), + # TerminalFormatter() if not noterm else None, + # dash_class and dash_class(), + TerminalFormatter(), TextReporter("stdout"), TextReporter("stderr"), DataReporter(), diff --git a/milabench/config.py b/milabench/config.py index bdb9e1de7..4ea90a7f7 100644 --- a/milabench/config.py +++ b/milabench/config.py @@ -101,6 +101,27 @@ def get_remote_ip(): return set(result) +def _resolve_ip(ip) + + # Resolve the IP + try: + hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) + lazy_raise = None + except socket.gaierror as err: + # Get Addr Info (GAI) Error + # + # When we are connecting to a node through a ssh proxy jump + # the node IPs/Hostnames are not available until we reach + # the first node inside the cluster + # + hostname = i[] + aliaslist = [] + ipaddrlist = [] + lazy_raise = err + + return hostname, 
aliaslist, ipaddrlist, lazy_raise + + def resolve_addresses(nodes): # Note: it is possible for self to be none # if we are running milabench on a node that is not part of the system @@ -111,26 +132,15 @@ def resolve_addresses(nodes): ip_list = get_remote_ip() for node in nodes: - # Resolve the IP - try: - hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(node["ip"]) - - except socket.gaierror as err: - # Get Addr Info (GAI) Error - # - # When we are connecting to a node through a ssh proxy jump - # the node IPs/Hostnames are not available until we reach - # the first node inside the cluster - # - hostname = node["ip"] - aliaslist = [] - ipaddrlist = [] - - lazy_raise = err + hostname, aliaslist, ipaddrlist, lazy_raise = _resolve_ip(node["ip"]) node["hostname"] = hostname node["aliaslist"] = aliaslist node["ipaddrlist"] = ipaddrlist + + if hostname.endswith(".server.mila.quebec.server.mila.quebec"): + # why is this happening + hostname = hostname[:-len(".server.mila.quebec")] is_local = ( ("127.0.0.1" in ipaddrlist) From a049f08f42748a7bfbd47db9c500099ef15ad8df Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 14:44:56 -0400 Subject: [PATCH 10/36] Twweaks --- milabench/config.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/milabench/config.py b/milabench/config.py index 4ea90a7f7..d16b6f5ed 100644 --- a/milabench/config.py +++ b/milabench/config.py @@ -101,8 +101,7 @@ def get_remote_ip(): return set(result) -def _resolve_ip(ip) - +def _resolve_ip(ip): # Resolve the IP try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) From 6e76e648e4395cee1659500aebe859cab2530127 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 14:45:16 -0400 Subject: [PATCH 11/36] Twweaks --- milabench/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/milabench/config.py b/milabench/config.py index d16b6f5ed..665dd88fa 100644 --- a/milabench/config.py +++ b/milabench/config.py @@ -113,7 +113,7 @@ def _resolve_ip(ip): # the node IPs/Hostnames are not available until we reach # the first node inside the cluster # - hostname = i[] + hostname = ip aliaslist = [] ipaddrlist = [] lazy_raise = err From d24fe8cd2a1d78b3124b1f07d635096b0e9bc516 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 15:58:06 -0400 Subject: [PATCH 12/36] - --- .gitignore | 4 ++ .pin/constraints-cuda-torch.txt | 37 ++++++++----------- .../accelerate_opt/requirements.cuda.txt | 19 +++------- benchmarks/dlrm/requirements.cuda.txt | 13 +++---- benchmarks/huggingface/requirements.cuda.txt | 8 ++-- benchmarks/rwkv/requirements.cuda.txt | 19 +++------- benchmarks/stargan/requirements.cuda.txt | 6 +-- benchmarks/super-slomo/requirements.cuda.txt | 6 +-- benchmarks/timm/requirements.cuda.txt | 6 +-- benchmarks/torchvision/requirements.cuda.txt | 6 +-- milabench/config.py | 8 ++-- milabench/scripts/milabench.bash | 1 + 12 files changed, 58 insertions(+), 75 deletions(-) diff --git a/.gitignore b/.gitignore index 6ff12f7c5..8e6de4a30 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,7 @@ sqlite.db .pin-constraints-* .mongo/ .pin/tmp-* + + +.no_report +trash/ diff --git a/.pin/constraints-cuda-torch.txt b/.pin/constraints-cuda-torch.txt index 6e40fb118..85a8c0f5d 100644 --- a/.pin/constraints-cuda-torch.txt +++ b/.pin/constraints-cuda-torch.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile 
--config=pyproject.toml --output-file=.pin/constraints-cuda-torch.txt --resolver=backtracking .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/dlrm/requirements.in benchmarks/huggingface/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in @@ -16,8 +16,6 @@ aiohttp==3.8.6 # fsspec aiosignal==1.3.1 # via aiohttp -annotated-types==0.6.0 - # via pydantic antlr4-python3-runtime==4.9.3 # via omegaconf asttokens==2.4.0 @@ -88,7 +86,7 @@ google-auth==2.23.3 # via # google-auth-oauthlib # tensorboard -google-auth-oauthlib==1.0.0 +google-auth-oauthlib==1.1.0 # via tensorboard graphviz==0.20.1 # via torchviz @@ -109,9 +107,7 @@ idna==3.4 # requests # yarl importlib-metadata==6.8.0 - # via - # markdown - # torchx + # via torchx jinja2==3.1.2 # via torch joblib==1.3.2 @@ -142,7 +138,7 @@ multiprocess==0.70.15 # evaluate mypy-extensions==1.0.0 # via typing-inspect -networkx==3.1 +networkx==3.2 # via torch ninja==1.11.1.1 # via @@ -150,7 +146,7 @@ ninja==1.11.1.1 # deepspeed numpy==1.26.1 # via - # -r benchmarks/stargan/requirements.in + # -r benchmarks/rwkv/requirements.in # -r benchmarks/super-slomo/requirements.in # accelerate # datasets @@ -195,7 +191,7 @@ pandas==2.1.1 # evaluate pillow==10.1.0 # via torchvision -protobuf==4.24.4 +protobuf==4.23.4 # via # onnx # tensorboard @@ -215,10 +211,10 @@ pyasn1==0.5.0 # rsa pyasn1-modules==0.3.0 # via google-auth -pydantic==2.4.2 - # via deepspeed -pydantic-core==2.10.1 - # via pydantic +pydantic==1.10.13 + # via + # -r benchmarks/rwkv/requirements.in + # deepspeed pydot==1.4.2 # via -r benchmarks/dlrm/requirements.in pygments==2.16.1 @@ -288,7 +284,7 @@ sympy==1.12 # via torch tabulate==0.9.0 # via torchx -tensorboard==2.14.1 +tensorboard==2.15.0 # via -r benchmarks/dlrm/requirements.in tensorboard-data-server==0.7.1 # via tensorboard @@ -298,8 +294,8 @@ tokenizers==0.14.1 # via transformers torch==2.1.0+cu118 # via + # -r benchmarks/rwkv/requirements.in # -r benchmarks/super-slomo/requirements.in - # -r benchmarks/torchvision/requirements.in # accelerate # deepspeed # pytorch-lightning @@ -317,8 +313,8 @@ torchrec==0.5.0+cu118 # via -r benchmarks/dlrm/requirements.in torchvision==0.16.0+cu118 # via + # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/super-slomo/requirements.in - # -r benchmarks/torchvision/requirements.in torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in torchx==0.5.0 @@ -334,7 +330,7 @@ tqdm==4.66.1 # pytorch-lightning # torchrec # transformers -transformers==4.34.0 +transformers==4.34.1 # via # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/huggingface/requirements.in @@ -346,7 +342,6 @@ typing-extensions==4.8.0 # lightning-utilities # onnx # pydantic - # pydantic-core # pyre-extensions # pytorch-lightning # reactivex @@ -356,7 +351,7 @@ typing-inspect==0.9.0 # via pyre-extensions tzdata==2023.3 # via pandas -urllib3==1.26.17 +urllib3==1.26.18 # via # docker # requests @@ -366,8 +361,8 @@ varname==0.10.0 # via giving voir==0.2.10 # via + # -r benchmarks/rwkv/requirements.in # -r benchmarks/super-slomo/requirements.in - # -r benchmarks/torchvision/requirements.in websocket-client==1.6.4 # via docker werkzeug==3.0.0 diff --git a/benchmarks/accelerate_opt/requirements.cuda.txt b/benchmarks/accelerate_opt/requirements.cuda.txt index 3eb4a0dba..7864b19ed 100644 --- a/benchmarks/accelerate_opt/requirements.cuda.txt +++ 
b/benchmarks/accelerate_opt/requirements.cuda.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/accelerate_opt/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-opt.txt benchmarks/accelerate_opt/requirements.in @@ -17,10 +17,6 @@ aiosignal==1.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -annotated-types==0.6.0 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # pydantic antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -139,7 +135,7 @@ multiprocess==0.70.15 # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # evaluate -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -201,14 +197,10 @@ pyarrow==13.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets -pydantic==2.4.2 +pydantic==1.10.13 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -pydantic-core==2.10.1 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # pydantic pygments==2.16.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -295,7 +287,7 @@ tqdm==4.66.1 # evaluate # huggingface-hub # transformers -transformers==4.34.0 +transformers==4.34.1 # via -r benchmarks/accelerate_opt/requirements.in triton==2.1.0 # via @@ -306,14 +298,13 @@ typing-extensions==4.8.0 # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # pydantic - # pydantic-core # reactivex # torch tzdata==2023.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests diff --git a/benchmarks/dlrm/requirements.cuda.txt b/benchmarks/dlrm/requirements.cuda.txt index 67c8619f1..c0a902576 100644 --- a/benchmarks/dlrm/requirements.cuda.txt +++ b/benchmarks/dlrm/requirements.cuda.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/dlrm/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-dlrm.txt benchmarks/dlrm/requirements.in @@ -73,7 +73,7 @@ google-auth==2.23.3 # -c .pin/../.pin/constraints-cuda-torch.txt # google-auth-oauthlib # tensorboard -google-auth-oauthlib==1.0.0 +google-auth-oauthlib==1.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard @@ -92,7 +92,6 @@ idna==3.4 importlib-metadata==6.8.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # markdown # torchx jinja2==3.1.2 # via @@ -131,7 +130,7 @@ mypy-extensions==1.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # typing-inspect -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -163,7 +162,7 @@ packaging==23.2 # docker # lightning-utilities # torchmetrics -protobuf==4.24.4 +protobuf==4.23.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # onnx @@ -245,7 +244,7 @@ tabulate==0.9.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchx -tensorboard==2.14.1 +tensorboard==2.15.0 # via -r benchmarks/dlrm/requirements.in tensorboard-data-server==0.7.1 # via @@ -291,7 +290,7 @@ typing-inspect==0.9.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pyre-extensions -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-cuda-torch.txt # docker diff --git 
a/benchmarks/huggingface/requirements.cuda.txt b/benchmarks/huggingface/requirements.cuda.txt index 8d9e91fa5..f505bd715 100644 --- a/benchmarks/huggingface/requirements.cuda.txt +++ b/benchmarks/huggingface/requirements.cuda.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/huggingface/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-hf.txt benchmarks/huggingface/requirements.in @@ -76,7 +76,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # sympy -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -155,7 +155,7 @@ tqdm==4.66.1 # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # transformers -transformers==4.34.0 +transformers==4.34.1 # via -r benchmarks/huggingface/requirements.in triton==2.1.0 # via @@ -167,7 +167,7 @@ typing-extensions==4.8.0 # huggingface-hub # reactivex # torch -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests diff --git a/benchmarks/rwkv/requirements.cuda.txt b/benchmarks/rwkv/requirements.cuda.txt index 4a52cb3c3..65f52f7e6 100644 --- a/benchmarks/rwkv/requirements.cuda.txt +++ b/benchmarks/rwkv/requirements.cuda.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/rwkv/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-rwkv.txt benchmarks/rwkv/requirements.in @@ -14,10 +14,6 @@ aiosignal==1.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -annotated-types==0.6.0 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # pydantic antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -112,7 +108,7 @@ multidict==6.0.4 # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # yarl -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -153,14 +149,10 @@ py-cpuinfo==9.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -pydantic==2.4.2 +pydantic==1.10.13 # via - # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/rwkv/requirements.in # deepspeed -pydantic-core==2.10.1 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # pydantic pygments==2.16.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -220,11 +212,10 @@ typing-extensions==4.8.0 # -c .pin/../.pin/constraints-cuda-torch.txt # lightning-utilities # pydantic - # pydantic-core # pytorch-lightning # reactivex # torch -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests diff --git a/benchmarks/stargan/requirements.cuda.txt b/benchmarks/stargan/requirements.cuda.txt index a84334b2e..79f4f404d 100644 --- a/benchmarks/stargan/requirements.cuda.txt +++ b/benchmarks/stargan/requirements.cuda.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/stargan/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-stargan.txt benchmarks/stargan/requirements.in @@ -68,7 +68,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # 
sympy -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -139,7 +139,7 @@ typing-extensions==4.8.0 # -c .pin/../.pin/constraints-cuda-torch.txt # reactivex # torch -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests diff --git a/benchmarks/super-slomo/requirements.cuda.txt b/benchmarks/super-slomo/requirements.cuda.txt index 4b3196fec..821fdc9ae 100644 --- a/benchmarks/super-slomo/requirements.cuda.txt +++ b/benchmarks/super-slomo/requirements.cuda.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/super-slomo/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-super-slomo.txt benchmarks/super-slomo/requirements.in @@ -68,7 +68,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # sympy -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -144,7 +144,7 @@ typing-extensions==4.8.0 # -c .pin/../.pin/constraints-cuda-torch.txt # reactivex # torch -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests diff --git a/benchmarks/timm/requirements.cuda.txt b/benchmarks/timm/requirements.cuda.txt index 577888870..a81ad461c 100644 --- a/benchmarks/timm/requirements.cuda.txt +++ b/benchmarks/timm/requirements.cuda.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/timm/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-timm.txt benchmarks/timm/requirements.in @@ -72,7 +72,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # sympy -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -156,7 +156,7 @@ typing-extensions==4.8.0 # huggingface-hub # reactivex # torch -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests diff --git a/benchmarks/torchvision/requirements.cuda.txt b/benchmarks/torchvision/requirements.cuda.txt index ea1d5e2c8..bb704fa25 100644 --- a/benchmarks/torchvision/requirements.cuda.txt +++ b/benchmarks/torchvision/requirements.cuda.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/torchvision/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-torchvision.txt benchmarks/torchvision/requirements.in @@ -68,7 +68,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # sympy -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -141,7 +141,7 @@ typing-extensions==4.8.0 # -c .pin/../.pin/constraints-cuda-torch.txt # reactivex # torch -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests diff --git a/milabench/config.py b/milabench/config.py index 665dd88fa..88053ca20 100644 --- a/milabench/config.py +++ b/milabench/config.py @@ -138,6 +138,11 @@ def resolve_addresses(nodes): node["ipaddrlist"] = ipaddrlist if hostname.endswith(".server.mila.quebec.server.mila.quebec"): + print() + print("Hostname was 
extra long for no reason") + print(hostname, socket.gethostname()) + print() + # why is this happening hostname = hostname[:-len(".server.mila.quebec")] @@ -146,9 +151,6 @@ def resolve_addresses(nodes): or (hostname in ("localhost", socket.gethostname())) or len(ip_list.intersection(ipaddrlist)) > 0 ) - print() - print("HERE", hostname, socket.gethostname()) - print() node["local"] = is_local if is_local: diff --git a/milabench/scripts/milabench.bash b/milabench/scripts/milabench.bash index 67760334d..401ab5b6a 100755 --- a/milabench/scripts/milabench.bash +++ b/milabench/scripts/milabench.bash @@ -112,6 +112,7 @@ echo "Install" echo "-------" milabench install --config $CONFIG --system $SYSTEM --base $BASE $REMAINING_ARGS + echo "" echo "Prepare" echo "-------" From d5f34c1d6d772bc5efbca9ff59ef597f72aaa665 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 17:37:20 -0400 Subject: [PATCH 13/36] update voir --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 4d66117b3..645863a1d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ hrepr = "^0.4.0" blessed = "^1.19.1" pathspec = "^0.9.0" cp-template = "^0.3.0" -voir = "^0.2.10" +voir = {git = "https://github.com/breuleux/voir", ref = "master"} pandas = "^1.4.2" numpy = ">=1.23.0" pynvml = "^11.4.1" From efc5810f4e91f38722043677dba6c2b43ebd78f9 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 17:38:52 -0400 Subject: [PATCH 14/36] update voir --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 645863a1d..f349e2dea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,6 +6,7 @@ authors = ["Olivier Breuleux "] license = "MIT" [tool.poetry.dependencies] +voir = {git = "https://github.com/breuleux/voir", branch = "master"} python = ">=3.8,<4.0" giving = "^0.4.0" ptera = "^1.2.0" @@ -19,7 +20,6 @@ hrepr = "^0.4.0" blessed = "^1.19.1" pathspec = "^0.9.0" cp-template = "^0.3.0" -voir = {git = "https://github.com/breuleux/voir", ref = "master"} pandas = "^1.4.2" numpy = ">=1.23.0" pynvml = "^11.4.1" From c517452526a8d3bc4bad799039ef655aa6c5f5e3 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 18:32:14 -0400 Subject: [PATCH 15/36] - --- benchmarks/torchvision/main.py | 5 ++++- milabench/cli.py | 12 +++++++++--- milabench/report.py | 7 +++++-- milabench/schedule.py | 4 ++-- milabench/scripts/milabench.bash | 3 +-- 5 files changed, 21 insertions(+), 10 deletions(-) diff --git a/benchmarks/torchvision/main.py b/benchmarks/torchvision/main.py index a52cbbec0..b0a7cf847 100644 --- a/benchmarks/torchvision/main.py +++ b/benchmarks/torchvision/main.py @@ -165,7 +165,10 @@ def main(): if data_directory: args.data = os.path.join(data_directory, "FakeImageNet") - use_cuda = not args.no_cuda and torch.cuda.is_available() + if not args.no_cuda: + assert torch.cuda.is_available(), "Why is CUDA not available" + + use_cuda = not args.no_cuda torch.manual_seed(args.seed) if use_cuda: diff --git a/milabench/cli.py b/milabench/cli.py index b3ef50f89..d052d26f2 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -741,6 +741,7 @@ def publish(): def schedule(): """Launch a slurm job to run milabench""" + # milabench schedule --sync -- --select resnet50 # tail -f on the slurm job sync: Option & bool = False @@ -748,8 +749,13 @@ def schedule(): # Print the command and return without running it dry: Option & bool = False + # pip arguments + # 
[remainder] + args: Option = [] + launch_milabench( - None, - dry, - sync + args, + sbatch_args=None, + dry=dry, + sync=sync ) \ No newline at end of file diff --git a/milabench/report.py b/milabench/report.py index 1c0c63f56..b4a5cc71a 100644 --- a/milabench/report.py +++ b/milabench/report.py @@ -49,6 +49,7 @@ def _make_row(summary, compare, weights): score = (acc if acc > 0 else row["perf"]) * success_ratio row["score"] = score + print(score) row["weight"] = weights.get("weight", summary["weight"]) # ---- @@ -224,10 +225,12 @@ def make_report( weights = dict() df = make_dataframe(summary, compare, weights) - + print(df) + # Reorder columns df = df[sorted(df.columns, key=lambda k: columns_order.get(k, 0))] - + print(df) + out = Outputter(stdout=sys.stdout, html=html) if sources: diff --git a/milabench/schedule.py b/milabench/schedule.py index 78f1fb945..14f9de735 100644 --- a/milabench/schedule.py +++ b/milabench/schedule.py @@ -125,7 +125,7 @@ def arguments(self): ] -def launch_milabench(sbatch_args=None, dry: bool = False, sync: bool = False): +def launch_milabench(args, sbatch_args=None, dry: bool = False, sync: bool = False): sbatch_script = importlib_resources.files(__name__) / "scripts" / "milabench.bash" sbatch_script = str(sbatch_script) @@ -143,7 +143,7 @@ def launch_milabench(sbatch_args=None, dry: bool = False, sync: bool = False): script_args.deduce_from_repository() script_args = script_args.arguments() - cmd = sbatch_args + [sbatch_script] + script_args + cmd = sbatch_args + [sbatch_script] + script_args + args if dry: print("sbatch " + ' '.join(cmd)) diff --git a/milabench/scripts/milabench.bash b/milabench/scripts/milabench.bash index 401ab5b6a..bc63b4bfd 100755 --- a/milabench/scripts/milabench.bash +++ b/milabench/scripts/milabench.bash @@ -68,6 +68,7 @@ CONDA_EXEC="$(which conda)" CONDA_BASE=$(dirname $CONDA_EXEC) source $CONDA_BASE/../etc/profile.d/conda.sh +cd $SLURM_TMPDIR # # Create a new environment # @@ -89,12 +90,10 @@ export PYTHONUNBUFFERED=1 # # Fetch the repo # -cd $SLURM_TMPDIR git clone --single-branch --depth 1 -b $BRANCH $ORIGIN python -m pip install ./milabench SYSTEM="$SLURM_TMPDIR/system.yaml" -unset CUDA_VISIBLE_DEVICES echo "" echo "System" From ddb3104b68576168d045c15823c8eaa2031c7758 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 18:40:12 -0400 Subject: [PATCH 16/36] - --- milabench/cli.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/milabench/cli.py b/milabench/cli.py index d052d26f2..bfa469a98 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -404,8 +404,10 @@ def run(): reports = None if runs: reports = _read_reports(*runs) + print(reports) summary = make_summary(reports.values()) + print(summary) make_report( summary, compare=compare, From e280a27c8aca8358d94ebcf7b34aa71e0a2dbbad Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 18:46:45 -0400 Subject: [PATCH 17/36] - --- milabench/cli.py | 7 +++++-- milabench/report.py | 3 --- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/milabench/cli.py b/milabench/cli.py index bfa469a98..ae0388426 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -403,11 +403,14 @@ def run(): reports = None if runs: + print(runs) + reports = _read_reports(*runs) - print(reports) + assert len(reports) != 0, "No reports found" + summary = make_summary(reports.values()) + assert len(summary) != 0, "No summaries" - print(summary) make_report( summary, compare=compare, diff --git a/milabench/report.py b/milabench/report.py index 
b4a5cc71a..00c495d8c 100644 --- a/milabench/report.py +++ b/milabench/report.py @@ -49,7 +49,6 @@ def _make_row(summary, compare, weights): score = (acc if acc > 0 else row["perf"]) * success_ratio row["score"] = score - print(score) row["weight"] = weights.get("weight", summary["weight"]) # ---- @@ -225,11 +224,9 @@ def make_report( weights = dict() df = make_dataframe(summary, compare, weights) - print(df) # Reorder columns df = df[sorted(df.columns, key=lambda k: columns_order.get(k, 0))] - print(df) out = Outputter(stdout=sys.stdout, html=html) From 5f30dcc7219a570cf694a7f1653ceeb45dc2e5d6 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 18:58:06 -0400 Subject: [PATCH 18/36] - --- milabench/cli.py | 40 ++++++++++++++++++++------------ milabench/scripts/milabench.bash | 6 ++--- 2 files changed, 28 insertions(+), 18 deletions(-) diff --git a/milabench/cli.py b/milabench/cli.py index ae0388426..4e3f35e19 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -254,6 +254,30 @@ def is_selected(defn): ) + +def _parse_report(pth): + with pth.open() as f: + lines = f.readlines() + data = [] + good_lines = 0 + bad_lines = 0 + + for line in lines: + try: + data.append(json.loads(line)) + good_lines += 1 + except Exception: + import traceback + print(f"Could not parse line inside {pth}\n\t- {line}") + traceback.print_exc() + bad_lines += 1 + + if good_lines == 0: + raise RuntimeError(f"Unknow format for file {pth}") + + return data + + def _read_reports(*runs): all_data = {} for folder in runs: @@ -262,20 +286,8 @@ def _read_reports(*runs): if not file.endswith(".data"): continue pth = XPath(parent) / file - with pth.open() as f: - lines = f.readlines() - data = [] + all_data[str(pth)] = _parse_report(pth) - for line in lines: - try: - data.append(json.loads(line)) - except Exception: - import traceback - print(f"Could not parse line inside {pth}\n\t- {line}") - traceback.print_exc() - - if len(data) == 0: - all_data[str(pth)] = data return all_data @@ -403,8 +415,6 @@ def run(): reports = None if runs: - print(runs) - reports = _read_reports(*runs) assert len(reports) != 0, "No reports found" diff --git a/milabench/scripts/milabench.bash b/milabench/scripts/milabench.bash index bc63b4bfd..00b565d8d 100755 --- a/milabench/scripts/milabench.bash +++ b/milabench/scripts/milabench.bash @@ -91,7 +91,7 @@ export PYTHONUNBUFFERED=1 # Fetch the repo # git clone --single-branch --depth 1 -b $BRANCH $ORIGIN -python -m pip install ./milabench +python -m pip install -e ./milabench SYSTEM="$SLURM_TMPDIR/system.yaml" @@ -102,8 +102,6 @@ echo "------" milabench slurm_system milabench slurm_system > $SYSTEM - -nvidia-smi module load cuda/11.8 echo "" @@ -122,6 +120,8 @@ echo "Run" echo "---" milabench run --config $CONFIG --system $SYSTEM --base $BASE $REMAINING_ARGS +ls $BASE + # echo "" echo "Report" echo "------" From 9ccbdae6679bb318cbb745171ae3166c9fd2d8dd Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 19:21:09 -0400 Subject: [PATCH 19/36] - --- .pin/constraints-cuda-torch.txt | 10 +++++----- benchmarks/accelerate_opt/requirements.cuda.txt | 2 +- benchmarks/dlrm/requirements.cuda.txt | 2 +- benchmarks/huggingface/requirements.cuda.txt | 2 +- benchmarks/rwkv/requirements.cuda.txt | 2 +- benchmarks/stable_baselines3/requirements.cuda.txt | 2 +- benchmarks/stargan/requirements.cuda.txt | 2 +- benchmarks/super-slomo/requirements.cuda.txt | 2 +- benchmarks/timm/requirements.cuda.txt | 2 +- benchmarks/torchvision/requirements.cuda.txt | 2 +- 10 files changed, 14 
insertions(+), 14 deletions(-) diff --git a/.pin/constraints-cuda-torch.txt b/.pin/constraints-cuda-torch.txt index 85a8c0f5d..14e59567d 100644 --- a/.pin/constraints-cuda-torch.txt +++ b/.pin/constraints-cuda-torch.txt @@ -146,8 +146,8 @@ ninja==1.11.1.1 # deepspeed numpy==1.26.1 # via + # -r benchmarks/dlrm/requirements.in # -r benchmarks/rwkv/requirements.in - # -r benchmarks/super-slomo/requirements.in # accelerate # datasets # deepspeed @@ -295,7 +295,7 @@ tokenizers==0.14.1 torch==2.1.0+cu118 # via # -r benchmarks/rwkv/requirements.in - # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/timm/requirements.in # accelerate # deepspeed # pytorch-lightning @@ -314,7 +314,7 @@ torchrec==0.5.0+cu118 torchvision==0.16.0+cu118 # via # -r benchmarks/accelerate_opt/requirements.in - # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/timm/requirements.in torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in torchx==0.5.0 @@ -359,10 +359,10 @@ urllib3==1.26.18 # torchx varname==0.10.0 # via giving -voir==0.2.10 +voir @ git+git://github.com/mozilla/elasticutils.git # via # -r benchmarks/rwkv/requirements.in - # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/timm/requirements.in websocket-client==1.6.4 # via docker werkzeug==3.0.0 diff --git a/benchmarks/accelerate_opt/requirements.cuda.txt b/benchmarks/accelerate_opt/requirements.cuda.txt index 7864b19ed..c1ba65d8d 100644 --- a/benchmarks/accelerate_opt/requirements.cuda.txt +++ b/benchmarks/accelerate_opt/requirements.cuda.txt @@ -313,7 +313,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir==0.2.10 +voir @ git+git://github.com/mozilla/elasticutils.git # via -r benchmarks/accelerate_opt/requirements.in xxhash==3.4.1 # via diff --git a/benchmarks/dlrm/requirements.cuda.txt b/benchmarks/dlrm/requirements.cuda.txt index c0a902576..7214829f7 100644 --- a/benchmarks/dlrm/requirements.cuda.txt +++ b/benchmarks/dlrm/requirements.cuda.txt @@ -300,7 +300,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir==0.2.10 +voir @ git+git://github.com/mozilla/elasticutils.git # via -r benchmarks/dlrm/requirements.in websocket-client==1.6.4 # via diff --git a/benchmarks/huggingface/requirements.cuda.txt b/benchmarks/huggingface/requirements.cuda.txt index f505bd715..eec7f297a 100644 --- a/benchmarks/huggingface/requirements.cuda.txt +++ b/benchmarks/huggingface/requirements.cuda.txt @@ -175,5 +175,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir==0.2.10 +voir @ git+git://github.com/mozilla/elasticutils.git # via -r benchmarks/huggingface/requirements.in diff --git a/benchmarks/rwkv/requirements.cuda.txt b/benchmarks/rwkv/requirements.cuda.txt index 65f52f7e6..aa839b529 100644 --- a/benchmarks/rwkv/requirements.cuda.txt +++ b/benchmarks/rwkv/requirements.cuda.txt @@ -223,7 +223,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir==0.2.10 +voir @ git+git://github.com/mozilla/elasticutils.git # via -r benchmarks/rwkv/requirements.in yarl==1.9.2 # via diff --git a/benchmarks/stable_baselines3/requirements.cuda.txt b/benchmarks/stable_baselines3/requirements.cuda.txt index a2dcc069c..61df81cde 100644 --- a/benchmarks/stable_baselines3/requirements.cuda.txt +++ b/benchmarks/stable_baselines3/requirements.cuda.txt @@ -304,7 +304,7 @@ urllib3==1.26.15 # sentry-sdk varname==0.10.0 # via giving -voir==0.2.10 +voir @ git+git://github.com/mozilla/elasticutils.git # via -r 
benchmarks/stable_baselines3/requirements.in wandb==0.14.0 # via -r benchmarks/stable_baselines3/requirements.in diff --git a/benchmarks/stargan/requirements.cuda.txt b/benchmarks/stargan/requirements.cuda.txt index 79f4f404d..406a22781 100644 --- a/benchmarks/stargan/requirements.cuda.txt +++ b/benchmarks/stargan/requirements.cuda.txt @@ -147,5 +147,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir==0.2.10 +voir @ git+git://github.com/mozilla/elasticutils.git # via -r benchmarks/stargan/requirements.in diff --git a/benchmarks/super-slomo/requirements.cuda.txt b/benchmarks/super-slomo/requirements.cuda.txt index 821fdc9ae..794c92031 100644 --- a/benchmarks/super-slomo/requirements.cuda.txt +++ b/benchmarks/super-slomo/requirements.cuda.txt @@ -152,5 +152,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir==0.2.10 +voir @ git+git://github.com/mozilla/elasticutils.git # via -r benchmarks/super-slomo/requirements.in diff --git a/benchmarks/timm/requirements.cuda.txt b/benchmarks/timm/requirements.cuda.txt index a81ad461c..1ce77eae9 100644 --- a/benchmarks/timm/requirements.cuda.txt +++ b/benchmarks/timm/requirements.cuda.txt @@ -164,5 +164,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir==0.2.10 +voir @ git+git://github.com/mozilla/elasticutils.git # via -r benchmarks/timm/requirements.in diff --git a/benchmarks/torchvision/requirements.cuda.txt b/benchmarks/torchvision/requirements.cuda.txt index bb704fa25..b21dad482 100644 --- a/benchmarks/torchvision/requirements.cuda.txt +++ b/benchmarks/torchvision/requirements.cuda.txt @@ -149,5 +149,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir==0.2.10 +voir @ git+git://github.com/mozilla/elasticutils.git # via -r benchmarks/torchvision/requirements.in From 89f56f670db0f22880d057262a320c935c217d77 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 19:29:36 -0400 Subject: [PATCH 20/36] - --- .pin/constraints-cuda-torch.txt | 2 +- benchmarks/accelerate_opt/requirements.cuda.txt | 2 +- benchmarks/dlrm/requirements.cuda.txt | 2 +- benchmarks/huggingface/requirements.cuda.txt | 2 +- benchmarks/rwkv/requirements.cuda.txt | 2 +- benchmarks/stable_baselines3/requirements.cuda.txt | 2 +- benchmarks/stargan/requirements.cuda.txt | 2 +- benchmarks/super-slomo/requirements.cuda.txt | 2 +- benchmarks/timm/requirements.cuda.txt | 2 +- benchmarks/torchvision/requirements.cuda.txt | 2 +- milabench/scripts/milabench.bash | 10 ++++++---- 11 files changed, 16 insertions(+), 14 deletions(-) diff --git a/.pin/constraints-cuda-torch.txt b/.pin/constraints-cuda-torch.txt index 14e59567d..108cc1352 100644 --- a/.pin/constraints-cuda-torch.txt +++ b/.pin/constraints-cuda-torch.txt @@ -359,7 +359,7 @@ urllib3==1.26.18 # torchx varname==0.10.0 # via giving -voir @ git+git://github.com/mozilla/elasticutils.git +voir @ git+https://github.com/breuleux/voir.git # via # -r benchmarks/rwkv/requirements.in # -r benchmarks/timm/requirements.in diff --git a/benchmarks/accelerate_opt/requirements.cuda.txt b/benchmarks/accelerate_opt/requirements.cuda.txt index c1ba65d8d..6eade8dd1 100644 --- a/benchmarks/accelerate_opt/requirements.cuda.txt +++ b/benchmarks/accelerate_opt/requirements.cuda.txt @@ -313,7 +313,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+git://github.com/mozilla/elasticutils.git +voir @ git+https://github.com/breuleux/voir.git # via -r 
benchmarks/accelerate_opt/requirements.in xxhash==3.4.1 # via diff --git a/benchmarks/dlrm/requirements.cuda.txt b/benchmarks/dlrm/requirements.cuda.txt index 7214829f7..823545753 100644 --- a/benchmarks/dlrm/requirements.cuda.txt +++ b/benchmarks/dlrm/requirements.cuda.txt @@ -300,7 +300,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+git://github.com/mozilla/elasticutils.git +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/dlrm/requirements.in websocket-client==1.6.4 # via diff --git a/benchmarks/huggingface/requirements.cuda.txt b/benchmarks/huggingface/requirements.cuda.txt index eec7f297a..ae10bc7c7 100644 --- a/benchmarks/huggingface/requirements.cuda.txt +++ b/benchmarks/huggingface/requirements.cuda.txt @@ -175,5 +175,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+git://github.com/mozilla/elasticutils.git +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/huggingface/requirements.in diff --git a/benchmarks/rwkv/requirements.cuda.txt b/benchmarks/rwkv/requirements.cuda.txt index aa839b529..0fec95e69 100644 --- a/benchmarks/rwkv/requirements.cuda.txt +++ b/benchmarks/rwkv/requirements.cuda.txt @@ -223,7 +223,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+git://github.com/mozilla/elasticutils.git +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/rwkv/requirements.in yarl==1.9.2 # via diff --git a/benchmarks/stable_baselines3/requirements.cuda.txt b/benchmarks/stable_baselines3/requirements.cuda.txt index 61df81cde..7af1e0780 100644 --- a/benchmarks/stable_baselines3/requirements.cuda.txt +++ b/benchmarks/stable_baselines3/requirements.cuda.txt @@ -304,7 +304,7 @@ urllib3==1.26.15 # sentry-sdk varname==0.10.0 # via giving -voir @ git+git://github.com/mozilla/elasticutils.git +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/stable_baselines3/requirements.in wandb==0.14.0 # via -r benchmarks/stable_baselines3/requirements.in diff --git a/benchmarks/stargan/requirements.cuda.txt b/benchmarks/stargan/requirements.cuda.txt index 406a22781..da7691003 100644 --- a/benchmarks/stargan/requirements.cuda.txt +++ b/benchmarks/stargan/requirements.cuda.txt @@ -147,5 +147,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+git://github.com/mozilla/elasticutils.git +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/stargan/requirements.in diff --git a/benchmarks/super-slomo/requirements.cuda.txt b/benchmarks/super-slomo/requirements.cuda.txt index 794c92031..2012a7cf4 100644 --- a/benchmarks/super-slomo/requirements.cuda.txt +++ b/benchmarks/super-slomo/requirements.cuda.txt @@ -152,5 +152,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+git://github.com/mozilla/elasticutils.git +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/super-slomo/requirements.in diff --git a/benchmarks/timm/requirements.cuda.txt b/benchmarks/timm/requirements.cuda.txt index 1ce77eae9..865830349 100644 --- a/benchmarks/timm/requirements.cuda.txt +++ b/benchmarks/timm/requirements.cuda.txt @@ -164,5 +164,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+git://github.com/mozilla/elasticutils.git +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/timm/requirements.in diff --git a/benchmarks/torchvision/requirements.cuda.txt 
b/benchmarks/torchvision/requirements.cuda.txt index b21dad482..9c7f06d04 100644 --- a/benchmarks/torchvision/requirements.cuda.txt +++ b/benchmarks/torchvision/requirements.cuda.txt @@ -149,5 +149,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+git://github.com/mozilla/elasticutils.git +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/torchvision/requirements.in diff --git a/milabench/scripts/milabench.bash b/milabench/scripts/milabench.bash index 00b565d8d..5754be996 100755 --- a/milabench/scripts/milabench.bash +++ b/milabench/scripts/milabench.bash @@ -120,12 +120,14 @@ echo "Run" echo "---" milabench run --config $CONFIG --system $SYSTEM --base $BASE $REMAINING_ARGS -ls $BASE - -# echo "" +echo "" echo "Report" echo "------" -milabench summary $SLURM_TMPDIR/base/runs/ + +# json +# milabench summary $SLURM_TMPDIR/base/runs/ + +milabench report --config $CONFIG --runs $SLURM_TMPDIR/base/runs/ echo "----" echo "Done" From cc8e30dae384c84e0a25a84683eca6f9c26b5f55 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Thu, 19 Oct 2023 19:58:57 -0400 Subject: [PATCH 21/36] update everything and seems to work --- benchmarks/accelerate_opt/requirements.cpu.txt | 2 +- benchmarks/accelerate_opt/requirements.rocm.txt | 2 +- benchmarks/dlrm/requirements.cpu.txt | 2 +- benchmarks/dlrm/requirements.rocm.txt | 2 +- benchmarks/huggingface/requirements.cpu.txt | 2 +- benchmarks/huggingface/requirements.rocm.txt | 2 +- benchmarks/rwkv/requirements.cpu.txt | 2 +- benchmarks/rwkv/requirements.rocm.txt | 2 +- benchmarks/stable_baselines3/requirements.rocm.txt | 2 +- benchmarks/stargan/requirements.cpu.txt | 2 +- benchmarks/stargan/requirements.rocm.txt | 2 +- benchmarks/super-slomo/requirements.cpu.txt | 2 +- benchmarks/super-slomo/requirements.rocm.txt | 2 +- benchmarks/timm/requirements.cpu.txt | 2 +- benchmarks/timm/requirements.rocm.txt | 2 +- benchmarks/torchvision/requirements.cpu.txt | 2 +- benchmarks/torchvision/requirements.rocm.txt | 2 +- milabench/_version.py | 6 +++--- milabench/scripts/milabench.bash | 5 +++-- 19 files changed, 23 insertions(+), 22 deletions(-) diff --git a/benchmarks/accelerate_opt/requirements.cpu.txt b/benchmarks/accelerate_opt/requirements.cpu.txt index 8a243548d..82a92da3c 100644 --- a/benchmarks/accelerate_opt/requirements.cpu.txt +++ b/benchmarks/accelerate_opt/requirements.cpu.txt @@ -322,7 +322,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cpu-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/accelerate_opt/requirements.in xxhash==3.4.1 # via diff --git a/benchmarks/accelerate_opt/requirements.rocm.txt b/benchmarks/accelerate_opt/requirements.rocm.txt index 4bddec1f0..2bbbb84bb 100644 --- a/benchmarks/accelerate_opt/requirements.rocm.txt +++ b/benchmarks/accelerate_opt/requirements.rocm.txt @@ -331,7 +331,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/accelerate_opt/requirements.in xxhash==3.4.1 # via diff --git a/benchmarks/dlrm/requirements.cpu.txt b/benchmarks/dlrm/requirements.cpu.txt index 0dba4a2c0..9e89dd49f 100644 --- a/benchmarks/dlrm/requirements.cpu.txt +++ b/benchmarks/dlrm/requirements.cpu.txt @@ -301,7 +301,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cpu-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/dlrm/requirements.in websocket-client==1.6.4 # 
via diff --git a/benchmarks/dlrm/requirements.rocm.txt b/benchmarks/dlrm/requirements.rocm.txt index 1d040803f..cadc67bb7 100644 --- a/benchmarks/dlrm/requirements.rocm.txt +++ b/benchmarks/dlrm/requirements.rocm.txt @@ -311,7 +311,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/dlrm/requirements.in websocket-client==1.6.4 # via diff --git a/benchmarks/huggingface/requirements.cpu.txt b/benchmarks/huggingface/requirements.cpu.txt index 5d0a261ec..05f35fc68 100644 --- a/benchmarks/huggingface/requirements.cpu.txt +++ b/benchmarks/huggingface/requirements.cpu.txt @@ -175,5 +175,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cpu-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/huggingface/requirements.in diff --git a/benchmarks/huggingface/requirements.rocm.txt b/benchmarks/huggingface/requirements.rocm.txt index e0068b2ad..efeb3285d 100644 --- a/benchmarks/huggingface/requirements.rocm.txt +++ b/benchmarks/huggingface/requirements.rocm.txt @@ -185,5 +185,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/huggingface/requirements.in diff --git a/benchmarks/rwkv/requirements.cpu.txt b/benchmarks/rwkv/requirements.cpu.txt index 2b5a8181e..15474be62 100644 --- a/benchmarks/rwkv/requirements.cpu.txt +++ b/benchmarks/rwkv/requirements.cpu.txt @@ -232,7 +232,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cpu-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/rwkv/requirements.in yarl==1.9.2 # via diff --git a/benchmarks/rwkv/requirements.rocm.txt b/benchmarks/rwkv/requirements.rocm.txt index ba94a506d..a649f17ee 100644 --- a/benchmarks/rwkv/requirements.rocm.txt +++ b/benchmarks/rwkv/requirements.rocm.txt @@ -241,7 +241,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/rwkv/requirements.in yarl==1.9.2 # via diff --git a/benchmarks/stable_baselines3/requirements.rocm.txt b/benchmarks/stable_baselines3/requirements.rocm.txt index 2096b0592..14b49bd41 100644 --- a/benchmarks/stable_baselines3/requirements.rocm.txt +++ b/benchmarks/stable_baselines3/requirements.rocm.txt @@ -304,7 +304,7 @@ urllib3==1.26.15 # sentry-sdk varname==0.10.0 # via giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/stable_baselines3/requirements.in wandb==0.14.0 # via -r benchmarks/stable_baselines3/requirements.in diff --git a/benchmarks/stargan/requirements.cpu.txt b/benchmarks/stargan/requirements.cpu.txt index e2bce7b04..2837a0e76 100644 --- a/benchmarks/stargan/requirements.cpu.txt +++ b/benchmarks/stargan/requirements.cpu.txt @@ -147,5 +147,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cpu-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/stargan/requirements.in diff --git a/benchmarks/stargan/requirements.rocm.txt b/benchmarks/stargan/requirements.rocm.txt index a5ae5e902..a1a08269f 100644 --- a/benchmarks/stargan/requirements.rocm.txt +++ b/benchmarks/stargan/requirements.rocm.txt @@ -156,5 +156,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r 
benchmarks/stargan/requirements.in diff --git a/benchmarks/super-slomo/requirements.cpu.txt b/benchmarks/super-slomo/requirements.cpu.txt index c8d469b4b..b2fd20ce5 100644 --- a/benchmarks/super-slomo/requirements.cpu.txt +++ b/benchmarks/super-slomo/requirements.cpu.txt @@ -152,5 +152,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cpu-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/super-slomo/requirements.in diff --git a/benchmarks/super-slomo/requirements.rocm.txt b/benchmarks/super-slomo/requirements.rocm.txt index e185e1ceb..26f26a039 100644 --- a/benchmarks/super-slomo/requirements.rocm.txt +++ b/benchmarks/super-slomo/requirements.rocm.txt @@ -161,5 +161,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/super-slomo/requirements.in diff --git a/benchmarks/timm/requirements.cpu.txt b/benchmarks/timm/requirements.cpu.txt index 2c905249a..7a76f3287 100644 --- a/benchmarks/timm/requirements.cpu.txt +++ b/benchmarks/timm/requirements.cpu.txt @@ -164,5 +164,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cpu-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/timm/requirements.in diff --git a/benchmarks/timm/requirements.rocm.txt b/benchmarks/timm/requirements.rocm.txt index 8474489a7..f9db05821 100644 --- a/benchmarks/timm/requirements.rocm.txt +++ b/benchmarks/timm/requirements.rocm.txt @@ -173,5 +173,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/timm/requirements.in diff --git a/benchmarks/torchvision/requirements.cpu.txt b/benchmarks/torchvision/requirements.cpu.txt index 88b82e687..c0718b1eb 100644 --- a/benchmarks/torchvision/requirements.cpu.txt +++ b/benchmarks/torchvision/requirements.cpu.txt @@ -149,5 +149,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cpu-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/torchvision/requirements.in diff --git a/benchmarks/torchvision/requirements.rocm.txt b/benchmarks/torchvision/requirements.rocm.txt index aab1c06f0..8026040ea 100644 --- a/benchmarks/torchvision/requirements.rocm.txt +++ b/benchmarks/torchvision/requirements.rocm.txt @@ -158,5 +158,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir==0.2.10 +voir @ git+https://github.com/breuleux/voir.git # via -r benchmarks/torchvision/requirements.in diff --git a/milabench/_version.py b/milabench/_version.py index 5a1f304fd..8fc180822 100644 --- a/milabench/_version.py +++ b/milabench/_version.py @@ -1,5 +1,5 @@ """This file is generated, do not modify""" -__tag__ = "v0.0.6-27-gdf4bf03" -__commit__ = "df4bf03d8a76752fe0e70ef0a32b6f36bb101c27" -__date__ = "2023-10-18 10:53:44 -0400" +__tag__ = "v0.0.6-43-g89f56f6" +__commit__ = "89f56f670db0f22880d057262a320c935c217d77" +__date__ = "2023-10-19 19:29:36 -0400" diff --git a/milabench/scripts/milabench.bash b/milabench/scripts/milabench.bash index 5754be996..6a3ddb1a3 100755 --- a/milabench/scripts/milabench.bash +++ b/milabench/scripts/milabench.bash @@ -102,6 +102,7 @@ echo "------" milabench slurm_system milabench slurm_system > $SYSTEM +module load gcc/9.3.0 module load cuda/11.8 echo "" @@ -127,8 +128,8 @@ echo "------" # json # milabench summary $SLURM_TMPDIR/base/runs/ -milabench report --config 
$CONFIG --runs $SLURM_TMPDIR/base/runs/ +milabench report --config $CONFIG --base $BASE --runs $SLURM_TMPDIR/base/runs/ echo "----" -echo "Done" +echo "Done after $SECONDS" echo "" From f759ea03b2a375015348a4e5a6be7ea9daf7adec Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 12:07:51 -0400 Subject: [PATCH 22/36] - --- milabench/cli.py | 52 ++++++++++++++++++- milabench/report.py | 9 +++- milabench/schedule.py | 33 ++++++++++-- milabench/scripts/milabench_pin.bash | 3 ++ .../{milabench.bash => milabench_run.bash} | 9 ++-- 5 files changed, 95 insertions(+), 11 deletions(-) create mode 100644 milabench/scripts/milabench_pin.bash rename milabench/scripts/{milabench.bash => milabench_run.bash} (91%) diff --git a/milabench/cli.py b/milabench/cli.py index 4e3f35e19..7c3bd6d24 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -1,6 +1,7 @@ import json import os import re +import io import runpy import shutil import subprocess @@ -33,7 +34,7 @@ from .report import make_report from .slurm import expand_node_list from .summary import aggregate, make_summary -from .schedule import launch_milabench +from .schedule import launch_milabench, post_comment_on_pr def main(argv=None): @@ -773,4 +774,51 @@ def schedule(): sbatch_args=None, dry=dry, sync=sync - ) \ No newline at end of file + ) + + def write_report_to_pr(): + remote: str & Option + + branch: str & Option + + # Runs directory + # [action: append] + runs: Option = [] + + # Configuration file (for weights) + config: Option & str = os.environ.get("MILABENCH_CONFIG", None) + + token: str & Option = os.getenv("MILABENCH_GITHUB_PAT") + + report = _short_make_report(runs, config) + + post_comment_on_pr( + remote, + branch, + steam.getvalue(), + token + ) + + +def _short_make_report(runs, config): + reports = None + + if runs: + reports = _read_reports(*runs) + summary = make_summary(reports.values()) + + if config: + config = _get_multipack(config, return_config=True) + + stream = io.StringIO() + + make_report( + summary, + weights=config, + html=html, + stream=stream, + sources=runs, + errdata=reports and _error_report(reports), + ) + + return stream.getvalue() \ No newline at end of file diff --git a/milabench/report.py b/milabench/report.py index 00c495d8c..e7609d880 100644 --- a/milabench/report.py +++ b/milabench/report.py @@ -49,6 +49,7 @@ def _make_row(summary, compare, weights): score = (acc if acc > 0 else row["perf"]) * success_ratio row["score"] = score + print(score) row["weight"] = weights.get("weight", summary["weight"]) # ---- @@ -208,6 +209,11 @@ def make_dataframe(summary, compare=None, weights=None): ).transpose() + + + + + @error_guard({}) def make_report( summary, @@ -219,6 +225,7 @@ def make_report( sources=None, errdata=None, weights=None, + stream=sys.stdout ): if weights is None: weights = dict() @@ -228,7 +235,7 @@ def make_report( # Reorder columns df = df[sorted(df.columns, key=lambda k: columns_order.get(k, 0))] - out = Outputter(stdout=sys.stdout, html=html) + out = Outputter(stdout=stream, html=html) if sources: if isinstance(sources, str): diff --git a/milabench/schedule.py b/milabench/schedule.py index 14f9de735..c572e0e38 100644 --- a/milabench/schedule.py +++ b/milabench/schedule.py @@ -4,6 +4,7 @@ import importlib_resources import subprocess import requests +import os def popen(cmd, callback=None): @@ -126,7 +127,7 @@ def arguments(self): def launch_milabench(args, sbatch_args=None, dry: bool = False, sync: bool = False): - sbatch_script = importlib_resources.files(__name__) / "scripts" / 
"milabench.bash" + sbatch_script = importlib_resources.files(__name__) / "scripts" / "milabench_run.bash" sbatch_script = str(sbatch_script) if sbatch_args is None: @@ -134,7 +135,7 @@ def launch_milabench(args, sbatch_args=None, dry: bool = False, sync: bool = Fal "--ntasks=1", "--gpus-per-task=rtx8000:1", "--cpus-per-task=4", - "--time=03:00:00", + "--time=01:30:00", "--ntasks-per-node=1", "--mem=64G" ] @@ -154,7 +155,27 @@ def launch_milabench(args, sbatch_args=None, dry: bool = False, sync: bool = Fal return code -def post_comment_on_pr(owner, branch, comment, access_token=None): +def get_remote_owner(remote): + sshremote = re.compile(r"git@[A-Za-z]*\.[A-Za-z]*:(?P[A-Za-z\-.0-9]*)\/([A-Za-z]*).([A-Za-z]*)") + httpsremote = re.compile(r"https:\/\/[A-Za-z]*\.[A-Za-z]*\/(?P[A-Za-z\-.0-9]*)\/([A-Za-z]*).([A-Za-z]*)") + + patterns = [sshremote, httpsremote] + + for pat in patterns: + if match := sshremote.match(remote): + results = match.groupdict() + return results['owner'] + + return None + + +def post_comment_on_pr(remote, branch, comment, access_token=None): + owner = get_remote_owner(remote) + assert owner is not None, "Remote owner not found" + + if access_token is None: + access_token = os.getenv("MILABENCH_GITHUB_PAT") + url = "https://api.github.com/repos/mila-iqia/milabench/pulls" response = requests.get(url, params={"head": f"{owner}:{branch}"}) @@ -167,8 +188,10 @@ def post_comment_on_pr(owner, branch, comment, access_token=None): if not pull_requests: raise RuntimeError("No matching pull requests found.") + assert len(pull_requests) == 1, "Multiple PR found" + pr = pull_requests[0] - post_url = pr["_links"]["review_comments"]["href"] + post_url = pr["_links"]["comments"]["href"] data = { "body": comment, @@ -186,4 +209,4 @@ def post_comment_on_pr(owner, branch, comment, access_token=None): ) if response.status_code != 201: - raise RuntimeError(response) + raise RuntimeError(response, response.json()) diff --git a/milabench/scripts/milabench_pin.bash b/milabench/scripts/milabench_pin.bash new file mode 100644 index 000000000..a679a8c49 --- /dev/null +++ b/milabench/scripts/milabench_pin.bash @@ -0,0 +1,3 @@ +#!/bin/bash + +# 16Gb \ No newline at end of file diff --git a/milabench/scripts/milabench.bash b/milabench/scripts/milabench_run.bash similarity index 91% rename from milabench/scripts/milabench.bash rename to milabench/scripts/milabench_run.bash index 6a3ddb1a3..0e7f401bc 100755 --- a/milabench/scripts/milabench.bash +++ b/milabench/scripts/milabench_run.bash @@ -84,9 +84,9 @@ export XDG_CACHE_HOME=$BASE/cache export MILABENCH_GPU_ARCH=$ARCH export MILABENCH_DASH=no -export MILABENCH_NOTERM=1 export PYTHONUNBUFFERED=1 - +export MILABENCH_BASE=$BASE +export MILABENCH_CONFIG=$CONFIG # # Fetch the repo # @@ -128,7 +128,10 @@ echo "------" # json # milabench summary $SLURM_TMPDIR/base/runs/ -milabench report --config $CONFIG --base $BASE --runs $SLURM_TMPDIR/base/runs/ +# milabench report --config $CONFIG --runs $SLURM_TMPDIR/base/runs/ + +milabench write_report_to_pr --remote $ORIGIN --branch $BRANCH\ + --config $CONFIG --runs $SLURM_TMPDIR/base/runs/ echo "----" echo "Done after $SECONDS" From a7d14f7700d10a143acb7f44ac27b9f1c785f018 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 12:23:45 -0400 Subject: [PATCH 23/36] - --- milabench/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/milabench/cli.py b/milabench/cli.py index 7c3bd6d24..56b810d29 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -795,7 +795,7 @@ def 
write_report_to_pr(): post_comment_on_pr( remote, branch, - steam.getvalue(), + "```\n" + report + "\n```", token ) From d44e67aa9c3e09e581195c8d25ab668a9082ddce Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 12:31:58 -0400 Subject: [PATCH 24/36] - --- milabench/cli.py | 1 - 1 file changed, 1 deletion(-) diff --git a/milabench/cli.py b/milabench/cli.py index 56b810d29..2999a5f48 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -815,7 +815,6 @@ def _short_make_report(runs, config): make_report( summary, weights=config, - html=html, stream=stream, sources=runs, errdata=reports and _error_report(reports), From 1d082f9fae206edf6cd587843083656fbc4ebb9b Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 13:48:57 -0400 Subject: [PATCH 25/36] - --- milabench/cli.py | 2 +- milabench/scripts/milabench_pin.bash | 14 +++++++++++++- milabench/scripts/milabench_run.bash | 4 ++-- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/milabench/cli.py b/milabench/cli.py index 2999a5f48..5bf739e4f 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -782,7 +782,7 @@ def write_report_to_pr(): branch: str & Option # Runs directory - # [action: append] + # [positional: +] runs: Option = [] # Configuration file (for weights) diff --git a/milabench/scripts/milabench_pin.bash b/milabench/scripts/milabench_pin.bash index a679a8c49..694933bff 100644 --- a/milabench/scripts/milabench_pin.bash +++ b/milabench/scripts/milabench_pin.bash @@ -1,3 +1,15 @@ #!/bin/bash -# 16Gb \ No newline at end of file +# CPU only +# 16Gb + + + +MILABENCH_GPU_ARCH=cuda milabench pin -c constraints/cuda.txt --config config/standard.yaml +MILABENCH_GPU_ARCH=rocm milabench pin -c constraints/rocm.txt --config config/standard.yaml + + +cd $SLURM_TMPDIR/milabench +git add --all +git commit -m "milabench pin" +git push $ORIGIN $BRANCH \ No newline at end of file diff --git a/milabench/scripts/milabench_run.bash b/milabench/scripts/milabench_run.bash index 0e7f401bc..ae362c739 100755 --- a/milabench/scripts/milabench_run.bash +++ b/milabench/scripts/milabench_run.bash @@ -128,10 +128,10 @@ echo "------" # json # milabench summary $SLURM_TMPDIR/base/runs/ -# milabench report --config $CONFIG --runs $SLURM_TMPDIR/base/runs/ +milabench report --config $CONFIG --runs $SLURM_TMPDIR/base/runs/ milabench write_report_to_pr --remote $ORIGIN --branch $BRANCH\ - --config $CONFIG --runs $SLURM_TMPDIR/base/runs/ + --config $CONFIG --runs $SLURM_TMPDIR/base/runs/* echo "----" echo "Done after $SECONDS" From bce797f9efa68a35c6dffe2dd6059dc9c8e68a12 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 15:00:07 -0400 Subject: [PATCH 26/36] - --- milabench/cli.py | 17 +++++++++++------ milabench/report.py | 6 ------ milabench/scripts/milabench_run.bash | 6 ++++-- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/milabench/cli.py b/milabench/cli.py index 5bf739e4f..38e2d56e5 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -781,14 +781,19 @@ def write_report_to_pr(): branch: str & Option - # Runs directory - # [positional: +] - runs: Option = [] - - # Configuration file (for weights) - config: Option & str = os.environ.get("MILABENCH_CONFIG", None) + base: Option & str = os.getenv("MILABENCH_BASE", None) + + config: Option & str = os.getenv("MILABENCH_CONFIG", None) token: str & Option = os.getenv("MILABENCH_GITHUB_PAT") + + assert base is not None + + runfolder = os.path.join(base, "runs") + + runs = [] + for folder in osl.listdir(runfolder): + 
runs.append(os.path.join(runfolder, folder)) report = _short_make_report(runs, config) diff --git a/milabench/report.py b/milabench/report.py index e7609d880..8427e9bdf 100644 --- a/milabench/report.py +++ b/milabench/report.py @@ -49,7 +49,6 @@ def _make_row(summary, compare, weights): score = (acc if acc > 0 else row["perf"]) * success_ratio row["score"] = score - print(score) row["weight"] = weights.get("weight", summary["weight"]) # ---- @@ -209,11 +208,6 @@ def make_dataframe(summary, compare=None, weights=None): ).transpose() - - - - - @error_guard({}) def make_report( summary, diff --git a/milabench/scripts/milabench_run.bash b/milabench/scripts/milabench_run.bash index ae362c739..71b57821b 100755 --- a/milabench/scripts/milabench_run.bash +++ b/milabench/scripts/milabench_run.bash @@ -128,10 +128,12 @@ echo "------" # json # milabench summary $SLURM_TMPDIR/base/runs/ -milabench report --config $CONFIG --runs $SLURM_TMPDIR/base/runs/ + +ls $BASE/runs + milabench write_report_to_pr --remote $ORIGIN --branch $BRANCH\ - --config $CONFIG --runs $SLURM_TMPDIR/base/runs/* + --config $CONFIG $SLURM_TMPDIR/base/runs/* echo "----" echo "Done after $SECONDS" From fc4de5c87e6dce1e19e1223d97f9980b3419b972 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 15:06:29 -0400 Subject: [PATCH 27/36] - --- milabench/cli.py | 2 +- milabench/scripts/milabench_run.bash | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/milabench/cli.py b/milabench/cli.py index 38e2d56e5..8a1baaec6 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -792,7 +792,7 @@ def write_report_to_pr(): runfolder = os.path.join(base, "runs") runs = [] - for folder in osl.listdir(runfolder): + for folder in os.listdir(runfolder): runs.append(os.path.join(runfolder, folder)) report = _short_make_report(runs, config) diff --git a/milabench/scripts/milabench_run.bash b/milabench/scripts/milabench_run.bash index 71b57821b..bddd827f8 100755 --- a/milabench/scripts/milabench_run.bash +++ b/milabench/scripts/milabench_run.bash @@ -132,8 +132,7 @@ echo "------" ls $BASE/runs -milabench write_report_to_pr --remote $ORIGIN --branch $BRANCH\ - --config $CONFIG $SLURM_TMPDIR/base/runs/* +milabench write_report_to_pr --remote $ORIGIN --branch $BRANCH --config $CONFIG echo "----" echo "Done after $SECONDS" From 67312441c5a4424de814aa9a5b2d5b54e73e0700 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 15:21:58 -0400 Subject: [PATCH 28/36] - --- milabench/cli.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/milabench/cli.py b/milabench/cli.py index 8a1baaec6..c9995512e 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -791,9 +791,16 @@ def write_report_to_pr(): runfolder = os.path.join(base, "runs") + def filter(folder): + for f in ('install', 'prepare'): + if f in folder: + return False + return True + runs = [] for folder in os.listdir(runfolder): - runs.append(os.path.join(runfolder, folder)) + if filter(folder): + runs.append(os.path.join(runfolder, folder)) report = _short_make_report(runs, config) From 158e43be94f183b99d83e51d07ca6f2d338a643d Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 15:30:40 -0400 Subject: [PATCH 29/36] - --- milabench/cli.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/milabench/cli.py b/milabench/cli.py index c9995512e..cb61a4eef 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -414,6 +414,10 @@ def run(): html = None price = None + print("HERE") + print(runs) + print("") + 
reports = None if runs: reports = _read_reports(*runs) @@ -802,6 +806,9 @@ def filter(folder): if filter(folder): runs.append(os.path.join(runfolder, folder)) + print("HERE") + print(runs) + print() report = _short_make_report(runs, config) post_comment_on_pr( From 1b93ae3c94dec21445aae669c058edcf7d6b0258 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 15:43:50 -0400 Subject: [PATCH 30/36] - --- milabench/cli.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/milabench/cli.py b/milabench/cli.py index cb61a4eef..c3c034049 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -413,10 +413,6 @@ def run(): compare_gpus = False html = None price = None - - print("HERE") - print(runs) - print("") reports = None if runs: @@ -805,10 +801,7 @@ def filter(folder): for folder in os.listdir(runfolder): if filter(folder): runs.append(os.path.join(runfolder, folder)) - - print("HERE") - print(runs) - print() + report = _short_make_report(runs, config) post_comment_on_pr( From 141d30d51bf0d0e96bc533a4defec304439a2fad Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 16:01:47 -0400 Subject: [PATCH 31/36] - --- milabench/cli.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/milabench/cli.py b/milabench/cli.py index c3c034049..839f85d80 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -802,6 +802,7 @@ def filter(folder): if filter(folder): runs.append(os.path.join(runfolder, folder)) + print(runs) report = _short_make_report(runs, config) post_comment_on_pr( @@ -824,6 +825,7 @@ def _short_make_report(runs, config): stream = io.StringIO() + assert len(summary) > 0 make_report( summary, weights=config, From aee600507a18be6453c20f2193465d3b63d91dc2 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 16:08:33 -0400 Subject: [PATCH 32/36] - --- milabench/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/milabench/cli.py b/milabench/cli.py index 839f85d80..31fc0e2c2 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -818,6 +818,7 @@ def _short_make_report(runs, config): if runs: reports = _read_reports(*runs) + print(reports) summary = make_summary(reports.values()) if config: @@ -825,7 +826,6 @@ def _short_make_report(runs, config): stream = io.StringIO() - assert len(summary) > 0 make_report( summary, weights=config, From 7468ba271da4236641b1d8c1ad004ed666caab3f Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 16:22:18 -0400 Subject: [PATCH 33/36] - --- milabench/cli.py | 4 +--- milabench/report.py | 10 +++++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/milabench/cli.py b/milabench/cli.py index 31fc0e2c2..08294852b 100644 --- a/milabench/cli.py +++ b/milabench/cli.py @@ -801,8 +801,7 @@ def filter(folder): for folder in os.listdir(runfolder): if filter(folder): runs.append(os.path.join(runfolder, folder)) - - print(runs) + report = _short_make_report(runs, config) post_comment_on_pr( @@ -818,7 +817,6 @@ def _short_make_report(runs, config): if runs: reports = _read_reports(*runs) - print(reports) summary = make_summary(reports.values()) if config: diff --git a/milabench/report.py b/milabench/report.py index 8427e9bdf..76e22110e 100644 --- a/milabench/report.py +++ b/milabench/report.py @@ -41,9 +41,13 @@ def _make_row(summary, compare, weights): # Sum of all the GPU performance # to get the overall perf of the whole machine - acc = 0 - for _, metrics in summary["per_gpu"].items(): - acc += metrics[metric] + + if "per_gpu" in summary: + 
acc = 0 + for _, metrics in summary["per_gpu"].items(): + acc += metrics[metric] + else: + acc = row["perf"] success_ratio = 1 - row["fail"] / row["n"] score = (acc if acc > 0 else row["perf"]) * success_ratio From 6a634f40a64a2f5f3e71d2dcbe0049659a2c1780 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Fri, 20 Oct 2023 16:39:23 -0400 Subject: [PATCH 34/36] - --- milabench/report.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/milabench/report.py b/milabench/report.py index 76e22110e..f8de43abf 100644 --- a/milabench/report.py +++ b/milabench/report.py @@ -53,7 +53,7 @@ def _make_row(summary, compare, weights): score = (acc if acc > 0 else row["perf"]) * success_ratio row["score"] = score - row["weight"] = weights.get("weight", summary["weight"]) + row["weight"] = weights.get("weight", summary.get("weight", 0)) # ---- return row From c9fa3ebe8e465771a29e9158d1021cc014549148 Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Mon, 23 Oct 2023 12:03:27 -0400 Subject: [PATCH 35/36] - --- .pin/constraints-cpu-torch.txt | 385 ------------------ .../accelerate_opt/requirements.cpu.txt | 335 --------------- benchmarks/dlrm/requirements.cpu.txt | 320 --------------- benchmarks/huggingface/requirements.cpu.txt | 179 -------- benchmarks/rwkv/requirements.cpu.txt | 240 ----------- benchmarks/stargan/requirements.cpu.txt | 151 ------- benchmarks/super-slomo/requirements.cpu.txt | 156 ------- benchmarks/timm/requirements.cpu.txt | 168 -------- benchmarks/torchvision/requirements.cpu.txt | 153 ------- milabench/scripts/milabench_run.bash | 11 +- 10 files changed, 4 insertions(+), 2094 deletions(-) delete mode 100644 .pin/constraints-cpu-torch.txt delete mode 100644 benchmarks/accelerate_opt/requirements.cpu.txt delete mode 100644 benchmarks/dlrm/requirements.cpu.txt delete mode 100644 benchmarks/huggingface/requirements.cpu.txt delete mode 100644 benchmarks/rwkv/requirements.cpu.txt delete mode 100644 benchmarks/stargan/requirements.cpu.txt delete mode 100644 benchmarks/super-slomo/requirements.cpu.txt delete mode 100644 benchmarks/timm/requirements.cpu.txt delete mode 100644 benchmarks/torchvision/requirements.cpu.txt diff --git a/.pin/constraints-cpu-torch.txt b/.pin/constraints-cpu-torch.txt deleted file mode 100644 index 9ad402d9e..000000000 --- a/.pin/constraints-cpu-torch.txt +++ /dev/null @@ -1,385 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --config=pyproject.toml --output-file=.pin/constraints-cpu-torch.txt --resolver=backtracking .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/dlrm/requirements.in benchmarks/huggingface/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in -# ---extra-index-url https://download.pytorch.org/whl/cu118 - -absl-py==2.0.0 - # via tensorboard -accelerate==0.23.0 - # via -r benchmarks/accelerate_opt/requirements.in -aiohttp==3.8.6 - # via - # datasets - # fsspec -aiosignal==1.3.1 - # via aiohttp -annotated-types==0.6.0 - # via pydantic -antlr4-python3-runtime==4.9.3 - # via omegaconf -asttokens==2.4.0 - # via giving -async-timeout==4.0.3 - # via aiohttp -attrs==23.1.0 - # via aiohttp -cachetools==5.3.1 - # via google-auth -certifi==2023.7.22 - # via requests -charset-normalizer==3.3.0 - # via - # aiohttp - # requests -codefind==0.1.3 - # via ptera -datasets==2.14.5 - # via - # -r 
benchmarks/accelerate_opt/requirements.in - # evaluate -deepspeed==0.8.3 - # via - # -r benchmarks/accelerate_opt/requirements.in - # -r benchmarks/rwkv/requirements.in -dill==0.3.7 - # via - # datasets - # evaluate - # multiprocess -docker==6.1.3 - # via torchx -docstring-parser==0.8.1 - # via torchx -evaluate==0.4.1 - # via -r benchmarks/accelerate_opt/requirements.in -executing==1.2.0 - # via varname -fbgemm-gpu==0.5.0+cu118 - # via torchrec -filelock==3.12.4 - # via - # huggingface-hub - # torch - # torchx - # transformers - # triton -frozenlist==1.4.0 - # via - # aiohttp - # aiosignal -fsspec[http]==2023.6.0 - # via - # datasets - # evaluate - # huggingface-hub - # pytorch-lightning - # torch - # torchx -future==0.18.3 - # via -r benchmarks/dlrm/requirements.in -giving==0.4.2 - # via - # ptera - # voir -google-auth==2.23.3 - # via - # google-auth-oauthlib - # tensorboard -google-auth-oauthlib==1.0.0 - # via tensorboard -graphviz==0.20.1 - # via torchviz -grpcio==1.59.0 - # via tensorboard -hjson==3.1.0 - # via deepspeed -huggingface-hub==0.17.3 - # via - # -r benchmarks/timm/requirements.in - # accelerate - # datasets - # evaluate - # tokenizers - # transformers -idna==3.4 - # via - # requests - # yarl -importlib-metadata==6.8.0 - # via - # markdown - # torchx -jinja2==3.1.2 - # via torch -joblib==1.3.2 - # via scikit-learn -lightning-utilities==0.9.0 - # via - # pytorch-lightning - # torchmetrics -markdown==3.5 - # via tensorboard -markdown-it-py==3.0.0 - # via rich -markupsafe==2.1.3 - # via - # jinja2 - # werkzeug -mdurl==0.1.2 - # via markdown-it-py -mpmath==1.3.0 - # via sympy -multidict==6.0.4 - # via - # aiohttp - # yarl -multiprocess==0.70.15 - # via - # datasets - # evaluate -mypy-extensions==1.0.0 - # via typing-inspect -networkx==3.1 - # via torch -ninja==1.11.1.1 - # via - # -r benchmarks/rwkv/requirements.in - # deepspeed -numpy==1.26.1 - # via - # -r benchmarks/rwkv/requirements.in - # -r benchmarks/stargan/requirements.in - # accelerate - # datasets - # deepspeed - # evaluate - # onnx - # opencv-python - # pandas - # pyarrow - # pytorch-lightning - # scikit-learn - # scipy - # tensorboard - # torchmetrics - # torchvision - # transformers -oauthlib==3.2.2 - # via requests-oauthlib -omegaconf==2.3.0 - # via voir -onnx==1.14.1 - # via -r benchmarks/dlrm/requirements.in -opencv-python==4.8.1.78 - # via -r benchmarks/super-slomo/requirements.in -ovld==0.3.2 - # via voir -packaging==23.2 - # via - # accelerate - # datasets - # deepspeed - # docker - # evaluate - # huggingface-hub - # lightning-utilities - # pytorch-lightning - # torchmetrics - # transformers -pandas==2.1.1 - # via - # datasets - # evaluate -pillow==10.1.0 - # via torchvision -protobuf==4.24.4 - # via - # onnx - # tensorboard -psutil==5.9.6 - # via - # accelerate - # deepspeed -ptera==1.4.1 - # via voir -py-cpuinfo==9.0.0 - # via deepspeed -pyarrow==13.0.0 - # via datasets -pyasn1==0.5.0 - # via - # pyasn1-modules - # rsa -pyasn1-modules==0.3.0 - # via google-auth -pydantic==2.4.2 - # via deepspeed -pydantic-core==2.10.1 - # via pydantic -pydot==1.4.2 - # via -r benchmarks/dlrm/requirements.in -pygments==2.16.1 - # via rich -pynvml==11.5.0 - # via voir -pyparsing==3.1.1 - # via pydot -pyre-extensions==0.0.30 - # via torchx -python-dateutil==2.8.2 - # via pandas -pytorch-lightning==1.9.5 - # via -r benchmarks/rwkv/requirements.in -pytz==2023.3.post1 - # via pandas -pyyaml==6.0.1 - # via - # -r benchmarks/timm/requirements.in - # accelerate - # datasets - # huggingface-hub - # omegaconf - # pytorch-lightning - # 
torchx - # transformers -reactivex==4.0.4 - # via giving -regex==2023.10.3 - # via transformers -requests==2.31.0 - # via - # datasets - # docker - # evaluate - # fsspec - # huggingface-hub - # requests-oauthlib - # responses - # tensorboard - # torchvision - # transformers -requests-oauthlib==1.3.1 - # via google-auth-oauthlib -responses==0.18.0 - # via evaluate -rich==13.6.0 - # via - # -r benchmarks/accelerate_opt/requirements.in - # voir -rsa==4.9 - # via google-auth -safetensors==0.4.0 - # via - # -r benchmarks/timm/requirements.in - # transformers -scikit-learn==1.3.1 - # via -r benchmarks/dlrm/requirements.in -scipy==1.11.3 - # via scikit-learn -six==1.16.0 - # via - # asttokens - # python-dateutil - # tensorboard -sympy==1.12 - # via torch -tabulate==0.9.0 - # via torchx -tensorboard==2.14.1 - # via -r benchmarks/dlrm/requirements.in -tensorboard-data-server==0.7.1 - # via tensorboard -threadpoolctl==3.2.0 - # via scikit-learn -tokenizers==0.14.1 - # via transformers -torch==2.1.0+cu118 - # via - # -r benchmarks/accelerate_opt/requirements.in - # -r benchmarks/stargan/requirements.in - # accelerate - # deepspeed - # pytorch-lightning - # torchaudio - # torchmetrics - # torchvision - # torchviz -torchaudio==2.1.0+cu118 - # via -r benchmarks/accelerate_opt/requirements.in -torchmetrics==1.0.3 - # via - # pytorch-lightning - # torchrec -torchrec==0.5.0+cu118 - # via -r benchmarks/dlrm/requirements.in -torchvision==0.16.0+cu118 - # via - # -r benchmarks/accelerate_opt/requirements.in - # -r benchmarks/stargan/requirements.in -torchviz==0.0.2 - # via -r benchmarks/dlrm/requirements.in -torchx==0.5.0 - # via -r benchmarks/dlrm/requirements.in -tqdm==4.66.1 - # via - # -r benchmarks/dlrm/requirements.in - # -r benchmarks/super-slomo/requirements.in - # datasets - # deepspeed - # evaluate - # huggingface-hub - # pytorch-lightning - # torchrec - # transformers -transformers==4.34.0 - # via - # -r benchmarks/accelerate_opt/requirements.in - # -r benchmarks/huggingface/requirements.in -triton==2.1.0 - # via torch -typing-extensions==4.8.0 - # via - # huggingface-hub - # lightning-utilities - # onnx - # pydantic - # pydantic-core - # pyre-extensions - # pytorch-lightning - # reactivex - # torch - # typing-inspect -typing-inspect==0.9.0 - # via pyre-extensions -tzdata==2023.3 - # via pandas -urllib3==1.26.17 - # via - # docker - # requests - # responses - # torchx -varname==0.10.0 - # via giving -voir==0.2.10 - # via - # -r benchmarks/accelerate_opt/requirements.in - # -r benchmarks/stargan/requirements.in -websocket-client==1.6.4 - # via docker -werkzeug==3.0.0 - # via tensorboard -xxhash==3.4.1 - # via - # datasets - # evaluate -yarl==1.9.2 - # via aiohttp -zipp==3.17.0 - # via importlib-metadata - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/benchmarks/accelerate_opt/requirements.cpu.txt b/benchmarks/accelerate_opt/requirements.cpu.txt deleted file mode 100644 index 82a92da3c..000000000 --- a/benchmarks/accelerate_opt/requirements.cpu.txt +++ /dev/null @@ -1,335 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --config=pyproject.toml --output-file=benchmarks/accelerate_opt/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-opt.txt benchmarks/accelerate_opt/requirements.in -# ---extra-index-url https://download.pytorch.org/whl/cu118 - -accelerate==0.23.0 - # via -r benchmarks/accelerate_opt/requirements.in -aiohttp==3.8.6 - # via - # -c 
.pin/../.pin/constraints-cpu-torch.txt - # datasets - # fsspec -aiosignal==1.3.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp -annotated-types==0.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pydantic -antlr4-python3-runtime==4.9.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf -asttokens==2.4.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -async-timeout==4.0.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp -attrs==23.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -charset-normalizer==3.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp - # requests -codefind==0.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera -datasets==2.14.5 - # via - # -r benchmarks/accelerate_opt/requirements.in - # evaluate -deepspeed==0.8.3 - # via -r benchmarks/accelerate_opt/requirements.in -dill==0.3.7 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # datasets - # evaluate - # multiprocess -evaluate==0.4.1 - # via -r benchmarks/accelerate_opt/requirements.in -executing==1.2.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # varname -filelock==3.12.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # torch - # transformers - # triton -frozenlist==1.4.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp - # aiosignal -fsspec[http]==2023.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # datasets - # evaluate - # huggingface-hub - # torch -giving==0.4.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera - # voir -hjson==3.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # deepspeed -huggingface-hub==0.17.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # accelerate - # datasets - # evaluate - # tokenizers - # transformers -idna==3.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests - # yarl -jinja2==3.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -markdown-it-py==3.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -markupsafe==2.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # jinja2 -mdurl==0.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # markdown-it-py -mpmath==1.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # sympy -multidict==6.0.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp - # yarl -multiprocess==0.70.15 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # datasets - # evaluate -networkx==3.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -ninja==1.11.1.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # deepspeed -numpy==1.26.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # accelerate - # datasets - # deepspeed - # evaluate - # pandas - # pyarrow - # torchvision - # transformers -omegaconf==2.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -ovld==0.3.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -packaging==23.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # accelerate - # datasets - # deepspeed - # evaluate - # huggingface-hub - # transformers -pandas==2.1.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # datasets - # evaluate -pillow==10.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchvision -psutil==5.9.6 - # via - # -c 
.pin/../.pin/constraints-cpu-torch.txt - # accelerate - # deepspeed -ptera==1.4.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -py-cpuinfo==9.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # deepspeed -pyarrow==13.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # datasets -pydantic==2.4.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # deepspeed -pydantic-core==2.10.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pydantic -pygments==2.16.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -pynvml==11.5.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -python-dateutil==2.8.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pandas -pytz==2023.3.post1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pandas -pyyaml==6.0.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # accelerate - # datasets - # huggingface-hub - # omegaconf - # transformers -reactivex==4.0.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -regex==2023.10.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # transformers -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # datasets - # evaluate - # fsspec - # huggingface-hub - # responses - # torchvision - # transformers -responses==0.18.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # evaluate -rich==13.6.0 - # via - # -r benchmarks/accelerate_opt/requirements.in - # voir -safetensors==0.4.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # transformers -six==1.16.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # asttokens - # python-dateutil -sympy==1.12 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -tokenizers==0.14.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # transformers -torch==2.1.0+cu118 - # via - # -r benchmarks/accelerate_opt/requirements.in - # accelerate - # deepspeed - # torchaudio - # torchvision -torchaudio==2.1.0+cu118 - # via -r benchmarks/accelerate_opt/requirements.in -torchvision==0.16.0+cu118 - # via -r benchmarks/accelerate_opt/requirements.in -tqdm==4.66.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # datasets - # deepspeed - # evaluate - # huggingface-hub - # transformers -transformers==4.34.0 - # via -r benchmarks/accelerate_opt/requirements.in -triton==2.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -typing-extensions==4.8.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # pydantic - # pydantic-core - # reactivex - # torch -tzdata==2023.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pandas -urllib3==1.26.17 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests - # responses -varname==0.10.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -voir @ git+https://github.com/breuleux/voir.git - # via -r benchmarks/accelerate_opt/requirements.in -xxhash==3.4.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # datasets - # evaluate -yarl==1.9.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp diff --git a/benchmarks/dlrm/requirements.cpu.txt b/benchmarks/dlrm/requirements.cpu.txt deleted file mode 100644 index 9e89dd49f..000000000 --- a/benchmarks/dlrm/requirements.cpu.txt +++ /dev/null @@ -1,320 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --config=pyproject.toml --output-file=benchmarks/dlrm/requirements.cpu.txt 
--resolver=backtracking .pin/tmp-constraints-cpu-dlrm.txt benchmarks/dlrm/requirements.in -# ---extra-index-url https://download.pytorch.org/whl/cu118 - -absl-py==2.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # tensorboard -antlr4-python3-runtime==4.9.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf -asttokens==2.4.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -cachetools==5.3.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # google-auth -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -charset-normalizer==3.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -codefind==0.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera -docker==6.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchx -docstring-parser==0.8.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchx -executing==1.2.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # varname -fbgemm-gpu==0.5.0+cu118 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchrec -filelock==3.12.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch - # torchx - # triton -fsspec==2023.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch - # torchx -future==0.18.3 - # via -r benchmarks/dlrm/requirements.in -giving==0.4.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera - # voir -google-auth==2.23.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # google-auth-oauthlib - # tensorboard -google-auth-oauthlib==1.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # tensorboard -graphviz==0.20.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchviz -grpcio==1.59.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # tensorboard -idna==3.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -importlib-metadata==6.8.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # markdown - # torchx -jinja2==3.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -joblib==1.3.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # scikit-learn -lightning-utilities==0.9.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchmetrics -markdown==3.5 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # tensorboard -markdown-it-py==3.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -markupsafe==2.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # jinja2 - # werkzeug -mdurl==0.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # markdown-it-py -mpmath==1.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # sympy -mypy-extensions==1.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # typing-inspect -networkx==3.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -numpy==1.26.1 - # via - # -r benchmarks/dlrm/requirements.in - # onnx - # scikit-learn - # scipy - # tensorboard - # torchmetrics -oauthlib==3.2.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests-oauthlib -omegaconf==2.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -onnx==1.14.1 - # via -r benchmarks/dlrm/requirements.in -ovld==0.3.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -packaging==23.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # docker - # lightning-utilities - # torchmetrics -protobuf==4.24.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # onnx - # 
tensorboard -ptera==1.4.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pyasn1==0.5.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pyasn1-modules - # rsa -pyasn1-modules==0.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # google-auth -pydot==1.4.2 - # via -r benchmarks/dlrm/requirements.in -pygments==2.16.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -pynvml==11.5.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pyparsing==3.1.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pydot -pyre-extensions==0.0.30 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchx -pyyaml==6.0.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf - # torchx -reactivex==4.0.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # docker - # requests-oauthlib - # tensorboard -requests-oauthlib==1.3.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # google-auth-oauthlib -rich==13.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -rsa==4.9 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # google-auth -scikit-learn==1.3.1 - # via -r benchmarks/dlrm/requirements.in -scipy==1.11.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # scikit-learn -six==1.16.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # asttokens - # tensorboard -sympy==1.12 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -tabulate==0.9.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchx -tensorboard==2.14.1 - # via -r benchmarks/dlrm/requirements.in -tensorboard-data-server==0.7.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # tensorboard -threadpoolctl==3.2.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # scikit-learn -torch==2.1.0+cu118 - # via - # -r benchmarks/dlrm/requirements.in - # torchmetrics - # torchviz -torchmetrics==1.0.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchrec -torchrec==0.5.0+cu118 - # via -r benchmarks/dlrm/requirements.in -torchviz==0.0.2 - # via -r benchmarks/dlrm/requirements.in -torchx==0.5.0 - # via -r benchmarks/dlrm/requirements.in -tqdm==4.66.1 - # via - # -r benchmarks/dlrm/requirements.in - # torchrec -triton==2.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -typing-extensions==4.8.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # lightning-utilities - # onnx - # pyre-extensions - # reactivex - # torch - # typing-inspect -typing-inspect==0.9.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pyre-extensions -urllib3==1.26.17 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # docker - # requests - # torchx -varname==0.10.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -voir @ git+https://github.com/breuleux/voir.git - # via -r benchmarks/dlrm/requirements.in -websocket-client==1.6.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # docker -werkzeug==3.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # tensorboard -zipp==3.17.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # importlib-metadata - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/benchmarks/huggingface/requirements.cpu.txt b/benchmarks/huggingface/requirements.cpu.txt deleted file mode 100644 index 05f35fc68..000000000 --- a/benchmarks/huggingface/requirements.cpu.txt +++ /dev/null @@ 
-1,179 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --config=pyproject.toml --output-file=benchmarks/huggingface/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-hf.txt benchmarks/huggingface/requirements.in -# ---extra-index-url https://download.pytorch.org/whl/cu118 - -antlr4-python3-runtime==4.9.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf -asttokens==2.4.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -charset-normalizer==3.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -codefind==0.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera -executing==1.2.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # varname -filelock==3.12.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # torch - # transformers - # triton -fsspec==2023.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # torch -giving==0.4.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera - # voir -huggingface-hub==0.17.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # tokenizers - # transformers -idna==3.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -jinja2==3.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -markdown-it-py==3.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -markupsafe==2.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # jinja2 -mdurl==0.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # markdown-it-py -mpmath==1.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # sympy -networkx==3.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -numpy==1.26.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # transformers -omegaconf==2.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -ovld==0.3.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -packaging==23.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # transformers -ptera==1.4.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pygments==2.16.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -pynvml==11.5.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pyyaml==6.0.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # omegaconf - # transformers -reactivex==4.0.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -regex==2023.10.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # transformers -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # transformers -rich==13.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -safetensors==0.4.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # transformers -six==1.16.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # asttokens -sympy==1.12 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -tokenizers==0.14.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # transformers -torch==2.1.0+cu118 - # via -r benchmarks/huggingface/requirements.in -tqdm==4.66.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # transformers -transformers==4.34.0 - # via -r benchmarks/huggingface/requirements.in 
-triton==2.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -typing-extensions==4.8.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # reactivex - # torch -urllib3==1.26.17 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -varname==0.10.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -voir @ git+https://github.com/breuleux/voir.git - # via -r benchmarks/huggingface/requirements.in diff --git a/benchmarks/rwkv/requirements.cpu.txt b/benchmarks/rwkv/requirements.cpu.txt deleted file mode 100644 index 15474be62..000000000 --- a/benchmarks/rwkv/requirements.cpu.txt +++ /dev/null @@ -1,240 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --config=pyproject.toml --output-file=benchmarks/rwkv/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-rwkv.txt benchmarks/rwkv/requirements.in -# ---extra-index-url https://download.pytorch.org/whl/cu118 - -aiohttp==3.8.6 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # fsspec -aiosignal==1.3.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp -annotated-types==0.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pydantic -antlr4-python3-runtime==4.9.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf -asttokens==2.4.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -async-timeout==4.0.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp -attrs==23.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -charset-normalizer==3.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp - # requests -codefind==0.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera -deepspeed==0.8.3 - # via -r benchmarks/rwkv/requirements.in -executing==1.2.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # varname -filelock==3.12.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch - # triton -frozenlist==1.4.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp - # aiosignal -fsspec[http]==2023.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pytorch-lightning - # torch -giving==0.4.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera - # voir -hjson==3.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # deepspeed -idna==3.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests - # yarl -jinja2==3.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -lightning-utilities==0.9.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pytorch-lightning - # torchmetrics -markdown-it-py==3.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -markupsafe==2.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # jinja2 -mdurl==0.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # markdown-it-py -mpmath==1.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # sympy -multidict==6.0.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp - # yarl -networkx==3.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -ninja==1.11.1.1 - # via - # -r benchmarks/rwkv/requirements.in - # deepspeed -numpy==1.26.1 - # via - # -r benchmarks/rwkv/requirements.in - # deepspeed - # pytorch-lightning - # torchmetrics -omegaconf==2.3.0 - # via - # -c 
.pin/../.pin/constraints-cpu-torch.txt - # voir -ovld==0.3.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -packaging==23.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # deepspeed - # lightning-utilities - # pytorch-lightning - # torchmetrics -psutil==5.9.6 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # deepspeed -ptera==1.4.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -py-cpuinfo==9.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # deepspeed -pydantic==2.4.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # deepspeed -pydantic-core==2.10.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pydantic -pygments==2.16.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -pynvml==11.5.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pytorch-lightning==1.9.5 - # via -r benchmarks/rwkv/requirements.in -pyyaml==6.0.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf - # pytorch-lightning -reactivex==4.0.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # fsspec -rich==13.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -six==1.16.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # asttokens -sympy==1.12 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -torch==2.1.0+cu118 - # via - # -r benchmarks/rwkv/requirements.in - # deepspeed - # pytorch-lightning - # torchmetrics -torchmetrics==1.0.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # pytorch-lightning -tqdm==4.66.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # deepspeed - # pytorch-lightning -triton==2.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -typing-extensions==4.8.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # lightning-utilities - # pydantic - # pydantic-core - # pytorch-lightning - # reactivex - # torch -urllib3==1.26.17 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -varname==0.10.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -voir @ git+https://github.com/breuleux/voir.git - # via -r benchmarks/rwkv/requirements.in -yarl==1.9.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # aiohttp diff --git a/benchmarks/stargan/requirements.cpu.txt b/benchmarks/stargan/requirements.cpu.txt deleted file mode 100644 index 2837a0e76..000000000 --- a/benchmarks/stargan/requirements.cpu.txt +++ /dev/null @@ -1,151 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --config=pyproject.toml --output-file=benchmarks/stargan/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-stargan.txt benchmarks/stargan/requirements.in -# ---extra-index-url https://download.pytorch.org/whl/cu118 - -antlr4-python3-runtime==4.9.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf -asttokens==2.4.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -charset-normalizer==3.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -codefind==0.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera -executing==1.2.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # varname -filelock==3.12.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch - # triton 
-fsspec==2023.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -giving==0.4.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera - # voir -idna==3.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -jinja2==3.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -markdown-it-py==3.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -markupsafe==2.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # jinja2 -mdurl==0.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # markdown-it-py -mpmath==1.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # sympy -networkx==3.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -numpy==1.26.1 - # via - # -r benchmarks/stargan/requirements.in - # torchvision -omegaconf==2.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -ovld==0.3.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pillow==10.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchvision -ptera==1.4.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pygments==2.16.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -pynvml==11.5.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pyyaml==6.0.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf -reactivex==4.0.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchvision -rich==13.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -six==1.16.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # asttokens -sympy==1.12 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -torch==2.1.0+cu118 - # via - # -r benchmarks/stargan/requirements.in - # torchvision -torchvision==0.16.0+cu118 - # via -r benchmarks/stargan/requirements.in -triton==2.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -typing-extensions==4.8.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # reactivex - # torch -urllib3==1.26.17 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -varname==0.10.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -voir @ git+https://github.com/breuleux/voir.git - # via -r benchmarks/stargan/requirements.in diff --git a/benchmarks/super-slomo/requirements.cpu.txt b/benchmarks/super-slomo/requirements.cpu.txt deleted file mode 100644 index b2fd20ce5..000000000 --- a/benchmarks/super-slomo/requirements.cpu.txt +++ /dev/null @@ -1,156 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --config=pyproject.toml --output-file=benchmarks/super-slomo/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-super-slomo.txt benchmarks/super-slomo/requirements.in -# ---extra-index-url https://download.pytorch.org/whl/cu118 - -antlr4-python3-runtime==4.9.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf -asttokens==2.4.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -charset-normalizer==3.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -codefind==0.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera -executing==1.2.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # varname -filelock==3.12.4 - # via - # -c 
.pin/../.pin/constraints-cpu-torch.txt - # torch - # triton -fsspec==2023.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -giving==0.4.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera - # voir -idna==3.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -jinja2==3.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -markdown-it-py==3.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -markupsafe==2.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # jinja2 -mdurl==0.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # markdown-it-py -mpmath==1.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # sympy -networkx==3.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -numpy==1.26.1 - # via - # -r benchmarks/super-slomo/requirements.in - # opencv-python - # torchvision -omegaconf==2.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -opencv-python==4.8.1.78 - # via -r benchmarks/super-slomo/requirements.in -ovld==0.3.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pillow==10.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchvision -ptera==1.4.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pygments==2.16.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -pynvml==11.5.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pyyaml==6.0.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf -reactivex==4.0.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchvision -rich==13.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -six==1.16.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # asttokens -sympy==1.12 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -torch==2.1.0+cu118 - # via - # -r benchmarks/super-slomo/requirements.in - # torchvision -torchvision==0.16.0+cu118 - # via -r benchmarks/super-slomo/requirements.in -tqdm==4.66.1 - # via -r benchmarks/super-slomo/requirements.in -triton==2.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -typing-extensions==4.8.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # reactivex - # torch -urllib3==1.26.17 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -varname==0.10.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -voir @ git+https://github.com/breuleux/voir.git - # via -r benchmarks/super-slomo/requirements.in diff --git a/benchmarks/timm/requirements.cpu.txt b/benchmarks/timm/requirements.cpu.txt deleted file mode 100644 index 7a76f3287..000000000 --- a/benchmarks/timm/requirements.cpu.txt +++ /dev/null @@ -1,168 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --config=pyproject.toml --output-file=benchmarks/timm/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-timm.txt benchmarks/timm/requirements.in -# ---extra-index-url https://download.pytorch.org/whl/cu118 - -antlr4-python3-runtime==4.9.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf -asttokens==2.4.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -charset-normalizer==3.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests 
-codefind==0.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera -executing==1.2.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # varname -filelock==3.12.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # torch - # triton -fsspec==2023.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # torch -giving==0.4.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera - # voir -huggingface-hub==0.17.3 - # via -r benchmarks/timm/requirements.in -idna==3.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -jinja2==3.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -markdown-it-py==3.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -markupsafe==2.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # jinja2 -mdurl==0.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # markdown-it-py -mpmath==1.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # sympy -networkx==3.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -numpy==1.26.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchvision -omegaconf==2.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -ovld==0.3.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -packaging==23.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub -pillow==10.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchvision -ptera==1.4.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pygments==2.16.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -pynvml==11.5.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pyyaml==6.0.1 - # via - # -r benchmarks/timm/requirements.in - # huggingface-hub - # omegaconf -reactivex==4.0.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # torchvision -rich==13.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -safetensors==0.4.0 - # via -r benchmarks/timm/requirements.in -six==1.16.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # asttokens -sympy==1.12 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -torch==2.1.0+cu118 - # via - # -r benchmarks/timm/requirements.in - # torchvision -torchvision==0.16.0+cu118 - # via -r benchmarks/timm/requirements.in -tqdm==4.66.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub -triton==2.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -typing-extensions==4.8.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # huggingface-hub - # reactivex - # torch -urllib3==1.26.17 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -varname==0.10.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -voir @ git+https://github.com/breuleux/voir.git - # via -r benchmarks/timm/requirements.in diff --git a/benchmarks/torchvision/requirements.cpu.txt b/benchmarks/torchvision/requirements.cpu.txt deleted file mode 100644 index c0718b1eb..000000000 --- a/benchmarks/torchvision/requirements.cpu.txt +++ /dev/null @@ -1,153 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --config=pyproject.toml --output-file=benchmarks/torchvision/requirements.cpu.txt --resolver=backtracking .pin/tmp-constraints-cpu-torchvision.txt 
benchmarks/torchvision/requirements.in -# ---extra-index-url https://download.pytorch.org/whl/cu118 - -antlr4-python3-runtime==4.9.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf -asttokens==2.4.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -charset-normalizer==3.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -codefind==0.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera -executing==1.2.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # varname -filelock==3.12.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch - # triton -fsspec==2023.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -giving==0.4.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # ptera - # voir -idna==3.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -jinja2==3.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -markdown-it-py==3.0.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -markupsafe==2.1.3 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # jinja2 -mdurl==0.1.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # markdown-it-py -mpmath==1.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # sympy -networkx==3.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -numpy==1.26.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchvision -omegaconf==2.3.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -ovld==0.3.2 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pillow==10.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchvision -ptera==1.4.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pygments==2.16.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # rich -pynvml==11.5.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -pyyaml==6.0.1 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # omegaconf -reactivex==4.0.4 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torchvision -rich==13.6.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # voir -six==1.16.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # asttokens -sympy==1.12 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -torch==2.1.0+cu118 - # via - # -r benchmarks/torchvision/requirements.in - # torchvision -torchvision==0.16.0+cu118 - # via -r benchmarks/torchvision/requirements.in -tqdm==4.66.1 - # via -r benchmarks/torchvision/requirements.in -triton==2.1.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # torch -typing-extensions==4.8.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # reactivex - # torch -urllib3==1.26.17 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # requests -varname==0.10.0 - # via - # -c .pin/../.pin/constraints-cpu-torch.txt - # giving -voir @ git+https://github.com/breuleux/voir.git - # via -r benchmarks/torchvision/requirements.in diff --git a/milabench/scripts/milabench_run.bash b/milabench/scripts/milabench_run.bash index bddd827f8..cf502cbf4 100755 --- a/milabench/scripts/milabench_run.bash +++ b/milabench/scripts/milabench_run.bash @@ -68,6 +68,10 @@ CONDA_EXEC="$(which conda)" CONDA_BASE=$(dirname $CONDA_EXEC) source $CONDA_BASE/../etc/profile.d/conda.sh +if [ -e 
$HOME/.credentials.env ]; then + source $HOME/.credentials.env +fi + cd $SLURM_TMPDIR # # Create a new environment @@ -125,13 +129,6 @@ echo "" echo "Report" echo "------" -# json -# milabench summary $SLURM_TMPDIR/base/runs/ - - -ls $BASE/runs - - milabench write_report_to_pr --remote $ORIGIN --branch $BRANCH --config $CONFIG echo "----" From 310fe4264ef12c530ac274d29421b09f32c41ebc Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Mon, 23 Oct 2023 13:18:17 -0400 Subject: [PATCH 36/36] Update deepspeed --- .pin/constraints-cuda-torch.txt | 32 ++-- .pin/constraints-rocm-torch.txt | 49 +++--- .../accelerate_opt/requirements.cuda.txt | 10 +- .../accelerate_opt/requirements.rocm.txt | 31 ++-- benchmarks/dlrm/requirements.cuda.txt | 12 +- benchmarks/dlrm/requirements.rocm.txt | 27 ++-- benchmarks/huggingface/requirements.cuda.txt | 6 +- benchmarks/huggingface/requirements.rocm.txt | 16 +- benchmarks/rwkv/requirements.cuda.txt | 8 +- benchmarks/rwkv/requirements.rocm.txt | 29 ++-- benchmarks/stargan/requirements.cuda.txt | 6 +- benchmarks/stargan/requirements.rocm.txt | 14 +- benchmarks/super-slomo/requirements.cuda.txt | 6 +- benchmarks/super-slomo/requirements.rocm.txt | 14 +- benchmarks/timm/requirements.cuda.txt | 6 +- benchmarks/timm/requirements.rocm.txt | 14 +- benchmarks/torchvision/requirements.cuda.txt | 6 +- benchmarks/torchvision/requirements.rocm.txt | 14 +- constraints/cuda.txt | 3 +- constraints/rocm.txt | 3 +- milabench/scripts/milabench_pin.bash | 4 +- poetry.lock | 144 +++++------------- 22 files changed, 180 insertions(+), 274 deletions(-) diff --git a/.pin/constraints-cuda-torch.txt b/.pin/constraints-cuda-torch.txt index 108cc1352..9662ad4d3 100644 --- a/.pin/constraints-cuda-torch.txt +++ b/.pin/constraints-cuda-torch.txt @@ -28,17 +28,17 @@ cachetools==5.3.1 # via google-auth certifi==2023.7.22 # via requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # aiohttp # requests codefind==0.1.3 # via ptera -datasets==2.14.5 +datasets==2.14.6 # via # -r benchmarks/accelerate_opt/requirements.in # evaluate -deepspeed==0.8.3 +deepspeed==0.11.1 # via # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/rwkv/requirements.in @@ -49,7 +49,7 @@ dill==0.3.7 # multiprocess docker==6.1.3 # via torchx -docstring-parser==0.8.1 +docstring-parser==0.15 # via torchx evaluate==0.4.1 # via -r benchmarks/accelerate_opt/requirements.in @@ -68,7 +68,7 @@ frozenlist==1.4.0 # via # aiohttp # aiosignal -fsspec[http]==2023.6.0 +fsspec[http]==2023.1.0 # via # datasets # evaluate @@ -146,8 +146,8 @@ ninja==1.11.1.1 # deepspeed numpy==1.26.1 # via - # -r benchmarks/dlrm/requirements.in - # -r benchmarks/rwkv/requirements.in + # -r benchmarks/stargan/requirements.in + # -r benchmarks/super-slomo/requirements.in # accelerate # datasets # deepspeed @@ -271,7 +271,7 @@ safetensors==0.4.0 # via # -r benchmarks/timm/requirements.in # transformers -scikit-learn==1.3.1 +scikit-learn==1.3.2 # via -r benchmarks/dlrm/requirements.in scipy==1.11.3 # via scikit-learn @@ -294,8 +294,8 @@ tokenizers==0.14.1 # via transformers torch==2.1.0+cu118 # via - # -r benchmarks/rwkv/requirements.in - # -r benchmarks/timm/requirements.in + # -r benchmarks/huggingface/requirements.in + # -r benchmarks/super-slomo/requirements.in # accelerate # deepspeed # pytorch-lightning @@ -313,11 +313,11 @@ torchrec==0.5.0+cu118 # via -r benchmarks/dlrm/requirements.in torchvision==0.16.0+cu118 # via - # -r benchmarks/accelerate_opt/requirements.in - # -r benchmarks/timm/requirements.in + # -r 
benchmarks/super-slomo/requirements.in + # -r benchmarks/torchvision/requirements.in torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in -torchx==0.5.0 +torchx==0.6.0 # via -r benchmarks/dlrm/requirements.in tqdm==4.66.1 # via @@ -359,10 +359,10 @@ urllib3==1.26.18 # torchx varname==0.10.0 # via giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via - # -r benchmarks/rwkv/requirements.in - # -r benchmarks/timm/requirements.in + # -r benchmarks/huggingface/requirements.in + # -r benchmarks/super-slomo/requirements.in websocket-client==1.6.4 # via docker werkzeug==3.0.0 diff --git a/.pin/constraints-rocm-torch.txt b/.pin/constraints-rocm-torch.txt index 5f1ce62c1..486032fcf 100644 --- a/.pin/constraints-rocm-torch.txt +++ b/.pin/constraints-rocm-torch.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=.pin/constraints-rocm-torch.txt --resolver=backtracking .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/dlrm/requirements.in benchmarks/huggingface/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in @@ -16,8 +16,6 @@ aiohttp==3.8.6 # fsspec aiosignal==1.3.1 # via aiohttp -annotated-types==0.6.0 - # via pydantic antlr4-python3-runtime==4.9.3 # via omegaconf asttokens==2.4.0 @@ -30,7 +28,7 @@ cachetools==5.3.1 # via google-auth certifi==2023.7.22 # via requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # aiohttp # requests @@ -38,11 +36,11 @@ cmake==3.27.7 # via pytorch-triton-rocm codefind==0.1.3 # via ptera -datasets==2.14.5 +datasets==2.14.6 # via # -r benchmarks/accelerate_opt/requirements.in # evaluate -deepspeed==0.8.3 +deepspeed==0.11.1 # via # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/rwkv/requirements.in @@ -53,7 +51,7 @@ dill==0.3.7 # multiprocess docker==6.1.3 # via torchx -docstring-parser==0.8.1 +docstring-parser==0.15 # via torchx evaluate==0.4.1 # via -r benchmarks/accelerate_opt/requirements.in @@ -72,7 +70,7 @@ frozenlist==1.4.0 # via # aiohttp # aiosignal -fsspec[http]==2023.6.0 +fsspec[http]==2023.1.0 # via # datasets # evaluate @@ -90,7 +88,7 @@ google-auth==2.23.3 # via # google-auth-oauthlib # tensorboard -google-auth-oauthlib==1.0.0 +google-auth-oauthlib==1.1.0 # via tensorboard graphviz==0.20.1 # via torchviz @@ -111,9 +109,7 @@ idna==3.4 # requests # yarl importlib-metadata==6.8.0 - # via - # markdown - # torchx + # via torchx jinja2==3.1.2 # via torch joblib==1.3.2 @@ -122,7 +118,7 @@ lightning-utilities==0.9.0 # via # pytorch-lightning # torchmetrics -lit==17.0.2 +lit==17.0.3 # via pytorch-triton-rocm markdown==3.5 # via tensorboard @@ -146,7 +142,7 @@ multiprocess==0.70.15 # evaluate mypy-extensions==1.0.0 # via typing-inspect -networkx==3.1 +networkx==3.2 # via torch ninja==1.11.1.1 # via @@ -154,8 +150,8 @@ ninja==1.11.1.1 # deepspeed numpy==1.26.1 # via - # -r benchmarks/dlrm/requirements.in # -r benchmarks/stargan/requirements.in + # -r benchmarks/super-slomo/requirements.in # accelerate # datasets # deepspeed @@ -200,7 +196,7 @@ pandas==2.1.1 # evaluate pillow==10.1.0 # via torchvision -protobuf==4.24.4 +protobuf==4.23.4 # via # onnx # tensorboard @@ -220,10 +216,10 @@ pyasn1==0.5.0 # rsa pyasn1-modules==0.3.0 # via google-auth -pydantic==2.4.2 - # via 
deepspeed -pydantic-core==2.10.1 - # via pydantic +pydantic==1.10.13 + # via + # -r benchmarks/rwkv/requirements.in + # deepspeed pydot==1.4.2 # via -r benchmarks/dlrm/requirements.in pygments==2.16.1 @@ -282,7 +278,7 @@ safetensors==0.4.0 # via # -r benchmarks/timm/requirements.in # transformers -scikit-learn==1.3.1 +scikit-learn==1.3.2 # via -r benchmarks/dlrm/requirements.in scipy==1.11.3 # via scikit-learn @@ -295,7 +291,7 @@ sympy==1.12 # via torch tabulate==0.9.0 # via torchx -tensorboard==2.14.1 +tensorboard==2.15.0 # via -r benchmarks/dlrm/requirements.in tensorboard-data-server==0.7.1 # via tensorboard @@ -329,11 +325,11 @@ torchvision==0.16.0+rocm5.6 # -r benchmarks/torchvision/requirements.in torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in -torchx==0.5.0 +torchx==0.6.0 # via -r benchmarks/dlrm/requirements.in tqdm==4.66.1 # via - # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/dlrm/requirements.in # -r benchmarks/torchvision/requirements.in # datasets # deepspeed @@ -342,7 +338,7 @@ tqdm==4.66.1 # pytorch-lightning # torchrec # transformers -transformers==4.34.0 +transformers==4.34.1 # via # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/huggingface/requirements.in @@ -352,7 +348,6 @@ typing-extensions==4.8.0 # lightning-utilities # onnx # pydantic - # pydantic-core # pyre-extensions # pytorch-lightning # reactivex @@ -362,7 +357,7 @@ typing-inspect==0.9.0 # via pyre-extensions tzdata==2023.3 # via pandas -urllib3==1.26.17 +urllib3==1.26.18 # via # docker # requests diff --git a/benchmarks/accelerate_opt/requirements.cuda.txt b/benchmarks/accelerate_opt/requirements.cuda.txt index 6eade8dd1..0886bde19 100644 --- a/benchmarks/accelerate_opt/requirements.cuda.txt +++ b/benchmarks/accelerate_opt/requirements.cuda.txt @@ -37,7 +37,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp @@ -46,11 +46,11 @@ codefind==0.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -datasets==2.14.5 +datasets==2.14.6 # via # -r benchmarks/accelerate_opt/requirements.in # evaluate -deepspeed==0.8.3 +deepspeed==0.11.1 # via -r benchmarks/accelerate_opt/requirements.in dill==0.3.7 # via @@ -76,7 +76,7 @@ frozenlist==1.4.0 # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.6.0 +fsspec[http]==2023.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -313,7 +313,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/accelerate_opt/requirements.in xxhash==3.4.1 # via diff --git a/benchmarks/accelerate_opt/requirements.rocm.txt b/benchmarks/accelerate_opt/requirements.rocm.txt index 2bbbb84bb..64f34b7b1 100644 --- a/benchmarks/accelerate_opt/requirements.rocm.txt +++ b/benchmarks/accelerate_opt/requirements.rocm.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/accelerate_opt/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-opt.txt benchmarks/accelerate_opt/requirements.in @@ -17,10 +17,6 @@ aiosignal==1.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp -annotated-types==0.6.0 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # 
pydantic antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -41,7 +37,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp @@ -54,11 +50,11 @@ codefind==0.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera -datasets==2.14.5 +datasets==2.14.6 # via # -r benchmarks/accelerate_opt/requirements.in # evaluate -deepspeed==0.8.3 +deepspeed==0.11.1 # via -r benchmarks/accelerate_opt/requirements.in dill==0.3.7 # via @@ -84,7 +80,7 @@ frozenlist==1.4.0 # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.6.0 +fsspec[http]==2023.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets @@ -117,7 +113,7 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==17.0.2 +lit==17.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -147,7 +143,7 @@ multiprocess==0.70.15 # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # evaluate -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -209,14 +205,10 @@ pyarrow==13.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets -pydantic==2.4.2 +pydantic==1.10.13 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -pydantic-core==2.10.1 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pydantic pygments==2.16.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -308,21 +300,20 @@ tqdm==4.66.1 # evaluate # huggingface-hub # transformers -transformers==4.34.0 +transformers==4.34.1 # via -r benchmarks/accelerate_opt/requirements.in typing-extensions==4.8.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # pydantic - # pydantic-core # reactivex # torch tzdata==2023.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pandas -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -331,7 +322,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/accelerate_opt/requirements.in xxhash==3.4.1 # via diff --git a/benchmarks/dlrm/requirements.cuda.txt b/benchmarks/dlrm/requirements.cuda.txt index 823545753..def37f9e5 100644 --- a/benchmarks/dlrm/requirements.cuda.txt +++ b/benchmarks/dlrm/requirements.cuda.txt @@ -26,7 +26,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -38,7 +38,7 @@ docker==6.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchx -docstring-parser==0.8.1 +docstring-parser==0.15 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchx @@ -56,7 +56,7 @@ filelock==3.12.4 # torch # torchx # triton -fsspec==2023.6.0 +fsspec==2023.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -225,7 +225,7 @@ rsa==4.9 # via # -c .pin/../.pin/constraints-cuda-torch.txt # google-auth -scikit-learn==1.3.1 +scikit-learn==1.3.2 # via -r benchmarks/dlrm/requirements.in scipy==1.11.3 # via @@ -267,7 +267,7 @@ torchrec==0.5.0+cu118 # via -r benchmarks/dlrm/requirements.in torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in -torchx==0.5.0 +torchx==0.6.0 # via -r benchmarks/dlrm/requirements.in tqdm==4.66.1 # via @@ -300,7 +300,7 @@ varname==0.10.0 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/dlrm/requirements.in websocket-client==1.6.4 # via diff --git a/benchmarks/dlrm/requirements.rocm.txt b/benchmarks/dlrm/requirements.rocm.txt index cadc67bb7..1249e82b2 100644 --- a/benchmarks/dlrm/requirements.rocm.txt +++ b/benchmarks/dlrm/requirements.rocm.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/dlrm/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-dlrm.txt benchmarks/dlrm/requirements.in @@ -26,7 +26,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -42,7 +42,7 @@ docker==6.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchx -docstring-parser==0.8.1 +docstring-parser==0.15 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchx @@ -60,7 +60,7 @@ filelock==3.12.4 # pytorch-triton-rocm # torch # torchx -fsspec==2023.6.0 +fsspec==2023.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -77,7 +77,7 @@ google-auth==2.23.3 # -c .pin/../.pin/constraints-rocm-torch.txt # google-auth-oauthlib # tensorboard -google-auth-oauthlib==1.0.0 +google-auth-oauthlib==1.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # tensorboard @@ -96,7 +96,6 @@ idna==3.4 importlib-metadata==6.8.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # markdown # torchx jinja2==3.1.2 # via @@ -110,7 +109,7 @@ lightning-utilities==0.9.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchmetrics -lit==17.0.2 +lit==17.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -139,7 +138,7 @@ mypy-extensions==1.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # typing-inspect -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -172,7 +171,7 @@ packaging==23.2 # docker # lightning-utilities # torchmetrics -protobuf==4.24.4 +protobuf==4.23.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # onnx @@ -239,7 +238,7 @@ rsa==4.9 # via # -c .pin/../.pin/constraints-rocm-torch.txt # google-auth -scikit-learn==1.3.1 +scikit-learn==1.3.2 # via -r benchmarks/dlrm/requirements.in scipy==1.11.3 # via @@ -258,7 +257,7 @@ tabulate==0.9.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchx -tensorboard==2.14.1 +tensorboard==2.15.0 # via -r benchmarks/dlrm/requirements.in tensorboard-data-server==0.7.1 # via @@ -282,7 +281,7 @@ torchrec==0.5.0 # via -r benchmarks/dlrm/requirements.in torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in -torchx==0.5.0 +torchx==0.6.0 # via -r benchmarks/dlrm/requirements.in tqdm==4.66.1 # via @@ -301,7 +300,7 @@ typing-inspect==0.9.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pyre-extensions -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-rocm-torch.txt # docker @@ -311,7 +310,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/dlrm/requirements.in websocket-client==1.6.4 # via diff --git a/benchmarks/huggingface/requirements.cuda.txt b/benchmarks/huggingface/requirements.cuda.txt index ae10bc7c7..1e2890b37 100644 --- 
a/benchmarks/huggingface/requirements.cuda.txt +++ b/benchmarks/huggingface/requirements.cuda.txt @@ -18,7 +18,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -37,7 +37,7 @@ filelock==3.12.4 # torch # transformers # triton -fsspec==2023.6.0 +fsspec==2023.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub @@ -175,5 +175,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/huggingface/requirements.in diff --git a/benchmarks/huggingface/requirements.rocm.txt b/benchmarks/huggingface/requirements.rocm.txt index efeb3285d..8efd0dc20 100644 --- a/benchmarks/huggingface/requirements.rocm.txt +++ b/benchmarks/huggingface/requirements.rocm.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/huggingface/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-hf.txt benchmarks/huggingface/requirements.in @@ -18,7 +18,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -41,7 +41,7 @@ filelock==3.12.4 # pytorch-triton-rocm # torch # transformers -fsspec==2023.6.0 +fsspec==2023.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub @@ -64,7 +64,7 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==17.0.2 +lit==17.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -84,7 +84,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # sympy -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -169,7 +169,7 @@ tqdm==4.66.1 # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # transformers -transformers==4.34.0 +transformers==4.34.1 # via -r benchmarks/huggingface/requirements.in typing-extensions==4.8.0 # via @@ -177,7 +177,7 @@ typing-extensions==4.8.0 # huggingface-hub # reactivex # torch -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -185,5 +185,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/huggingface/requirements.in diff --git a/benchmarks/rwkv/requirements.cuda.txt b/benchmarks/rwkv/requirements.cuda.txt index 0fec95e69..a0820efd3 100644 --- a/benchmarks/rwkv/requirements.cuda.txt +++ b/benchmarks/rwkv/requirements.cuda.txt @@ -34,7 +34,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp @@ -43,7 +43,7 @@ codefind==0.1.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -deepspeed==0.8.3 +deepspeed==0.11.1 # via -r benchmarks/rwkv/requirements.in executing==1.2.0 # via @@ -59,7 +59,7 @@ frozenlist==1.4.0 # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.6.0 +fsspec[http]==2023.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pytorch-lightning @@ -223,7 +223,7 @@ 
varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/rwkv/requirements.in yarl==1.9.2 # via diff --git a/benchmarks/rwkv/requirements.rocm.txt b/benchmarks/rwkv/requirements.rocm.txt index a649f17ee..35dd8ff95 100644 --- a/benchmarks/rwkv/requirements.rocm.txt +++ b/benchmarks/rwkv/requirements.rocm.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/rwkv/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-rwkv.txt benchmarks/rwkv/requirements.in @@ -14,10 +14,6 @@ aiosignal==1.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp -annotated-types==0.6.0 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pydantic antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -38,7 +34,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp @@ -51,7 +47,7 @@ codefind==0.1.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera -deepspeed==0.8.3 +deepspeed==0.11.1 # via -r benchmarks/rwkv/requirements.in executing==1.2.0 # via @@ -67,7 +63,7 @@ frozenlist==1.4.0 # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.6.0 +fsspec[http]==2023.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-lightning @@ -95,7 +91,7 @@ lightning-utilities==0.9.0 # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-lightning # torchmetrics -lit==17.0.2 +lit==17.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -120,7 +116,7 @@ multidict==6.0.4 # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # yarl -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -161,14 +157,10 @@ py-cpuinfo==9.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -pydantic==2.4.2 +pydantic==1.10.13 # via - # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/rwkv/requirements.in # deepspeed -pydantic-core==2.10.1 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pydantic pygments==2.16.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -229,11 +221,10 @@ typing-extensions==4.8.0 # -c .pin/../.pin/constraints-rocm-torch.txt # lightning-utilities # pydantic - # pydantic-core # pytorch-lightning # reactivex # torch -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -241,7 +232,7 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/rwkv/requirements.in yarl==1.9.2 # via diff --git a/benchmarks/stargan/requirements.cuda.txt b/benchmarks/stargan/requirements.cuda.txt index da7691003..b66e81e63 100644 --- a/benchmarks/stargan/requirements.cuda.txt +++ b/benchmarks/stargan/requirements.cuda.txt @@ -18,7 +18,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -35,7 +35,7 @@ filelock==3.12.4 # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton -fsspec==2023.6.0 +fsspec==2023.1.0 # via 
# -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -147,5 +147,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/stargan/requirements.in diff --git a/benchmarks/stargan/requirements.rocm.txt b/benchmarks/stargan/requirements.rocm.txt index a1a08269f..b4eb2f7e1 100644 --- a/benchmarks/stargan/requirements.rocm.txt +++ b/benchmarks/stargan/requirements.rocm.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/stargan/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-stargan.txt benchmarks/stargan/requirements.in @@ -18,7 +18,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -39,7 +39,7 @@ filelock==3.12.4 # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch -fsspec==2023.6.0 +fsspec==2023.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -56,7 +56,7 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==17.0.2 +lit==17.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -76,7 +76,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # sympy -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -148,7 +148,7 @@ typing-extensions==4.8.0 # -c .pin/../.pin/constraints-rocm-torch.txt # reactivex # torch -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -156,5 +156,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/stargan/requirements.in diff --git a/benchmarks/super-slomo/requirements.cuda.txt b/benchmarks/super-slomo/requirements.cuda.txt index 2012a7cf4..a97179600 100644 --- a/benchmarks/super-slomo/requirements.cuda.txt +++ b/benchmarks/super-slomo/requirements.cuda.txt @@ -18,7 +18,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -35,7 +35,7 @@ filelock==3.12.4 # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton -fsspec==2023.6.0 +fsspec==2023.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -152,5 +152,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/super-slomo/requirements.in diff --git a/benchmarks/super-slomo/requirements.rocm.txt b/benchmarks/super-slomo/requirements.rocm.txt index 26f26a039..b96de3447 100644 --- a/benchmarks/super-slomo/requirements.rocm.txt +++ b/benchmarks/super-slomo/requirements.rocm.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/super-slomo/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-super-slomo.txt benchmarks/super-slomo/requirements.in @@ -18,7 +18,7 @@ certifi==2023.7.22 # via 
# -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -39,7 +39,7 @@ filelock==3.12.4 # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch -fsspec==2023.6.0 +fsspec==2023.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -56,7 +56,7 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==17.0.2 +lit==17.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -76,7 +76,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # sympy -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -153,7 +153,7 @@ typing-extensions==4.8.0 # -c .pin/../.pin/constraints-rocm-torch.txt # reactivex # torch -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -161,5 +161,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/super-slomo/requirements.in diff --git a/benchmarks/timm/requirements.cuda.txt b/benchmarks/timm/requirements.cuda.txt index 865830349..9d276bd86 100644 --- a/benchmarks/timm/requirements.cuda.txt +++ b/benchmarks/timm/requirements.cuda.txt @@ -18,7 +18,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -36,7 +36,7 @@ filelock==3.12.4 # huggingface-hub # torch # triton -fsspec==2023.6.0 +fsspec==2023.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub @@ -164,5 +164,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/timm/requirements.in diff --git a/benchmarks/timm/requirements.rocm.txt b/benchmarks/timm/requirements.rocm.txt index f9db05821..fb3c00832 100644 --- a/benchmarks/timm/requirements.rocm.txt +++ b/benchmarks/timm/requirements.rocm.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/timm/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-timm.txt benchmarks/timm/requirements.in @@ -18,7 +18,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -40,7 +40,7 @@ filelock==3.12.4 # huggingface-hub # pytorch-triton-rocm # torch -fsspec==2023.6.0 +fsspec==2023.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub @@ -60,7 +60,7 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==17.0.2 +lit==17.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -80,7 +80,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # sympy -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -165,7 +165,7 @@ typing-extensions==4.8.0 # huggingface-hub # reactivex # torch -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -173,5 +173,5 @@ varname==0.10.0 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/timm/requirements.in diff --git a/benchmarks/torchvision/requirements.cuda.txt b/benchmarks/torchvision/requirements.cuda.txt index 9c7f06d04..b2770329c 100644 --- a/benchmarks/torchvision/requirements.cuda.txt +++ b/benchmarks/torchvision/requirements.cuda.txt @@ -18,7 +18,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -35,7 +35,7 @@ filelock==3.12.4 # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton -fsspec==2023.6.0 +fsspec==2023.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -149,5 +149,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/torchvision/requirements.in diff --git a/benchmarks/torchvision/requirements.rocm.txt b/benchmarks/torchvision/requirements.rocm.txt index 8026040ea..03376298c 100644 --- a/benchmarks/torchvision/requirements.rocm.txt +++ b/benchmarks/torchvision/requirements.rocm.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --config=pyproject.toml --output-file=benchmarks/torchvision/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-torchvision.txt benchmarks/torchvision/requirements.in @@ -18,7 +18,7 @@ certifi==2023.7.22 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -charset-normalizer==3.3.0 +charset-normalizer==3.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -39,7 +39,7 @@ filelock==3.12.4 # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch -fsspec==2023.6.0 +fsspec==2023.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -56,7 +56,7 @@ jinja2==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==17.0.2 +lit==17.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm @@ -76,7 +76,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # sympy -networkx==3.1 +networkx==3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -150,7 +150,7 @@ typing-extensions==4.8.0 # -c .pin/../.pin/constraints-rocm-torch.txt # reactivex # torch -urllib3==1.26.17 +urllib3==1.26.18 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -158,5 +158,5 @@ varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -voir @ git+https://github.com/breuleux/voir.git +voir==0.2.10 # via -r benchmarks/torchvision/requirements.in diff --git a/constraints/cuda.txt b/constraints/cuda.txt index 68242da32..2f4bcb7f8 100644 --- a/constraints/cuda.txt +++ b/constraints/cuda.txt @@ -1,2 +1 @@ ---extra-index-url https://download.pytorch.org/whl/cu118 -deepspeed==0.8.3 +--extra-index-url https://download.pytorch.org/whl/cu118 \ No newline at end of file diff --git a/constraints/rocm.txt b/constraints/rocm.txt index dd6ef8a59..f76663f04 100644 --- a/constraints/rocm.txt +++ b/constraints/rocm.txt @@ -1,2 +1 @@ ---extra-index-url https://download.pytorch.org/whl/rocm5.6/ -deepspeed==0.8.3 +--extra-index-url https://download.pytorch.org/whl/rocm5.6/ \ No newline at end of file diff --git a/milabench/scripts/milabench_pin.bash 
b/milabench/scripts/milabench_pin.bash index 694933bff..c2f7ad399 100644 --- a/milabench/scripts/milabench_pin.bash +++ b/milabench/scripts/milabench_pin.bash @@ -5,8 +5,8 @@ -MILABENCH_GPU_ARCH=cuda milabench pin -c constraints/cuda.txt --config config/standard.yaml -MILABENCH_GPU_ARCH=rocm milabench pin -c constraints/rocm.txt --config config/standard.yaml +MILABENCH_GPU_ARCH=cuda milabench pin --config config/standard.yaml --from-scratch --base /tmp +MILABENCH_GPU_ARCH=rocm milabench pin --config config/standard.yaml --from-scratch --base /tmp cd $SLURM_TMPDIR/milabench diff --git a/poetry.lock b/poetry.lock index 6c0c15f1d..942bff025 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,10 +1,9 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. [[package]] name = "alabaster" version = "0.7.13" description = "A configurable sidebar-enabled Sphinx theme" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -16,7 +15,6 @@ files = [ name = "ansicon" version = "1.89.0" description = "Python wrapper for loading Jason Hood's ANSICON" -category = "main" optional = false python-versions = "*" files = [ @@ -28,7 +26,6 @@ files = [ name = "antlr4-python3-runtime" version = "4.9.3" description = "ANTLR 4.9.3 runtime for Python 3.7" -category = "main" optional = false python-versions = "*" files = [ @@ -39,7 +36,6 @@ files = [ name = "argcomplete" version = "1.12.3" description = "Bash tab completion for argparse" -category = "main" optional = false python-versions = "*" files = [ @@ -54,7 +50,6 @@ test = ["coverage", "flake8", "pexpect", "wheel"] name = "asttokens" version = "2.2.1" description = "Annotate AST trees with source code positions" -category = "main" optional = false python-versions = "*" files = [ @@ -72,7 +67,6 @@ test = ["astroid", "pytest"] name = "atomicwrites" version = "1.4.1" description = "Atomic file writes." -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -83,7 +77,6 @@ files = [ name = "attrs" version = "23.1.0" description = "Classes Without Boilerplate" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -102,7 +95,6 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte name = "babel" version = "2.12.1" description = "Internationalization utilities" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -117,7 +109,6 @@ pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} name = "black" version = "23.3.0" description = "The uncompromising code formatter." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -167,7 +158,6 @@ uvloop = ["uvloop (>=0.15.2)"] name = "blessed" version = "1.20.0" description = "Easy, practical library for making terminal apps, by providing an elegant, well-documented interface to Colors, Keyboard input, and screen Positioning capabilities." -category = "main" optional = false python-versions = ">=2.7" files = [ @@ -184,7 +174,6 @@ wcwidth = ">=0.1.4" name = "build" version = "0.10.0" description = "A simple, correct Python build frontend" -category = "main" optional = false python-versions = ">= 3.7" files = [ @@ -208,7 +197,6 @@ virtualenv = ["virtualenv (>=20.0.35)"] name = "certifi" version = "2023.5.7" description = "Python package for providing Mozilla's CA Bundle." 
-category = "main" optional = false python-versions = ">=3.6" files = [ @@ -220,7 +208,6 @@ files = [ name = "charset-normalizer" version = "3.1.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -305,7 +292,6 @@ files = [ name = "click" version = "8.1.3" description = "Composable command line interface toolkit" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -320,7 +306,6 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} name = "codefind" version = "0.1.3" description = "Find code objects and their referents" -category = "main" optional = false python-versions = ">=3.8,<4.0" files = [ @@ -332,7 +317,6 @@ files = [ name = "coleo" version = "0.3.2" description = "The nicest way to develop a command-line interface" -category = "main" optional = false python-versions = ">=3.7,<4.0" files = [ @@ -351,7 +335,6 @@ yaml = ["pyyaml (>=5.3,<6.0)"] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -363,7 +346,6 @@ files = [ name = "colorlog" version = "6.7.0" description = "Add colours to the output of Python's logging module." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -381,7 +363,6 @@ development = ["black", "flake8", "mypy", "pytest", "types-colorama"] name = "coverage" version = "7.2.7" description = "Code coverage measurement for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -457,7 +438,6 @@ toml = ["tomli"] name = "cp-template" version = "0.3.0" description = "A tool to copy templated directories" -category = "main" optional = false python-versions = ">=3.7,<4.0" files = [ @@ -473,7 +453,6 @@ pystache = ">=0.6.0,<0.7.0" name = "distlib" version = "0.3.6" description = "Distribution utilities" -category = "main" optional = false python-versions = "*" files = [ @@ -485,7 +464,6 @@ files = [ name = "dnspython" version = "2.3.0" description = "DNS toolkit" -category = "main" optional = false python-versions = ">=3.7,<4.0" files = [ @@ -506,7 +484,6 @@ wmi = ["wmi (>=1.5.1,<2.0.0)"] name = "docutils" version = "0.17.1" description = "Docutils -- Python Documentation Utilities" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -518,7 +495,6 @@ files = [ name = "executing" version = "1.2.0" description = "Get the currently executing AST node of a frame, and other information" -category = "main" optional = false python-versions = "*" files = [ @@ -533,7 +509,6 @@ tests = ["asttokens", "littleutils", "pytest", "rich"] name = "filelock" version = "3.12.1" description = "A platform independent file lock." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -549,7 +524,6 @@ testing = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "diff-cover (>=7.5)", "p name = "flake8" version = "4.0.1" description = "the modular source code checker: pep8 pyflakes and co" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -566,7 +540,6 @@ pyflakes = ">=2.4.0,<2.5.0" name = "gitdb" version = "4.0.10" description = "Git Object Database" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -581,7 +554,6 @@ smmap = ">=3.0.1,<6" name = "gitpython" version = "3.1.31" description = "GitPython is a Python library used to interact with Git repositories" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -596,7 +568,6 @@ gitdb = ">=4.0.1,<5" name = "giving" version = "0.4.2" description = "Reactive logging" -category = "main" optional = false python-versions = ">=3.7,<4.0" files = [ @@ -613,7 +584,6 @@ varname = ">=0.10.0,<0.11.0" name = "greenlet" version = "2.0.2" description = "Lightweight in-process concurrent programming" -category = "main" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" files = [ @@ -622,6 +592,7 @@ files = [ {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d967650d3f56af314b72df7089d96cda1083a7fc2da05b375d2bc48c82ab3f3c"}, {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, @@ -630,6 +601,7 @@ files = [ {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d4606a527e30548153be1a9f155f4e283d109ffba663a15856089fb55f933e47"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, @@ -659,6 +631,7 @@ files = [ {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = 
"sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, + {file = "greenlet-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1087300cf9700bbf455b1b97e24db18f2f77b55302a68272c56209d5587c12d1"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, @@ -667,6 +640,7 @@ files = [ {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8512a0c38cfd4e66a858ddd1b17705587900dd760c6003998e9472b77b56d417"}, {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, @@ -687,7 +661,6 @@ test = ["objgraph", "psutil"] name = "hrepr" version = "0.4.1" description = "Extensible HTML representation for Python objects." 
-category = "main" optional = false python-versions = ">=3.6,<4.0" files = [ @@ -702,7 +675,6 @@ ovld = ">=0.3.2,<0.4.0" name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -714,7 +686,6 @@ files = [ name = "imagesize" version = "1.4.1" description = "Getting image size from png/jpeg/jpeg2000/gif file" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -726,7 +697,6 @@ files = [ name = "importlib-metadata" version = "6.6.0" description = "Read metadata from Python packages" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -742,11 +712,28 @@ docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker perf = ["ipython"] testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] +[[package]] +name = "importlib-resources" +version = "6.1.0" +description = "Read resources from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_resources-6.1.0-py3-none-any.whl", hash = "sha256:aa50258bbfa56d4e33fbd8aa3ef48ded10d1735f11532b8df95388cc6bdb7e83"}, + {file = "importlib_resources-6.1.0.tar.gz", hash = "sha256:9d48dcccc213325e810fd723e7fbb45ccb39f6cf5c31f00cf2b965f5f10f3cb9"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff", "zipp (>=3.17)"] + [[package]] name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -758,7 +745,6 @@ files = [ name = "isort" version = "5.12.0" description = "A Python utility / library to sort Python imports." -category = "dev" optional = false python-versions = ">=3.8.0" files = [ @@ -776,7 +762,6 @@ requirements-deprecated-finder = ["pip-api", "pipreqs"] name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -794,7 +779,6 @@ i18n = ["Babel (>=2.7)"] name = "jinxed" version = "1.2.0" description = "Jinxed Terminal Library" -category = "main" optional = false python-versions = "*" files = [ @@ -809,7 +793,6 @@ ansicon = {version = "*", markers = "platform_system == \"Windows\""} name = "markdown-it-py" version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -834,7 +817,6 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] name = "markupsafe" version = "2.1.3" description = "Safely add untrusted strings to HTML/XML markup." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -858,6 +840,16 @@ files = [ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, @@ -894,7 +886,6 @@ files = [ name = "mccabe" version = "0.6.1" description = "McCabe checker, plugin for flake8" -category = "dev" optional = false python-versions = "*" files = [ @@ -906,7 +897,6 @@ files = [ name = "mdurl" version = "0.1.2" description = "Markdown URL utilities" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -918,7 +908,6 @@ files = [ name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -930,7 +919,6 @@ files = [ name = "nox" version = "2021.10.1" description = "Flexible test automation." 
-category = "main" optional = false python-versions = ">=3.6" files = [ @@ -952,7 +940,6 @@ tox-to-nox = ["jinja2", "tox"] name = "numpy" version = "1.24.3" description = "Fundamental package for array computing in Python" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -990,7 +977,6 @@ files = [ name = "omegaconf" version = "2.3.0" description = "A flexible configuration library" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -999,14 +985,13 @@ files = [ ] [package.dependencies] -antlr4-python3-runtime = ">=4.9.0,<4.10.0" +antlr4-python3-runtime = "==4.9.*" PyYAML = ">=5.1.0" [[package]] name = "ovld" version = "0.3.2" description = "Overloading Python functions" -category = "main" optional = false python-versions = ">=3.6,<4.0" files = [ @@ -1018,7 +1003,6 @@ files = [ name = "packaging" version = "23.1" description = "Core utilities for Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1030,7 +1014,6 @@ files = [ name = "pandas" version = "1.5.3" description = "Powerful data structures for data analysis, time series, and statistics" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1066,7 +1049,8 @@ files = [ [package.dependencies] numpy = [ {version = ">=1.20.3", markers = "python_version < \"3.10\""}, - {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, ] python-dateutil = ">=2.8.1" pytz = ">=2020.1" @@ -1078,7 +1062,6 @@ test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"] name = "pathspec" version = "0.9.0" description = "Utility library for gitignore style pattern matching of file paths." -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ @@ -1090,7 +1073,6 @@ files = [ name = "pip" version = "23.1.2" description = "The PyPA recommended tool for installing Python packages." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1102,7 +1084,6 @@ files = [ name = "pip-tools" version = "6.13.0" description = "pip-tools keeps your pinned dependencies fresh." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1125,7 +1106,6 @@ testing = ["flit-core (>=2,<4)", "poetry-core (>=1.0.0)", "pytest (>=7.2.0)", "p name = "platformdirs" version = "3.5.3" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1141,7 +1121,6 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.3.1)", "pytest- name = "pluggy" version = "1.0.0" description = "plugin and hook calling mechanisms for python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1157,7 +1136,6 @@ testing = ["pytest", "pytest-benchmark"] name = "psutil" version = "5.9.5" description = "Cross-platform lib for process and system monitoring in Python." 
-category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -1184,7 +1162,6 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] name = "psycopg2-binary" version = "2.9.6" description = "psycopg2 - Python-PostgreSQL Database Adapter" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -1256,7 +1233,6 @@ files = [ name = "ptera" version = "1.4.1" description = "Call graph addressing library." -category = "main" optional = false python-versions = ">=3.7,<4.0" files = [ @@ -1272,7 +1248,6 @@ giving = ">=0.4.1,<0.5.0" name = "py" version = "1.11.0" description = "library with cross-python path, ini-parsing, io, code, log facilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -1284,7 +1259,6 @@ files = [ name = "py-cpuinfo" version = "9.0.0" description = "Get CPU info with pure Python" -category = "main" optional = false python-versions = "*" files = [ @@ -1296,7 +1270,6 @@ files = [ name = "pycodestyle" version = "2.8.0" description = "Python style guide checker" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -1308,7 +1281,6 @@ files = [ name = "pyflakes" version = "2.4.0" description = "passive checker of Python programs" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -1320,7 +1292,6 @@ files = [ name = "pygments" version = "2.15.1" description = "Pygments is a syntax highlighting package written in Python." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1335,7 +1306,6 @@ plugins = ["importlib-metadata"] name = "pymongo" version = "4.3.3" description = "Python driver for MongoDB " -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1430,7 +1400,6 @@ zstd = ["zstandard"] name = "pynvml" version = "11.5.0" description = "Python Bindings for the NVIDIA Management Library" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1442,7 +1411,6 @@ files = [ name = "pyproject-hooks" version = "1.0.0" description = "Wrappers to call pyproject.toml-based build backend hooks." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1457,7 +1425,6 @@ tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} name = "pystache" version = "0.6.0" description = "Mustache for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1472,7 +1439,6 @@ test = ["nose"] name = "pytest" version = "6.2.5" description = "pytest: simple powerful testing with Python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1497,7 +1463,6 @@ testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xm name = "pytest-cov" version = "3.0.0" description = "Pytest plugin for measuring coverage." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1516,7 +1481,6 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtuale name = "pytest-datadir" version = "1.4.1" description = "pytest plugin for test data directories and files" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1531,7 +1495,6 @@ pytest = ">=5.0" name = "pytest-regressions" version = "2.4.2" description = "Easy to use fixtures to write regression tests." 
-category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1554,7 +1517,6 @@ num = ["numpy", "pandas"] name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -1569,7 +1531,6 @@ six = ">=1.5" name = "pytz" version = "2023.3" description = "World timezone definitions, modern and historical" -category = "main" optional = false python-versions = "*" files = [ @@ -1581,7 +1542,6 @@ files = [ name = "pyyaml" version = "6.0" description = "YAML parser and emitter for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1631,7 +1591,6 @@ files = [ name = "reactivex" version = "4.0.4" description = "ReactiveX (Rx) for Python" -category = "main" optional = false python-versions = ">=3.7,<4.0" files = [ @@ -1646,7 +1605,6 @@ typing-extensions = ">=4.1.1,<5.0.0" name = "requests" version = "2.31.0" description = "Python HTTP for Humans." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1668,7 +1626,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "rich" version = "13.4.2" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -1688,7 +1645,6 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] name = "setuptools" version = "67.8.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1705,7 +1661,6 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs ( name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -1717,7 +1672,6 @@ files = [ name = "smmap" version = "5.0.0" description = "A pure Python implementation of a sliding window memory map manager" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1729,7 +1683,6 @@ files = [ name = "snowballstemmer" version = "2.2.0" description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." -category = "dev" optional = false python-versions = "*" files = [ @@ -1741,7 +1694,6 @@ files = [ name = "sphinx" version = "4.5.0" description = "Python documentation generator" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1777,7 +1729,6 @@ test = ["cython", "html5lib", "pytest", "pytest-cov", "typed-ast"] name = "sphinx-rtd-theme" version = "1.2.2" description = "Read the Docs theme for Sphinx" -category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ @@ -1797,7 +1748,6 @@ dev = ["bump2version", "sphinxcontrib-httpdomain", "transifex-client", "wheel"] name = "sphinxcontrib-applehelp" version = "1.0.4" description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1813,7 +1763,6 @@ test = ["pytest"] name = "sphinxcontrib-devhelp" version = "1.0.2" description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." 
-category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1829,7 +1778,6 @@ test = ["pytest"] name = "sphinxcontrib-htmlhelp" version = "2.0.1" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1845,7 +1793,6 @@ test = ["html5lib", "pytest"] name = "sphinxcontrib-jquery" version = "4.1" description = "Extension to include jQuery on newer Sphinx releases" -category = "dev" optional = false python-versions = ">=2.7" files = [ @@ -1860,7 +1807,6 @@ Sphinx = ">=1.8" name = "sphinxcontrib-jsmath" version = "1.0.1" description = "A sphinx extension which renders display math in HTML via JavaScript" -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1875,7 +1821,6 @@ test = ["flake8", "mypy", "pytest"] name = "sphinxcontrib-qthelp" version = "1.0.3" description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1891,7 +1836,6 @@ test = ["pytest"] name = "sphinxcontrib-serializinghtml" version = "1.1.5" description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1907,7 +1851,6 @@ test = ["pytest"] name = "sqlalchemy" version = "2.0.16" description = "Database Abstraction Library" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1986,7 +1929,6 @@ sqlcipher = ["sqlcipher3-binary"] name = "toml" version = "0.10.2" description = "Python Library for Tom's Obvious, Minimal Language" -category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -1998,7 +1940,6 @@ files = [ name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2010,7 +1951,6 @@ files = [ name = "tqdm" version = "4.65.0" description = "Fast, Extensible Progress Meter" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2031,7 +1971,6 @@ telegram = ["requests"] name = "typing-extensions" version = "4.6.3" description = "Backported and Experimental Type Hints for Python 3.7+" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2043,7 +1982,6 @@ files = [ name = "urllib3" version = "2.0.3" description = "HTTP library with thread-safe connection pooling, file post, and more." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2061,7 +1999,6 @@ zstd = ["zstandard (>=0.18.0)"] name = "varname" version = "0.10.0" description = "Dark magics about variable names in python." 
-category = "main" optional = false python-versions = ">=3.6,<4.0" files = [ @@ -2079,7 +2016,6 @@ all = ["asttokens (>=2.0.0,<3.0.0)", "pure_eval (<1.0.0)"] name = "virtualenv" version = "20.23.0" description = "Virtual Python Environment builder" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2100,7 +2036,6 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.3)", "coverage-enable-subprocess name = "voir" version = "0.2.10" description = "Instrument, extend and visualize your programs" -category = "main" optional = false python-versions = ">=3.7,<4.0" files = [ @@ -2120,7 +2055,6 @@ rich = ">=13.3.2,<14.0.0" name = "wcwidth" version = "0.2.6" description = "Measures the displayed width of unicode strings in a terminal" -category = "main" optional = false python-versions = "*" files = [ @@ -2132,7 +2066,6 @@ files = [ name = "wheel" version = "0.40.0" description = "A built-package format for Python" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2147,7 +2080,6 @@ test = ["pytest (>=6.0.0)"] name = "zipp" version = "3.15.0" description = "Backport of pathlib-compatible object wrapper for zip files" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2161,5 +2093,5 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more [metadata] lock-version = "2.0" -python-versions = ">=3.8,<3.11" -content-hash = "2852dd9dc4b604714a06f452ee27c295e946ad86b9955ac133da9ca3b92ad1f7" +python-versions = ">=3.8,<4.0" +content-hash = "0407b1f9e231b83ca25d848e4c21033a7016d5825c31a86ce075479b4b419fa8"